Commit 5938b227 authored by David S. Miller
Browse files

Merge branch 'z85230-cleanups'



Peng Li says:

====================
net: z85230: clean up some code style issues

This patchset cleans up some code style issues.

---
Change Log:
V1 -> V2:
1. Address the review comments from Andrew: add a commit message to
   [patch 04/11] explaining the removal of volatile.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0dca2c74 2b28b711
Loading
Loading
Loading
Loading
+422 −571
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
/*	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *	(c) Copyright 2000, 2001 Red Hat Inc
 *
 *	Development of this driver was funded by Equiinet Ltd
@@ -55,7 +53,6 @@

#include "z85230.h"


/**
 *	z8530_read_port - Architecture specific interface function
 *	@p: port to read
@@ -75,6 +72,7 @@
static inline int z8530_read_port(unsigned long p)
{
	u8 r = inb(Z8530_PORT_OF(p));

	if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
		udelay(5);
	return r;
@@ -95,7 +93,6 @@ static inline int z8530_read_port(unsigned long p)
 *	dread 5uS sanity delay.
 */


static inline void z8530_write_port(unsigned long p, u8 d)
{
	outb(d, Z8530_PORT_OF(p));
@@ -103,12 +100,9 @@ static inline void z8530_write_port(unsigned long p, u8 d)
		udelay(5);
}



static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);


/**
 *	read_zsreg - Read a register from a Z85230
 *	@c: Z8530 channel to read from (2 per chip)
@@ -138,6 +132,7 @@ static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
static inline u8 read_zsdata(struct z8530_channel *c)
{
	/* Fetch one byte from this channel's data register */
	return z8530_read_port(c->dataio);
}
@@ -159,7 +154,6 @@ static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
	if (reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);

}

/**
@@ -182,36 +176,27 @@ static inline void write_zsctrl(struct z8530_channel *c, u8 val)
 *
 *	Write directly to the data register on the Z8530
 */


static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	/* The data register is directly addressable - no register
	 * pointer write is needed first (unlike control registers).
	 */
	z8530_write_port(c->dataio, val);
}

/*
 *	Register loading parameters for a dead port
/*	Register loading parameters for a dead port
 */

/* A table containing only the 255 terminator: loading it programs no
 * registers at all, leaving the port dead (255 ends a register table -
 * see the z8530_channel_load() loop).
 */
u8 z8530_dead_port[] = {
	255
};

EXPORT_SYMBOL(z8530_dead_port);

/*
 *	Register loading parameters for currently supported circuit types
/*	Register loading parameters for currently supported circuit types
 */


/*
 *	Data clocked by telco end. This is the correct data for the UK
/*	Data clocked by telco end. This is the correct data for the UK
 *	"kilostream" service, and most other similar services.
 */

u8 z8530_hdlc_kilostream[]=
{
u8 z8530_hdlc_kilostream[] = {
	4,	SYNC_ENAB | SDLC | X1CLK,
	2,	0,	/* No vector */
	1,	0,
@@ -228,15 +213,12 @@ u8 z8530_hdlc_kilostream[]=
	9,	NV | MIE | NORESET,
	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);

/*
 *	As above but for enhanced chips.
/*	As above but for enhanced chips.
 */

u8 z8530_hdlc_kilostream_85230[]=
{
u8 z8530_hdlc_kilostream_85230[] = {
	4,	SYNC_ENAB | SDLC | X1CLK,
	2,	0,	/* No vector */
	1,	0,
@@ -255,7 +237,6 @@ u8 z8530_hdlc_kilostream_85230[]=

	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);

/**
@@ -276,8 +257,7 @@ static void z8530_flush_fifo(struct z8530_channel *c)
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	if(c->dev->type==Z85230)
	{
	if (c->dev->type == Z85230) {
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
@@ -333,51 +313,39 @@ static void z8530_rx(struct z8530_channel *c)
{
	u8 ch, stat;

	while(1)
	{
	while (1) {
		/* FIFO empty ? */
		if (!(read_zsreg(c, R0) & 1))
			break;
		ch = read_zsdata(c);
		stat = read_zsreg(c, R1);

		/*
		 *	Overrun ?
		/*	Overrun ?
		 */
		if(c->count < c->max)
		{
		if (c->count < c->max) {
			*c->dptr++ = ch;
			c->count++;
		}

		if(stat&END_FR)
		{
		
			/*
			 *	Error ?
		if (stat & END_FR) {
			/*	Error ?
			 */
			if(stat&(Rx_OVR|CRC_ERR))
			{
			if (stat & (Rx_OVR | CRC_ERR)) {
				/* Rewind the buffer and return */
				if (c->skb)
					c->dptr = c->skb->data;
				c->count = 0;
				if(stat&Rx_OVR)
				{
				if (stat & Rx_OVR) {
					pr_warn("%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if(stat&CRC_ERR)
				{
				if (stat & CRC_ERR) {
					c->rx_crc_err++;
					/* printk("crc error\n"); */
				}
				/* Shove the frame upstream */
			}
			else
			{
				/*
				 *	Drop the lock for RX processing, or
			} else {
				/*	Drop the lock for RX processing, or
				 *	there are deadlocks
				 */
				z8530_rx_done(c);
@@ -385,14 +353,12 @@ static void z8530_rx(struct z8530_channel *c)
			}
		}
	}
	/*
	 *	Clear irq
	/*	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}


/**
 *	z8530_tx - Handle a PIO transmit event
 *	@c: Z8530 channel to process
@@ -410,22 +376,18 @@ static void z8530_tx(struct z8530_channel *c)
		if (!(read_zsreg(c, R0) & 4))
			return;
		c->txcount--;
		/*
		 *	Shovel out the byte
		/*	Shovel out the byte
		 */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* We are about to underflow */
		if(c->txcount==0)
		{
		if (c->txcount == 0) {
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
		}
	}

	
	/*
	 *	End of frame TX - fire another one
	/*	End of frame TX - fire another one
	 */

	write_zsctrl(c, RES_Tx_P);
@@ -460,8 +422,7 @@ static void z8530_status(struct z8530_channel *chan)
		z8530_tx_done(chan);
	}

	if (altered & chan->dcdcheck)
	{
	if (altered & chan->dcdcheck) {
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
@@ -474,7 +435,6 @@ static void z8530_status(struct z8530_channel *chan)
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}

	}
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
@@ -485,7 +445,6 @@ struct z8530_irqhandler z8530_sync = {
	.tx = z8530_tx,
	.status = z8530_status,
};

EXPORT_SYMBOL(z8530_sync);

/**
@@ -500,8 +459,7 @@ EXPORT_SYMBOL(z8530_sync);

static void z8530_dma_rx(struct z8530_channel *chan)
{
	if(chan->rxdma_on)
	{
	if (chan->rxdma_on) {
		/* Special condition check only */
		u8 status;

@@ -511,14 +469,11 @@ static void z8530_dma_rx(struct z8530_channel *chan)
		status = read_zsreg(chan, R1);

		if (status & END_FR)
		{
			z8530_rx_done(chan);	/* Fire up the next one */
		}		

		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	}
	else
	{
	} else {
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
@@ -531,11 +486,9 @@ static void z8530_dma_rx(struct z8530_channel *chan)
 *	We have received an interrupt while doing DMA transmissions. It
 *	shouldn't happen. Scream loudly if it does.
 */
 
static void z8530_dma_tx(struct z8530_channel *chan)
{
	if(!chan->dma_tx)
	{
	if (!chan->dma_tx) {
		pr_warn("Hey who turned the DMA off?\n");
		z8530_tx(chan);
		return;
@@ -554,7 +507,6 @@ static void z8530_dma_tx(struct z8530_channel *chan)
 *	and kick the next packet out. Secondly we may see a DCD change.
 *
 */
 
static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;
@@ -564,11 +516,8 @@ static void z8530_dma_status(struct z8530_channel *chan)

	chan->status = status;


	if(chan->dma_tx)
	{
		if(status&TxEOM)
		{
	if (chan->dma_tx) {
		if (status & TxEOM) {
			unsigned long flags;

			flags = claim_dma_lock();
@@ -580,8 +529,7 @@ static void z8530_dma_status(struct z8530_channel *chan)
		}
	}

	if (altered & chan->dcdcheck)
	{
	if (altered & chan->dcdcheck) {
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
@@ -621,11 +569,9 @@ static struct z8530_irqhandler z8530_txdma_sync = {
 *	(eg the MacII) we must clear the interrupt cause or die.
 */


static void z8530_rx_clear(struct z8530_channel *c)
{
	/*
	 *	Data and status bytes
	/*	Data and status bytes
	 */
	u8 stat;

@@ -634,8 +580,7 @@ static void z8530_rx_clear(struct z8530_channel *c)

	if (stat & END_FR)
		write_zsctrl(c, RES_Rx_CRC);
	/*
	 *	Clear irq
	/*	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
@@ -668,6 +613,7 @@ static void z8530_tx_clear(struct z8530_channel *c)
static void z8530_status_clear(struct z8530_channel *chan)
{
	u8 status = read_zsreg(chan, R0);

	if (status & TxEOM)
		write_zsctrl(chan, ERR_RES);
	write_zsctrl(chan, RES_EXT_INT);
@@ -679,8 +625,6 @@ struct z8530_irqhandler z8530_nop = {
	.tx = z8530_tx_clear,
	.status = z8530_status_clear,
};


EXPORT_SYMBOL(z8530_nop);

/**
@@ -707,8 +651,7 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)
	int work = 0;
	struct z8530_irqhandler *irqs;

	if(locker)
	{
	if (locker) {
		pr_err("IRQ re-enter\n");
		return IRQ_NONE;
	}
@@ -716,23 +659,23 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)

	spin_lock(&dev->lock);

	while(++work<5000)
	{

	while (++work < 5000) {
		intr = read_zsreg(&dev->chanA, R3);
		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
		if (!(intr &
		   (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
			break;

		/* This holds the IRQ status. On the 8530 you must read it from chan 
		   A even though it applies to the whole chip */
		/* This holds the IRQ status. On the 8530 you must read it
		 * from chan A even though it applies to the whole chip
		 */

		/* Now walk the chip and see what it is wanting - it may be
		   an IRQ for someone else remember */
		 * an IRQ for someone else remember
		 */

		irqs = dev->chanA.irqs;

		if(intr & (CHARxIP|CHATxIP|CHAEXT))
		{
		if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
			if (intr & CHARxIP)
				irqs->rx(&dev->chanA);
			if (intr & CHATxIP)
@@ -743,8 +686,7 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)

		irqs = dev->chanB.irqs;

		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
		{
		if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
			if (intr & CHBRxIP)
				irqs->rx(&dev->chanB);
			if (intr & CHBTxIP)
@@ -761,18 +703,15 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)
	locker = 0;
	return IRQ_HANDLED;
}

EXPORT_SYMBOL(z8530_interrupt);

/* Default register image written at init time: all registers zeroed
 * except index 12, which is set to 0x55.
 */
static const u8 reg_init[16] = {
	0, 0, 0, 0,
	0, 0, 0, 0,
	0, 0, 0, 0,
	0x55, 0, 0, 0
};


/**
 *	z8530_sync_open - Open a Z8530 channel for PIO
 *	@dev:	The network interface we are using
@@ -781,7 +720,6 @@ static const u8 reg_init[16]=
 *	Switch a Z8530 into synchronous mode without DMA assist. We
 *	raise the RTS/DTR and commence network operation.
 */
 
int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;
@@ -807,8 +745,6 @@ int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_open);

/**
@@ -819,7 +755,6 @@ EXPORT_SYMBOL(z8530_sync_open);
 *	Close down a Z8530 interface and switch its interrupt handlers
 *	to discard future events.
 */
 
int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
@@ -837,7 +772,6 @@ int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_sync_close);

/**
@@ -849,7 +783,6 @@ EXPORT_SYMBOL(z8530_sync_close);
 *	ISA DMA channels must be available for this to work. We assume ISA
 *	DMA driven I/O and PC limits on access.
 */
 
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;
@@ -859,14 +792,13 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	/*
	 *	Load the DMA interfaces up

	/*	Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	/*	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */
@@ -875,13 +807,12 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
		return -EMSGSIZE;

	c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if(c->rx_buf[0]==NULL)
	if (!c->rx_buf[0])
		return -ENOBUFS;
	c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;

	c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
	if (!c->tx_dma_buf[0]) {
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0] = NULL;
		return -ENOBUFS;
@@ -893,14 +824,12 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
	c->dma_num = 0;
	c->dma_ready = 1;

	/*
	 *	Enable DMA control mode
	/*	Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	TX DMA via DIR/REQ
	/*	TX DMA via DIR/REQ
	 */

	c->regs[R14] |= DTRREQ;
@@ -909,8 +838,7 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
	c->regs[R1] &= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	RX DMA via W/Req
	/*	RX DMA via W/Req
	 */

	c->regs[R1] |= WT_FN_RDYFN;
@@ -921,12 +849,10 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
	c->regs[R1] |= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	DMA interrupts
	/*	DMA interrupts
	 */

	/*
	 *	Set up the DMA configuration
	/*	Set up the DMA configuration
	 */

	dflags = claim_dma_lock();
@@ -945,8 +871,7 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	/*	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
@@ -961,7 +886,6 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_open);

/**
@@ -972,7 +896,6 @@ EXPORT_SYMBOL(z8530_sync_dma_open);
 *	Shut down a DMA mode synchronous interface. Halt the DMA, and
 *	free the buffers.
 */
 
int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
@@ -982,8 +905,7 @@ int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	/*	Disable the PC DMA channels
	 */

	flags = claim_dma_lock();
@@ -1001,8 +923,7 @@ int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)

	spin_lock_irqsave(c->lock, flags);

	/*
	 *	Disable DMA control mode
	/*	Disable DMA control mode
	 */

	c->regs[R1] &= ~WT_RDY_ENAB;
@@ -1013,13 +934,11 @@ int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
	c->regs[R14] &= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->rx_buf[0])
	{
	if (c->rx_buf[0]) {
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0] = NULL;
	}
	if(c->tx_dma_buf[0])
	{
	if (c->tx_dma_buf[0]) {
		free_page((unsigned  long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0] = NULL;
	}
@@ -1031,7 +950,6 @@ int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_close);

/**
@@ -1055,8 +973,7 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
	c->skb = NULL;
	c->skb2 = NULL;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	/*	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */
@@ -1065,23 +982,20 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
		return -EMSGSIZE;

	c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	if (!c->tx_dma_buf[0])
		return -ENOBUFS;

	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;


	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	Load the PIO receive ring
	/*	Load the PIO receive ring
	 */

	z8530_rx_done(c);
	z8530_rx_done(c);

	/*
	 *	Load the DMA interfaces up
	/*	Load the DMA interfaces up
	 */

	c->rxdma_on = 0;
@@ -1092,12 +1006,10 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
	c->dma_ready = 1;
	c->dma_tx = 1;

	/*
	 *	Enable DMA control mode
	/*	Enable DMA control mode
	 */

	/*
	 *	TX DMA via DIR/REQ
	/*	TX DMA via DIR/REQ
	 */
	c->regs[R14] |= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);
@@ -1105,8 +1017,7 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
	c->regs[R1] &= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	Set up the DMA configuration
	/*	Set up the DMA configuration
	 */

	dflags = claim_dma_lock();
@@ -1118,8 +1029,7 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	/*	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 0;
@@ -1133,7 +1043,6 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)

	return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_open);

/**
@@ -1150,15 +1059,13 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
	unsigned long dflags, cflags;
	u8 chk;

	
	spin_lock_irqsave(c->lock, cflags);

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	/*	Disable the PC DMA channels
	 */

	dflags = claim_dma_lock();
@@ -1170,8 +1077,7 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)

	release_dma_lock(dflags);

	/*
	 *	Disable DMA control mode
	/*	Disable DMA control mode
	 */

	c->regs[R1] &= ~WT_RDY_ENAB;
@@ -1182,8 +1088,7 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
	c->regs[R14] &= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->tx_dma_buf[0])
	{
	if (c->tx_dma_buf[0]) {
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0] = NULL;
	}
@@ -1194,17 +1099,12 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
	spin_unlock_irqrestore(c->lock, cflags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_txdma_close);


/*
 *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
/*	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
 *	it exists...
 */
 
static const char *z8530_type_name[]={
static const char * const z8530_type_name[] = {
	"Z8530",
	"Z85C30",
	"Z85230"
@@ -1230,17 +1130,15 @@ void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
		Z8530_PORT_OF(io),
		dev->irq);
}

EXPORT_SYMBOL(z8530_describe);

/*
 *	Locked operation part of the z8530 init code
/*	Locked operation part of the z8530 init code
 */
 
static inline int do_z8530_init(struct z8530_dev *dev)
{
	/* NOP the interrupt handlers first - we might get a
	   floating IRQ transition when we reset the chip */
	 * floating IRQ transition when we reset the chip
	 */
	dev->chanA.irqs = &z8530_nop;
	dev->chanB.irqs = &z8530_nop;
	dev->chanA.dcdcheck = DCD;
@@ -1259,19 +1157,16 @@ static inline int do_z8530_init(struct z8530_dev *dev)

	dev->type = Z8530;

	/*
	 *	See the application note.
	/*	See the application note.
	 */

	write_zsreg(&dev->chanA, R15, 0x01);

	/*
	 *	If we can set the low bit of R15 then
	/*	If we can set the low bit of R15 then
	 *	the chip is enhanced.
	 */

	if(read_zsreg(&dev->chanA, R15)==0x01)
	{
	if (read_zsreg(&dev->chanA, R15) == 0x01) {
		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
		/* Put a char in the fifo */
		write_zsreg(&dev->chanA, R8, 0);
@@ -1281,16 +1176,14 @@ static inline int do_z8530_init(struct z8530_dev *dev)
			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
	}

	/*
	 *	The code assumes R7' and friends are
	/*	The code assumes R7' and friends are
	 *	off. Use write_zsext() for these and keep
	 *	this bit clear.
	 */

	write_zsreg(&dev->chanA, R15, 0);

	/*
	 *	At this point it looks like the chip is behaving
	/*	At this point it looks like the chip is behaving
	 */

	memcpy(dev->chanA.regs, reg_init, 16);
@@ -1332,8 +1225,6 @@ int z8530_init(struct z8530_dev *dev)

	return ret;
}


EXPORT_SYMBOL(z8530_init);

/**
@@ -1346,7 +1237,6 @@ EXPORT_SYMBOL(z8530_init);
 *
 *	This is called without the lock held
 */
 
int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;
@@ -1361,7 +1251,6 @@ int z8530_shutdown(struct z8530_dev *dev)
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_shutdown);

/**
@@ -1381,9 +1270,9 @@ int z8530_channel_load(struct z8530_channel *c, u8 *rtable)

	spin_lock_irqsave(c->lock, flags);

	while(*rtable!=255)
	{
	while (*rtable != 255) {
		int reg = *rtable++;

		if (reg > 0x0F)
			write_zsreg(c, R15, c->regs[15] | 1);
		write_zsreg(c, reg & 0x0F, *rtable);
@@ -1405,10 +1294,8 @@ int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_channel_load);


/**
 *	z8530_tx_begin - Begin packet transmission
 *	@c: The Z8530 channel to kick
@@ -1426,6 +1313,7 @@ EXPORT_SYMBOL(z8530_channel_load);
static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;

	if (c->tx_skb)
		return;

@@ -1433,34 +1321,25 @@ static void z8530_tx_begin(struct z8530_channel *c)
	c->tx_next_skb = NULL;
	c->tx_ptr = c->tx_next_ptr;

	if(c->tx_skb==NULL)
	{
	if (!c->tx_skb) {
		/* Idle on */
		if(c->dma_tx)
		{
		if (c->dma_tx) {
			flags = claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 *	Check if we crapped out.
			/*	Check if we crapped out.
			 */
			if (get_dma_residue(c->txdma))
			{
			if (get_dma_residue(c->txdma)) {
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount = 0;
	}
	else
	{
	} else {
		c->txcount = c->tx_skb->len;

		
		if(c->dma_tx)
		{
			/*
			 *	FIXME. DMA is broken for the original 8530,
		if (c->dma_tx) {
			/*	FIXME. DMA is broken for the original 8530,
			 *	on the older parts we need to set a flag and
			 *	wait for a further TX interrupt to fire this
			 *	stage off
@@ -1469,13 +1348,10 @@ static void z8530_tx_begin(struct z8530_channel *c)
			flags = claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	These two are needed by the 8530/85C30
			/*	These two are needed by the 8530/85C30
			 *	and must be issued when idling.
			 */
			 
			if(c->dev->type!=Z85230)
			{
			if (c->dev->type != Z85230) {
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
@@ -1487,24 +1363,18 @@ static void z8530_tx_begin(struct z8530_channel *c)
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5] | TxENAB);
		}
		else
		{

		} else {
			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{		
			while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}

		}
	}
	/*
	 *	Since we emptied tx_skb we can ask for more
	/*	Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}
@@ -1525,7 +1395,7 @@ static void z8530_tx_done(struct z8530_channel *c)
	struct sk_buff *skb;

	/* Actually this can happen.*/
	if (c->tx_skb == NULL)
	if (!c->tx_skb)
		return;

	skb = c->tx_skb;
@@ -1544,12 +1414,10 @@ static void z8530_tx_done(struct z8530_channel *c)
 *	We point the receive handler at this function when idle. Instead
 *	of processing the frames we get to throw them away.
 */
 
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	/* Idle receive handler: nobody wants the frame, just free it.
	 * dev_kfree_skb_any() is safe from both IRQ and process context.
	 */
	dev_kfree_skb_any(skb);
}

EXPORT_SYMBOL(z8530_null_rx);

/**
@@ -1564,31 +1432,23 @@ EXPORT_SYMBOL(z8530_null_rx);
 *
 *	Called with the lock held
 */
 
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 *	Is our receive engine in DMA mode
	/*	Is our receive engine in DMA mode
	 */
	 
	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
	if (c->rxdma_on) {
		/*	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */
		 
		int ready = c->dma_ready;
		unsigned char *rxb = c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 *	Complete this DMA. Necessary to find the length
		/*	Complete this DMA. Necessary to find the length
		 */
		 
		flags = claim_dma_lock();

		disable_dma(c->rxdma);
@@ -1599,13 +1459,11 @@ static void z8530_rx_done(struct z8530_channel *c)
			ct = 2;	/* Shit happens.. */
		c->dma_ready = 0;

		/*
		 *	Normal case: the other slot is free, start the next DMA
		/*	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */

		if(ready)
		{
		if (ready) {
			c->dma_num ^= 1;
			set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
@@ -1613,18 +1471,19 @@ static void z8530_rx_done(struct z8530_channel *c)
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			 * from passing
			 */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
		} else {
			/* Can't occur as we dont reenable the DMA irq until
			   after the flip is done */
			 * after the flip is done
			 */
			netdev_warn(c->netdevice, "DMA flip overrun!\n");
		}

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		/*	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
@@ -1632,7 +1491,7 @@ static void z8530_rx_done(struct z8530_channel *c)
		 */

		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
		if (!skb) {
			c->netdevice->stats.rx_dropped++;
			netdev_warn(c->netdevice, "Memory squeeze\n");
		} else {
@@ -1646,8 +1505,7 @@ static void z8530_rx_done(struct z8530_channel *c)
		RT_LOCK;
		skb = c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		/*	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
@@ -1673,15 +1531,13 @@ static void z8530_rx_done(struct z8530_channel *c)
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			netdev_warn(c->netdevice, "memory squeeze\n");
		else
		if (c->skb2)
			skb_put(c->skb2, c->mtu);

		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 *	If we received a frame we must now process it.
	/*	If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
@@ -1703,6 +1559,7 @@ static void z8530_rx_done(struct z8530_channel *c)
static inline int spans_boundary(struct sk_buff *skb)
{
	unsigned long a = (unsigned long)skb->data;

	a ^= (a + skb->len);
	if (a & 0x00010000)	/* If the 64K bit is different.. */
		return 1;
@@ -1722,7 +1579,6 @@ static inline int spans_boundary(struct sk_buff *skb)
 *	Called from the network code. The lock is not held at this
 *	point.
 */

netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;
@@ -1731,18 +1587,15 @@ netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
	if (c->tx_next_skb)
		return NETDEV_TX_BUSY;

	
	/* PC SPECIFIC - DMA limits */
	
	/*
	 *	If we will DMA the transmit and its gone over the ISA bus
	/*	If we will DMA the transmit and its gone over the ISA bus
	 *	limit, then copy to the flip buffer
	 */

	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
	{
		/* 
		 *	Send the flip buffer, and flip the flippy bit.
	if (c->dma_tx &&
	    ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
	    16 * 1024 * 1024 || spans_boundary(skb))) {
		/*	Send the flip buffer, and flip the flippy bit.
		 *	We don't care which is used when just so long as
		 *	we never use the same buffer twice in a row. Since
		 *	only one buffer can be going out at a time the other
@@ -1751,9 +1604,9 @@ netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
		c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used ^= 1;	/* Flip temp buffer */
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	}
	else
	} else {
		c->tx_next_ptr = skb->data;
	}
	RT_LOCK;
	c->tx_next_skb = skb;
	RT_UNLOCK;
@@ -1764,11 +1617,9 @@ netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)

	return NETDEV_TX_OK;
}

EXPORT_SYMBOL(z8530_queue_xmit);

/*
 *	Module support
/*	Module support
 */
static const char banner[] __initconst =
	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";