Commit bed6e865 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'net-remove-kmap_atomic'



Anirudh Venkataramanan says:

====================
net: Remove uses of kmap_atomic()

kmap_atomic() is being deprecated. This little series replaces the last
few uses of kmap_atomic() in the networking subsystem.

This series triggered a suggestion [1] that perhaps the Sun Cassini,
LDOM Virtual Switch Driver and the LDOM virtual network drivers should be
removed completely. I plan to do this in a follow-up patchset. For
completeness, this series still includes kmap_atomic() conversions that
apply to the above referenced drivers. If for some reason we choose to not
remove these drivers, at least they won't be using kmap_atomic() anymore.

Also, the following maintainer entries for the Chelsio driver seem to be
defunct:

  Vinay Kumar Yadav <vinay.yadav@chelsio.com>
  Rohit Maheshwari <rohitm@chelsio.com>

I can submit a follow-up patch to remove these entries, but thought
maybe the folks over at Chelsio would want to look into this first.

Changes v1 -> v2:
  Use memcpy_from_page() in patches 2/6 and 4/6
  Add new patch for the thunderbolt driver
  Update commit messages and cover letter
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 8781994a c3a8d375
Loading
Loading
Loading
Loading
+12 −14
Original line number Diff line number Diff line
@@ -1839,9 +1839,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
		 */
		if (prior_data_len) {
			int i = 0;
			u8 *data = NULL;
			skb_frag_t *f;
			u8 *vaddr;
			int frag_size = 0, frag_delta = 0;

			while (remaining > 0) {
@@ -1853,24 +1851,24 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
				i++;
			}
			f = &record->frags[i];
			vaddr = kmap_atomic(skb_frag_page(f));

			data = vaddr + skb_frag_off(f)  + remaining;
			frag_delta = skb_frag_size(f) - remaining;

			if (frag_delta >= prior_data_len) {
				memcpy(prior_data, data, prior_data_len);
				kunmap_atomic(vaddr);
				memcpy_from_page(prior_data, skb_frag_page(f),
						 skb_frag_off(f) + remaining,
						 prior_data_len);
			} else {
				memcpy(prior_data, data, frag_delta);
				kunmap_atomic(vaddr);
				memcpy_from_page(prior_data, skb_frag_page(f),
						 skb_frag_off(f) + remaining,
						 frag_delta);

				/* get the next page */
				f = &record->frags[i + 1];
				vaddr = kmap_atomic(skb_frag_page(f));
				data = vaddr + skb_frag_off(f);
				memcpy(prior_data + frag_delta,
				       data, (prior_data_len - frag_delta));
				kunmap_atomic(vaddr);

				memcpy_from_page(prior_data + frag_delta,
						 skb_frag_page(f),
						 skb_frag_off(f),
						 prior_data_len - frag_delta);
			}
			/* reset tcp_seq as per the prior_data_required len */
			tcp_seq -= prior_data_len;
+2 −2
Original line number Diff line number Diff line
@@ -207,11 +207,11 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));
		vaddr = kmap_local_page(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
		kunmap_local(vaddr);
	}

	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
+15 −33
Original line number Diff line number Diff line
@@ -90,8 +90,6 @@
#include <linux/uaccess.h>
#include <linux/jiffies.h>

#define cas_page_map(x)      kmap_atomic((x))
#define cas_page_unmap(x)    kunmap_atomic((x))
#define CAS_NCPUS            num_online_cpus()

#define cas_skb_release(x)  netif_rx(x)
@@ -1915,7 +1913,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
	int off, swivel = RX_SWIVEL_OFF_VAL;
	struct cas_page *page;
	struct sk_buff *skb;
	void *addr, *crcaddr;
	void *crcaddr;
	__sum16 csum;
	char *p;

@@ -1936,7 +1934,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
	skb_reserve(skb, swivel);

	p = skb->data;
	addr = crcaddr = NULL;
	crcaddr = NULL;
	if (hlen) { /* always copy header pages */
		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
@@ -1948,12 +1946,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			i += cp->crc_size;
		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
					i, DMA_FROM_DEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		memcpy(p, page_address(page->buffer) + off, i);
		dma_sync_single_for_device(&cp->pdev->dev,
					   page->dma_addr + off, i,
					   DMA_FROM_DEVICE);
		cas_page_unmap(addr);
		RX_USED_ADD(page, 0x100);
		p += hlen;
		swivel = 0;
@@ -1984,12 +1980,11 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
		/* make sure we always copy a header */
		swivel = 0;
		if (p == (char *) skb->data) { /* not split */
			addr = cas_page_map(page->buffer);
			memcpy(p, addr + off, RX_COPY_MIN);
			memcpy(p, page_address(page->buffer) + off,
			       RX_COPY_MIN);
			dma_sync_single_for_device(&cp->pdev->dev,
						   page->dma_addr + off, i,
						   DMA_FROM_DEVICE);
			cas_page_unmap(addr);
			off += RX_COPY_MIN;
			swivel = RX_COPY_MIN;
			RX_USED_ADD(page, cp->mtu_stride);
@@ -2036,10 +2031,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			RX_USED_ADD(page, hlen + cp->crc_size);
		}

		if (cp->crc_size) {
			addr = cas_page_map(page->buffer);
			crcaddr  = addr + off + hlen;
		}
		if (cp->crc_size)
			crcaddr = page_address(page->buffer) + off + hlen;

	} else {
		/* copying packet */
@@ -2061,12 +2054,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			i += cp->crc_size;
		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
					i, DMA_FROM_DEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		memcpy(p, page_address(page->buffer) + off, i);
		dma_sync_single_for_device(&cp->pdev->dev,
					   page->dma_addr + off, i,
					   DMA_FROM_DEVICE);
		cas_page_unmap(addr);
		if (p == (char *) skb->data) /* not split */
			RX_USED_ADD(page, cp->mtu_stride);
		else
@@ -2081,20 +2072,17 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
						page->dma_addr,
						dlen + cp->crc_size,
						DMA_FROM_DEVICE);
			addr = cas_page_map(page->buffer);
			memcpy(p, addr, dlen + cp->crc_size);
			memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
			dma_sync_single_for_device(&cp->pdev->dev,
						   page->dma_addr,
						   dlen + cp->crc_size,
						   DMA_FROM_DEVICE);
			cas_page_unmap(addr);
			RX_USED_ADD(page, dlen + cp->crc_size);
		}
end_copy_pkt:
		if (cp->crc_size) {
			addr    = NULL;
		if (cp->crc_size)
			crcaddr = skb->data + alloclen;
		}

		skb_put(skb, alloclen);
	}

@@ -2103,8 +2091,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
		/* checksum includes FCS. strip it out. */
		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
					      csum_unfold(csum)));
		if (addr)
			cas_page_unmap(addr);
	}
	skb->protocol = eth_type_trans(skb, cp->dev);
	if (skb->protocol == htons(ETH_P_IP)) {
@@ -2793,18 +2779,14 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,

		tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
		if (unlikely(tabort)) {
			void *addr;

			/* NOTE: len is always > tabort */
			cas_write_txd(cp, ring, entry, mapping, len - tabort,
				      ctrl, 0);
			entry = TX_DESC_NEXT(ring, entry);

			addr = cas_page_map(skb_frag_page(fragp));
			memcpy(tx_tiny_buf(cp, ring, entry),
			       addr + skb_frag_off(fragp) + len - tabort,
			memcpy_from_page(tx_tiny_buf(cp, ring, entry),
					 skb_frag_page(fragp),
					 skb_frag_off(fragp) + len - tabort,
					 tabort);
			cas_page_unmap(addr);
			mapping = tx_tiny_map(cp, ring, entry, tentry);
			len     = tabort;
		}
+2 −2
Original line number Diff line number Diff line
@@ -1085,13 +1085,13 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
		u8 *vaddr;

		if (nc < ncookies) {
			vaddr = kmap_atomic(skb_frag_page(f));
			vaddr = kmap_local_page(skb_frag_page(f));
			blen = skb_frag_size(f);
			blen += 8 - (blen & 7);
			err = ldc_map_single(lp, vaddr + skb_frag_off(f),
					     blen, cookies + nc, ncookies - nc,
					     map_perm);
			kunmap_atomic(vaddr);
			kunmap_local(vaddr);
		} else {
			err = -EMSGSIZE;
		}
+4 −4
Original line number Diff line number Diff line
@@ -1051,7 +1051,7 @@ static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
	return kmap_local_page(skb_frag_page(frag)) + skb_frag_off(frag);
}

static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
@@ -1109,7 +1109,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
			dest += len;

			if (unmap) {
				kunmap_atomic(src);
				kunmap_local(src);
				unmap = false;
			}

@@ -1147,7 +1147,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
		dest += len;

		if (unmap) {
			kunmap_atomic(src);
			kunmap_local(src);
			unmap = false;
		}

@@ -1162,7 +1162,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_atomic(src);
		kunmap_local(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;