Commit 2551a922 authored by Kalle Valo's avatar Kalle Valo
Browse files

Merge tag 'mt76-for-kvalo-2022-12-01' of https://github.com/nbd168/wireless

mt76 patches for 6.2

- fixes
- WED support for mt7986 + mt7915 for flow offloading
- new driver for the mt7996 Wi-Fi 7 chipset
parents 94b9b9de f23a0cea
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -34,3 +34,4 @@ source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7921/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7996/Kconfig"
+1 −0
Original line number Diff line number Diff line
@@ -35,3 +35,4 @@ obj-$(CONFIG_MT7603E) += mt7603/
obj-$(CONFIG_MT7615_COMMON) += mt7615/
obj-$(CONFIG_MT7915E) += mt7915/
obj-$(CONFIG_MT7921_COMMON) += mt7921/
obj-$(CONFIG_MT7996E) += mt7996/
+0 −19
Original line number Diff line number Diff line
@@ -100,23 +100,6 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
}
EXPORT_SYMBOL_GPL(mt76_seq_puts_array);

/*
 * Debugfs "rate_txpower" read handler: dump the per-modulation TX power
 * tables (CCK, OFDM, STBC, HT, VHT) of the mt76 device bound to this
 * seq_file, one labelled array per line via mt76_seq_puts_array().
 * Always returns 0 (seq_file show-callback convention for success).
 */
static int mt76_read_rate_txpower(struct seq_file *s, void *data)
{
	/* s->private carries the struct device installed at debugfs creation */
	struct mt76_dev *dev = dev_get_drvdata(s->private);

	mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
			    ARRAY_SIZE(dev->rate_power.cck));
	mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
			    ARRAY_SIZE(dev->rate_power.ofdm));
	mt76_seq_puts_array(s, "STBC", dev->rate_power.stbc,
			    ARRAY_SIZE(dev->rate_power.stbc));
	mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
			    ARRAY_SIZE(dev->rate_power.ht));
	mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
			    ARRAY_SIZE(dev->rate_power.vht));
	return 0;
}

struct dentry *
mt76_register_debugfs_fops(struct mt76_phy *phy,
			   const struct file_operations *ops)
@@ -137,8 +120,6 @@ mt76_register_debugfs_fops(struct mt76_phy *phy,
	debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
	if (dev->otp.data)
		debugfs_create_blob("otp", 0400, dir, &dev->otp);
	debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
				    mt76_read_rate_txpower);
	debugfs_create_devm_seqfile(dev->dev, "rx-queues", dir,
				    mt76_rx_queues_read);

+194 −50
Original line number Diff line number Diff line
@@ -59,6 +59,19 @@ mt76_alloc_txwi(struct mt76_dev *dev)
	return t;
}

/* Allocate one zeroed, cache-line-aligned rxwi cache entry with no
 * buffer attached yet.  GFP_ATOMIC: safe to call from atomic context.
 */
static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *r;

	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
	if (r)
		r->ptr = NULL;

	return r;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
@@ -75,6 +88,22 @@ __mt76_get_txwi(struct mt76_dev *dev)
	return t;
}

/* Pop one entry off the rxwi free list, or return NULL when the list is
 * empty.  dev->wed_lock serializes all access to dev->rxwi_cache.
 */
static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *r;

	spin_lock(&dev->wed_lock);
	r = list_first_entry_or_null(&dev->rxwi_cache,
				     struct mt76_txwi_cache, list);
	if (r)
		list_del(&r->list);
	spin_unlock(&dev->wed_lock);

	return r;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
@@ -86,6 +115,18 @@ mt76_get_txwi(struct mt76_dev *dev)
	return mt76_alloc_txwi(dev);
}

/* Obtain an rxwi cache entry: prefer a recycled one from the free list,
 * fall back to a fresh atomic allocation when the list is empty.
 */
struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *r = __mt76_get_rxwi(dev);

	return r ? r : mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
@@ -98,6 +139,18 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

/* Return an rxwi cache entry to the free list for reuse.
 * A NULL entry is tolerated and silently ignored.
 */
void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (t) {
		spin_lock(&dev->wed_lock);
		list_add(&t->list, &dev->rxwi_cache);
		spin_unlock(&dev->wed_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
@@ -112,6 +165,20 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
	local_bh_enable();
}

/* Drain the rxwi free list: release any page fragment still attached to
 * an entry, then free the entry itself.  BHs are disabled around the
 * walk, mirroring mt76_free_pending_txwi().
 */
static void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *r;

	local_bh_disable();
	for (r = __mt76_get_rxwi(dev); r; r = __mt76_get_rxwi(dev)) {
		if (r->ptr)
			skb_free_frag(r->ptr);
		kfree(r);
	}
	local_bh_enable();
}

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
@@ -148,11 +215,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

@@ -162,6 +224,25 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if ((q->flags & MT_QFLAG_WED) &&
		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
			struct mt76_txwi_cache *t = txwi;
			int rx_token;

			if (!t)
				return -ENOMEM;

			rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
							 buf[0].addr);
			buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
			       MT_DMA_CTL_TO_HOST;
		} else {
			if (txwi) {
				q->entry[q->head].txwi = DMA_DUMMY_DATA;
				q->entry[q->head].skip_buf0 = true;
			}

			if (buf[0].skip_unmap)
				entry->skip_buf0 = true;
			entry->skip_buf1 = i == nbufs - 1;
@@ -183,6 +264,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
				ctrl |= MT_DMA_CTL_LAST_SEC0;
			else if (i == nbufs - 2)
				ctrl |= MT_DMA_CTL_LAST_SEC1;
		}

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
@@ -272,33 +354,60 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	void *buf;

	buf_addr = e->dma_addr[0];
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
				      le32_to_cpu(desc->buf1));
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_unmap_single(dev->dma_dev, t->dma_addr,
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);

		if (drop) {
			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));

			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
					   MT_DMA_CTL_DROP));
		}
	} else {
		buf = e->buf;
		e->buf = NULL;
		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);
	}

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

@@ -314,7 +423,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
@@ -441,14 +550,26 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
	return ret;
}

/* Pick the page_frag_cache used to refill an RX queue: WED RX queues
 * share the cache owned by the WED RX buffer ring (only when WED support
 * is compiled in); every other queue uses its own per-queue cache.
 */
static struct page_frag_cache *
mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
		return &dev->mmio.wed.rx_buf_ring.rx_page;
#endif
	return &q->rx_page;
}

static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	int frames = 0, offset = q->buf_offset;
	dma_addr_t addr;

	if (!q->ndesc)
		return 0;
@@ -456,9 +577,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_txwi_cache *t = NULL;
		struct mt76_queue_buf qbuf;
		void *buf = NULL;

		if ((q->flags & MT_QFLAG_WED) &&
		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
			t = mt76_get_rxwi(dev);
			if (!t)
				break;
		}

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

@@ -471,7 +601,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
		frames++;
	}

@@ -517,6 +647,11 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs);
		if (!ret)
			q->wed_regs = wed->rx_ring[ring].reg_base;
		break;
	default:
		ret = -EINVAL;
	}
@@ -574,7 +709,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		if (!buf)
			break;

@@ -615,7 +750,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
		  int len, bool more, u32 info)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -635,7 +770,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}
@@ -656,6 +791,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
@@ -666,10 +802,14 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
@@ -682,7 +822,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			mt76_add_fragment(dev, q, data, len, more, info);
			continue;
		}

@@ -706,7 +846,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
@@ -803,11 +943,15 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
		if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags))
			mt76_dma_rx_cleanup(dev, q);
	}

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
+8 −0
Original line number Diff line number Diff line
@@ -15,6 +15,14 @@
#define MT_DMA_CTL_SD_LEN0		GENMASK(29, 16)
#define MT_DMA_CTL_LAST_SEC0		BIT(30)
#define MT_DMA_CTL_DMA_DONE		BIT(31)
#define MT_DMA_CTL_TO_HOST		BIT(8)
#define MT_DMA_CTL_TO_HOST_A		BIT(12)
#define MT_DMA_CTL_DROP			BIT(14)
#define MT_DMA_CTL_TOKEN		GENMASK(31, 16)

#define MT_DMA_PPE_CPU_REASON		GENMASK(15, 11)
#define MT_DMA_PPE_ENTRY		GENMASK(30, 16)
#define MT_DMA_INFO_PPE_VLD		BIT(31)

#define MT_DMA_HDR_LEN			4
#define MT_RX_INFO_LEN			4
Loading