Commit edb5b63e authored by Jakub Kicinski
Browse files
Kalle Valo says:

====================
wireless fixes for v6.2

Third set of fixes for v6.2. This time most of them are for drivers,
only one revert for mac80211. For an important mt76 fix we had to
cherry-pick two commits from wireless-next.

* tag 'wireless-2023-01-18' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless:
  Revert "wifi: mac80211: fix memory leak in ieee80211_if_add()"
  wifi: mt76: dma: fix a regression in adding rx buffers
  wifi: mt76: handle possible mt76_rx_token_consume failures
  wifi: mt76: dma: do not increment queue head if mt76_dma_add_buf fails
  wifi: rndis_wlan: Prevent buffer overflow in rndis_query_oid
  wifi: brcmfmac: fix regression for Broadcom PCIe wifi devices
  wifi: brcmfmac: avoid NULL-deref in survey dump for 2G only device
  wifi: brcmfmac: avoid handling disabled channels for survey dump
====================

Link: https://lore.kernel.org/r/20230118073749.AF061C433EF@smtp.kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents b9fb10d1 80f8a66d
Loading
Loading
Loading
Loading
+19 −18
Original line number Diff line number Diff line
@@ -7937,6 +7937,9 @@ cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));

	if (chan->flags & IEEE80211_CHAN_DISABLED)
		return -EINVAL;

	/* set_channel */
	chspec = channel_to_chanspec(&cfg->d11inf, chan);
	if (chspec != INVCHANSPEC) {
@@ -7961,7 +7964,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
	struct brcmf_dump_survey survey = {};
	struct ieee80211_supported_band *band;
	struct ieee80211_channel *chan;
	enum nl80211_band band_id;
	struct cca_msrmnt_query req;
	u32 noise;
	int err;
@@ -7974,26 +7977,25 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
		return -EBUSY;
	}

	band = wiphy->bands[NL80211_BAND_2GHZ];
	if (band && idx >= band->n_channels) {
	for (band_id = 0; band_id < NUM_NL80211_BANDS; band_id++) {
		band = wiphy->bands[band_id];
		if (!band)
			continue;
		if (idx >= band->n_channels) {
			idx -= band->n_channels;
		band = NULL;
			continue;
		}

	if (!band || idx >= band->n_channels) {
		band = wiphy->bands[NL80211_BAND_5GHZ];
		if (idx >= band->n_channels)
			return -ENOENT;
		info->channel = &band->channels[idx];
		break;
	}
	if (band_id == NUM_NL80211_BANDS)
		return -ENOENT;

	/* Setting current channel to the requested channel */
	chan = &band->channels[idx];
	err = cfg80211_set_channel(wiphy, ndev, chan, NL80211_CHAN_HT20);
	if (err) {
		info->channel = chan;
	info->filled = 0;
	if (cfg80211_set_channel(wiphy, ndev, info->channel, NL80211_CHAN_HT20))
		return 0;
	}

	/* Disable mpc */
	brcmf_set_mpc(ifp, 0);
@@ -8028,7 +8030,6 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
	if (err)
		goto exit;

	info->channel = chan;
	info->noise = noise;
	info->time = ACS_MSRMNT_DELAY;
	info->time_busy = ACS_MSRMNT_DELAY - survey.idle;
@@ -8040,7 +8041,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
		SURVEY_INFO_TIME_TX;

	brcmf_dbg(INFO, "OBSS dump: channel %d: survey duration %d\n",
		  ieee80211_frequency_to_channel(chan->center_freq),
		  ieee80211_frequency_to_channel(info->channel->center_freq),
		  ACS_MSRMNT_DELAY);
	brcmf_dbg(INFO, "noise(%d) busy(%llu) rx(%llu) tx(%llu)\n",
		  info->noise, info->time_busy, info->time_rx, info->time_tx);
+1 −1
Original line number Diff line number Diff line
@@ -1228,7 +1228,7 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
				BRCMF_NROF_H2D_COMMON_MSGRINGS;
		max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
	}
	if (max_flowrings > 256) {
	if (max_flowrings > 512) {
		brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
		return -EIO;
	}
+80 −51
Original line number Diff line number Diff line
@@ -205,6 +205,52 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
	mt76_dma_sync_idx(dev, q);
}

/*
 * mt76_dma_add_rx_buf - place a single receive buffer on an RX DMA ring
 *
 * Fills the descriptor at the current ring head with @buf and records the
 * bookkeeping for it in the matching queue entry.
 *
 * For WED-backed RX queues (MT_QFLAG_WED set and queue type MT76_WED_Q_RX),
 * a rxwi cache entry is reserved and an rx token is allocated via
 * mt76_rx_token_consume(); the token is encoded into buf1 and the
 * descriptor is flagged MT_DMA_CTL_TO_HOST. On token allocation failure
 * the rxwi is returned to the cache so nothing leaks.
 *
 * Returns the descriptor index used on success, or -ENOMEM if no rxwi or
 * rx token could be obtained. The caller owns the DMA mapping of @buf and
 * the @data frag and must release both if this function fails.
 */
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_desc *desc = &q->desc[q->head];
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	u32 buf1 = 0, ctrl;
	int idx = q->head;
	int rx_token;

	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			/* token pool exhausted: undo the rxwi reservation */
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	/* Program the descriptor; WRITE_ONCE so the hardware-visible
	 * fields are each stored exactly once, in this order.
	 */
	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

	/* Mirror the descriptor state in the software entry for unmap/free */
	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;	/* single-buffer entry: no second segment */
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
@@ -212,37 +258,23 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if ((q->flags & MT_QFLAG_WED) &&
		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
			struct mt76_txwi_cache *t = txwi;
			int rx_token;

			if (!t)
				return -ENOMEM;

			rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
							 buf[0].addr);
			buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
			       MT_DMA_CTL_TO_HOST;
		} else {
			if (txwi) {
				q->entry[q->head].txwi = DMA_DUMMY_DATA;
				q->entry[q->head].skip_buf0 = true;
			}

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;
@@ -264,13 +296,13 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;
		}

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

@@ -577,17 +609,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_txwi_cache *t = NULL;
		struct mt76_queue_buf qbuf;
		void *buf = NULL;

		if ((q->flags & MT_QFLAG_WED) &&
		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
			t = mt76_get_rxwi(dev);
			if (!t)
				break;
		}

		buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;
@@ -601,7 +625,12 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			dma_unmap_single(dev->dma_dev, addr, len,
					 DMA_FROM_DEVICE);
			skb_free_frag(buf);
			break;
		}
		frames++;
	}

+7 −0
Original line number Diff line number Diff line
@@ -653,6 +653,13 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)

		desc->buf0 = cpu_to_le32(phy_addr);
		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
		if (token < 0) {
			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
					 wed->wlan.rx_size, DMA_TO_DEVICE);
			skb_free_frag(ptr);
			goto unmap;
		}

		desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
						      token));
		desc++;
+4 −3
Original line number Diff line number Diff line
@@ -764,10 +764,11 @@ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
	spin_lock_bh(&dev->rx_token_lock);
	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
			  GFP_ATOMIC);
	spin_unlock_bh(&dev->rx_token_lock);

	if (token >= 0) {
		t->ptr = ptr;
		t->dma_addr = phys;
	}
	spin_unlock_bh(&dev->rx_token_lock);

	return token;
}
Loading