Commit 4f44d326 authored by Jakub Kicinski
Browse files
Kalle Valo says:

====================
wireless fixes for v6.3

Third set of fixes for v6.3. mt76 has two kernel crash fixes and
adds back 160 MHz channel support for mt7915. mac80211 has fixes for
a race in the transmit path and two mesh related fixes. iwlwifi also has
fixes for races.

* tag 'wireless-2023-03-23' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless:
  wifi: mac80211: fix mesh path discovery based on unicast packets
  wifi: mac80211: fix qos on mesh interfaces
  wifi: iwlwifi: mvm: protect TXQ list manipulation
  wifi: iwlwifi: mvm: fix mvmtxq->stopped handling
  wifi: mac80211: Serialize ieee80211_handle_wake_tx_queue()
  wifi: mwifiex: mark OF related data as maybe unused
  wifi: mt76: connac: do not check WED status for non-mmio devices
  wifi: mt76: mt7915: add back 160MHz channel width support for MT7915
  wifi: mt76: do not run mt76_unregister_device() on unregistered hw
====================

Link: https://lore.kernel.org/r/20230323110332.C4FE4C433D2@smtp.kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents b1de5c78 f355f701
Loading
Loading
Loading
Loading
+18 −32
Original line number Diff line number Diff line
@@ -732,7 +732,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)

	rcu_read_lock();
	do {
		while (likely(!mvmtxq->stopped &&
		while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
					&mvmtxq->state) &&
			      !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
					&mvmtxq->state) &&
			      !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
			skb = ieee80211_tx_dequeue(hw, txq);

@@ -757,43 +760,26 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);

	/*
	 * Please note that racing is handled very carefully here:
	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
	 * deleted afterwards.
	 * This means that if:
	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
	 *	queue is allocated and we can TX.
	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
	 *	a race, should defer the frame.
	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
	 *	need to allocate the queue and defer the frame.
	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
	 *	queue is already scheduled for allocation, no need to allocate,
	 *	should defer the frame.
	 */

	/* If the queue is allocated TX and return. */
	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
		/*
		 * Check that list is empty to avoid a race where txq_id is
		 * already updated, but the queue allocation work wasn't
		 * finished
		 */
		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
			return;

	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
	    !txq->sta) {
		iwl_mvm_mac_itxq_xmit(hw, txq);
		return;
	}

	/* The list is being deleted only after the queue is fully allocated. */
	if (!list_empty(&mvmtxq->list))
		return;
	/* iwl_mvm_mac_itxq_xmit() will later be called by the worker
	 * to handle any packets we leave on the txq now
	 */

	spin_lock_bh(&mvm->add_stream_lock);
	/* The list is being deleted only after the queue is fully allocated. */
	if (list_empty(&mvmtxq->list) &&
	    /* recheck under lock */
	    !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
		list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
		schedule_work(&mvm->add_stream_wk);
	}
	spin_unlock_bh(&mvm->add_stream_lock);
}

#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)		\
	do {								\
+5 −1
Original line number Diff line number Diff line
@@ -729,7 +729,10 @@ struct iwl_mvm_txq {
	struct list_head list;
	u16 txq_id;
	atomic_t tx_request;
	bool stopped;
#define IWL_MVM_TXQ_STATE_STOP_FULL	0
#define IWL_MVM_TXQ_STATE_STOP_REDIRECT	1
#define IWL_MVM_TXQ_STATE_READY		2
	unsigned long state;
};

static inline struct iwl_mvm_txq *
@@ -827,6 +830,7 @@ struct iwl_mvm {
		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
	};
	struct work_struct add_stream_wk; /* To add streams to queues */
	spinlock_t add_stream_lock;

	const char *nvm_file_name;
	struct iwl_nvm_data *nvm_data;
+5 −1
Original line number Diff line number Diff line
@@ -1195,6 +1195,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
	INIT_LIST_HEAD(&mvm->add_stream_txqs);
	spin_lock_init(&mvm->add_stream_lock);

	init_waitqueue_head(&mvm->rx_sync_waitq);

@@ -1691,7 +1692,10 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,

		txq = sta->txq[tid];
		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
		mvmtxq->stopped = !start;
		if (start)
			clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
		else
			set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);

		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+24 −5
Original line number Diff line number Diff line
@@ -384,8 +384,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
@@ -479,8 +482,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -693,7 +699,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;
	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
@@ -736,7 +742,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,

out:
	/* Continue using the queue */
	txq->stopped = false;
	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	return ret;
}
@@ -1444,12 +1450,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			spin_lock_bh(&mvm->add_stream_lock);
			list_del_init(&mvmtxq->list);
			spin_unlock_bh(&mvm->add_stream_lock);
			continue;
		}

		list_del_init(&mvmtxq->list);
		/* now we're ready, any remaining races/concurrency will be
		 * handled in iwl_mvm_mac_itxq_xmit()
		 */
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);

		local_bh_disable();
		spin_lock(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		spin_unlock(&mvm->add_stream_lock);

		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}
@@ -1864,8 +1880,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		spin_lock_bh(&mvm->add_stream_lock);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		spin_unlock_bh(&mvm->add_stream_lock);
	}
}

+1 −1
Original line number Diff line number Diff line
@@ -172,7 +172,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
	.can_ext_scan = true,
};

static const struct of_device_id mwifiex_pcie_of_match_table[] = {
static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
	{ .compatible = "pci11ab,2b42" },
	{ .compatible = "pci1b4b,2b42" },
	{ }
Loading