Commit bcef57ea authored by P Praneesh's avatar P Praneesh Committed by Kalle Valo
Browse files

ath11k: add branch predictors in dp_tx path



Add branch prediction in dp_tx code path in tx and tx completion handlers.
Also, in ath11k_dp_tx_complete_msdu(), the pointer returned by
rcu_dereference() is never dereferenced, so it is preferable to use
rcu_access_pointer() there instead.

Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-01734-QCAHKSWPL_SILICONZ-1 v2

Co-developed-by: Sriram R <srirrama@codeaurora.org>
Signed-off-by: Sriram R <srirrama@codeaurora.org>
Signed-off-by: Jouni Malinen <jouni@codeaurora.org>
Signed-off-by: P Praneesh <ppranees@codeaurora.org>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/1630560820-21905-12-git-send-email-ppranees@codeaurora.org
parent cbfbed49
Loading
Loading
Loading
Loading
+24 −28
Original line number Diff line number Diff line
@@ -95,11 +95,11 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
	u8 ring_selector = 0, ring_map = 0;
	bool tcl_ring_retry;

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
		return -ESHUTDOWN;

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control))
	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		     !ieee80211_is_data(hdr->frame_control)))
		return -ENOTSUPP;

	pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
@@ -127,7 +127,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
			DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (ret < 0) {
	if (unlikely(ret < 0)) {
		if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) {
			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
			return -ENOSPC;
@@ -152,7 +152,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
		ti.meta_data_flags = arvif->tcl_metadata;
	}

	if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
	if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
		if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
			ti.encrypt_type =
				ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
@@ -173,8 +173,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
	ti.bss_ast_idx = arvif->ast_idx;
	ti.dscp_tid_tbl_idx = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
@@ -211,7 +211,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
	}

	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ab->dev, ti.paddr)) {
	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
		ret = -ENOMEM;
@@ -231,7 +231,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
	ath11k_hal_srng_access_begin(ab, tcl_ring);

	hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
	if (!hal_tcl_desc) {
	if (unlikely(!hal_tcl_desc)) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
@@ -245,7 +245,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
		 * checking this ring earlier for each pkt tx.
		 * Restart ring selection if some rings are not checked yet.
		 */
		if (ring_map != (BIT(ab->hw_params.max_tx_ring) - 1) &&
		if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
		    ab->hw_params.max_tx_ring > 1) {
			tcl_ring_retry = true;
			ring_selector++;
@@ -327,7 +327,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,

	spin_lock_bh(&tx_ring->tx_idr_lock);
	msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
	if (!msdu) {
	if (unlikely(!msdu)) {
		ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
			    ts->msdu_id);
		spin_unlock_bh(&tx_ring->tx_idr_lock);
@@ -435,16 +435,14 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	rcu_read_lock();

	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
		dev_kfree_skb_any(msdu);
		goto exit;
		return;
	}

	if (!skb_cb->vif) {
	if (unlikely(!skb_cb->vif)) {
		dev_kfree_skb_any(msdu);
		goto exit;
		return;
	}

	info = IEEE80211_SKB_CB(msdu);
@@ -465,7 +463,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
	if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar))) {
		if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
			if (ar->last_ppdu_id == 0) {
				ar->last_ppdu_id = ts->ppdu_id;
@@ -494,9 +492,6 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
	 */

	ieee80211_tx_status(ar->hw, msdu);

exit:
	rcu_read_unlock();
}

static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
@@ -505,11 +500,11 @@ static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
{
	ts->buf_rel_source =
		FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
	if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
		     ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
		return;

	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
	if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
		return;

	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
@@ -556,8 +551,9 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}

	if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
	    (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
	if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
		     (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
		      tx_ring->tx_status_tail))) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}
@@ -580,7 +576,7 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);

		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
		if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
			ath11k_dp_tx_process_htt_tx_complete(ab,
							     (void *)tx_status,
							     mac_id, msdu_id,
@@ -590,7 +586,7 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)

		spin_lock_bh(&tx_ring->tx_idr_lock);
		msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
		if (!msdu) {
		if (unlikely(!msdu)) {
			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
				    msdu_id);
			spin_unlock_bh(&tx_ring->tx_idr_lock);
+1 −1
Original line number Diff line number Diff line
@@ -5283,7 +5283,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
		arsta = (struct ath11k_sta *)control->sta->drv_priv;

	ret = ath11k_dp_tx(ar, arvif, arsta, skb);
	if (ret) {
	if (unlikely(ret)) {
		ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
		ieee80211_free_txskb(ar->hw, skb);
	}