Commit e7a08121 authored by David S. Miller's avatar David S. Miller
Browse files

Merge tag 'wireless-drivers-next-2020-09-11' of...

Merge tag 'wireless-drivers-next-2020-09-11' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next



Kalle Valo says:

====================
wireless-drivers-next patches for v5.10

First set of patches for v5.10. Most noteworthy here is ath11k getting
initial support for QCA6390 and IPQ6018 devices. But most of the
patches are cleanup: W=1 warning fixes, fallthrough keywords, DMA API
changes and tasklet API changes.

Major changes:

ath10k

* support SDIO firmware code dumps

* support station specific TID configurations

ath11k

* add support for IPQ6018

* add support for QCA6390 PCI devices

ath9k

* add support for NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 to improve PTK0
  rekeying

wcn36xx

* add support for TX ack
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 9984c0bb 5941d003
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -65,7 +65,8 @@ Optional properties:
				     the length can vary between hw versions.
- <supply-name>-supply: handle to the regulator device tree node
			   optional "supply-name" are "vdd-0.8-cx-mx",
			   "vdd-1.8-xo", "vdd-1.3-rfa" and "vdd-3.3-ch0".
			   "vdd-1.8-xo", "vdd-1.3-rfa", "vdd-3.3-ch0",
			   and "vdd-3.3-ch1".
- memory-region:
	Usage: optional
	Value type: <phandle>
@@ -204,6 +205,7 @@ wifi@18000000 {
		vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
		vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
		vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
		vdd-3.3-ch1-supply = <&vreg_l26a_3p3>;
		memory-region = <&wifi_msa_mem>;
		iommus = <&apps_smmu 0x0040 0x1>;
		qcom,msa-fixed-perm;
+3 −1
Original line number Diff line number Diff line
@@ -17,7 +17,9 @@ description: |

properties:
  compatible:
    const: qcom,ipq8074-wifi
    enum:
      - qcom,ipq8074-wifi
      - qcom,ipq6018-wifi

  reg:
    maxItems: 1
+40 −43
Original line number Diff line number Diff line
@@ -324,8 +324,8 @@ static void adm8211_interrupt_tci(struct ieee80211_hw *dev)

		/* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */

		pci_unmap_single(priv->pdev, info->mapping,
				 info->skb->len, PCI_DMA_TODEVICE);
		dma_unmap_single(&priv->pdev->dev, info->mapping,
				 info->skb->len, DMA_TO_DEVICE);

		ieee80211_tx_info_clear_status(txi);

@@ -382,34 +382,33 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
		} else if (pktlen < RX_COPY_BREAK) {
			skb = dev_alloc_skb(pktlen);
			if (skb) {
				pci_dma_sync_single_for_cpu(
					priv->pdev,
				dma_sync_single_for_cpu(&priv->pdev->dev,
							priv->rx_buffers[entry].mapping,
					pktlen, PCI_DMA_FROMDEVICE);
							pktlen,
							DMA_FROM_DEVICE);
				skb_put_data(skb,
					     skb_tail_pointer(priv->rx_buffers[entry].skb),
					     pktlen);
				pci_dma_sync_single_for_device(
					priv->pdev,
				dma_sync_single_for_device(&priv->pdev->dev,
							   priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
							   RX_PKT_SIZE,
							   DMA_FROM_DEVICE);
			}
		} else {
			newskb = dev_alloc_skb(RX_PKT_SIZE);
			if (newskb) {
				skb = priv->rx_buffers[entry].skb;
				skb_put(skb, pktlen);
				pci_unmap_single(
					priv->pdev,
				dma_unmap_single(&priv->pdev->dev,
						 priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
						 RX_PKT_SIZE, DMA_FROM_DEVICE);
				priv->rx_buffers[entry].skb = newskb;
				priv->rx_buffers[entry].mapping =
					pci_map_single(priv->pdev,
					dma_map_single(&priv->pdev->dev,
						       skb_tail_pointer(newskb),
						       RX_PKT_SIZE,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(priv->pdev,
						       DMA_FROM_DEVICE);
				if (dma_mapping_error(&priv->pdev->dev,
						      priv->rx_buffers[entry].mapping)) {
					priv->rx_buffers[entry].skb = NULL;
					dev_kfree_skb(newskb);
@@ -1449,11 +1448,11 @@ static int adm8211_init_rings(struct ieee80211_hw *dev)
		rx_info->skb = dev_alloc_skb(RX_PKT_SIZE);
		if (rx_info->skb == NULL)
			break;
		rx_info->mapping = pci_map_single(priv->pdev,
		rx_info->mapping = dma_map_single(&priv->pdev->dev,
						  skb_tail_pointer(rx_info->skb),
						  RX_PKT_SIZE,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) {
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, rx_info->mapping)) {
			dev_kfree_skb(rx_info->skb);
			rx_info->skb = NULL;
			break;
@@ -1490,10 +1489,9 @@ static void adm8211_free_rings(struct ieee80211_hw *dev)
		if (!priv->rx_buffers[i].skb)
			continue;

		pci_unmap_single(
			priv->pdev,
			priv->rx_buffers[i].mapping,
			RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
		dma_unmap_single(&priv->pdev->dev,
				 priv->rx_buffers[i].mapping, RX_PKT_SIZE,
				 DMA_FROM_DEVICE);

		dev_kfree_skb(priv->rx_buffers[i].skb);
	}
@@ -1502,10 +1500,9 @@ static void adm8211_free_rings(struct ieee80211_hw *dev)
		if (!priv->tx_buffers[i].skb)
			continue;

		pci_unmap_single(priv->pdev,
		dma_unmap_single(&priv->pdev->dev,
				 priv->tx_buffers[i].mapping,
				 priv->tx_buffers[i].skb->len,
				 PCI_DMA_TODEVICE);
				 priv->tx_buffers[i].skb->len, DMA_TO_DEVICE);

		dev_kfree_skb(priv->tx_buffers[i].skb);
	}
@@ -1632,9 +1629,9 @@ static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
	unsigned int entry;
	u32 flag;

	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, mapping))
	mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, mapping))
		return -ENOMEM;

	spin_lock_irqsave(&priv->lock, flags);
@@ -1745,8 +1742,8 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
	/* Allocate TX/RX descriptors */
	ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size +
		    sizeof(struct adm8211_desc) * priv->tx_ring_size;
	priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size,
					     &priv->rx_ring_dma);
	priv->rx_ring = dma_alloc_coherent(&priv->pdev->dev, ring_size,
					   &priv->rx_ring_dma, GFP_KERNEL);

	if (!priv->rx_ring) {
		kfree(priv->rx_buffers);
@@ -1818,8 +1815,8 @@ static int adm8211_probe(struct pci_dev *pdev,
		return err; /* someone else grabbed it? don't disable it */
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR "%s (adm8211): No suitable DMA available\n",
		       pci_name(pdev));
		goto err_free_reg;
@@ -1929,7 +1926,7 @@ static int adm8211_probe(struct pci_dev *pdev,
	kfree(priv->eeprom);

 err_free_desc:
	pci_free_consistent(pdev,
	dma_free_coherent(&pdev->dev,
			  sizeof(struct adm8211_desc) * priv->rx_ring_size +
			  sizeof(struct adm8211_desc) * priv->tx_ring_size,
			  priv->rx_ring, priv->rx_ring_dma);
@@ -1962,7 +1959,7 @@ static void adm8211_remove(struct pci_dev *pdev)

	priv = dev->priv;

	pci_free_consistent(pdev,
	dma_free_coherent(&pdev->dev,
			  sizeof(struct adm8211_desc) * priv->rx_ring_size +
			  sizeof(struct adm8211_desc) * priv->tx_ring_size,
			  priv->rx_ring, priv->rx_ring_dma);
+2 −8
Original line number Diff line number Diff line
@@ -12,18 +12,11 @@

void ath10k_bmi_start(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;

	/* Enable hardware clock to speed up firmware download */
	if (ar->hw_params.hw_ops->enable_pll_clk) {
		ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
	}
}
EXPORT_SYMBOL(ath10k_bmi_start);

int ath10k_bmi_done(struct ath10k *ar)
{
@@ -197,6 +190,7 @@ int ath10k_bmi_read_memory(struct ath10k *ar,

	return 0;
}
EXPORT_SYMBOL(ath10k_bmi_read_memory);

int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
+43 −38
Original line number Diff line number Diff line
@@ -1299,29 +1299,24 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ce->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	/*
	 * Clear before handling
	 *
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 *
	 * NOTE: When the last copy engine interrupt is cleared the
	 * hardware will go to sleep.  Once this happens any access to
	 * the CE registers can cause a hardware fault.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  wm_regs->cc_mask);

	spin_unlock_bh(&ce->ce_lock);
					  wm_regs->cc_mask | wm_regs->wm_mask);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ce->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);

	spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service);

@@ -1372,17 +1367,15 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state;
	u32 ctrl_addr;
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
	ce_state  = &ce->ce_states[ce_id];
	if (ce_state->attr_flags & CE_ATTR_POLL)
			continue;
		return;

	ctrl_addr = ath10k_ce_base_address(ar, ce_id);

@@ -1390,27 +1383,39 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
	ath10k_ce_error_intr_disable(ar, ctrl_addr);
	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupt);

	return 0;
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
		ath10k_ce_disable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupts);

void ath10k_ce_enable_interrupts(struct ath10k *ar)
void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ce_id;
	struct ath10k_ce_pipe *ce_state;

	/* Enable interrupts for copy engine that
	 * are not using polling mode.
	 */
	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
	ce_state  = &ce->ce_states[ce_id];
	if (ce_state->attr_flags & CE_ATTR_POLL)
			continue;
		return;

	ath10k_ce_per_engine_handler_adjust(ce_state);
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupt);

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	int ce_id;

	/* Enable interrupts for copy engine that
	 * are not using polling mode.
	 */
	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
		ath10k_ce_enable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupts);

@@ -1555,7 +1560,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
		if (ret) {
			dma_free_coherent(ar->dev,
					  (nentries * sizeof(struct ce_desc_64) +
					  (nentries * sizeof(struct ce_desc) +
					   CE_DESC_RING_ALIGN),
					  src_ring->base_addr_owner_space_unaligned,
					  base_addr);
Loading