Commit d20ef656 authored by Haijun Liu's avatar Haijun Liu Committed by David S. Miller
Browse files

net: wwan: t7xx: Add AP CLDMA



The t7xx device contains two Cross Layer DMA (CLDMA) interfaces to
communicate with the AP and Modem processors respectively. So far only
MD-CLDMA was being used; this patch enables AP-CLDMA as well.

Rename small Application Processor (sAP) to AP.

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Co-developed-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Moises Veleta <moises.veleta@linux.intel.com>
Signed-off-by: Devegowda Chandrashekar <chandrashekar.devegowda@intel.com>
Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0630f64d
Loading
Loading
Loading
Loading
+11 −6
Original line number Diff line number Diff line
@@ -1064,13 +1064,18 @@ static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	u32 phy_ao_base, phy_pd_base;

	if (md_ctrl->hif_id != CLDMA_ID_MD)
		return;
	hw_info->hw_mode = MODE_BIT_64;

	if (md_ctrl->hif_id == CLDMA_ID_MD) {
		phy_ao_base = CLDMA1_AO_BASE;
		phy_pd_base = CLDMA1_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA1_INT;
	hw_info->hw_mode = MODE_BIT_64;
	} else {
		phy_ao_base = CLDMA0_AO_BASE;
		phy_pd_base = CLDMA0_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA0_INT;
	}

	hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						      pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
	hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
+1 −1
Original line number Diff line number Diff line
@@ -34,7 +34,7 @@
/**
 * enum cldma_id - Identifiers for CLDMA HW units.
 * @CLDMA_ID_MD: Modem control channel.
 * @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
 * @CLDMA_ID_AP: Application Processor control channel.
 * @CLDMA_NUM:   Number of CLDMA HW units available.
 */
enum cldma_id {
+1 −0
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@
			 D2H_INT_EXCEPTION_CLEARQ_DONE |	\
			 D2H_INT_EXCEPTION_ALLQ_RESET |		\
			 D2H_INT_PORT_ENUM |			\
			 D2H_INT_ASYNC_AP_HK |			\
			 D2H_INT_ASYNC_MD_HK)

void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val);
+70 −15
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@
#include "t7xx_state_monitor.h"

#define RT_ID_MD_PORT_ENUM	0
#define RT_ID_AP_PORT_ENUM	1
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID	0x49434343

@@ -296,6 +297,7 @@ static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
	}

	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);

	if (stage == HIF_EX_INIT)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
@@ -424,7 +426,7 @@ static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_inf
		if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
			return -EINVAL;

		if (i == RT_ID_MD_PORT_ENUM)
		if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
			t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
	}

@@ -454,12 +456,12 @@ static int t7xx_core_reset(struct t7xx_modem *md)
	return 0;
}

static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
				 struct t7xx_fsm_ctl *ctl,
				 enum t7xx_fsm_event_state event_id,
				 enum t7xx_fsm_event_state err_detect)
{
	struct t7xx_fsm_event *event = NULL, *event_next;
	struct t7xx_sys_info *core_info = &md->core_md;
	struct device *dev = &md->t7xx_dev->pdev->dev;
	unsigned long flags;
	int ret;
@@ -529,19 +531,33 @@ static void t7xx_md_hk_wq(struct work_struct *work)
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
	md->core_md.handshake_ongoing = true;
	t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
	t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}

/* Workqueue handler for the AP-CLDMA stage-2 handshake (AP counterpart of
 * t7xx_md_hk_wq). Queued from t7xx_md_event_notify() when D2H_INT_ASYNC_AP_HK
 * is pending.
 */
static void t7xx_ap_hk_wq(struct work_struct *work)
{
	struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
	/* Cycle the AP CLDMA (stop, reload config, start) so the handshake
	 * begins from a freshly configured HW state.
	 */
	t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
	md->core_ap.handshake_ongoing = true;
	/* Run the shared handshake state machine against the AP core info,
	 * waiting on the AP-specific HS2/HS2_EXIT events.
	 */
	t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
}

void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	void __iomem *mhccif_base;
	unsigned int int_sta;
	unsigned long flags;

	switch (evt_id) {
	case FSM_PRE_START:
		t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM);
		t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
						   D2H_INT_ASYNC_AP_HK);
		break;

	case FSM_START:
@@ -554,16 +570,26 @@ void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
			ctl->exp_flg = true;
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
		} else if (ctl->exp_flg) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
		} else if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
		} else {
			void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;

			if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
				queue_work(md->handshake_wq, &md->handshake_work);
				md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
				iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		} else {
			t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
			}

			if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
				queue_work(md->ap_handshake_wq, &md->ap_handshake_work);
				md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
				iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
			}
		}
		spin_unlock_irqrestore(&md->exp_lock, flags);

@@ -576,6 +602,7 @@ void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)

	case FSM_READY:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
		break;

	default:
@@ -627,6 +654,19 @@ static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

	md->ap_handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
					      0, "ap_hk_wq");
	if (!md->ap_handshake_wq) {
		destroy_workqueue(md->handshake_wq);
		return NULL;
	}

	INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

	return md;
}

@@ -638,6 +678,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
	md->exp_id = 0;
	t7xx_fsm_reset(md);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_port_proxy_reset(md->port_prox);
	md->md_init_finish = true;
	return t7xx_core_reset(md);
@@ -667,6 +708,10 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_fsm_init(md);
	if (ret)
		goto err_destroy_hswq;
@@ -679,12 +724,16 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
	if (ret)
		goto err_uninit_ccmni;

	ret = t7xx_port_proxy_init(md);
	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
	if (ret)
		goto err_uninit_md_cldma;

	ret = t7xx_port_proxy_init(md);
	if (ret)
		goto err_uninit_ap_cldma;

	ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
	if (ret) /* fsm_uninit flushes cmd queue */
	if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
		goto err_uninit_proxy;

	t7xx_md_sys_sw_init(t7xx_dev);
@@ -694,6 +743,9 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
err_uninit_proxy:
	t7xx_port_proxy_uninit(md->port_prox);

err_uninit_ap_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);

err_uninit_md_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);

@@ -705,6 +757,7 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)

err_destroy_hswq:
	destroy_workqueue(md->handshake_wq);
	destroy_workqueue(md->ap_handshake_wq);
	dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
	return ret;
}
@@ -720,8 +773,10 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)

	t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
	t7xx_port_proxy_uninit(md->port_prox);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_ccmni_exit(t7xx_dev);
	t7xx_fsm_uninit(md);
	destroy_workqueue(md->handshake_wq);
	destroy_workqueue(md->ap_handshake_wq);
}
+3 −0
Original line number Diff line number Diff line
@@ -66,10 +66,13 @@ struct t7xx_modem {
	struct cldma_ctrl		*md_ctrl[CLDMA_NUM];
	struct t7xx_pci_dev		*t7xx_dev;
	struct t7xx_sys_info		core_md;
	struct t7xx_sys_info		core_ap;
	bool				md_init_finish;
	bool				rgu_irq_asserted;
	struct workqueue_struct		*handshake_wq;
	struct work_struct		handshake_work;
	struct workqueue_struct		*ap_handshake_wq;
	struct work_struct		ap_handshake_work;
	struct t7xx_fsm_ctl		*fsm_ctl;
	struct port_proxy		*port_prox;
	unsigned int			exp_id;
Loading