Commit 1f52d7b6 authored by M Chetan Kumar's avatar M Chetan Kumar Committed by David S. Miller
Browse files

net: wwan: iosm: Enable M.2 7360 WWAN card support



This patch enables Intel M.2 7360 WWAN card support on
IOSM Driver.

Control path implementation is reused, whereas the data path
implementation uses a different protocol called MUX
Aggregation. The major portion of this patch covers the MUX
Aggregation protocol implementation used for IP traffic
communication.

For the M.2 7360 WWAN card, the driver exposes 2 wwan AT ports for
control communication.  The user space application or the
modem manager uses the wwan AT port for data path establishment.

During probe, driver reads the mux protocol device capability
register to know the mux protocol version supported by device.
Based on this, the right mux protocol is initialized for data
path communication.

An overview of an Aggregation Protocol
1>  An IP packet is encapsulated with 16 octet padding header
    to form a Datagram & the start offset of the Datagram is
    indexed into Datagram Header (DH).
2>  Multiple such Datagrams are composed & the start offset of
    each DH is indexed into Datagram Table Header (DTH).
3>  The Datagram Table (DT) is IP session specific & table_length
    item in DTH holds the number of composed datagrams pertaining
    to that particular IP session.
4>  And finally the offset of first DTH is indexed into DBH (Datagram
    Block Header).

So in the TX/RX flow a Datagram Block (Datagram Block Header + Payload) is
exchanged between driver & device.

Signed-off-by: default avatarM Chetan Kumar <m.chetan.kumar@linux.intel.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent f126ec9d
Loading
Loading
Loading
Loading
+51 −3
Original line number Diff line number Diff line
@@ -114,17 +114,35 @@ ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
	return HRTIMER_NORESTART;
}

/* Task-queue callback run when the ADB timer expires: finish (close out)
 * the currently accumulating UL ADB so it gets sent to the device.
 * @arg, @msg and @size are unused for this task.
 */
static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	ipc_mux_ul_adb_finish(ipc_imem->mux);
	return 0;
}

/* hrtimer expiry handler for the ADB finish timer. The actual work is
 * deferred to the driver task queue via ipc_task_queue_send_task(), since
 * hrtimer callbacks run in atomic context. One-shot: does not restart.
 */
static enum hrtimer_restart
ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, adb_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (!ipc_imem->mmio->has_mux_lite) {
	if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = MUX_LITE;
	cfg->protocol = ipc_imem->mmio->mux_protocol;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
@@ -153,6 +171,10 @@ void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

/**
 * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
 * @ipc_imem:                       Pointer to imem data-struct
 */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
@@ -179,6 +201,21 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
		hrtimer_cancel(hr_timer);
}

/**
 * ipc_imem_adb_timer_start -	Starts the ADB timer if not already started.
 * @ipc_imem:			Pointer to imem data-struct
 *
 * Arms a one-shot relative hrtimer that bounds how long an ADB may keep
 * aggregating before it is finished and handed to the device
 * (see ipc_imem_adb_timer_cb). A no-op if the timer is already active.
 */
void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
{
	if (!hrtimer_active(&ipc_imem->adb_timer)) {
		ipc_imem->hrtimer_period =
			ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
		hrtimer_start(&ipc_imem->adb_timer,
			      ipc_imem->hrtimer_period,
			      HRTIMER_MODE_REL);
	}
}

bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
@@ -550,6 +587,11 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
			    chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
				ctrl_chl_idx++;
				continue;
			}
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
@@ -680,8 +722,11 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux))
	if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
		ipc_imem_td_update_timer_start(ipc_imem);
		if (ipc_imem->mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_imem);
	}

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
@@ -1330,6 +1375,9 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
+5 −0
Original line number Diff line number Diff line
@@ -317,6 +317,7 @@ enum ipc_phase {
 * @tdupdate_timer:		Delay the TD update doorbell.
 * @fast_update_timer:		forced head pointer update delay timer.
 * @td_alloc_timer:		Timer for DL pipe TD allocation retry
 * @adb_timer:			Timer for finishing the ADB.
 * @rom_exit_code:		Mapped boot rom exit code.
 * @enter_runtime:		1 means the transition to runtime phase was
 *				executed.
@@ -364,6 +365,7 @@ struct iosm_imem {
	struct hrtimer tdupdate_timer;
	struct hrtimer fast_update_timer;
	struct hrtimer td_alloc_timer;
	struct hrtimer adb_timer;
	enum rom_exit_code rom_exit_code;
	u32 enter_runtime;
	struct completion ul_pend_sem;
@@ -593,4 +595,7 @@ void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
 * Returns: 0 on success, -1 on failure
 */
int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem);

void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem);

#endif
+4 −2
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
#include <linux/slab.h>

#include "iosm_ipc_mmio.h"
#include "iosm_ipc_mux.h"

/* Definition of MMIO offsets
 * note that MMIO_CI offsets are relative to end of chip info structure
@@ -71,8 +72,9 @@ void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio)
	ver = ipc_mmio_get_cp_version(ipc_mmio);
	cp_cap = ioread32(ipc_mmio->base + ipc_mmio->offset.cp_capability);

	ipc_mmio->has_mux_lite = (ver >= IOSM_CP_VERSION) &&
				 !(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR);
	ipc_mmio->mux_protocol = ((ver >= IOSM_CP_VERSION) && (cp_cap &
				 (UL_AGGR | DL_AGGR))) ? MUX_AGGREGATION
				 : MUX_LITE;

	ipc_mmio->has_ul_flow_credit =
		(ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT);
+3 −3
Original line number Diff line number Diff line
@@ -72,7 +72,7 @@ struct mmio_offset {
 * @context_info_addr:	Physical base address of context info structure
 * @chip_info_version:	Version of chip info structure
 * @chip_info_size:	Size of chip info structure
 * @has_mux_lite:	It doesn't support mux aggergation
 * @mux_protocol:	mux protocol
 * @has_ul_flow_credit:	Ul flow credit support
 * @has_slp_no_prot:	Device sleep no protocol support
 * @has_mcr_support:	Usage of mcr support
@@ -84,8 +84,8 @@ struct iosm_mmio {
	phys_addr_t context_info_addr;
	unsigned int chip_info_version;
	unsigned int chip_info_size;
	u8 has_mux_lite:1,
	   has_ul_flow_credit:1,
	u32 mux_protocol;
	u8 has_ul_flow_credit:1,
	   has_slp_no_prot:1,
	   has_mcr_support:1;
};
+20 −1
Original line number Diff line number Diff line
@@ -279,9 +279,10 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
			      struct iosm_imem *imem)
{
	struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
	int i, ul_tds, ul_td_size;
	int i, j, ul_tds, ul_td_size;
	struct sk_buff_head *free_list;
	struct sk_buff *skb;
	int qlt_size;

	if (!ipc_mux)
		return NULL;
@@ -321,6 +322,24 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
	ipc_mux->channel_id = -1;
	ipc_mux->channel = NULL;

	if (ipc_mux->protocol != MUX_LITE) {
		qlt_size = offsetof(struct mux_qlth, ql) +
				MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

		for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
			ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size,
							    GFP_ATOMIC);
			if (!ipc_mux->ul_adb.pp_qlt[i]) {
				for (j = i - 1; j >= 0; j--)
					kfree(ipc_mux->ul_adb.pp_qlt[j]);
				return NULL;
			}
		}

		ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE;
		ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
	}

	/* Allocate the list of UL ADB. */
	for (i = 0; i < ul_tds; i++) {
		dma_addr_t mapping;
Loading