Commit 007f26f0 authored by Haijun Liu, committed by David S. Miller

net: wwan: t7xx: Infrastructure for early port configuration



To support cases such as firmware update or core dump, the t7xx device
is capable of signaling the host that a special port needs
to be created before the handshake phase.

This patch adds the infrastructure required to create the
early ports, which also requires a different configuration of
the CLDMA queues.
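
For illustration, a minimal sketch of how the new entry points are meant
to be combined; the handler below and its call site are assumptions, not
part of this patch, while t7xx_cldma_switch_cfg(), t7xx_cldma_start() and
t7xx_port_proxy_set_cfg() are the pieces introduced or extended here (the
early port table routes through CLDMA_ID_AP):

/* Illustrative sketch only -- not part of this patch. */
static void t7xx_early_mode_start(struct t7xx_modem *md)
{
	/* Rebuild the CLDMA rings with dedicated download/dump queues. */
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_DEDICATED_Q_CFG);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);

	/* Instantiate the early port set (e.g. "ttyDUMP") instead of the
	 * regular modem ports.
	 */
	t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
}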

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Co-developed-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Devegowda Chandrashekar <chandrashekar.devegowda@intel.com>
Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d20ef656
+30 −8
@@ -57,8 +57,6 @@
#define CHECK_Q_STOP_TIMEOUT_US		1000000
#define CHECK_Q_STOP_STEP_US		10000

#define CLDMA_JUMBO_BUFF_SZ		(63 * 1024 + sizeof(struct ccci_header))

static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				     enum mtk_txrx tx_rx, unsigned int index)
{
@@ -993,6 +991,34 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb
	return ret;
}

static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
	int qno;

	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
		md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
		md_ctrl->rxq[qno].q_type = CLDMA_SHARED_Q;
	}

	md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;

	for (qno = 0; qno < CLDMA_TXQ_NUM; qno++) {
		md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
		md_ctrl->txq[qno].q_type = CLDMA_SHARED_Q;
	}

	if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
		md_ctrl->rxq[DOWNLOAD_PORT_ID].q_type = CLDMA_DEDICATED_Q;
		md_ctrl->txq[DOWNLOAD_PORT_ID].q_type = CLDMA_DEDICATED_Q;
		md_ctrl->tx_ring[DOWNLOAD_PORT_ID].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
		md_ctrl->rx_ring[DOWNLOAD_PORT_ID].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
		md_ctrl->rxq[DUMP_PORT_ID].q_type = CLDMA_DEDICATED_Q;
		md_ctrl->txq[DUMP_PORT_ID].q_type = CLDMA_DEDICATED_Q;
		md_ctrl->tx_ring[DUMP_PORT_ID].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
		md_ctrl->rx_ring[DUMP_PORT_ID].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
	}
}

static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
	char dma_pool_name[32];
@@ -1021,11 +1047,6 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
	}

	for (j = 0; j < CLDMA_RXQ_NUM; j++) {
		md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;

		if (j == CLDMA_RXQ_NUM - 1)
			md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;

		ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
		if (ret) {
			dev_err(md_ctrl->dev, "Control RX ring init fail\n");
@@ -1329,9 +1350,10 @@ int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
	return -ENOMEM;
}

void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_adjust_config(md_ctrl, cfg_id);
	t7xx_cldma_late_init(md_ctrl);
}
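
As a quick reference, the queue layout produced by t7xx_cldma_adjust_config()
above (buffer sizes come from the defines added in the header below; queue
counts are left symbolic):

/*
 * CLDMA_SHARED_Q_CFG:
 *   every TX/RX queue           -> CLDMA_SHARED_Q, CLDMA_SHARED_Q_BUFF_SZ (3584)
 *   RX queue CLDMA_RXQ_NUM - 1  -> CLDMA_SHARED_Q, CLDMA_JUMBO_BUFF_SZ
 *
 * CLDMA_DEDICATED_Q_CFG, in addition to the above:
 *   TX/RX queues DOWNLOAD_PORT_ID (0) and DUMP_PORT_ID (1)
 *                               -> CLDMA_DEDICATED_Q, CLDMA_DEDICATED_Q_BUFF_SZ (2048)
 */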

+21 −3
@@ -31,6 +31,10 @@
#include "t7xx_cldma.h"
#include "t7xx_pci.h"

#define CLDMA_JUMBO_BUFF_SZ		(63 * 1024 + sizeof(struct ccci_header))
#define CLDMA_SHARED_Q_BUFF_SZ		3584
#define CLDMA_DEDICATED_Q_BUFF_SZ	2048

/**
 * enum cldma_id - Identifiers for CLDMA HW units.
 * @CLDMA_ID_MD: Modem control channel.
@@ -55,6 +59,16 @@ struct cldma_gpd {
	__le16 not_used2;
};

enum cldma_queue_type {
	CLDMA_SHARED_Q,
	CLDMA_DEDICATED_Q,
};

enum cldma_cfg {
	CLDMA_SHARED_Q_CFG,
	CLDMA_DEDICATED_Q_CFG,
};

struct cldma_request {
	struct cldma_gpd *gpd;	/* Virtual address for CPU */
	dma_addr_t gpd_addr;	/* Physical address for DMA */
@@ -77,6 +91,7 @@ struct cldma_queue {
	struct cldma_request *tr_done;
	struct cldma_request *rx_refill;
	struct cldma_request *tx_next;
	enum cldma_queue_type q_type;
	int budget;			/* Same as ring buffer size by default */
	spinlock_t ring_lock;
	wait_queue_head_t req_wq;	/* Only for TX */
@@ -104,17 +119,20 @@ struct cldma_ctrl {
	int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};

enum cldma_txq_rxq_port_id {
	DOWNLOAD_PORT_ID = 0,
	DUMP_PORT_ID = 1
};

#define GPD_FLAGS_HWO		BIT(0)
#define GPD_FLAGS_IOC		BIT(7)
#define GPD_DMAPOOL_ALIGN	16

#define CLDMA_MTU		3584	/* 3.5kB */

int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
+2 −2
@@ -527,7 +527,7 @@ static void t7xx_md_hk_wq(struct work_struct *work)

	/* Clear the HS2 EXIT event appended in core_reset() */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
	md->core_md.handshake_ongoing = true;
@@ -542,7 +542,7 @@ static void t7xx_ap_hk_wq(struct work_struct *work)
	 /* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
	t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
	md->core_ap.handshake_ongoing = true;
	t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
+3 −0
@@ -100,6 +100,7 @@ struct t7xx_port_conf {
	struct port_ops		*ops;
	char			*name;
	enum wwan_port_type	port_type;
	bool			is_early_port;
};

struct t7xx_port {
@@ -130,9 +131,11 @@ struct t7xx_port {
	struct task_struct		*thread;
};

int t7xx_get_port_mtu(struct t7xx_port *port);
struct sk_buff *t7xx_port_alloc_skb(int payload);
struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
		       unsigned int ex_msg);
int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
+107 −11
@@ -88,6 +88,20 @@ static const struct t7xx_port_conf t7xx_md_port_conf[] = {
	},
};

static struct t7xx_port_conf t7xx_early_port_conf[] = {
	{
		.tx_ch = 0xffff,
		.rx_ch = 0xffff,
		.txq_index = 1,
		.rxq_index = 1,
		.txq_exp_index = 1,
		.rxq_exp_index = 1,
		.path_id = CLDMA_ID_AP,
		.is_early_port = true,
		.name = "ttyDUMP",
	},
};

static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
{
	const struct t7xx_port_conf *port_conf;
@@ -202,7 +216,17 @@ int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
	return 0;
}

static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
int t7xx_get_port_mtu(struct t7xx_port *port)
{
	enum cldma_id path_id = port->port_conf->path_id;
	int tx_qno = t7xx_port_get_queue_no(port);
	struct cldma_ctrl *md_ctrl;

	md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
	return md_ctrl->tx_ring[tx_qno].pkt_size;
}

int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
{
	enum cldma_id path_id = port->port_conf->path_id;
	struct cldma_ctrl *md_ctrl;
@@ -317,6 +341,26 @@ static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
	}
}

static int t7xx_port_proxy_recv_skb_from_queue(struct t7xx_pci_dev *t7xx_dev,
					       struct cldma_queue *queue, struct sk_buff *skb)
{
	struct port_proxy *port_prox = t7xx_dev->md->port_prox;
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	int ret;

	port = port_prox->ports;
	port_conf = port->port_conf;

	ret = port_conf->ops->recv_skb(port, skb);
	if (ret < 0 && ret != -ENOBUFS) {
		dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret);
		dev_kfree_skb_any(skb);
	}

	return ret;
}

static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
						   struct cldma_queue *queue, u16 channel)
{
@@ -338,6 +382,22 @@ static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev
	return NULL;
}

struct t7xx_port *t7xx_port_proxy_get_port_by_name(struct port_proxy *port_prox, char *port_name)
{
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		port_conf = port->port_conf;

		if (!strncmp(port_conf->name, port_name, strlen(port_conf->name)))
			return port;
	}

	return NULL;
}

/**
 * t7xx_port_proxy_recv_skb() - Dispatch received skb.
 * @queue: CLDMA queue.
@@ -358,6 +418,9 @@ static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *s
	u16 seq_num, channel;
	int ret;

	if (queue->q_type == CLDMA_DEDICATED_Q)
		return t7xx_port_proxy_recv_skb_from_queue(t7xx_dev, queue, skb);

	channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
	if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
		dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);
@@ -372,6 +435,7 @@ static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *s

	seq_num = t7xx_port_next_rx_seq_num(port, ccci_h);
	port_conf = port->port_conf;
	if (!port->port_conf->is_early_port)
		skb_pull(skb, sizeof(*ccci_h));

	ret = port_conf->ops->recv_skb(port, skb);
@@ -439,26 +503,58 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
	t7xx_proxy_setup_ch_mapping(port_prox);
}

void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id)
{
	struct port_proxy *port_prox = md->port_prox;
	const struct t7xx_port_conf *port_conf;
	struct device *dev = port_prox->dev;
	unsigned int port_count;
	struct t7xx_port *port;
	int i;

	if (port_prox->cfg_id == cfg_id)
		return;

	if (port_prox->cfg_id != PORT_CFG_ID_INVALID) {
		for_each_proxy_port(i, port, port_prox)
			port->port_conf->ops->uninit(port);

		devm_kfree(dev, port_prox->ports);
	}

	if (cfg_id == PORT_CFG_ID_EARLY) {
		port_conf = t7xx_early_port_conf;
		port_count = ARRAY_SIZE(t7xx_early_port_conf);
	} else {
		port_conf = t7xx_md_port_conf;
		port_count = ARRAY_SIZE(t7xx_md_port_conf);
	}

	port_prox->ports = devm_kzalloc(dev, sizeof(struct t7xx_port) * port_count, GFP_KERNEL);
	if (!port_prox->ports)
		return;

	for (i = 0; i < port_count; i++)
		port_prox->ports[i].port_conf = &port_conf[i];

	port_prox->cfg_id = cfg_id;
	port_prox->port_count = port_count;
	t7xx_proxy_init_all_ports(md);
}

static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
	unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf);
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct port_proxy *port_prox;
	int i;

	port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
				 GFP_KERNEL);
	port_prox = devm_kzalloc(dev, sizeof(*port_prox), GFP_KERNEL);
	if (!port_prox)
		return -ENOMEM;

	md->port_prox = port_prox;
	port_prox->dev = dev;
	t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);

	for (i = 0; i < port_count; i++)
		port_prox->ports[i].port_conf = &t7xx_md_port_conf[i];

	port_prox->port_count = port_count;
	t7xx_proxy_init_all_ports(md);
	return 0;
}
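
The newly exported t7xx_get_port_mtu() above reports the TX buffer size of
the CLDMA queue a port is bound to, which matters once dedicated queues use
the smaller 2048-byte buffers. A minimal sketch of how a port implementation
might pair it with t7xx_port_send_raw_skb(); the helper name and its caller
are hypothetical, not part of this patch:

/* Illustrative only: cap an outgoing payload to the bound queue's buffer. */
static int t7xx_early_port_tx(struct t7xx_port *port, const void *data, size_t len)
{
	int mtu = t7xx_get_port_mtu(port);
	struct sk_buff *skb;

	if (mtu <= 0 || len > (size_t)mtu)
		return -EINVAL;

	skb = t7xx_port_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, data, len);
	return t7xx_port_send_raw_skb(port, skb);
}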
