Commit 12804793 authored by Edward Cree, committed by David S. Miller

sfc: decouple TXQ type from label



Make it possible to have an arbitrary mapping from types to labels,
because when we add inner-csum-offload TXQs there will no longer be a
convenient nesting hierarchy of NIC types (EF10 will have inner-csum
TXQs, while Siena will have HIGHPRI).
Correct a misleading comment on efx_hard_start_xmit().

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4a681bf3
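
To make the decoupling concrete, here is a standalone C sketch (hypothetical struct and flag values, not the driver's real definitions): before this change a queue's type was recovered by masking bits of its hardware label, so labels had to mirror the type space; after it, each NIC family's probe routine records the type explicitly, and labels can be assigned arbitrarily per channel.

/* Minimal sketch only; the struct layout and flag values here are
 * assumptions for illustration, not taken from the driver's headers.
 */
#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD 1u	/* checksum-offload queue */
#define EFX_TXQ_TYPE_HIGHPRI 2u	/* paced/high-priority queue (Siena) */

struct sketch_tx_queue {
	unsigned int label;	/* index that appears in hardware TX events */
	unsigned int type;	/* EFX_TXQ_TYPE_* flags, filled in at probe */
};

/* Old scheme: later code tested tx_queue->label & EFX_TXQ_TYPE_*.
 * New scheme: probe decodes the label once (Siena-style mapping shown)
 * and later code tests tx_queue->type instead of the label.
 */
static void sketch_farch_tx_probe(struct sketch_tx_queue *txq)
{
	txq->type = ((txq->label & 1) ? EFX_TXQ_TYPE_OFFLOAD : 0) |
		    ((txq->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
}

int main(void)
{
	struct sketch_tx_queue txq = { .label = 3 };

	sketch_farch_tx_probe(&txq);
	printf("label=%u offload=%u highpri=%u\n", txq.label,
	       !!(txq.type & EFX_TXQ_TYPE_OFFLOAD),
	       !!(txq.type & EFX_TXQ_TYPE_HIGHPRI));
	return 0;
}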
+3 −2
@@ -2146,6 +2146,7 @@ static int efx_ef10_irq_test_generate(struct efx_nic *efx)
 
 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
 {
+	tx_queue->type = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
 	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
 				    (tx_queue->ptr_mask + 1) *
 				    sizeof(efx_qword_t),
@@ -2254,7 +2255,7 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
 
 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 {
-	bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
+	bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OFFLOAD;
 	struct efx_channel *channel = tx_queue->channel;
 	struct efx_nic *efx = tx_queue->efx;
 	struct efx_ef10_nic_data *nic_data;
@@ -2880,7 +2881,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 	/* Get the transmit queue */
 	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
 	tx_queue = efx_channel_get_tx_queue(channel,
-					    tx_ev_q_label % EFX_TXQ_TYPES);
+					    tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
 
 	if (!tx_queue->timestamping) {
 		/* Transmit completion */
+5 −5
@@ -151,7 +151,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 	 */
 
 	n_xdp_tx = num_possible_cpus();
-	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_MAX_TXQ_PER_CHANNEL);
 
 	vec_count = pci_msix_vec_count(efx->pci_dev);
 	if (vec_count < 0)
@@ -179,7 +179,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 		efx->xdp_tx_queue_count = 0;
 	} else {
 		efx->n_xdp_channels = n_xdp_ev;
-		efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+		efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
 		efx->xdp_tx_queue_count = n_xdp_tx;
 		n_channels += n_xdp_ev;
 		netif_dbg(efx, drv, efx->net_dev,
@@ -520,7 +520,7 @@ static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
 	channel->channel = i;
 	channel->type = &efx_default_channel_type;
 
-	for (j = 0; j < EFX_TXQ_TYPES; j++) {
+	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
 		tx_queue = &channel->tx_queue[j];
 		tx_queue->efx = efx;
 		tx_queue->queue = -1;
@@ -594,7 +594,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
 	channel->napi_str.state = 0;
 	memset(&channel->eventq, 0, sizeof(channel->eventq));
 
-	for (j = 0; j < EFX_TXQ_TYPES; j++) {
+	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
 		tx_queue = &channel->tx_queue[j];
 		if (tx_queue->channel)
 			tx_queue->channel = channel;
@@ -894,7 +894,7 @@ int efx_set_channels(struct efx_nic *efx)
 						  xdp_queue_number, tx_queue->queue);
 					/* We may have a few left-over XDP TX
 					 * queues owing to xdp_tx_queue_count
-					 * not dividing evenly by EFX_TXQ_TYPES.
+					 * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
 					 * We still allocate and probe those
 					 * TXQs, but never use them.
 					 */
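
The "left-over XDP TX queues" comment in the hunk above is just rounding: the XDP TX queue count is split across channels of EFX_MAX_TXQ_PER_CHANNEL queues each, so the last channel may hold queues that are probed but never used. A small standalone illustration of that arithmetic (the per-channel value of 4 and the CPU count are assumptions for the example):

/* Illustrative only; EFX_MAX_TXQ_PER_CHANNEL's value and n_xdp_tx are
 * assumptions for this example, not read from the driver.
 */
#include <stdio.h>

#define EFX_MAX_TXQ_PER_CHANNEL 4
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int n_xdp_tx = 6;	/* e.g. num_possible_cpus() */
	unsigned int n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_MAX_TXQ_PER_CHANNEL);
	unsigned int probed = n_xdp_ev * EFX_MAX_TXQ_PER_CHANNEL;

	/* 6 TXQs need 2 channels of 4 queues each; the 2 extra queues are
	 * allocated and probed but never used, as the diff comment notes.
	 */
	printf("channels=%u probed=%u left-over=%u\n",
	       n_xdp_ev, probed, probed - n_xdp_tx);
	return 0;
}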
+1 −1
@@ -407,7 +407,7 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
 				snprintf(strings, ETH_GSTRING_LEN,
 					 "tx-%u.tx_packets",
 					 channel->tx_queue[0].queue /
-					 EFX_TXQ_TYPES);
+					 EFX_MAX_TXQ_PER_CHANNEL);
 
 				strings += ETH_GSTRING_LEN;
 			}
+11 −9
@@ -372,6 +372,8 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned entries;
 
+	tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OFFLOAD : 0) |
+			 ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
 	entries = tx_queue->ptr_mask + 1;
 	return efx_alloc_special_buffer(efx, &tx_queue->txd,
 					entries * sizeof(efx_qword_t));
@@ -379,7 +381,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
 
 void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
 {
-	int csum = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
+	int csum = tx_queue->type & EFX_TXQ_TYPE_OFFLOAD;
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t reg;
 
@@ -409,7 +411,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
 
 	EFX_POPULATE_OWORD_1(reg,
 			     FRF_BZ_TX_PACE,
-			     (tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
+			     (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
 			     FFE_BZ_TX_PACE_OFF :
 			     FFE_BZ_TX_PACE_RESERVED);
 	efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
@@ -832,13 +834,13 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
 		tx_queue = efx_channel_get_tx_queue(
-			channel, tx_ev_q_label % EFX_TXQ_TYPES);
+			channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 		/* Rewrite the FIFO write pointer */
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
 		tx_queue = efx_channel_get_tx_queue(
-			channel, tx_ev_q_label % EFX_TXQ_TYPES);
+			channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
 
 		netif_tx_lock(efx->net_dev);
 		efx_farch_notify_tx_desc(tx_queue);
@@ -1080,9 +1082,9 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 	int qid;
 
 	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
-	if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
-		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
-					    qid % EFX_TXQ_TYPES);
+	if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
+		tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
+					    qid % EFX_MAX_TXQ_PER_CHANNEL);
 		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
 			efx_farch_magic_event(tx_queue->channel,
 					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
@@ -1675,10 +1677,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
 	 * and the descriptor caches for those channels.
 	 */
 	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
-		       total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+		       total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
 		       efx->n_channels * EFX_MAX_EVQ_SIZE)
 		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
-	vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES);
+	vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
 
 #ifdef CONFIG_SFC_SRIOV
 	if (efx->type->sriov_wanted) {
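
In the flush-done hunk above, qid is a flat queue number decoded channel-major: the channel index is qid / EFX_MAX_TXQ_PER_CHANNEL and the queue within that channel is qid % EFX_MAX_TXQ_PER_CHANNEL. A standalone sketch of that decode follows (the per-channel value and the helper name are assumptions for illustration, not the driver's API):

/* Hypothetical helper mirroring the qid arithmetic above; the value of
 * EFX_MAX_TXQ_PER_CHANNEL is an assumption for this example.
 */
#include <stdio.h>

#define EFX_MAX_TXQ_PER_CHANNEL 4

static void sketch_decode_qid(unsigned int qid,
			      unsigned int *channel, unsigned int *queue)
{
	*channel = qid / EFX_MAX_TXQ_PER_CHANNEL;	/* which TX channel */
	*queue = qid % EFX_MAX_TXQ_PER_CHANNEL;		/* which TXQ on it */
}

int main(void)
{
	unsigned int channel, queue;

	sketch_decode_qid(9, &channel, &queue);
	/* With 4 queues per channel, qid 9 maps to channel 2, queue 1 */
	printf("qid=9 -> channel=%u queue=%u\n", channel, queue);
	return 0;
}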
+1 −1
@@ -164,7 +164,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
 						       EFX_BUF_SIZE));
-	bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
+	bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OFFLOAD;
 	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
 	struct efx_channel *channel = tx_queue->channel;
 	struct efx_nic *efx = tx_queue->efx;