Commit d20e5880 authored by David S. Miller's avatar David S. Miller
Browse files

Merge tag 'linux-can-next-for-5.15-20210725' of...

Merge tag 'linux-can-next-for-5.15-20210725' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next



linux-can-next-for-5.15-20210725

Marc Kleine-Budde says:

====================
pull-request: can-next 2021-07-25

this is a pull request of 46 patches for net-next/master.

The first 6 patches target the CAN J1939 protocol. One is from
gushengxian, fixing a grammatical error; 5 are by me, fixing a
checkpatch warning, making use of the fallthrough pseudo-keyword, and
using consistent variable naming.

The next 3 patches target the rx-offload helper. They are by me and
improve the performance and fix the local softirq work pending error
that occurs when napi_schedule() is called from threaded IRQ context.

The next 3 patches, by Vincent Mailhol and me, update the CAN
bittiming and transmitter delay compensation: the documentation for
the struct can_tdc is fixed, data_bittiming is cleared if FD mode is
turned off, and a redundant check is removed.

Followed by 4 patches targeting the m_can driver. Faiz Abbas's patches
add support for CAN PHY via the generic phy subsystem. Yang Yingliang
converts the driver to use devm_platform_ioremap_resource_byname().
And a patch by me which removes the unused support for custom bit
timing.

Andy Shevchenko contributes 2 patches for the mcp251xfd driver to
prepare the driver for ACPI support. A patch by me adds support for
shared IRQ handlers.

Zhen Lei contributes 3 patches to convert the esd_usb2, janz-ican3 and
the at91_can driver to make use of the DEVICE_ATTR_RO/RW() macros.

The next 8 patches are by Peng Li and provide general cleanups for the
at91_can driver.

The next 7 patches target the peak driver. First come 2 cleanup
patches by me for the peak_pci driver, followed by Stephane Grosjean's
patch to print the name and firmware version of the detected hardware.
The peak_usb driver gets a cleanup patch, loopback and one-shot mode
support, and an upgrade of the bus state change handling in Stephane
Grosjean's patches.

Vincent Mailhol provides 6 cleanup patches for the etas_es58x driver.

In the last 3 patches Angelo Dureghello adds support for the mcf5441x
SoC to the flexcan driver.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents af996031 8dad5561
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -107,6 +107,9 @@ properties:
  can-transceiver:
    $ref: can-transceiver.yaml#

  phys:
    maxItems: 1

required:
  - compatible
  - reg
+2 −1
Original line number Diff line number Diff line
@@ -97,7 +97,8 @@ config CAN_AT91

config CAN_FLEXCAN
	tristate "Support for Freescale FLEXCAN based chips"
	depends on OF && HAS_IOMEM
	depends on OF || COLDFIRE || COMPILE_TEST
	depends on HAS_IOMEM
	help
	  Say Y here if you want to support for Freescale FlexCAN.

+63 −74
Original line number Diff line number Diff line
@@ -43,14 +43,14 @@ enum at91_reg {
};

/* Mailbox registers (0 <= i <= 15) */
#define AT91_MMR(i)		(enum at91_reg)(0x200 + ((i) * 0x20))
#define AT91_MAM(i)		(enum at91_reg)(0x204 + ((i) * 0x20))
#define AT91_MID(i)		(enum at91_reg)(0x208 + ((i) * 0x20))
#define AT91_MFID(i)		(enum at91_reg)(0x20C + ((i) * 0x20))
#define AT91_MSR(i)		(enum at91_reg)(0x210 + ((i) * 0x20))
#define AT91_MDL(i)		(enum at91_reg)(0x214 + ((i) * 0x20))
#define AT91_MDH(i)		(enum at91_reg)(0x218 + ((i) * 0x20))
#define AT91_MCR(i)		(enum at91_reg)(0x21C + ((i) * 0x20))
#define AT91_MMR(i)		((enum at91_reg)(0x200 + ((i) * 0x20)))
#define AT91_MAM(i)		((enum at91_reg)(0x204 + ((i) * 0x20)))
#define AT91_MID(i)		((enum at91_reg)(0x208 + ((i) * 0x20)))
#define AT91_MFID(i)		((enum at91_reg)(0x20C + ((i) * 0x20)))
#define AT91_MSR(i)		((enum at91_reg)(0x210 + ((i) * 0x20)))
#define AT91_MDL(i)		((enum at91_reg)(0x214 + ((i) * 0x20)))
#define AT91_MDH(i)		((enum at91_reg)(0x218 + ((i) * 0x20)))
#define AT91_MCR(i)		((enum at91_reg)(0x21C + ((i) * 0x20)))

/* Register bits */
#define AT91_MR_CANEN		BIT(0)
@@ -87,19 +87,19 @@ enum at91_mb_mode {
};

/* Interrupt mask bits */
#define AT91_IRQ_ERRA		(1 << 16)
#define AT91_IRQ_WARN		(1 << 17)
#define AT91_IRQ_ERRP		(1 << 18)
#define AT91_IRQ_BOFF		(1 << 19)
#define AT91_IRQ_SLEEP		(1 << 20)
#define AT91_IRQ_WAKEUP		(1 << 21)
#define AT91_IRQ_TOVF		(1 << 22)
#define AT91_IRQ_TSTP		(1 << 23)
#define AT91_IRQ_CERR		(1 << 24)
#define AT91_IRQ_SERR		(1 << 25)
#define AT91_IRQ_AERR		(1 << 26)
#define AT91_IRQ_FERR		(1 << 27)
#define AT91_IRQ_BERR		(1 << 28)
#define AT91_IRQ_ERRA		BIT(16)
#define AT91_IRQ_WARN		BIT(17)
#define AT91_IRQ_ERRP		BIT(18)
#define AT91_IRQ_BOFF		BIT(19)
#define AT91_IRQ_SLEEP		BIT(20)
#define AT91_IRQ_WAKEUP		BIT(21)
#define AT91_IRQ_TOVF		BIT(22)
#define AT91_IRQ_TSTP		BIT(23)
#define AT91_IRQ_CERR		BIT(24)
#define AT91_IRQ_SERR		BIT(25)
#define AT91_IRQ_AERR		BIT(26)
#define AT91_IRQ_FERR		BIT(27)
#define AT91_IRQ_BERR		BIT(28)

#define AT91_IRQ_ERR_ALL	(0x1fff0000)
#define AT91_IRQ_ERR_FRAME	(AT91_IRQ_CERR | AT91_IRQ_SERR | \
@@ -287,7 +287,8 @@ static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
}

static inline void set_mb_mode_prio(const struct at91_priv *priv,
		unsigned int mb, enum at91_mb_mode mode, int prio)
				    unsigned int mb, enum at91_mb_mode mode,
				    int prio)
{
	at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
}
@@ -316,8 +317,7 @@ static void at91_setup_mailboxes(struct net_device *dev)
	unsigned int i;
	u32 reg_mid;

	/*
	 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
	/* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
	 * mailbox is disabled. The next 11 mailboxes are used as a
	 * reception FIFO. The last mailbox is configured with
	 * overwrite option. The overwrite flag indicates a FIFO
@@ -423,8 +423,7 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
	priv->can.state = state;
}

/*
 * theory of operation:
/* theory of operation:
 *
 * According to the datasheet priority 0 is the highest priority, 15
 * is the lowest. If two mailboxes have the same priority level the
@@ -486,8 +485,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);

	/*
	 * we have to stop the queue and deliver all messages in case
	/* we have to stop the queue and deliver all messages in case
	 * of a prio+mb counter wrap around. This is the case if
	 * tx_next buffer prio and mailbox equals 0.
	 *
@@ -515,6 +513,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
static inline void at91_activate_rx_low(const struct at91_priv *priv)
{
	u32 mask = get_mb_rx_low_mask(priv);

	at91_write(priv, AT91_TCR, mask);
}

@@ -529,6 +528,7 @@ static inline void at91_activate_rx_mb(const struct at91_priv *priv,
				       unsigned int mb)
{
	u32 mask = 1 << mb;

	at91_write(priv, AT91_TCR, mask);
}

@@ -582,9 +582,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
	reg_msr = at91_read(priv, AT91_MSR(mb));
	cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf);

	if (reg_msr & AT91_MSR_MRTR)
	if (reg_msr & AT91_MSR_MRTR) {
		cf->can_id |= CAN_RTR_FLAG;
	else {
	} else {
		*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
		*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
	}
@@ -796,8 +796,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
	if (reg_sr & get_irq_mb_rx(priv))
		work_done += at91_poll_rx(dev, quota - work_done);

	/*
	 * The error bits are clear on read,
	/* The error bits are clear on read,
	 * so use saved value from irq handler.
	 */
	reg_sr |= priv->reg_sr;
@@ -807,6 +806,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
	if (work_done < quota) {
		/* enable IRQs for frame errors and all mailboxes >= rx_next */
		u32 reg_ier = AT91_IRQ_ERR_FRAME;

		reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);

		napi_complete_done(napi, work_done);
@@ -816,8 +816,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
	return work_done;
}

/*
 * theory of operation:
/* theory of operation:
 *
 * priv->tx_echo holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
@@ -846,8 +845,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
		/* Disable irq for this TX mailbox */
		at91_write(priv, AT91_IDR, 1 << mb);

		/*
		 * only echo if mailbox signals us a transfer
		/* only echo if mailbox signals us a transfer
		 * complete (MSR_MRDY). Otherwise it's a tansfer
		 * abort. "can_bus_off()" takes care about the skbs
		 * parked in the echo queue.
@@ -862,8 +860,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
		}
	}

	/*
	 * restart queue if we don't have a wrap around but restart if
	/* restart queue if we don't have a wrap around but restart if
	 * we get a TX int for the last can frame directly before a
	 * wrap around.
	 */
@@ -883,8 +880,7 @@ static void at91_irq_err_state(struct net_device *dev,

	switch (priv->can.state) {
	case CAN_STATE_ERROR_ACTIVE:
		/*
		 * from: ERROR_ACTIVE
		/* from: ERROR_ACTIVE
		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
		 * =>  : there was a warning int
		 */
@@ -900,8 +896,7 @@ static void at91_irq_err_state(struct net_device *dev,
		}
		fallthrough;
	case CAN_STATE_ERROR_WARNING:
		/*
		 * from: ERROR_ACTIVE, ERROR_WARNING
		/* from: ERROR_ACTIVE, ERROR_WARNING
		 * to  : ERROR_PASSIVE, BUS_OFF
		 * =>  : error passive int
		 */
@@ -917,8 +912,7 @@ static void at91_irq_err_state(struct net_device *dev,
		}
		break;
	case CAN_STATE_BUS_OFF:
		/*
		 * from: BUS_OFF
		/* from: BUS_OFF
		 * to  : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
		 */
		if (new_state <= CAN_STATE_ERROR_PASSIVE) {
@@ -935,12 +929,10 @@ static void at91_irq_err_state(struct net_device *dev,
		break;
	}


	/* process state changes depending on the new state */
	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/*
		 * actually we want to enable AT91_IRQ_WARN here, but
		/* actually we want to enable AT91_IRQ_WARN here, but
		 * it screws up the system under certain
		 * circumstances. so just enable AT91_IRQ_ERRP, thus
		 * the "fallthrough"
@@ -1004,7 +996,6 @@ static int at91_get_state_by_bec(const struct net_device *dev,
	return 0;
}


static void at91_irq_err(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
@@ -1018,15 +1009,15 @@ static void at91_irq_err(struct net_device *dev)
		reg_sr = at91_read(priv, AT91_SR);

		/* we need to look at the unmasked reg_sr */
		if (unlikely(reg_sr & AT91_IRQ_BOFF))
		if (unlikely(reg_sr & AT91_IRQ_BOFF)) {
			new_state = CAN_STATE_BUS_OFF;
		else if (unlikely(reg_sr & AT91_IRQ_ERRP))
		} else if (unlikely(reg_sr & AT91_IRQ_ERRP)) {
			new_state = CAN_STATE_ERROR_PASSIVE;
		else if (unlikely(reg_sr & AT91_IRQ_WARN))
		} else if (unlikely(reg_sr & AT91_IRQ_WARN)) {
			new_state = CAN_STATE_ERROR_WARNING;
		else if (likely(reg_sr & AT91_IRQ_ERRA))
		} else if (likely(reg_sr & AT91_IRQ_ERRA)) {
			new_state = CAN_STATE_ERROR_ACTIVE;
		else {
		} else {
			netdev_err(dev, "BUG! hardware in undefined state\n");
			return;
		}
@@ -1053,8 +1044,7 @@ static void at91_irq_err(struct net_device *dev)
	priv->can.state = new_state;
}

/*
 * interrupt handler
/* interrupt handler
 */
static irqreturn_t at91_irq(int irq, void *dev_id)
{
@@ -1075,8 +1065,7 @@ static irqreturn_t at91_irq(int irq, void *dev_id)

	/* Receive or error interrupt? -> napi */
	if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
		/*
		 * The error bits are clear on read,
		/* The error bits are clear on read,
		 * save for later use.
		 */
		priv->reg_sr = reg_sr;
@@ -1133,8 +1122,7 @@ static int at91_open(struct net_device *dev)
	return err;
}

/*
 * stop CAN bus activity
/* stop CAN bus activity
 */
static int at91_close(struct net_device *dev)
{
@@ -1176,7 +1164,7 @@ static const struct net_device_ops at91_netdev_ops = {
	.ndo_change_mtu = can_change_mtu,
};

static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
static ssize_t mb0_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct at91_priv *priv = netdev_priv(to_net_dev(dev));
@@ -1187,8 +1175,9 @@ static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
		return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
}

static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
static ssize_t mb0_id_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct net_device *ndev = to_net_dev(dev);
	struct at91_priv *priv = netdev_priv(ndev);
@@ -1222,7 +1211,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
	return ret;
}

static DEVICE_ATTR(mb0_id, 0644, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
static DEVICE_ATTR_RW(mb0_id);

static struct attribute *at91_sysfs_attrs[] = {
	&dev_attr_mb0_id.attr,
+6 −3
Original line number Diff line number Diff line
@@ -47,7 +47,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
	}

	if (data[IFLA_CAN_DATA_BITTIMING]) {
		if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
		if (!is_can_fd)
			return -EOPNOTSUPP;
	}

@@ -132,10 +132,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
		priv->ctrlmode |= maskedflags;

		/* CAN_CTRLMODE_FD can only be set when driver supports FD */
		if (priv->ctrlmode & CAN_CTRLMODE_FD)
		if (priv->ctrlmode & CAN_CTRLMODE_FD) {
			dev->mtu = CANFD_MTU;
		else
		} else {
			dev->mtu = CAN_MTU;
			memset(&priv->data_bittiming, 0,
			       sizeof(priv->data_bittiming));
		}
	}

	if (data[IFLA_CAN_RESTART_MS]) {
+56 −34
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014      Protonic Holland,
 *                         David Jander
 * Copyright (C) 2014-2017 Pengutronix,
 * Copyright (C) 2014-2021 Pengutronix,
 *                         Marc Kleine-Budde <kernel@pengutronix.de>
 */

@@ -174,10 +174,8 @@ can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);
	int received = 0;

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
@@ -191,26 +189,12 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		queue_len = skb_queue_len(&offload->skb_queue);
		if (queue_len > offload->skb_queue_len_max / 8)
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
		__skb_queue_add_sort(&offload->skb_irq_queue, skb,
				     can_rx_offload_compare);
		received++;
	}

	return skb_queue_len(&skb_queue);
	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

@@ -226,13 +210,10 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
		if (!skb)
			break;

		skb_queue_tail(&offload->skb_queue, skb);
		__skb_queue_tail(&offload->skb_irq_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
@@ -241,7 +222,6 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
@@ -252,11 +232,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	can_rx_offload_schedule(offload);
	__skb_queue_add_sort(&offload->skb_irq_queue, skb,
			     can_rx_offload_compare);

	return 0;
}
@@ -295,13 +272,56 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
		return -ENOBUFS;
	}

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);
	__skb_queue_tail(&offload->skb_irq_queue, skb);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	napi_schedule(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);

void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	local_bh_disable();
	napi_schedule(&offload->napi);
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);

static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
@@ -312,6 +332,7 @@ static int can_rx_offload_init_queue(struct net_device *dev,
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);
	__skb_queue_head_init(&offload->skb_irq_queue);

	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

@@ -373,5 +394,6 @@ void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
	__skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
Loading