Commit 7c28da8b authored by David S. Miller
Browse files

Merge branch 'ibmvnic-coding-style'



Lijun Pan says:

====================
ibmvnic: a set of fixes of coding style

This series addresses several coding style problems.

v2: rebased on top of tree. Added the Reviewed-by tags from the v1 reviews.
    Patch 8/8 is new.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 260b6971 8a96c80e
Loading
Loading
Loading
Loading
+32 −39
Original line number Diff line number Diff line
@@ -115,7 +115,7 @@ struct ibmvnic_stat {

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
@@ -1221,8 +1221,7 @@ static int ibmvnic_open(struct net_device *netdev)
	rc = __ibmvnic_open(netdev);

out:
	/*
	 * If open fails due to a pending failover, set device state and
	/* If open fails due to a pending failover, set device state and
	 * return. Device operation will be handled by reset routine.
	 */
	if (rc && adapter->failover_pending) {
@@ -1946,8 +1945,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_lock();

	/*
	 * Now that we have the rtnl lock, clear any pending failover.
	/* Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
@@ -2042,9 +2040,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
		}

		rc = ibmvnic_login(netdev);
		if (rc) {
		if (rc)
			goto out;
		}

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_resources(adapter);
@@ -2249,8 +2246,7 @@ static void __ibmvnic_reset(struct work_struct *work)
		spin_unlock_irqrestore(&adapter->state_lock, flags);

		if (adapter->force_reset_recovery) {
			/*
			 * Since we are doing a hard reset now, clear the
			/* Since we are doing a hard reset now, clear the
			 * failover_pending flag so we don't ignore any
			 * future MOBILITY or other resets.
			 */
@@ -2322,8 +2318,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
	unsigned long flags;
	int ret;

	/*
	 * If failover is pending don't schedule any other reset.
	/* If failover is pending don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * a failover reset scheduled, we will detect and drop the
	 * duplicate reset when walking the ->rwi_list below.
@@ -2338,7 +2333,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		adapter->init_done_rc = EAGAIN;
		ret = EAGAIN;
		goto err;
	}

@@ -2445,9 +2441,8 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
		if (!pending_scrq(adapter, rx_scrq))
			break;
		next = ibmvnic_next_scrq(adapter, rx_scrq);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(next->rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
@@ -2638,9 +2633,9 @@ static void ibmvnic_get_drvinfo(struct net_device *netdev,
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
	strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strscpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

@@ -2752,7 +2747,6 @@ static int ibmvnic_set_channels(struct net_device *netdev,
			    channels->rx_count, channels->tx_count,
			    adapter->req_rx_queues, adapter->req_tx_queues);
	return ret;

}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2841,8 +2835,8 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						ibmvnic_stats[i].offset));
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT
				      (adapter, ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
@@ -2882,6 +2876,7 @@ static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)

	return 0;
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
@@ -3151,7 +3146,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
		/* H_EOI would fail with rc = H_FUNCTION when running
		 * in XIVE mode which is expected, but not an error.
		 */
		if (rc && (rc != H_FUNCTION))
		if (rc && rc != H_FUNCTION)
			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
				val, rc);
	}
@@ -3654,8 +3649,8 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));

	if (!adapter->crq.active &&
	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
@@ -3860,15 +3855,15 @@ static int send_login(struct ibmvnic_adapter *adapter)

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
			tx_list_p[i] =
				cpu_to_be64(adapter->tx_scrq[i]->crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
			rx_list_p[i] =
				cpu_to_be64(adapter->rx_scrq[i]->crq_num);
		}
	}

@@ -3884,7 +3879,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
			   ((unsigned long *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
@@ -4252,7 +4247,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);
			   ((unsigned long *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -4411,8 +4406,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
			 (long)be64_to_cpu(crq->request_capability_rsp.number),
			 name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
@@ -4482,7 +4477,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
@@ -4825,8 +4820,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
@@ -5300,8 +5295,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
+6 −5
Original line number Diff line number Diff line
@@ -845,6 +845,7 @@ struct ibmvnic_crq_queue {
	union ibmvnic_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	bool active;
	char name[32];
@@ -876,6 +877,7 @@ struct ibmvnic_sub_crq_queue {
	unsigned int irq;
	unsigned int pool_index;
	int scrq_num;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	struct sk_buff *rx_skb_top;
	struct ibmvnic_adapter *adapter;
@@ -985,7 +987,6 @@ struct ibmvnic_adapter {
	struct ibmvnic_statistics stats;
	dma_addr_t stats_token;
	struct completion stats_done;
	spinlock_t stats_lock;
	int replenish_no_mem;
	int replenish_add_buff_success;
	int replenish_add_buff_failure;
@@ -1080,9 +1081,12 @@ struct ibmvnic_adapter {

	struct tasklet_struct tasklet;
	enum vnic_state state;
	/* Used for serializatin of state field */
	spinlock_t state_lock;
	enum ibmvnic_reset_reason reset_reason;
	spinlock_t rwi_lock;
	struct list_head rwi_list;
	/* Used for serialization of rwi_list */
	spinlock_t rwi_lock;
	struct work_struct ibmvnic_reset;
	struct delayed_work ibmvnic_delayed_reset;
	unsigned long resetting;
@@ -1096,7 +1100,4 @@ struct ibmvnic_adapter {

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;

	/* Used for serializatin of state field */
	spinlock_t state_lock;
};