Unverified Commit 7e7169f7 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!7903 virtio_ring: keep used_wrap_counter in vq->last_used_idx

Merge Pull Request from: @ci-robot 
 
PR sync from: Zheng Zengkai <zhengzengkai@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/YTXOSFNULZ3BUKQL37FIUYQUTONZHB7V/ 
Backport 5.19-rc5 patch to fix kernel panic caused by netperf/iperf3 test.

huangjie.albert (1):
  virtio_ring : keep used_wrap_counter in vq->last_used_idx


-- 
2.20.1
 
https://gitee.com/openeuler/kernel/issues/I9RQAS 
 
Link: https://gitee.com/openeuler/kernel/pulls/7903

 

Reviewed-by: default avatarKevin Zhu <zhukeqian1@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents 9c27a05e f0d6dbed
Loading
Loading
Loading
Loading
+48 −28
Original line number Diff line number Diff line
@@ -110,7 +110,12 @@ struct vring_virtqueue {
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	/* Last used index  we've seen.
	 * for split ring, it just contains last used index
	 * for packed ring:
	 * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index.
	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter.
	 */
	u16 last_used_idx;

	union {
@@ -149,9 +154,6 @@ struct vring_virtqueue {
			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

@@ -911,6 +913,16 @@ static struct virtqueue *vring_create_virtqueue_split(
 * Packed ring specific functions - *_packed().
 */

/*
 * Extract the device (used) ring wrap counter that is stored in bit
 * VRING_PACKED_EVENT_F_WRAP_CTR of the combined last_used_idx value.
 */
static inline bool packed_used_wrap_counter(u16 last_used_idx)
{
	u16 wrap_bit = 1 << VRING_PACKED_EVENT_F_WRAP_CTR;

	return (last_used_idx & wrap_bit) != 0;
}

/*
 * Extract the plain last-used index from the combined last_used_idx
 * value, i.e. all bits below VRING_PACKED_EVENT_F_WRAP_CTR.
 */
static inline u16 packed_last_used(u16 last_used_idx)
{
	u16 idx_mask = (1 << VRING_PACKED_EVENT_F_WRAP_CTR) - 1;

	return last_used_idx & idx_mask;
}
 
static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
				     struct vring_desc_extra_packed *state)
{
@@ -1349,8 +1361,14 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,

/*
 * Check whether the device has published at least one more used
 * descriptor that the driver has not yet consumed.
 *
 * NOTE(review): the diff markers were stripped here, leaving both the
 * old body and the new body merged into one invalid function (two
 * return statements). This is the intended post-patch version: the
 * combined index/wrap-counter word is loaded once with READ_ONCE()
 * because it can be updated concurrently by virtqueue_get_buf_ctx_packed(),
 * then split into its index and wrap-counter components.
 */
static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
	u16 last_used;
	u16 last_used_idx;
	bool used_wrap_counter;

	last_used_idx = READ_ONCE(vq->last_used_idx);
	last_used = packed_last_used(last_used_idx);
	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
	return is_used_desc_packed(vq, last_used, used_wrap_counter);
}

static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
@@ -1358,7 +1376,8 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
					  void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used, id;
	u16 last_used, id, last_used_idx;
	bool used_wrap_counter;
	void *ret;

	START_USE(vq);
@@ -1377,7 +1396,9 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
	/* Only get used elements after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = vq->last_used_idx;
	last_used_idx = READ_ONCE(vq->last_used_idx);
	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
	last_used = packed_last_used(last_used_idx);
	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);

@@ -1394,12 +1415,15 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
	ret = vq->packed.desc_state[id].data;
	detach_buf_packed(vq, id, ctx);

	vq->last_used_idx += vq->packed.desc_state[id].num;
	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
		vq->last_used_idx -= vq->packed.vring.num;
		vq->packed.used_wrap_counter ^= 1;
	last_used += vq->packed.desc_state[id].num;
	if (unlikely(last_used >= vq->packed.vring.num)) {
		last_used -= vq->packed.vring.num;
		used_wrap_counter ^= 1;
	}

	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
	WRITE_ONCE(vq->last_used_idx, last_used);

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
@@ -1408,9 +1432,7 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
		virtio_store_mb(vq->weak_barriers,
				&vq->packed.vring.driver->off_wrap,
				cpu_to_le16(vq->last_used_idx |
					(vq->packed.used_wrap_counter <<
					 VRING_PACKED_EVENT_F_WRAP_CTR)));
				cpu_to_le16(vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

@@ -1442,9 +1464,7 @@ static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)

	if (vq->event) {
		vq->packed.vring.driver->off_wrap =
			cpu_to_le16(vq->last_used_idx |
				(vq->packed.used_wrap_counter <<
				 VRING_PACKED_EVENT_F_WRAP_CTR));
			cpu_to_le16(vq->last_used_idx);
		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
@@ -1461,8 +1481,7 @@ static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
	}

	END_USE(vq);
	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
			VRING_PACKED_EVENT_F_WRAP_CTR);
	return vq->last_used_idx;
}

static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
@@ -1480,7 +1499,7 @@ static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 used_idx, wrap_counter;
	u16 used_idx, wrap_counter, last_used_idx;
	u16 bufs;

	START_USE(vq);
@@ -1493,9 +1512,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
	if (vq->event) {
		/* TODO: tune this threshold */
		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
		wrap_counter = vq->packed.used_wrap_counter;
		last_used_idx = READ_ONCE(vq->last_used_idx);
		wrap_counter = packed_used_wrap_counter(last_used_idx);

		used_idx = vq->last_used_idx + bufs;
		used_idx = packed_last_used(last_used_idx) + bufs;
		if (used_idx >= vq->packed.vring.num) {
			used_idx -= vq->packed.vring.num;
			wrap_counter ^= 1;
@@ -1525,9 +1545,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
	 */
	virtio_mb(vq->weak_barriers);

	if (is_used_desc_packed(vq,
				vq->last_used_idx,
				vq->packed.used_wrap_counter)) {
	last_used_idx = READ_ONCE(vq->last_used_idx);
	wrap_counter = packed_used_wrap_counter(last_used_idx);
	used_idx = packed_last_used(last_used_idx);
	if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
		END_USE(vq);
		return false;
	}
@@ -1614,7 +1635,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	vq->num_added = 0;
	vq->packed_ring = true;
	vq->use_dma_api = vring_use_dma_api(vdev);
@@ -1644,7 +1665,6 @@ static struct virtqueue *vring_create_virtqueue_packed(

	vq->packed.next_avail_idx = 0;
	vq->packed.avail_wrap_counter = 1;
	vq->packed.used_wrap_counter = 1;
	vq->packed.event_flags_shadow = 0;
	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;