Commit faa91b83 authored by Tirthendu Sarkar's avatar Tirthendu Sarkar Committed by Alexei Starovoitov
Browse files

xsk: move xdp_buff's data length check to xsk_rcv_check



If the data in xdp_buff exceeds the xsk frame length, the packet needs
to be dropped. This check is currently being done in __xsk_rcv(). Move
the described logic to xsk_rcv_check() so that such an xdp_buff will
only be dropped if the application does not support multi-buffer
(absence of XDP_USE_SG bind flag). This is applicable for all cases:
copy mode, zero copy mode as well as skb mode.

Signed-off-by: Tirthendu Sarkar <tirthendu.sarkar@intel.com>
Link: https://lore.kernel.org/r/20230719132421.584801-5-maciej.fijalkowski@intel.com


Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 556444c4
Loading
Loading
Loading
Loading
+13 −14
Original line number Diff line number Diff line
@@ -177,18 +177,11 @@ static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb;
	struct xdp_buff *xsk_xdp;
	int err;
	u32 len;

	len = xdp->data_end - xdp->data;
	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
@@ -224,7 +217,7 @@ static bool xsk_is_bound(struct xdp_sock *xs)
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;
@@ -232,6 +225,11 @@ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}
@@ -245,12 +243,13 @@ static void xsk_flush(struct xdp_sock *xs)

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp);
	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		err = __xsk_rcv(xs, xdp);
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
@@ -259,10 +258,10 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;
	u32 len;

	err = xsk_rcv_check(xs, xdp);
	err = xsk_rcv_check(xs, xdp, len);
	if (err)
		return err;

@@ -271,7 +270,7 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
		return xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp);
	err = __xsk_rcv(xs, xdp, len);
	if (!err)
		xdp_return_buff(xdp);
	return err;