Commit 8973854b authored by Joshua Washington, committed by Wang Liang
Browse files

gve: guard XSK operations on the existence of queues

stable inclusion
from stable-v6.6.70
commit 771d66f2bd8c4dba1286a9163ab982cecd825718
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBJ6P5
CVE: CVE-2024-57933

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=771d66f2bd8c4dba1286a9163ab982cecd825718



--------------------------------

commit 40338d7987d810fcaa95c500b1068a52b08eec9b upstream.

This patch predicates the enabling and disabling of XSK pools on the
existence of queues. As it stands, if the interface is down, disabling
or enabling XSK pools would result in a crash, as the RX queue pointer
would be NULL. XSK pool registration will occur as part of the next
interface up.

Similarly, xsk_wakeup needs to be guarded against queues disappearing
while the function is executing, so a check against the
GVE_PRIV_FLAGS_NAPI_ENABLED flag is added to synchronize with the
disabling of the bit and the synchronize_net() in gve_turndown.

Fixes: fd8e4032 ("gve: Add AF_XDP zero-copy support for GQI-QPL format")
Cc: stable@vger.kernel.org
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Shailend Chand <shailend@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Wang Liang <wangliang74@huawei.com>
parent f8c09dd6
Loading
Loading
Loading
Loading
+10 −12
Original line number Diff line number Diff line
@@ -1528,8 +1528,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
	if (err)
		return err;

	/* If XDP prog is not installed, return */
	if (!priv->xdp_prog)
	/* If XDP prog is not installed or interface is down, return. */
	if (!priv->xdp_prog || !netif_running(dev))
		return 0;

	rx = &priv->rx[qid];
@@ -1574,21 +1574,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
	if (qid >= priv->rx_cfg.num_queues)
		return -EINVAL;

	/* If XDP prog is not installed, unmap DMA and return */
	if (!priv->xdp_prog)
		goto done;

	tx_qid = gve_xdp_tx_queue_id(priv, qid);
	if (!netif_running(dev)) {
		priv->rx[qid].xsk_pool = NULL;
		xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
		priv->tx[tx_qid].xsk_pool = NULL;
	/* If XDP prog is not installed or interface is down, unmap DMA and
	 * return.
	 */
	if (!priv->xdp_prog || !netif_running(dev))
		goto done;
	}

	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
	napi_disable(napi_rx); /* make sure current rx poll is done */

	tx_qid = gve_xdp_tx_queue_id(priv, qid);
	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
	napi_disable(napi_tx); /* make sure current tx poll is done */

@@ -1616,6 +1611,9 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
	struct gve_priv *priv = netdev_priv(dev);
	int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);

	if (!gve_get_napi_enabled(priv))
		return -ENETDOWN;

	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
		return -EINVAL;