Commit ed3c9a2f authored by Jakub Kicinski, committed by David S. Miller
Browse files

net: tls: make the offload check helper take skb not socket



All callers of tls_is_sk_tx_device_offloaded() currently do
an equivalent of:

 if (skb->sk && tls_is_skb_tx_device_offloaded(skb->sk))

Have the helper accept skb and do the skb->sk check locally.
Two drivers have local static inlines with similar wrappers
already.

While at it change the ifdef condition to TLS_DEVICE.
Only TLS_DEVICE selects SOCK_VALIDATE_XMIT, so the two are
equivalent. This makes removing the duplicated IS_ENABLED()
check in funeth more obviously correct.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Maxim Mikityanskiy <maxtram95@gmail.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Acked-by: Tariq Toukan <tariqt@nvidia.com>
Acked-by: Dimitris Michailidis <dmichail@fungible.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 580b7fe5
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -5442,7 +5442,7 @@ static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *sk
{
	struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);

	/* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
	/* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded
	 * was true, if tls_device_down is running in parallel, but it's OK,
	 * because bond_get_slave_by_dev has a NULL check.
	 */
@@ -5461,7 +5461,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
		return NETDEV_TX_OK;

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
	if (tls_is_skb_tx_device_offloaded(skb))
		return bond_tls_device_xmit(bond, skb, dev);
#endif

+1 −1
Original line number Diff line number Diff line
@@ -1175,7 +1175,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
		txq = netdev_pick_tx(dev, skb, sb_dev);
		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
		    skb->encapsulation ||
		    cxgb4_is_ktls_skb(skb) ||
		    tls_is_skb_tx_device_offloaded(skb) ||
		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
			txq = txq % pi->nqsets;

+0 −5
Original line number Diff line number Diff line
@@ -497,11 +497,6 @@ struct cxgb4_uld_info {
#endif
};

static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
{
	return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
}

void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
+1 −1
Original line number Diff line number Diff line
@@ -1530,7 +1530,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#endif /* CHELSIO_IPSEC_INLINE */

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	if (cxgb4_is_ktls_skb(skb) &&
	if (tls_is_skb_tx_device_offloaded(skb) &&
	    (skb->len - skb_tcp_all_headers(skb)))
		return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
+1 −1
Original line number Diff line number Diff line
@@ -1946,7 +1946,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
	tls_ctx = tls_get_ctx(skb->sk);
	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
	/* Don't quit on NULL: if tls_device_down is running in parallel,
	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
	 * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
	 * true. Rather continue processing this packet.
	 */
	if (unlikely(tls_netdev && tls_netdev != dev))
Loading