static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
					struct net_device *dev)
{
-	if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
-		return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+	struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
+
+	/* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+	 * was true, if tls_device_down is running in parallel, but it's OK,
+	 * because bond_get_slave_by_dev has a NULL check.
+	 */
+	if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
+		return bond_dev_queue_xmit(bond, skb, tls_netdev);
	return bond_tx_drop(dev, skb);
}
#endif
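
For context on the accessor used above: ctx->netdev is now annotated __rcu, every reader
goes through rcu_dereference() inside an RCU read-side critical section, and a NULL result
is tolerated because tls_device_down() may clear the pointer at any time. The bonding
transmit path already runs inside such a critical section (dev_queue_xmit() takes
rcu_read_lock_bh()), which is presumably why no explicit rcu_read_lock() pair appears in
the hunk. A minimal stand-alone sketch of the reader pattern, with hypothetical names and
not part of the patch:

    #include <linux/rcupdate.h>
    #include <linux/netdevice.h>

    struct example_ctx {
            struct net_device __rcu *netdev;   /* cleared asynchronously by the writer */
    };

    /* Returns true if the context is currently bound to @dev. */
    static bool example_ctx_bound_to(struct example_ctx *ctx,
                                     const struct net_device *dev)
    {
            bool bound;

            rcu_read_lock();
            /* May observe NULL if the writer already cleared the pointer. */
            bound = rcu_dereference(ctx->netdev) == dev;
            rcu_read_unlock();

            return bound;
    }
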
	int data_len, qidx, ret = 0, mss;
	struct tls_record_info *record;
	struct chcr_ktls_info *tx_info;
+	struct net_device *tls_netdev;
	struct tls_context *tls_ctx;
	struct sge_eth_txq *q;
	struct adapter *adap;
	mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
	tls_ctx = tls_get_ctx(skb->sk);
-	if (unlikely(tls_ctx->netdev != dev))
+	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+	/* Don't quit on NULL: if tls_device_down is running in parallel,
+	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+	 * true. Rather continue processing this packet.
+	 */
+	if (unlikely(tls_netdev && tls_netdev != dev))
		goto out;
	tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
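
The Chelsio hunk uses the _bh flavour of the same reader: an ndo_start_xmit handler runs
with softirqs disabled, and rcu_dereference_bh() documents that this is the protection
being relied on (and lets lockdep-RCU verify it). Note also that NULL is deliberately not
treated as an error; the packet keeps being processed. A hedged sketch with hypothetical
names, shown with an explicit local_bh_disable() so it is self-contained:

    #include <linux/rcupdate.h>
    #include <linux/bottom_half.h>
    #include <linux/netdevice.h>

    struct example_ctx {
            struct net_device __rcu *netdev;
    };

    /* Mirrors the check above: proceed if the offload is bound to @dev, or if
     * it is being torn down (NULL), matching the "don't quit on NULL" comment.
     */
    static bool example_offload_usable(struct example_ctx *ctx,
                                       const struct net_device *dev)
    {
            struct net_device *cur;
            bool usable;

            local_bh_disable();
            cur = rcu_dereference_bh(ctx->netdev);
            usable = !cur || cur == dev;
            local_bh_enable();

            return usable;
    }
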
	struct tls_offload_context_tx *offload_ctx =
		container_of(work, struct tls_offload_context_tx, destruct_work);
	struct tls_context *ctx = offload_ctx->ctx;
-	struct net_device *netdev = ctx->netdev;
+	struct net_device *netdev;
+
+	/* Safe, because this is the destroy flow, refcount is 0, so
+	 * tls_device_down can't store this field in parallel.
+	 */
+	netdev = rcu_dereference_protected(ctx->netdev,
+					   !refcount_read(&ctx->refcount));
	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
	dev_put(netdev);
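
rcu_dereference_protected() is the update-side accessor: it adds no barriers and no
READ_ONCE(), and instead takes a condition under which concurrent updates are impossible.
Here that condition is that the refcount has already dropped to zero, so tls_device_down()
can no longer store to the field; the condition is only checked under CONFIG_PROVE_RCU.
A minimal sketch of the idiom, with hypothetical names:

    #include <linux/rcupdate.h>
    #include <linux/refcount.h>
    #include <linux/netdevice.h>
    #include <linux/slab.h>

    struct example_ctx {
            refcount_t refcount;
            struct net_device __rcu *netdev;
    };

    /* Destroy path: the last reference is gone, so no writer can race with us
     * and the pointer may be read as a plain value.
     */
    static void example_ctx_destroy(struct example_ctx *ctx)
    {
            struct net_device *netdev =
                    rcu_dereference_protected(ctx->netdev,
                                              !refcount_read(&ctx->refcount));

            if (netdev)
                    dev_put(netdev);
            kfree(ctx);
    }
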
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
+	struct net_device *netdev;
	unsigned long flags;
	bool async_cleanup;
	}
	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
-	async_cleanup = ctx->netdev && ctx->tx_conf == TLS_HW;
+
+	/* Safe, because this is the destroy flow, refcount is 0, so
+	 * tls_device_down can't store this field in parallel.
+	 */
+	netdev = rcu_dereference_protected(ctx->netdev,
+					   !refcount_read(&ctx->refcount));
+
+	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
	if (async_cleanup) {
		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
-	netdev = tls_ctx->netdev;
+	netdev = rcu_dereference_protected(tls_ctx->netdev,
+					   lockdep_is_held(&device_offload_lock));
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
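
Reads done while holding device_offload_lock use rcu_dereference_protected() with
lockdep_is_held(): the lock, not an RCU read-side critical section, is what excludes the
writer that clears ->netdev, and PROVE_RCU can check exactly that. A small sketch of the
idiom with a hypothetical rwsem standing in for device_offload_lock (in the sketch the
clearing side is assumed to take the rwsem for writing):

    #include <linux/rcupdate.h>
    #include <linux/rwsem.h>
    #include <linux/netdevice.h>

    static DECLARE_RWSEM(example_offload_lock);

    struct example_ctx {
            struct net_device __rcu *netdev;
    };

    static bool example_resync(struct example_ctx *ctx)
    {
            struct net_device *netdev;
            bool offloaded;

            down_read(&example_offload_lock);
            /* Holding the rwsem keeps ->netdev stable for the duration. */
            netdev = rcu_dereference_protected(ctx->netdev,
                                               lockdep_is_held(&example_offload_lock));
            offloaded = netdev != NULL;
            up_read(&example_offload_lock);

            return offloaded;
    }
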
	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
-	netdev = READ_ONCE(tls_ctx->netdev);
+	netdev = rcu_dereference(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
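
In the RX resync path the old READ_ONCE() becomes rcu_dereference(), which keeps the
READ_ONCE()-style ordering and additionally gets sparse __rcu checking plus a lockdep
assertion that an RCU read-side critical section is held (the rcu_read_lock() just above).
A tiny sketch of what that buys, with hypothetical names:

    #include <linux/rcupdate.h>

    struct example { int val; };

    struct holder {
            struct example __rcu *cur;
    };

    static int holder_peek(struct holder *h)
    {
            struct example *e;
            int val = 0;

            rcu_read_lock();
            /* Unlike a bare READ_ONCE(), this line is rejected by sparse if
             * ->cur lacks the __rcu annotation, and warns under PROVE_RCU if
             * called outside rcu_read_lock().
             */
            e = rcu_dereference(h->cur);
            if (e)
                    val = e->val;
            rcu_read_unlock();

            return val;
    }
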
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
-		ctx->netdev = netdev;
+		RCU_INIT_POINTER(ctx->netdev, netdev);
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);
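
RCU_INIT_POINTER() skips the release barrier that rcu_assign_pointer() would emit. That is
fine at this point because the context is not yet reachable by any reader; it is only
published afterwards, when it is added to tls_device_list. A sketch of the initialization
idiom, with hypothetical names:

    #include <linux/rcupdate.h>
    #include <linux/netdevice.h>
    #include <linux/slab.h>

    struct example_ctx {
            struct list_head list;
            struct net_device __rcu *netdev;
    };

    static struct example_ctx *example_ctx_create(struct net_device *dev)
    {
            struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

            if (!ctx)
                    return NULL;

            dev_hold(dev);
            /* No reader can see ctx yet, so no memory barrier is needed;
             * RCU_INIT_POINTER() documents exactly that.
             */
            RCU_INIT_POINTER(ctx->netdev, dev);
            return ctx;     /* caller publishes ctx to a shared list later */
    }
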
	struct net_device *netdev;
	down_read(&device_offload_lock);
-	netdev = tls_ctx->netdev;
+	netdev = rcu_dereference_protected(tls_ctx->netdev,
+					   lockdep_is_held(&device_offload_lock));
	if (!netdev)
		goto out;
	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
-		tls_ctx->netdev = NULL;
+		rcu_assign_pointer(tls_ctx->netdev, NULL);
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
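
rcu_assign_pointer() is the store-side counterpart of rcu_dereference(): when publishing a
non-NULL object it adds the release ordering that makes the object's earlier initialization
visible to readers, and storing a constant NULL (as above) is recognized as needing no
barrier at all. A minimal writer sketch with hypothetical names:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct example { int val; };

    struct holder {
            struct example __rcu *cur;
    };

    /* Publish a fully initialized object; readers see either NULL or a
     * completely initialized *e, never a half-written one.
     */
    static int holder_publish(struct holder *h, int val)
    {
            struct example *e = kzalloc(sizeof(*e), GFP_KERNEL);

            if (!e)
                    return -ENOMEM;
            e->val = val;                   /* initialize first ...          */
            rcu_assign_pointer(h->cur, e);  /* ... then publish with release */
            return 0;
    }

    /* Unpublish; freeing a previously published object would additionally
     * need kfree_rcu() or synchronize_rcu(), omitted in this sketch.
     */
    static void holder_unpublish(struct holder *h)
    {
            rcu_assign_pointer(h->cur, NULL);
    }
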
	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
-		if (ctx->netdev != netdev ||
+		struct net_device *ctx_netdev =
+			rcu_dereference_protected(ctx->netdev,
+						  lockdep_is_held(&device_offload_lock));
+
+		if (ctx_netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;
		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
-		WRITE_ONCE(ctx->netdev, NULL);
+		rcu_assign_pointer(ctx->netdev, NULL);
		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
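
This last hunk is the heart of the change: tls_device_down() unpublishes the pointer with
rcu_assign_pointer(ctx->netdev, NULL) before the driver state is released, so the resync
paths (which now read it under RCU) see either the old device or NULL, never a stale one.
The teardown ordering this relies on, sketched with hypothetical names (the function itself
presumably keeps its existing synchronize_net() before calling tls_dev_del()):

    #include <linux/rcupdate.h>
    #include <linux/netdevice.h>

    struct example_ctx {
            struct net_device __rcu *netdev;
    };

    /* Caller is assumed to be serialized against other writers. */
    static void example_teardown(struct example_ctx *ctx)
    {
            struct net_device *dev =
                    rcu_dereference_protected(ctx->netdev, 1);

            /* 1. Unpublish: new readers observe NULL and back off. */
            rcu_assign_pointer(ctx->netdev, NULL);

            /* 2. Wait for readers that already fetched the old value. */
            synchronize_net();

            /* 3. Only now release the state those readers might have used. */
            dev_put(dev);
    }
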