git.baikalelectronics.ru Git - kernel.git/commitdiff
net/mlx5: DR, Add a spinlock to protect the send ring
author: Alex Vesker <valex@mellanox.com>
Wed, 20 May 2020 15:09:14 +0000 (18:09 +0300)
committer: Saeed Mahameed <saeedm@mellanox.com>
Thu, 28 May 2020 01:13:51 +0000 (18:13 -0700)
Adding this lock will allow writing steering entries without
locking the dr_domain and allow parallel insertion.

Signed-off-by: Alex Vesker <valex@mellanox.com>
Reviewed-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h

index b8d97d44be7b3f7c0f9b2ac5a9dff35bc6beb5d1..f421013b0b542b1c0895f4b0b30c363df36641c4 100644 (file)
@@ -357,9 +357,11 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
        u32 buff_offset;
        int ret;
 
+       spin_lock(&send_ring->lock);
+
        ret = dr_handle_pending_wc(dmn, send_ring);
        if (ret)
-               return ret;
+               goto out_unlock;
 
        if (send_info->write.length > dmn->info.max_inline_size) {
                buff_offset = (send_ring->tx_head &
@@ -377,7 +379,9 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
        dr_fill_data_segs(send_ring, send_info);
        dr_post_send(send_ring->qp, send_info);
 
-       return 0;
+out_unlock:
+       spin_unlock(&send_ring->lock);
+       return ret;
 }
 
 static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
@@ -563,9 +567,7 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
        send_info.remote_addr = action->rewrite.chunk->mr_addr;
        send_info.rkey = action->rewrite.chunk->rkey;
 
-       mutex_lock(&dmn->mutex);
        ret = dr_postsend_icm_data(dmn, &send_info);
-       mutex_unlock(&dmn->mutex);
 
        return ret;
 }
@@ -886,6 +888,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
        init_attr.pdn = dmn->pdn;
        init_attr.uar = dmn->uar;
        init_attr.max_send_wr = QUEUE_SIZE;
+       spin_lock_init(&dmn->send_ring->lock);
 
        dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
        if (!dmn->send_ring->qp)  {
@@ -990,7 +993,9 @@ int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
                        return ret;
        }
 
+       spin_lock(&send_ring->lock);
        ret = dr_handle_pending_wc(dmn, send_ring);
+       spin_unlock(&send_ring->lock);
 
        return ret;
 }
index 984783238baa6126f9e6b191943cf1cea6fec37c..b6061c639cb13af74e777142037a58c7651374e0 100644 (file)
@@ -1043,6 +1043,7 @@ struct mlx5dr_send_ring {
        struct ib_wc wc[MAX_SEND_CQE];
        u8 sync_buff[MIN_READ_SYNC];
        struct mlx5dr_mr *sync_mr;
+       spinlock_t lock; /* Protect the data path of the send ring */
 };
 
 int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);