net/mlx5: DR, Read ICM memory into dedicated buffer
author     Yevgeny Kliteynik <kliteyn@nvidia.com>
           Mon, 14 Nov 2022 22:33:25 +0000 (00:33 +0200)
committer  Saeed Mahameed <saeedm@nvidia.com>
           Fri, 14 Apr 2023 22:06:21 +0000 (15:06 -0700)
Instead of using the write buffer for reading, we will use a dedicated
buffer that is used only for reading ICM memory.
Due to the new support for args, we can have a case where pending_wc is
an odd number, and since reads go into the same write buffer, it is
possible to overwrite the next write on the same slot.
For example:
pending_wc is 17, so the write buffer is:
   | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
and we have requests as follows:
   r wr wr wr wr wr wr wr wr
Now, the first read will land in the slot of the last write, because we
use the same buffer for reads and writes. Since this happens before that
write is posted to the HW, we end up with wrong data in the ICM area.
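
To make the race concrete, here is a small standalone userspace sketch
(not the driver code; the slot count, sizes and names are made up for
illustration) of a read clobbering a staged write when it reuses the
write buffer, and of a dedicated sync buffer avoiding that:

#include <stdio.h>
#include <string.h>

#define RING_SLOTS 8			/* illustrative, not the real ring size */
#define SLOT_SIZE  16

static char ring[RING_SLOTS][SLOT_SIZE];	/* shared write buffer */
static char sync_buff[SLOT_SIZE];		/* dedicated read buffer (the fix) */

/* Pretend the device completes a read by DMA-ing ICM contents into dst. */
static void hw_complete_read(char *dst)
{
	static const char icm_data[SLOT_SIZE] = "ICM-READBACK";

	memcpy(dst, icm_data, SLOT_SIZE);
}

int main(void)
{
	/* A write request is staged in the last slot but not yet posted. */
	snprintf(ring[RING_SLOTS - 1], SLOT_SIZE, "pending-write");

	/* Old behaviour: the read reuses the same slot and clobbers it. */
	hw_complete_read(ring[RING_SLOTS - 1]);
	printf("write slot, shared buffer:    %s\n", ring[RING_SLOTS - 1]);

	/* New behaviour: the read lands in sync_buff, the write survives. */
	snprintf(ring[RING_SLOTS - 1], SLOT_SIZE, "pending-write");
	hw_complete_read(sync_buff);
	printf("write slot, dedicated buffer: %s\n", ring[RING_SLOTS - 1]);

	return 0;
}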

Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index d7c7363f9096be63fb13bd68dff009f7ba5cf0cb..d052d469d4dfea5570ff2f1b05dc4f9dc3ff1b00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -602,9 +602,10 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
 
        send_ring->pending_wqe++;
        send_info->read.length = send_info->write.length;
-       /* Read into the same write area */
-       send_info->read.addr = (uintptr_t)send_info->write.addr;
-       send_info->read.lkey = send_ring->mr->mkey;
+
+       /* Read into dedicated sync buffer */
+       send_info->read.addr = (uintptr_t)send_ring->sync_mr->dma_addr;
+       send_info->read.lkey = send_ring->sync_mr->mkey;
 
        if (send_ring->pending_wqe % send_ring->signal_th == 0)
                send_info->read.send_flags = IB_SEND_SIGNALED;
@@ -1288,16 +1289,25 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
                goto free_mem;
        }
 
+       dmn->send_ring->sync_buff = kzalloc(dmn->send_ring->max_post_send_size,
+                                           GFP_KERNEL);
+       if (!dmn->send_ring->sync_buff) {
+               ret = -ENOMEM;
+               goto clean_mr;
+       }
+
        dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
                                            dmn->pdn, dmn->send_ring->sync_buff,
-                                           MIN_READ_SYNC);
+                                           dmn->send_ring->max_post_send_size);
        if (!dmn->send_ring->sync_mr) {
                ret = -ENOMEM;
-               goto clean_mr;
+               goto free_sync_mem;
        }
 
        return 0;
 
+free_sync_mem:
+       kfree(dmn->send_ring->sync_buff);
 clean_mr:
        dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
 free_mem:
@@ -1320,6 +1330,7 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
        dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
        dr_dereg_mr(dmn->mdev, send_ring->mr);
        kfree(send_ring->buf);
+       kfree(send_ring->sync_buff);
        kfree(send_ring);
 }
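
A condensed standalone sketch of the alloc/teardown ordering the hunk
above establishes (function and type names here are illustrative
stand-ins, not the driver's API): the buffer is allocated before the MR
is registered, errors unwind in reverse order, and on teardown the MR is
released before its backing memory is freed.

#include <stdlib.h>

struct fake_mr { void *addr; };		/* stand-in for struct mlx5dr_mr */

static struct fake_mr *reg_mr(void *buf)
{
	struct fake_mr *mr = malloc(sizeof(*mr));

	if (mr)
		mr->addr = buf;
	return mr;
}

static int ring_alloc(void **sync_buff, struct fake_mr **sync_mr, size_t size)
{
	*sync_buff = calloc(1, size);		/* kzalloc() in the driver */
	if (!*sync_buff)
		return -1;

	*sync_mr = reg_mr(*sync_buff);		/* dr_reg_mr() in the driver */
	if (!*sync_mr) {
		free(*sync_buff);		/* undo only what succeeded */
		return -1;
	}
	return 0;
}

static void ring_free(void *sync_buff, struct fake_mr *sync_mr)
{
	free(sync_mr);				/* dr_dereg_mr() in the driver ... */
	free(sync_buff);			/* ... before freeing the buffer */
}

int main(void)
{
	void *buf;
	struct fake_mr *mr;

	if (!ring_alloc(&buf, &mr, 4096))
		ring_free(buf, mr);
	return 0;
}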
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 7b35f78a84a2ec058500cc3328dc366e6fd70925..81d7ac6d6258b5605ac18c6b9aea58d1291a5559 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1429,9 +1429,6 @@ struct mlx5dr_mr {
        size_t size;
 };
 
-#define MAX_SEND_CQE           64
-#define MIN_READ_SYNC          64
-
 struct mlx5dr_send_ring {
        struct mlx5dr_cq *cq;
        struct mlx5dr_qp *qp;
@@ -1446,7 +1443,7 @@ struct mlx5dr_send_ring {
        u32 tx_head;
        void *buf;
        u32 buf_size;
-       u8 sync_buff[MIN_READ_SYNC];
+       u8 *sync_buff;
        struct mlx5dr_mr *sync_mr;
        spinlock_t lock; /* Protect the data path of the send ring */
        bool err_state; /* send_ring is not usable in err state */