RDMA/mlx4: Avoid flush_scheduled_work() usage
author Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Fri, 6 May 2022 13:50:13 +0000 (22:50 +0900)
committer Jason Gunthorpe <jgg@nvidia.com>
Fri, 20 May 2022 14:21:00 +0000 (11:21 -0300)
Flushing system-wide workqueues is dangerous and will be forbidden.
Replace system_wq with local cm_wq.

Link: https://lore.kernel.org/r/22f7183b-cc16-5a34-e879-7605f5efc6e6@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/mlx4/cm.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h

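For context, the general pattern this commit applies, shown as a minimal, hypothetical sketch (module and identifier names such as example_wq and example_work are made up, not taken from the driver): work is queued on a driver-private workqueue instead of system_wq, so a cleanup-time flush waits only for that driver's own items rather than for everything ever put on the system workqueue.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* All identifiers below are hypothetical, for illustration only. */
static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
        /* work body elided */
}
static DECLARE_DELAYED_WORK(example_work, example_fn);

static int __init example_init(void)
{
        /* A driver-private queue; flushing it never waits on unrelated
         * system_wq work, which is what made flush_scheduled_work() risky. */
        example_wq = alloc_workqueue("example_wq", 0, 0);
        if (!example_wq)
                return -ENOMEM;

        /* queue_delayed_work(wq, ...) replaces schedule_delayed_work(...),
         * which always targets system_wq. */
        queue_delayed_work(example_wq, &example_work, msecs_to_jiffies(100));
        return 0;
}

static void __exit example_exit(void)
{
        /* Stop the delayed item (timer and execution) before tearing down. */
        cancel_delayed_work_sync(&example_work);
        /* flush_workqueue(example_wq) waits only for this driver's items;
         * destroy_workqueue() then drains and frees the queue. */
        flush_workqueue(example_wq);
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
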
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 4aff1c8298b1e84a143982b266c0ed11c64583f3..12b481d138cf453dc9e50259e437e918368d9f2c 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -80,6 +80,7 @@ struct cm_req_msg {
        union ib_gid primary_path_sgid;
 };
 
+static struct workqueue_struct *cm_wq;
 
 static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
 {
@@ -288,10 +289,10 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
        /*make sure that there is no schedule inside the scheduled work.*/
        if (!sriov->is_going_down && !id->scheduled_delete) {
                id->scheduled_delete = 1;
-               schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+               queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
        } else if (id->scheduled_delete) {
                /* Adjust timeout if already scheduled */
-               mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+               mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
        }
        spin_unlock_irqrestore(&sriov->going_down_lock, flags);
        spin_unlock(&sriov->id_map_lock);
@@ -370,7 +371,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
                        ret =  xa_err(item);
                else
                        /* If a retry, adjust delayed work */
-                       mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+                       mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
                goto err_or_exists;
        }
        xa_unlock(&sriov->xa_rej_tmout);
@@ -393,7 +394,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
                return xa_err(old);
        }
 
-       schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+       queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 
        return 0;
 
@@ -500,7 +501,7 @@ static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
        xa_lock(&sriov->xa_rej_tmout);
        xa_for_each(&sriov->xa_rej_tmout, id, item) {
                if (slave < 0 || slave == item->slave) {
-                       mod_delayed_work(system_wq, &item->timeout, 0);
+                       mod_delayed_work(cm_wq, &item->timeout, 0);
                        flush_needed = true;
                        ++cnt;
                }
@@ -508,7 +509,7 @@ static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
        xa_unlock(&sriov->xa_rej_tmout);
 
        if (flush_needed) {
-               flush_scheduled_work();
+               flush_workqueue(cm_wq);
                pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
                         cnt, slave);
        }
@@ -540,7 +541,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
        spin_unlock(&sriov->id_map_lock);
 
        if (need_flush)
-               flush_scheduled_work(); /* make sure all timers were flushed */
+               flush_workqueue(cm_wq); /* make sure all timers were flushed */
 
        /* now, remove all leftover entries from databases*/
        spin_lock(&sriov->id_map_lock);
@@ -587,3 +588,17 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 
        rej_tmout_xa_cleanup(sriov, slave);
 }
+
+int mlx4_ib_cm_init(void)
+{
+       cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
+       if (!cm_wq)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void mlx4_ib_cm_destroy(void)
+{
+       destroy_workqueue(cm_wq);
+}
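
A note on the two queueing helpers used in cm.c above: queue_delayed_work() is a no-op (it returns false) when the delayed work is already pending, while mod_delayed_work() re-arms the timer whether or not the item is pending, which is why the "adjust timeout if already scheduled" paths use it. A small hypothetical sketch of the difference, assuming a queue created elsewhere with alloc_workqueue() (demo_* names are illustrative, not from the driver):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical names; demo_wq is assumed to be created during driver init. */
static struct workqueue_struct *demo_wq;

static void demo_timeout_fn(struct work_struct *work)
{
        /* timeout handling elided */
}
static DECLARE_DELAYED_WORK(demo_timeout, demo_timeout_fn);

static void demo_arm(unsigned int delay_ms)
{
        /* Does nothing if demo_timeout is already pending on demo_wq. */
        queue_delayed_work(demo_wq, &demo_timeout, msecs_to_jiffies(delay_ms));
}

static void demo_rearm(unsigned int delay_ms)
{
        /* Moves the expiry of an already-pending item, or queues it anew. */
        mod_delayed_work(demo_wq, &demo_timeout, msecs_to_jiffies(delay_ms));
}
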
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c448168375db12dafc3da498b1c85879707407d8..ba47874f90d381695f06d732bc1e520400c7c647 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -3307,10 +3307,14 @@ static int __init mlx4_ib_init(void)
        if (!wq)
                return -ENOMEM;
 
-       err = mlx4_ib_mcg_init();
+       err = mlx4_ib_cm_init();
        if (err)
                goto clean_wq;
 
+       err = mlx4_ib_mcg_init();
+       if (err)
+               goto clean_cm;
+
        err = mlx4_register_interface(&mlx4_ib_interface);
        if (err)
                goto clean_mcg;
@@ -3320,6 +3324,9 @@ static int __init mlx4_ib_init(void)
 clean_mcg:
        mlx4_ib_mcg_destroy();
 
+clean_cm:
+       mlx4_ib_cm_destroy();
+
 clean_wq:
        destroy_workqueue(wq);
        return err;
@@ -3329,6 +3336,7 @@ static void __exit mlx4_ib_cleanup(void)
 {
        mlx4_unregister_interface(&mlx4_ib_interface);
        mlx4_ib_mcg_destroy();
+       mlx4_ib_cm_destroy();
        destroy_workqueue(wq);
 }
 
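Pieced together from the two main.c hunks, the resulting init and exit ordering is roughly the sketch below; only the calls visible in this diff are shown, and everything else in mlx4_ib_init()/mlx4_ib_cleanup() is elided:

static int __init mlx4_ib_init(void)
{
        int err;

        /* ... create the event workqueue 'wq' (not shown in this diff) ... */

        err = mlx4_ib_cm_init();             /* new: create cm_wq first */
        if (err)
                goto clean_wq;

        err = mlx4_ib_mcg_init();
        if (err)
                goto clean_cm;               /* unwind cm_wq if mcg init fails */

        err = mlx4_register_interface(&mlx4_ib_interface);
        if (err)
                goto clean_mcg;

        /* ... */
        return 0;

clean_mcg:
        mlx4_ib_mcg_destroy();
clean_cm:
        mlx4_ib_cm_destroy();
clean_wq:
        destroy_workqueue(wq);
        return err;
}

static void __exit mlx4_ib_cleanup(void)
{
        mlx4_unregister_interface(&mlx4_ib_interface);
        mlx4_ib_mcg_destroy();
        mlx4_ib_cm_destroy();                /* new: tear down cm_wq on exit */
        destroy_workqueue(wq);
}
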
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index d84023b4b1b8f26dc7820a637130e35a8d777776..6a3b0f121045e18948d1e14abecd469b8061f8b3 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -937,4 +937,7 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
                                       int *num_of_mtts);
 
+int mlx4_ib_cm_init(void);
+void mlx4_ib_cm_destroy(void);
+
 #endif /* MLX4_IB_H */