Revert "s390/ism: fix receive message buffer allocation"
author    Gerd Bayer <gbayer@linux.ibm.com>
          Tue, 9 Apr 2024 11:37:53 +0000 (13:37 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 17 Apr 2024 09:23:33 +0000 (11:23 +0200)
[ Upstream commit d51dc8dd6ab6f93a894ff8b38d3b8d02c98eb9fb ]

This reverts commit 58effa3476536215530c9ec4910ffc981613b413.
Review of this patch was not finished, so it is not ready for
upstreaming.

Signed-off-by: Gerd Bayer <gbayer@linux.ibm.com>
Link: https://lore.kernel.org/r/20240409113753.2181368-1-gbayer@linux.ibm.com
Fixes: 58effa347653 ("s390/ism: fix receive message buffer allocation")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index affb05521e146f3ec6de7ffaf005517b6a0caba7..2c8e964425dc38ca80fa5009b17b4e9dc29bbf10 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -14,8 +14,6 @@
 #include <linux/err.h>
 #include <linux/ctype.h>
 #include <linux/processor.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
 
 #include "ism.h"
 
@@ -294,15 +292,13 @@ static int ism_read_local_gid(struct ism_dev *ism)
 static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
        clear_bit(dmb->sba_idx, ism->sba_bitmap);
-       dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
-                      DMA_FROM_DEVICE);
-       folio_put(virt_to_folio(dmb->cpu_addr));
+       dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
+                         dmb->cpu_addr, dmb->dma_addr);
 }
 
 static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
        unsigned long bit;
-       int rc;
 
        if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
                return -EINVAL;
@@ -319,30 +315,14 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
            test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
                return -EINVAL;
 
-       dmb->cpu_addr =
-               folio_address(folio_alloc(GFP_KERNEL | __GFP_NOWARN |
-                                         __GFP_NOMEMALLOC | __GFP_NORETRY,
-                                         get_order(dmb->dmb_len)));
+       dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+                                          &dmb->dma_addr,
+                                          GFP_KERNEL | __GFP_NOWARN |
+                                          __GFP_NOMEMALLOC | __GFP_NORETRY);
+       if (!dmb->cpu_addr)
+               clear_bit(dmb->sba_idx, ism->sba_bitmap);
 
-       if (!dmb->cpu_addr) {
-               rc = -ENOMEM;
-               goto out_bit;
-       }
-       dmb->dma_addr = dma_map_page(&ism->pdev->dev,
-                                    virt_to_page(dmb->cpu_addr), 0,
-                                    dmb->dmb_len, DMA_FROM_DEVICE);
-       if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
-               rc = -ENOMEM;
-               goto out_free;
-       }
-
-       return 0;
-
-out_free:
-       kfree(dmb->cpu_addr);
-out_bit:
-       clear_bit(dmb->sba_idx, ism->sba_bitmap);
-       return rc;
+       return dmb->cpu_addr ? 0 : -ENOMEM;
 }
 
 int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
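
For context: the hunks above drop the streaming DMA mapping introduced by the
reverted commit and go back to a single coherent allocation. The sketch below
is not part of the patch; "dev", "len" and the helper names are illustrative
placeholders that outline the two patterns side by side.

/*
 * Sketch only, not ism_drv.c code: contrasts the coherent-DMA buffer
 * pattern restored by this revert with the streaming mapping used by
 * the reverted commit.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Restored path: dma_alloc_coherent() returns the CPU address and the
 * device-visible DMA address in one call; no separate mapping or
 * mapping-error check is needed. */
static void *buf_alloc_coherent(struct device *dev, size_t len,
				dma_addr_t *dma_addr)
{
	return dma_alloc_coherent(dev, len, dma_addr,
				  GFP_KERNEL | __GFP_NOWARN |
				  __GFP_NOMEMALLOC | __GFP_NORETRY);
}

static void buf_free_coherent(struct device *dev, size_t len,
			      void *cpu_addr, dma_addr_t dma_addr)
{
	dma_free_coherent(dev, len, cpu_addr, dma_addr);
}

/* Reverted path: allocate ordinary pages via the folio API, then map
 * them for device-to-CPU DMA and check the mapping separately. */
static void *buf_alloc_streaming(struct device *dev, size_t len,
				 dma_addr_t *dma_addr)
{
	struct folio *folio;

	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN |
			    __GFP_NOMEMALLOC | __GFP_NORETRY,
			    get_order(len));
	if (!folio)
		return NULL;

	*dma_addr = dma_map_page(dev, folio_page(folio, 0), 0, len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		folio_put(folio);
		return NULL;
	}
	return folio_address(folio);
}

static void buf_free_streaming(struct device *dev, size_t len,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	dma_unmap_page(dev, dma_addr, len, DMA_FROM_DEVICE);
	folio_put(virt_to_folio(cpu_addr));
}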