Revert "i915: fix remap_io_sg to verify the pgprot"
author		Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 19 May 2021 15:55:57 +0000 (05:55 -1000)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 19 May 2021 15:55:57 +0000 (05:55 -1000)
This reverts commit b12d691ea5e01db42ccf3b4207e57cb3ce7cfe91.

It turns out this is not ready for primetime yet.  The intentions are
good, but using remap_pfn_range() requires that there is nothing already
mapped in the area, and the i915 code seems to very much intentionally
remap the same area multiple times.

That will then just trigger the

                BUG_ON(!pte_none(*pte));

in mm/memory.c: remap_pte_range().
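
[Editor's note: for reference, a trimmed sketch of that PTE-population loop in
mm/memory.c:remap_pte_range() from this era (simplified, not the exact upstream
source). Every slot in the range is asserted to be empty before a new special
PTE is written, so remapping an already-populated range trips the BUG_ON:]

        static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned long pfn, pgprot_t prot)
        {
                pte_t *pte, *mapped_pte;
                spinlock_t *ptl;

                mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
                if (!pte)
                        return -ENOMEM;
                do {
                        /* A second remap of the same range finds a
                         * populated PTE here and panics. */
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, addr, pte,
                                   pte_mkspecial(pfn_pte(pfn, prot)));
                        pfn++;
                } while (pte++, addr += PAGE_SIZE, addr != end);
                pte_unmap_unlock(mapped_pte, ptl);
                return 0;
        }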

There are also reports of mapping type inconsistencies, resulting in
warnings and in screen corruption.

Link: https://lore.kernel.org/lkml/20210519024322.GA29704@xsang-OptiPlex-9020/
Link: https://lore.kernel.org/lkml/YKUjvoaKKggAmpIR@sf/
Link: https://lore.kernel.org/lkml/b6b61cf0-5874-f4c0-1fcc-4b3848451c31@redhat.com/
Reported-by: kernel test robot <oliver.sang@intel.com>
Reported-by: Kalle Valo <kvalo@codeaurora.org>
Reported-by: Hans de Goede <hdegoede@redhat.com>
Reported-by: Sergei Trofimovich <slyfox@gentoo.org>
Acked-by: Christoph Hellwig <hch@lst.de>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/gpu/drm/i915/i915_mm.c

diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 4c8cd08c672d2dd899ac87b78ea6c0f0ed93484b..9a777b0ff59b053604f8259bef2ffc13fb31c2ff 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
 
 #include "i915_drv.h"
 
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+struct remap_pfn {
+       struct mm_struct *mm;
+       unsigned long pfn;
+       pgprot_t prot;
+
+       struct sgt_iter sgt;
+       resource_size_t iobase;
+};
 
 #define use_dma(io) ((io) != -1)
 
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+       if (use_dma(r->iobase))
+               return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+       else
+               return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+       struct remap_pfn *r = data;
+
+       if (GEM_WARN_ON(!r->sgt.sgp))
+               return -EINVAL;
+
+       /* Special PTE are not associated with any struct page */
+       set_pte_at(r->mm, addr, pte,
+                  pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+       r->pfn++; /* track insertions in case we need to unwind later */
+
+       r->sgt.curr += PAGE_SIZE;
+       if (r->sgt.curr >= r->sgt.max)
+               r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+       return 0;
+}
+
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+
 /**
  * remap_io_sg - remap an IO mapping to userspace
  * @vma: user vma to map to
@@ -46,7 +82,12 @@ int remap_io_sg(struct vm_area_struct *vma,
                unsigned long addr, unsigned long size,
                struct scatterlist *sgl, resource_size_t iobase)
 {
-       unsigned long pfn, len, remapped = 0;
+       struct remap_pfn r = {
+               .mm = vma->vm_mm,
+               .prot = vma->vm_page_prot,
+               .sgt = __sgt_iter(sgl, use_dma(iobase)),
+               .iobase = iobase,
+       };
        int err;
 
        /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -55,25 +96,11 @@ int remap_io_sg(struct vm_area_struct *vma,
        if (!use_dma(iobase))
                flush_cache_range(vma, addr, size);
 
-       do {
-               if (use_dma(iobase)) {
-                       if (!sg_dma_len(sgl))
-                               break;
-                       pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
-                       len = sg_dma_len(sgl);
-               } else {
-                       pfn = page_to_pfn(sg_page(sgl));
-                       len = sgl->length;
-               }
-
-               err = remap_pfn_range(vma, addr + remapped, pfn, len,
-                                     vma->vm_page_prot);
-               if (err)
-                       break;
-               remapped += len;
-       } while ((sgl = __sg_next(sgl)));
-
-       if (err)
-               zap_vma_ptes(vma, addr, remapped);
-       return err;
+       err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+       if (unlikely(err)) {
+               zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+               return err;
+       }
+
+       return 0;
 }
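
[Editor's note: the restored path drives apply_to_page_range(), whose per-PTE
callback (remap_sg() above) writes each entry with set_pte_at() and does not
assert that the slot is empty, which is why repeated remaps of the same area
did not crash before the reverted change. A heavily simplified sketch of how
that helper walks a range (not the exact mm/memory.c source):]

        static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                      unsigned long addr, unsigned long end,
                                      pte_fn_t fn, void *data)
        {
                pte_t *pte, *mapped_pte;
                spinlock_t *ptl;
                int err = 0;

                mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
                if (!pte)
                        return -ENOMEM;
                do {
                        /* No BUG_ON(!pte_none(*pte)) here: what gets
                         * written is entirely up to the callback. */
                        err = fn(pte, addr, data);
                        if (err)
                                break;
                } while (pte++, addr += PAGE_SIZE, addr != end);
                pte_unmap_unlock(mapped_pte, ptl);
                return err;
        }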