crypto: scatterwalk - use kmap_local() not kmap_atomic()
author     Ard Biesheuvel <ardb@kernel.org>
           Tue, 13 Dec 2022 16:13:10 +0000 (17:13 +0100)
committer  Herbert Xu <herbert@gondor.apana.org.au>
           Fri, 30 Dec 2022 14:56:27 +0000 (22:56 +0800)
kmap_atomic() is used to create short-lived mappings of pages that may
not be accessible via the kernel direct map. This is only needed on
32-bit architectures that implement CONFIG_HIGHMEM, but it can be used
on 64-bit architectures too, where the returned mapping is simply
the kernel direct address of the page.
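
For illustration only, the pre-existing pattern looks roughly like the
sketch below; copy_from_page_atomic() is a hypothetical helper, not
code from this patch:

  #include <linux/highmem.h>
  #include <linux/string.h>

  /* Hypothetical helper: copy 'len' bytes out of a page that may live
   * in highmem. kmap_atomic() disables preemption (and, with
   * CONFIG_HIGHMEM, pins a per-CPU kmap slot), so nothing between the
   * map and unmap calls may sleep.
   */
  static void copy_from_page_atomic(struct page *pg, unsigned int off,
                                    void *dst, size_t len)
  {
          void *vaddr = kmap_atomic(pg);

          memcpy(dst, vaddr + off, len);
          kunmap_atomic(vaddr);
  }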

However, kmap_atomic() does not support migration on CONFIG_HIGHMEM
configurations, due to the use of per-CPU kmap slots, and so it disables
preemption on all architectures, not just the 32-bit ones. This implies
that all scatterwalk-based crypto routines essentially execute with
preemption disabled all the time, which is less than ideal.

So let's switch scatterwalk_map/_unmap and the shash/ahash routines to
kmap_local() instead, which serves a similar purpose, but without the
resulting impact on preemption on architectures that have no need for
CONFIG_HIGHMEM.
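
The same hypothetical helper using the kmap_local() API needs only the
map/unmap calls swapped; the mapping is still CPU-local and must be
unmapped in reverse order of mapping, but the task remains preemptible,
and with !CONFIG_HIGHMEM the call is effectively just page_address():

  /* Same hypothetical helper as above, using kmap_local_page(), which
   * keeps preemption enabled for the lifetime of the mapping.
   */
  static void copy_from_page_local(struct page *pg, unsigned int off,
                                   void *dst, size_t len)
  {
          void *vaddr = kmap_local_page(pg);

          memcpy(dst, vaddr + off, len);
          kunmap_local(vaddr);
  }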

Cc: Eric Biggers <ebiggers@kernel.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "Elliott, Robert (Servers)" <elliott@hpe.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
crypto/ahash.c
crypto/shash.c
include/crypto/scatterwalk.h

diff --git a/crypto/ahash.c b/crypto/ahash.c
index c2ca631a111fc7fd0b860946d920a360f142fce7..4b089f1b770f2a6066dd8bf5e1053463c327d5d1 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -45,7 +45,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);
 
-       walk->data = kmap_atomic(walk->pg);
+       walk->data = kmap_local_page(walk->pg);
        walk->data += offset;
 
        if (offset & alignmask) {
@@ -95,7 +95,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
                }
        }
 
-       kunmap_atomic(walk->data);
+       kunmap_local(walk->data);
        crypto_yield(walk->flags);
 
        if (err)
diff --git a/crypto/shash.c b/crypto/shash.c
index 868b6ba2b3b74e8ac6e644c707fb11a854767abb..58b46f198449ec80b484214da4a47be8924406ea 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -320,10 +320,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
             nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
                void *data;
 
-               data = kmap_atomic(sg_page(sg));
+               data = kmap_local_page(sg_page(sg));
                err = crypto_shash_digest(desc, data + offset, nbytes,
                                          req->result);
-               kunmap_atomic(data);
+               kunmap_local(data);
        } else
                err = crypto_shash_init(desc) ?:
                      shash_ahash_finup(req, desc);
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index f2c42b4111b1b66a9ebe73a8d27903a86bd7fdfd..32fc4473175b1d814867ced1bd05abf5e7a2c48e 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -53,7 +53,7 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
 
 static inline void scatterwalk_unmap(void *vaddr)
 {
-       kunmap_atomic(vaddr);
+       kunmap_local(vaddr);
 }
 
 static inline void scatterwalk_start(struct scatter_walk *walk,
@@ -65,7 +65,7 @@ static inline void scatterwalk_start(struct scatter_walk *walk,
 
 static inline void *scatterwalk_map(struct scatter_walk *walk)
 {
-       return kmap_atomic(scatterwalk_page(walk)) +
+       return kmap_local_page(scatterwalk_page(walk)) +
               offset_in_page(walk->offset);
 }