mm/zsmalloc: use memcpy_from/to_page wherever possible
author Pintu Kumar <quic_pintu@quicinc.com>
Thu, 10 Oct 2024 17:51:43 +0000 (23:21 +0530)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 7 Nov 2024 22:38:07 +0000 (14:38 -0800)
As part of "zsmalloc: replace kmap_atomic with kmap_local_page" [1], we
replaced kmap/kunmap_atomic() with kmap_local_page()/kunmap_local().

But later it was found that some of the code could be replaced with
APIs already available in highmem.h, such as
memcpy_from_page()/memcpy_to_page().
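
For reference, memcpy_from_page()/memcpy_to_page() in include/linux/highmem.h
wrap the same kmap_local_page()/memcpy()/kunmap_local() sequence that is
open-coded here.  A simplified sketch of the helpers (sanity checks and
dcache flushing elided):

    static inline void memcpy_from_page(char *to, struct page *page,
                                        size_t offset, size_t len)
    {
            char *from = kmap_local_page(page);

            /* copy 'len' bytes out of the (possibly highmem) page */
            memcpy(to, from + offset, len);
            kunmap_local(from);
    }

    static inline void memcpy_to_page(struct page *page, size_t offset,
                                      const char *from, size_t len)
    {
            char *to = kmap_local_page(page);

            /* copy 'len' bytes into the (possibly highmem) page */
            memcpy(to + offset, from, len);
            kunmap_local(to);
    }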

Also, update the comments with the correct API naming.

[1] https://lkml.kernel.org/r/20241001175358.12970-1-quic_pintu@quicinc.com

Link: https://lkml.kernel.org/r/20241010175143.27262-1-quic_pintu@quicinc.com
Signed-off-by: Pintu Kumar <quic_pintu@quicinc.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Suggested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Joe Perches <joe@perches.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Pintu Agarwal <pintu.ping@gmail.com>
Cc: Shuah Khan <skhan@linuxfoundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/zsmalloc.c

index d3ff10160a5f86623538358004c1797d0353724d..64b66a4d3e6ef7ebf662bf744bee123dc2c6835f 100644
@@ -263,7 +263,7 @@ struct zspage {
 struct mapping_area {
        local_lock_t lock;
        char *vm_buf; /* copy buffer for objects that span pages */
-       char *vm_addr; /* address of kmap_atomic()'ed pages */
+       char *vm_addr; /* address of kmap_local_page()'ed pages */
        enum zs_mapmode vm_mm; /* mapping mode */
 };
 
@@ -1046,11 +1046,10 @@ static inline void __zs_cpu_down(struct mapping_area *area)
 static void *__zs_map_object(struct mapping_area *area,
                        struct page *pages[2], int off, int size)
 {
-       int sizes[2];
-       void *addr;
+       size_t sizes[2];
        char *buf = area->vm_buf;
 
-       /* disable page faults to match kmap_atomic() return conditions */
+       /* disable page faults to match kmap_local_page() return conditions */
        pagefault_disable();
 
        /* no read fastpath */
@@ -1061,12 +1060,8 @@ static void *__zs_map_object(struct mapping_area *area,
        sizes[1] = size - sizes[0];
 
        /* copy object to per-cpu buffer */
-       addr = kmap_local_page(pages[0]);
-       memcpy(buf, addr + off, sizes[0]);
-       kunmap_local(addr);
-       addr = kmap_local_page(pages[1]);
-       memcpy(buf + sizes[0], addr, sizes[1]);
-       kunmap_local(addr);
+       memcpy_from_page(buf, pages[0], off, sizes[0]);
+       memcpy_from_page(buf + sizes[0], pages[1], 0, sizes[1]);
 out:
        return area->vm_buf;
 }
@@ -1074,8 +1069,7 @@ out:
 static void __zs_unmap_object(struct mapping_area *area,
                        struct page *pages[2], int off, int size)
 {
-       int sizes[2];
-       void *addr;
+       size_t sizes[2];
        char *buf;
 
        /* no write fastpath */
@@ -1091,15 +1085,11 @@ static void __zs_unmap_object(struct mapping_area *area,
        sizes[1] = size - sizes[0];
 
        /* copy per-cpu buffer to object */
-       addr = kmap_local_page(pages[0]);
-       memcpy(addr + off, buf, sizes[0]);
-       kunmap_local(addr);
-       addr = kmap_local_page(pages[1]);
-       memcpy(addr, buf + sizes[0], sizes[1]);
-       kunmap_local(addr);
+       memcpy_to_page(pages[0], off, buf, sizes[0]);
+       memcpy_to_page(pages[1], 0, buf + sizes[0], sizes[1]);
 
 out:
-       /* enable page faults to match kunmap_atomic() return conditions */
+       /* enable page faults to match kunmap_local() return conditions */
        pagefault_enable();
 }
 
@@ -1511,10 +1501,10 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
                d_size -= size;
 
                /*
-                * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
-                * calls must occurs in reverse order of calls to kmap_atomic().
-                * So, to call kunmap_atomic(s_addr) we should first call
-                * kunmap_atomic(d_addr). For more details see
+                * Calling kunmap_local(d_addr) is necessary. kunmap_local()
+                * calls must occur in reverse order of calls to kmap_local_page().
+                * So, to call kunmap_local(s_addr) we should first call
+                * kunmap_local(d_addr). For more details see
                 * Documentation/mm/highmem.rst.
                 */
                if (s_off >= PAGE_SIZE) {