zram: remove second stage of handle allocation
authorSergey Senozhatsky <senozhatsky@chromium.org>
Mon, 3 Mar 2025 02:03:14 +0000 (11:03 +0900)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 17 Mar 2025 05:06:34 +0000 (22:06 -0700)
Previously zram write() was atomic which required us to pass
__GFP_KSWAPD_RECLAIM to zsmalloc handle allocation on a fast path and
attempt a slow path allocation (with recompression) if the fast path
failed.

Since we are not in atomic context anymore we can permit direct reclaim
during handle allocation, and hence can have a single allocation path.
There is no slow path anymore so we don't unlock the per-CPU stream (and
don't lose compressed data), which means that there is no need to do
recompression now (which should reduce CPU and battery usage).

Link: https://lkml.kernel.org/r/20250303022425.285971-6-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
drivers/block/zram/zram_drv.c

index 93cedc60ac162061a1b07ac3ba7834174f1e6edc..f043f35b17a433030b7a2ce66c97e25dee2f171f 100644 (file)
@@ -1723,11 +1723,11 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 {
        int ret = 0;
-       unsigned long handle = -ENOMEM;
-       unsigned int comp_len = 0;
+       unsigned long handle;
+       unsigned int comp_len;
        void *dst, *mem;
        struct zcomp_strm *zstrm;
-       unsigned long element = 0;
+       unsigned long element;
        bool same_filled;
 
        /* First, free memory allocated to this slot (if any) */
@@ -1741,7 +1741,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
        if (same_filled)
                return write_same_filled_page(zram, element, index);
 
-compress_again:
        zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
        mem = kmap_local_page(page);
        ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
@@ -1751,7 +1750,6 @@ compress_again:
        if (unlikely(ret)) {
                zcomp_stream_put(zstrm);
                pr_err("Compression failed! err=%d\n", ret);
-               zs_free(zram->mem_pool, handle);
                return ret;
        }
 
@@ -1760,35 +1758,12 @@ compress_again:
                return write_incompressible_page(zram, page, index);
        }
 
-       /*
-        * handle allocation has 2 paths:
-        * a) fast path is executed with preemption disabled (for
-        *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
-        *  since we can't sleep;
-        * b) slow path enables preemption and attempts to allocate
-        *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
-        *  put per-cpu compression stream and, thus, to re-do
-        *  the compression once handle is allocated.
-        *
-        * if we have a 'non-null' handle here then we are coming
-        * from the slow path and handle has already been allocated.
-        */
-       if (IS_ERR_VALUE(handle))
-               handle = zs_malloc(zram->mem_pool, comp_len,
-                                  __GFP_KSWAPD_RECLAIM |
-                                  __GFP_NOWARN |
-                                  __GFP_HIGHMEM |
-                                  __GFP_MOVABLE);
+       handle = zs_malloc(zram->mem_pool, comp_len,
+                          GFP_NOIO | __GFP_NOWARN |
+                          __GFP_HIGHMEM | __GFP_MOVABLE);
        if (IS_ERR_VALUE(handle)) {
                zcomp_stream_put(zstrm);
-               atomic64_inc(&zram->stats.writestall);
-               handle = zs_malloc(zram->mem_pool, comp_len,
-                                  GFP_NOIO | __GFP_HIGHMEM |
-                                  __GFP_MOVABLE);
-               if (IS_ERR_VALUE(handle))
-                       return PTR_ERR((void *)handle);
-
-               goto compress_again;
+               return PTR_ERR((void *)handle);
        }
 
        if (!zram_can_store_page(zram)) {