mm: zswap: increase reject_compress_poor but not reject_compress_fail if compression returns ENOSPC
authorBarry Song <v-songbaohua@oppo.com>
Mon, 19 Feb 2024 21:19:35 +0000 (10:19 +1300)
committerAndrew Morton <akpm@linux-foundation.org>
Sat, 24 Feb 2024 01:48:31 +0000 (17:48 -0800)
We used to rely on the -ENOSPC returned by zpool_malloc() to increase
reject_compress_poor.  But the code no longer gets there after commit
744e1885922a ("crypto: scomp - fix req->dst buffer overflow"), as the new
code goes to "out" immediately after the special compression case happens.
So there may no longer be a chance to execute zpool_malloc().  We are
incorrectly increasing zswap_reject_compress_fail instead.  Thus, we need
to fix the counter handling right after compression returns -ENOSPC.  This
patch also centralizes the counter handling for all of compress_poor,
compress_fail and alloc_fail.

Link: https://lkml.kernel.org/r/20240219211935.72394-1-21cnbao@gmail.com
Fixes: 744e1885922a ("crypto: scomp - fix req->dst buffer overflow")
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/zswap.c

index 62fe307521c9370e49edf9ff8f55f98169617286..51de79aa86593de080bb18e511400fb6ca156bf8 100644 (file)
@@ -1021,12 +1021,12 @@ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
 {
        struct crypto_acomp_ctx *acomp_ctx;
        struct scatterlist input, output;
+       int comp_ret = 0, alloc_ret = 0;
        unsigned int dlen = PAGE_SIZE;
        unsigned long handle;
        struct zpool *zpool;
        char *buf;
        gfp_t gfp;
-       int ret;
        u8 *dst;
 
        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
@@ -1057,26 +1057,18 @@ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
         * but in different threads running on different cpu, we have different
         * acomp instance, so multiple threads can do (de)compression in parallel.
         */
-       ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
+       comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
        dlen = acomp_ctx->req->dlen;
-       if (ret) {
-               zswap_reject_compress_fail++;
+       if (comp_ret)
                goto unlock;
-       }
 
        zpool = zswap_find_zpool(entry);
        gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
        if (zpool_malloc_support_movable(zpool))
                gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
-       ret = zpool_malloc(zpool, dlen, gfp, &handle);
-       if (ret == -ENOSPC) {
-               zswap_reject_compress_poor++;
-               goto unlock;
-       }
-       if (ret) {
-               zswap_reject_alloc_fail++;
+       alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
+       if (alloc_ret)
                goto unlock;
-       }
 
        buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
        memcpy(buf, dst, dlen);
@@ -1086,8 +1078,15 @@ static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
        entry->length = dlen;
 
 unlock:
+       if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
+               zswap_reject_compress_poor++;
+       else if (comp_ret)
+               zswap_reject_compress_fail++;
+       else if (alloc_ret)
+               zswap_reject_alloc_fail++;
+
        mutex_unlock(&acomp_ctx->mutex);
-       return ret == 0;
+       return comp_ret == 0 && alloc_ret == 0;
 }
 
 static void zswap_decompress(struct zswap_entry *entry, struct page *page)