mm, page_alloc: remove unnecessary initialisation from __alloc_pages_nodemask()
author Mel Gorman <mgorman@techsingularity.net>
Fri, 20 May 2016 00:13:50 +0000 (17:13 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 20 May 2016 02:12:14 +0000 (19:12 -0700)
page is guaranteed to be set before it is read, with or without the
initialisation, so the "= NULL" initialiser can be dropped.  The one
early-exit path that previously relied on it now sets page explicitly.
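
Purely for illustration, here is a minimal standalone C sketch (not
kernel code) of the definite-assignment pattern the change relies on:
every path assigns page before it is read, so an up-front "= NULL" is
redundant.  The helpers find_zone() and try_alloc() are hypothetical
stand-ins for the preferred-zone lookup and the first allocation
attempt.

	#include <stddef.h>
	#include <stdio.h>

	struct page { int id; };

	static struct page some_page = { .id = 1 };

	/* Hypothetical stand-in for the preferred-zone lookup:
	 * non-zero means a usable zone was found. */
	static int find_zone(int want_zone)
	{
		return want_zone;
	}

	/* Hypothetical stand-in for the first allocation attempt. */
	static struct page *try_alloc(void)
	{
		return &some_page;
	}

	static struct page *alloc_sketch(int want_zone)
	{
		struct page *page;	/* no "= NULL": every path below sets it */

		if (!find_zone(want_zone)) {
			page = NULL;	/* error path sets it explicitly ... */
			goto out;
		}

		page = try_alloc();	/* ... and so does the success path */
	out:
		return page;		/* read only after one of the stores above */
	}

	int main(void)
	{
		printf("found:   %p\n", (void *)alloc_sketch(1));
		printf("missing: %p\n", (void *)alloc_sketch(0));
		return 0;
	}

As in the patch below, assigning NULL only on the early-exit path
leaves the variable uninitialised everywhere the compiler can prove it
is written before use, which is what allows the initialiser to go away.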

[akpm@linux-foundation.org: fix warning]
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index 1096ac8f5ed11c82c5669802015603ddda7486d7..f9ca6cc553c7ce0346784eb0bf451e6b5d9fec97 100644
@@ -3347,7 +3347,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
 {
        struct zoneref *preferred_zoneref;
-       struct page *page = NULL;
+       struct page *page;
        unsigned int cpuset_mems_cookie;
        unsigned int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
        gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
@@ -3393,8 +3393,11 @@ retry_cpuset:
        /* The preferred zone is used for statistics later */
        preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
                                ac.nodemask, &ac.preferred_zone);
-       if (!ac.preferred_zone)
+       if (!ac.preferred_zone) {
+               page = NULL;
                goto out;
+       }
+
        ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
        /* First allocation attempt */