mm: ksm: fix a typo in comment "alreaady"->"already"
authorEthon Paul <ethp@qq.com>
Thu, 4 Jun 2020 23:49:01 +0000 (16:49 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 5 Jun 2020 02:06:23 +0000 (19:06 -0700)
There is a typo in a comment; fix it.

Signed-off-by: Ethon Paul <ethp@qq.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Link: http://lkml.kernel.org/r/20200410162427.13927-1-ethp@qq.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/ksm.c

index 281c00129a2ea7487b87d5e2642b8fdc4b82c929..18c5d005bd01b8607c646c4ed39c9d8922be6a4f 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -612,7 +612,7 @@ static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
                 * Move the old stable node to the second dimension
                 * queued in the hlist_dup. The invariant is that all
                 * dup stable_nodes in the chain->hlist point to pages
-                * that are wrprotected and have the exact same
+                * that are write protected and have the exact same
                 * content.
                 */
                stable_node_chain_add_dup(dup, chain);
@@ -1148,7 +1148,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 
        /*
         * No need to check ksm_use_zero_pages here: we can only have a
-        * zero_page here if ksm_use_zero_pages was enabled alreaady.
+        * zero_page here if ksm_use_zero_pages was enabled already.
         */
        if (!is_zero_pfn(page_to_pfn(kpage))) {
                get_page(kpage);
@@ -1608,7 +1608,7 @@ again:
                         * continue. All KSM pages belonging to the
                         * stable_node dups in a stable_node chain
                         * have the same content and they're
-                        * wrprotected at all times. Any will work
+                        * write protected at all times. Any will work
                         * fine to continue the walk.
                         */
                        tree_page = get_ksm_page(stable_node_any,
@@ -1843,7 +1843,7 @@ again:
                         * continue. All KSM pages belonging to the
                         * stable_node dups in a stable_node chain
                         * have the same content and they're
-                        * wrprotected at all times. Any will work
+                        * write protected at all times. Any will work
                         * fine to continue the walk.
                         */
                        tree_page = get_ksm_page(stable_node_any,
@@ -2001,7 +2001,7 @@ static void stable_tree_append(struct rmap_item *rmap_item,
         * duplicate. page_migration could break later if rmap breaks,
         * so we can as well crash here. We really need to check for
         * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check
-        * for other negative values as an undeflow if detected here
+        * for other negative values as an underflow if detected here
         * for the first time (and not when decreasing rmap_hlist_len)
         * would be sign of memory corruption in the stable_node.
         */