thp: optimize away unnecessary page table locking
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index f907611cca73c2de10ce7645b8afafb3319cf9fd..28d4e6ab6634e951499088d5c6710e4411d8a903 100644
@@ -2473,7 +2473,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
                        nr_free -= PAGE_CACHE_SIZE * 8;
                        continue;
                }
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                /*
                 * Subtract the number of set bits. If this
                 * is the last page and it is partial we don't really care as
@@ -2483,7 +2483,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
                 */
                nr_free -= bitmap_weight(kaddr,
                                        PAGE_CACHE_SIZE * BITS_PER_BYTE);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
@@ -2544,7 +2544,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
                        nr_free -= PAGE_CACHE_SIZE * 8;
                        continue;
                }
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                /*
                 * Subtract the number of set bits. If this
                 * is the last page and it is partial we don't really care as
@@ -2554,7 +2554,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
                 */
                nr_free -= bitmap_weight(kaddr,
                                        PAGE_CACHE_SIZE * BITS_PER_BYTE);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",