Merge branch 'PAGE_CACHE_SIZE-removal'
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 4 Apr 2016 17:50:24 +0000 (10:50 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 4 Apr 2016 17:50:24 +0000 (10:50 -0700)
Merge PAGE_CACHE_SIZE removal patches from Kirill Shutemov:
 "PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
  ago with promise that one day it will be possible to implement page
  cache with bigger chunks than PAGE_SIZE.

  This promise never materialized, and it is unlikely that it ever
  will.

  Let's stop pretending that pages in the page cache are special.  They
  are not.

  The first patch, which carries most of the changes, was generated
  with coccinelle.  The second is manual fixups on top.

  The third patch removes the macro definitions."
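
[ For reference, the removed macros were plain aliases in
  include/linux/pagemap.h; abridged, they looked approximately like
  this (a sketch, not the verbatim header):

        #define PAGE_CACHE_SHIFT        PAGE_SHIFT
        #define PAGE_CACHE_SIZE         PAGE_SIZE
        #define PAGE_CACHE_MASK         PAGE_MASK
        #define PAGE_CACHE_ALIGN(addr)  (((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

        #define page_cache_get(page)            get_page(page)
        #define page_cache_release(page)        put_page(page)

  which is why every conversion in the diffs below is a pure textual
  substitution. ]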

[ I was planning to apply this just before rc2, but then I spaced out,
  so here it is right _after_ rc2 instead.

  As Kirill suggested, I could have merged only the first two patches
  and left the old interfaces in place for compatibility, but I'd
  rather get it all done: any out-of-tree modules and patches can
  trivially do the conversion while still working with older kernels,
  so there is little reason to maintain the redundant legacy model.
  - Linus ]
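
[ A minimal sketch of that trivial conversion, for a hypothetical
  out-of-tree module; put_page() and PAGE_SIZE have been available all
  along, so the converted form needs no version checks:

        #include <linux/mm.h>

        /* Was: page_cache_release(page); */
        static inline void example_drop_page(struct page *page)
        {
                put_page(page);
        }

        /* Was: pos & (PAGE_CACHE_SIZE - 1); */
        static inline unsigned long example_offset_in_page(loff_t pos)
        {
                return pos & (PAGE_SIZE - 1);
        }

  The example_* helpers are illustrative only, not part of the
  series. ]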

* PAGE_CACHE_SIZE-removal:
  mm: drop PAGE_CACHE_* and page_cache_{get,release} definition
  mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage
  mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/radeon/radeon_ttm.c
fs/btrfs/disk-io.c
mm/rmap.c

diff --combined drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f1a55d1888cbbd343db0a69f02ad9ce52aa67084,c018b021707cdac62f57165a7be6dff1033df810..6f3369de232fe5f545382a6cc2ca528bdf8c1026
@@@ -384,15 -384,9 +384,15 @@@ static int amdgpu_bo_move(struct ttm_bu
                        struct ttm_mem_reg *new_mem)
  {
        struct amdgpu_device *adev;
 +      struct amdgpu_bo *abo;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;
  
 +      /* Can't move a pinned BO */
 +      abo = container_of(bo, struct amdgpu_bo, tbo);
 +      if (WARN_ON_ONCE(abo->pin_count > 0))
 +              return -EINVAL;
 +
        adev = amdgpu_get_adev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                amdgpu_move_null(bo, new_mem);
@@@ -622,7 -616,7 +622,7 @@@ static void amdgpu_ttm_tt_unpin_userptr
                        set_page_dirty(page);
  
                mark_page_accessed(page);
-               page_cache_release(page);
+               put_page(page);
        }
  
        sg_free_table(ttm->sg);
diff --combined drivers/gpu/drm/radeon/radeon_ttm.c
index c008312e1bcdf44fe18285e906f358aa567c8c5a,0deb7f047be055db566a5b867e954c507788e9f8..7dddfdce85e6be56f5d6bab8787fad82aa120809
@@@ -397,15 -397,9 +397,15 @@@ static int radeon_bo_move(struct ttm_bu
                        struct ttm_mem_reg *new_mem)
  {
        struct radeon_device *rdev;
 +      struct radeon_bo *rbo;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;
  
 +      /* Can't move a pinned BO */
 +      rbo = container_of(bo, struct radeon_bo, tbo);
 +      if (WARN_ON_ONCE(rbo->pin_count > 0))
 +              return -EINVAL;
 +
        rdev = radeon_get_rdev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                radeon_move_null(bo, new_mem);
@@@ -615,7 -609,7 +615,7 @@@ static void radeon_ttm_tt_unpin_userptr
                        set_page_dirty(page);
  
                mark_page_accessed(page);
-               page_cache_release(page);
+               put_page(page);
        }
  
        sg_free_table(ttm->sg);
diff --combined fs/btrfs/disk-io.c
index d01f89d130e029dd31f9a2bba6d50a9ca1f1b7cd,942af3d2885f02d441e6bcd709710291ae6ba833..4e47849d7427247dd179e8d4f320c5fcda687809
@@@ -25,6 -25,7 +25,6 @@@
  #include <linux/buffer_head.h>
  #include <linux/workqueue.h>
  #include <linux/kthread.h>
 -#include <linux/freezer.h>
  #include <linux/slab.h>
  #include <linux/migrate.h>
  #include <linux/ratelimit.h>
@@@ -302,7 -303,7 +302,7 @@@ static int csum_tree_block(struct btrfs
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
 -                      return 1;
 +                      return err;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(kaddr + offset - map_start,
                                      crc, cur_len);
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size, GFP_NOFS);
                if (!result)
 -                      return 1;
 +                      return -ENOMEM;
        } else {
                result = (char *)&inline_result;
        }
                                val, found, btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
 -                      return 1;
 +                      return -EUCLEAN;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
@@@ -512,21 -513,11 +512,21 @@@ static int csum_dirty_buffer(struct btr
        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;
 +
        found_start = btrfs_header_bytenr(eb);
 -      if (WARN_ON(found_start != start || !PageUptodate(page)))
 -              return 0;
 -      csum_tree_block(fs_info, eb, 0);
 -      return 0;
 +      /*
 +       * Please do not consolidate these warnings into a single if.
 +       * It is useful to know what went wrong.
 +       */
 +      if (WARN_ON(found_start != start))
 +              return -EUCLEAN;
 +      if (WARN_ON(!PageUptodate(page)))
 +              return -EUCLEAN;
 +
 +      ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
 +                      btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
 +
 +      return csum_tree_block(fs_info, eb, 0);
  }
  
  static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
@@@ -670,8 -661,10 +670,8 @@@ static int btree_readpage_end_io_hook(s
                                       eb, found_level);
  
        ret = csum_tree_block(fs_info, eb, 1);
 -      if (ret) {
 -              ret = -EIO;
 +      if (ret)
                goto err;
 -      }
  
        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
@@@ -1062,7 -1055,7 +1062,7 @@@ static void btree_invalidatepage(struc
                           (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
-               page_cache_release(page);
+               put_page(page);
        }
  }
  
@@@ -1764,7 -1757,7 +1764,7 @@@ static int setup_bdi(struct btrfs_fs_in
        if (err)
                return err;
  
-       bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
+       bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
        bdi->congested_fn       = btrfs_congested_fn;
        bdi->congested_data     = info;
        bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
@@@ -1838,7 -1831,7 +1838,7 @@@ static int cleaner_kthread(void *arg
                 */
                btrfs_delete_unused_bgs(root->fs_info);
  sleep:
 -              if (!try_to_freeze() && !again) {
 +              if (!again) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop())
                                schedule();
@@@ -1928,12 -1921,14 +1928,12 @@@ sleep
                if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
                                      &root->fs_info->fs_state)))
                        btrfs_cleanup_transaction(root);
 -              if (!try_to_freeze()) {
 -                      set_current_state(TASK_INTERRUPTIBLE);
 -                      if (!kthread_should_stop() &&
 -                          (!btrfs_transaction_blocked(root->fs_info) ||
 -                           cannot_commit))
 -                              schedule_timeout(delay);
 -                      __set_current_state(TASK_RUNNING);
 -              }
 +              set_current_state(TASK_INTERRUPTIBLE);
 +              if (!kthread_should_stop() &&
 +                              (!btrfs_transaction_blocked(root->fs_info) ||
 +                               cannot_commit))
 +                      schedule_timeout(delay);
 +              __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());
        return 0;
  }
@@@ -2542,7 -2537,7 +2542,7 @@@ int open_ctree(struct super_block *sb
                err = ret;
                goto fail_bdi;
        }
-       fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
+       fs_info->dirty_metadata_batch = PAGE_SIZE *
                                        (1 + ilog2(nr_cpu_ids));
  
        ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
         * flag our filesystem as having big metadata blocks if
         * they are bigger than the page size
         */
-       if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) {
+       if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
                if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
                        printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
                features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
  
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
-                                   SZ_4M / PAGE_CACHE_SIZE);
+                                   SZ_4M / PAGE_SIZE);
  
        tree_root->nodesize = nodesize;
        tree_root->sectorsize = sectorsize;
@@@ -4076,9 -4071,9 +4076,9 @@@ static int btrfs_check_super_valid(stru
                ret = -EINVAL;
        }
        /* Only PAGE SIZE is supported yet */
-       if (sectorsize != PAGE_CACHE_SIZE) {
+       if (sectorsize != PAGE_SIZE) {
                printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
-                               sectorsize, PAGE_CACHE_SIZE);
+                               sectorsize, PAGE_SIZE);
                ret = -EINVAL;
        }
        if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
diff --combined mm/rmap.c
index 395e314b79962696d9be729861bd28300fd7e68b,525b92f866a7ffb92db8d0d437a34d99cc1ab6f7..307b555024efb6787cca6030d1f58060ab4031fe
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@@ -569,6 -569,19 +569,6 @@@ void page_unlock_anon_vma_read(struct a
  }
  
  #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 -static void percpu_flush_tlb_batch_pages(void *data)
 -{
 -      /*
 -       * All TLB entries are flushed on the assumption that it is
 -       * cheaper to flush all TLBs and let them be refilled than
 -       * flushing individual PFNs. Note that we do not track mm's
 -       * to flush as that might simply be multiple full TLB flushes
 -       * for no gain.
 -       */
 -      count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 -      flush_tlb_local();
 -}
 -
  /*
   * Flush TLB entries for recently unmapped pages from remote CPUs. It is
   * important if a PTE was dirty when it was unmapped that it's flushed
@@@ -585,14 -598,15 +585,14 @@@ void try_to_unmap_flush(void
  
        cpu = get_cpu();
  
 -      trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
 -
 -      if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
 -              percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
 -
 -      if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
 -              smp_call_function_many(&tlb_ubc->cpumask,
 -                      percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
 +      if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
 +              count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 +              local_flush_tlb();
 +              trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
        }
 +
 +      if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
 +              flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
        cpumask_clear(&tlb_ubc->cpumask);
        tlb_ubc->flush_required = false;
        tlb_ubc->writable = false;
@@@ -1541,7 -1555,7 +1541,7 @@@ static int try_to_unmap_one(struct pag
  
  discard:
        page_remove_rmap(page, PageHuge(page));
-       page_cache_release(page);
+       put_page(page);
  
  out_unmap:
        pte_unmap_unlock(pte, ptl);