Merge branch 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 17 Nov 2017 20:38:51 +0000 (12:38 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 17 Nov 2017 20:38:51 +0000 (12:38 -0800)
Pull get_user_pages_fast() conversion from Al Viro:
 "A bunch of places switched to get_user_pages_fast()"

* 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ceph: use get_user_pages_fast()
  pvr2fs: use get_user_pages_fast()
  atomisp: use get_user_pages_fast()
  st: use get_user_pages_fast()
  via_dmablit(): use get_user_pages_fast()
  fsl_hypervisor: switch to get_user_pages_fast()
  rapidio: switch to get_user_pages_fast()
  vchiq_2835_arm: switch to get_user_pages_fast()
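
As context for the diffs below, a minimal sketch (not part of this merge; the helper name pin_user_buffer() is made up) of the shape these conversions share, assuming the 4.14-era prototype int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages):

/*
 * Illustrative sketch only: pin a user buffer with get_user_pages_fast()
 * instead of get_user_pages()/get_user_pages_unlocked() under mmap_sem.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int pin_user_buffer(unsigned long uaddr, size_t len, bool write,
			   struct page ***pagesp, int *nr_pagesp)
{
	unsigned int offset = offset_in_page(uaddr);
	int nr_pages = PAGE_ALIGN(len + offset) >> PAGE_SHIFT;
	struct page **pages;
	int pinned;

	pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Before the conversion this was get_user_pages() (or the _unlocked
	 * variant) under mmap_sem, passing FOLL_WRITE for writable mappings;
	 * get_user_pages_fast() takes no lock here and encodes write intent
	 * as a plain int.
	 */
	pinned = get_user_pages_fast(uaddr & PAGE_MASK, nr_pages, write, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}
	if (pinned != nr_pages) {
		/* Short pin: drop what was pinned, as the callers below do. */
		while (pinned)
			put_page(pages[--pinned]);
		kfree(pages);
		return -EFAULT;
	}

	*pagesp = pages;
	*nr_pagesp = nr_pages;
	return 0;
}

The write flag maps from the old gup_flags: FOLL_WRITE becomes write = 1, which in the hunks below appears as expressions such as dir == DMA_FROM_DEVICE or type == PAGELIST_READ.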

drivers/rapidio/devices/rio_mport_cdev.c
drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
net/ceph/pagevec.c

diff --combined drivers/rapidio/devices/rio_mport_cdev.c
index 5c1b6388122ad8502933ae9878f12f3419204be2,cf8e4ec2fd48ecedc5d692e1ecb99e96cd2faf3d..665d9e94a7e1bee135b6e046d8e27a15579fee5d
@@@ -876,10 -876,10 +876,10 @@@ rio_dma_transfer(struct file *filp, u3
         * offset within the internal buffer specified by handle parameter.
         */
        if (xfer->loc_addr) {
 -              unsigned long offset;
 +              unsigned int offset;
                long pinned;
  
 -              offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
 +              offset = lower_32_bits(offset_in_page(xfer->loc_addr));
                nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
  
                page_list = kmalloc_array(nr_pages,
                        goto err_req;
                }
  
-               pinned = get_user_pages_unlocked(
+               pinned = get_user_pages_fast(
                                (unsigned long)xfer->loc_addr & PAGE_MASK,
-                               nr_pages,
-                               page_list,
-                               dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0);
+                               nr_pages, dir == DMA_FROM_DEVICE, page_list);
  
                if (pinned != nr_pages) {
                        if (pinned < 0) {
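
On the offset change at the top of this hunk: a purely illustrative sketch (the helper name is made up) of what the new expression computes, assuming loc_addr is the u64 user address from the transfer descriptor:

#include <linux/kernel.h>	/* lower_32_bits() */
#include <linux/mm.h>		/* offset_in_page(), PAGE_MASK */

/* Hypothetical helper, for illustration only. */
static unsigned int loc_addr_page_offset(u64 loc_addr)
{
	/*
	 * offset_in_page(p) is ((unsigned long)(p) & ~PAGE_MASK), i.e. the
	 * byte offset within the page; lower_32_bits() narrows that
	 * (already < PAGE_SIZE) value to the unsigned int used above.
	 */
	return lower_32_bits(offset_in_page(loc_addr));
}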
diff --combined drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
index 6e2dce7a5a2dab6402dddfa03fef09f75811e2b6,2052f8d2f1375f1b3db27af212dae54818d982f7..79bd540d78821f7d7d0d71944eef13b06c073f07
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
 - * You should have received a copy of the GNU General Public License
 - * along with this program; if not, write to the Free Software
 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 - * 02110-1301, USA.
   *
   */
  /*
@@@ -54,7 -58,7 +54,7 @@@ static unsigned int nr_to_order_bottom(
        return fls(nr) - 1;
  }
  
 -struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
 +static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
  {
        struct hmm_buffer_object *bo;
  
@@@ -95,7 -99,7 +95,7 @@@ static int __bo_init(struct hmm_bo_devi
        return 0;
  }
  
 -struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
 +static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
                                struct rb_node *node, unsigned int pgnr)
  {
        struct hmm_buffer_object *this, *ret_bo, *temp_bo;
@@@ -146,7 -150,7 +146,7 @@@ remove_bo_and_return
        return temp_bo;
  }
  
 -struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
 +static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
                                                        ia_css_ptr start)
  {
        struct rb_node *n = root->rb_node;
        return NULL;
  }
  
 -struct hmm_buffer_object *__bo_search_by_addr_in_range(struct rb_root *root,
 -                                      unsigned int start)
 +static struct hmm_buffer_object *__bo_search_by_addr_in_range(
 +              struct rb_root *root, unsigned int start)
  {
        struct rb_node *n = root->rb_node;
        struct hmm_buffer_object *bo;
@@@ -254,7 -258,7 +254,7 @@@ static void __bo_insert_to_alloc_rbtree
        rb_insert_color(&bo->node, root);
  }
  
 -struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
 +static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
                                        struct hmm_buffer_object *bo,
                                        unsigned int pgnr)
  {
@@@ -327,7 -331,7 +327,7 @@@ static void __bo_take_off_handling(stru
        }
  }
  
 -struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
 +static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
                                        struct hmm_buffer_object *next_bo)
  {
        struct hmm_bo_device *bdev;
@@@ -721,10 -725,12 +721,10 @@@ static int alloc_private_pages(struct h
  
        pgnr = bo->pgnr;
  
 -      bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * pgnr,
 +      bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
                                GFP_KERNEL);
 -      if (unlikely(!bo->page_obj)) {
 -              dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
 +      if (unlikely(!bo->page_obj))
                return -ENOMEM;
 -      }
  
        i = 0;
        alloc_pgnr = 0;
@@@ -984,13 -990,16 +984,13 @@@ static int alloc_user_pages(struct hmm_
        struct vm_area_struct *vma;
        struct page **pages;
  
 -      pages = kmalloc(sizeof(struct page *) * bo->pgnr, GFP_KERNEL);
 -      if (unlikely(!pages)) {
 -              dev_err(atomisp_dev, "out of memory for pages...\n");
 +      pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
 +      if (unlikely(!pages))
                return -ENOMEM;
 -      }
  
 -      bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * bo->pgnr,
 +      bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
                GFP_KERNEL);
        if (unlikely(!bo->page_obj)) {
 -              dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
                kfree(pages);
                return -ENOMEM;
        }
        } else {
                /*Handle frame buffer allocated in user space*/
                mutex_unlock(&bo->mutex);
-               down_read(&current->mm->mmap_sem);
-               page_nr = get_user_pages((unsigned long)userptr,
-                                        (int)(bo->pgnr), 1, pages, NULL);
-               up_read(&current->mm->mmap_sem);
+               page_nr = get_user_pages_fast((unsigned long)userptr,
+                                        (int)(bo->pgnr), 1, pages);
                mutex_lock(&bo->mutex);
                bo->mem_type = HMM_BO_MEM_TYPE_USER;
        }
@@@ -1159,9 -1166,13 +1157,9 @@@ status_err2
  
  int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
  {
 -      int ret;
 -
        check_bo_null_return(bo, 0);
  
 -      ret = bo->status & HMM_BO_PAGE_ALLOCED;
 -
 -      return ret;
 +      return bo->status & HMM_BO_PAGE_ALLOCED;
  }
  
  /*
@@@ -1350,9 -1361,10 +1348,9 @@@ void *hmm_bo_vmap(struct hmm_buffer_obj
                bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
        }
  
 -      pages = kmalloc(sizeof(*pages) * bo->pgnr, GFP_KERNEL);
 +      pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
        if (unlikely(!pages)) {
                mutex_unlock(&bo->mutex);
 -              dev_err(atomisp_dev, "out of memory for pages...\n");
                return NULL;
        }
  
diff --combined drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index eea2d78b0ec6a6b867da7b2e797b0afd75a9ea54,cd2b19f335d8d1466855cd515d8ab80b31f9716c..315b49c1de3bb323f4383e4ba8acb3e3ded422b1
  #define BELL0 0x00
  #define BELL2 0x08
  
 -typedef struct vchiq_2835_state_struct {
 +struct vchiq_2835_state {
        int inited;
        VCHIQ_ARM_STATE_T arm_state;
 -} VCHIQ_2835_ARM_STATE_T;
 +};
  
  struct vchiq_pagelist_info {
        PAGELIST_T *pagelist;
@@@ -84,14 -84,15 +84,13 @@@ static char *g_free_fragments
  static struct semaphore g_free_fragments_sema;
  static struct device *g_dev;
  
 -extern int vchiq_arm_log_level;
 -
  static DEFINE_SEMAPHORE(g_free_fragments_mutex);
  
  static irqreturn_t
  vchiq_doorbell_irq(int irq, void *dev_id);
  
  static struct vchiq_pagelist_info *
- create_pagelist(char __user *buf, size_t count, unsigned short type,
-               struct task_struct *task);
+ create_pagelist(char __user *buf, size_t count, unsigned short type);
  
  static void
  free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
@@@ -204,31 -205,25 +203,31 @@@ VCHIQ_STATUS_
  vchiq_platform_init_state(VCHIQ_STATE_T *state)
  {
        VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
 +      struct vchiq_2835_state *platform_state;
 +
 +      state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
 +      platform_state = (struct vchiq_2835_state *)state->platform_state;
 +
 +      platform_state->inited = 1;
 +      status = vchiq_arm_init_state(state, &platform_state->arm_state);
  
 -      state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
 -      ((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 1;
 -      status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state);
        if (status != VCHIQ_SUCCESS)
 -      {
 -              ((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 0;
 -      }
 +              platform_state->inited = 0;
 +
        return status;
  }
  
  VCHIQ_ARM_STATE_T*
  vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
  {
 -      if (!((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited)
 -      {
 +      struct vchiq_2835_state *platform_state;
 +
 +      platform_state   = (struct vchiq_2835_state *)state->platform_state;
 +
 +      if (!platform_state->inited)
                BUG();
 -      }
 -      return &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state;
 +
 +      return &platform_state->arm_state;
  }
  
  void
@@@ -255,8 -250,7 +254,7 @@@ vchiq_prepare_bulk_data(VCHIQ_BULK_T *b
        pagelistinfo = create_pagelist((char __user *)offset, size,
                                       (dir == VCHIQ_BULK_RECEIVE)
                                       ? PAGELIST_READ
-                                      : PAGELIST_WRITE,
-                                      current);
+                                      : PAGELIST_WRITE);
  
        if (!pagelistinfo)
                return VCHIQ_ERROR;
@@@ -387,16 -381,15 +385,15 @@@ cleanup_pagelistinfo(struct vchiq_pagel
  }
  
  /* There is a potential problem with partial cache lines (pages?)
 -** at the ends of the block when reading. If the CPU accessed anything in
 -** the same line (page?) then it may have pulled old data into the cache,
 -** obscuring the new data underneath. We can solve this by transferring the
 -** partial cache lines separately, and allowing the ARM to copy into the
 -** cached area.
 -*/
 + * at the ends of the block when reading. If the CPU accessed anything in
 + * the same line (page?) then it may have pulled old data into the cache,
 + * obscuring the new data underneath. We can solve this by transferring the
 + * partial cache lines separately, and allowing the ARM to copy into the
 + * cached area.
 + */
  
  static struct vchiq_pagelist_info *
- create_pagelist(char __user *buf, size_t count, unsigned short type,
-               struct task_struct *task)
+ create_pagelist(char __user *buf, size_t count, unsigned short type)
  {
        PAGELIST_T *pagelist;
        struct vchiq_pagelist_info *pagelistinfo;
                        sizeof(struct vchiq_pagelist_info);
  
        /* Allocate enough storage to hold the page pointers and the page
 -      ** list
 -      */
 +       * list
 +       */
        pagelist = dma_zalloc_coherent(g_dev,
                                       pagelist_size,
                                       &dma_addr,
                                       GFP_KERNEL);
  
 -      vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
 -                      pagelist);
 +      vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
 +
        if (!pagelist)
                return NULL;
  
                }
                /* do not try and release vmalloc pages */
        } else {
-               down_read(&task->mm->mmap_sem);
-               actual_pages = get_user_pages(
-                                         (unsigned long)buf & PAGE_MASK,
+               actual_pages = get_user_pages_fast(
+                                         (unsigned long)buf & PAGE_MASK,
                                          num_pages,
-                                         (type == PAGELIST_READ) ? FOLL_WRITE : 0,
-                                         pages,
-                                         NULL /*vmas */);
-               up_read(&task->mm->mmap_sem);
+                                         type == PAGELIST_READ,
+                                         pages);
  
                if (actual_pages != num_pages) {
                        vchiq_log_info(vchiq_arm_log_level,
 -                                     "create_pagelist - only %d/%d pages locked",
 -                                     actual_pages,
 -                                     num_pages);
 +                                     "%s - only %d/%d pages locked",
 +                                     __func__, actual_pages, num_pages);
  
                        /* This is probably due to the process being killed */
                        while (actual_pages > 0)
@@@ -615,20 -606,18 +609,20 @@@ free_pagelist(struct vchiq_pagelist_inf
                        if (head_bytes > actual)
                                head_bytes = actual;
  
 -                      memcpy((char *)page_address(pages[0]) +
 +                      memcpy((char *)kmap(pages[0]) +
                                pagelist->offset,
                                fragments,
                                head_bytes);
 +                      kunmap(pages[0]);
                }
                if ((actual >= 0) && (head_bytes < actual) &&
                        (tail_bytes != 0)) {
 -                      memcpy((char *)page_address(pages[num_pages - 1]) +
 +                      memcpy((char *)kmap(pages[num_pages - 1]) +
                                ((pagelist->offset + actual) &
                                (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
                                fragments + g_cache_line_size,
                                tail_bytes);
 +                      kunmap(pages[num_pages - 1]);
                }
  
                down(&g_free_fragments_mutex);
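
On the page_address() to kmap()/kunmap() change in free_pagelist() above: a hedged sketch (hypothetical helper name) of the mapping pattern, on the assumption that the pinned user pages may live in highmem on 32-bit ARM and so have no permanent kernel mapping:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper, for illustration only. */
static void copy_fragment_to_page(struct page *page, size_t offset,
				  const void *src, size_t len)
{
	/*
	 * page_address() returns NULL for an unmapped highmem page, so map
	 * the page temporarily around the memcpy(), as the hunk above does.
	 */
	char *vaddr = kmap(page);

	memcpy(vaddr + offset, src, len);
	kunmap(page);
}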
diff --combined net/ceph/pagevec.c
index ee43bc13221c5d7dffcfed609dfffb48107736f4,4098b17d0812f6f253a1a9decb550734e459a888..a3d0adc828e6417e7772420fddbdff1313064f65
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0
  #include <linux/ceph/ceph_debug.h>
  
  #include <linux/module.h>
@@@ -25,9 -24,9 +25,9 @@@ struct page **ceph_get_direct_page_vect
                return ERR_PTR(-ENOMEM);
  
        while (got < num_pages) {
-               rc = get_user_pages_unlocked(
+               rc = get_user_pages_fast(
                    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
-                   num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
+                   num_pages - got, write_page, pages + got);
                if (rc < 0)
                        break;
                BUG_ON(rc == 0);