mm: use new helper functions around the i_mmap_mutex
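For context, the i_mmap_lock_write()/i_mmap_unlock_write() helpers this patch switches to are, at this point in the series, thin wrappers around the same per-mapping mutex. A minimal sketch of what they look like, assuming they are defined as simple static inlines in include/linux/fs.h (the exact upstream definitions may differ slightly):

/* Sketch only: wrap the existing i_mmap_mutex so that call sites
 * stop naming the lock directly and the lock type can change later
 * without touching them again. */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
        mutex_lock(&mapping->i_mmap_mutex);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
        mutex_unlock(&mapping->i_mmap_mutex);
}

With that in hand, every hunk below is a one-for-one substitution: the locking semantics are unchanged, only the spelling of the lock/unlock is.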
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 919b86a2164d2acacd3c17bf8ce3ddb1bfa1afe4..ffe19304cc093728e54721b88f5e0c6dbba3dc3a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2774,7 +2774,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * this mapping should be shared between all the VMAs,
         * __unmap_hugepage_range() is called as the lock is already held
         */
-       mutex_lock(&mapping->i_mmap_mutex);
+       i_mmap_lock_write(mapping);
        vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
                /* Do not unmap the current VMA */
                if (iter_vma == vma)
@@ -2791,7 +2791,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                        unmap_hugepage_range(iter_vma, address,
                                             address + huge_page_size(h), page);
        }
-       mutex_unlock(&mapping->i_mmap_mutex);
+       i_mmap_unlock_write(mapping);
 }
 
 /*
@@ -3348,7 +3348,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        flush_cache_range(vma, address, end);
 
        mmu_notifier_invalidate_range_start(mm, start, end);
-       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+       i_mmap_lock_write(vma->vm_file->f_mapping);
        for (; address < end; address += huge_page_size(h)) {
                spinlock_t *ptl;
                ptep = huge_pte_offset(mm, address);
@@ -3376,7 +3376,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * and that page table be reused and filled with junk.
         */
        flush_tlb_range(vma, start, end);
-       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+       i_mmap_unlock_write(vma->vm_file->f_mapping);
        mmu_notifier_invalidate_range_end(mm, start, end);
 
        return pages << h->order;
@@ -3544,7 +3544,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        if (!vma_shareable(vma, addr))
                return (pte_t *)pmd_alloc(mm, pud, addr);
 
-       mutex_lock(&mapping->i_mmap_mutex);
+       i_mmap_lock_write(mapping);
        vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
                if (svma == vma)
                        continue;
@@ -3572,7 +3572,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        spin_unlock(ptl);
 out:
        pte = (pte_t *)pmd_alloc(mm, pud, addr);
-       mutex_unlock(&mapping->i_mmap_mutex);
+       i_mmap_unlock_write(mapping);
        return pte;
 }
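
The value of hiding the mutex behind helpers is that the lock's implementation can change without revisiting any call site, and the _write suffix anticipates exactly that. Assuming the later step in this series that converts i_mmap_mutex into an rw_semaphore (i_mmap_rwsem), the write helpers become down_write()/up_write() wrappers and a shared variant becomes possible for read-mostly interval-tree walkers, roughly:

/* Sketch only: read-side helpers once the lock is an rwsem;
 * i_mmap_rwsem is the assumed replacement field. */
static inline void i_mmap_lock_read(struct address_space *mapping)
{
        down_read(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_read(struct address_space *mapping)
{
        up_read(&mapping->i_mmap_rwsem);
}

Note that this patch deliberately converts every site in this file to the write variant, preserving the mutex's exclusive semantics; deciding which walkers can safely be relaxed to the read side is left to later patches.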