[linux-2.6-block.git] / fs / dax.c
index 616e36ea6aaab6baf9fd3210aaa425c2398e491b..6959837cc4659e16e19788d7bc362c3fb10b1b4e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,12 +98,6 @@ static void *dax_make_entry(pfn_t pfn, unsigned long flags)
        return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
 }
 
-static void *dax_make_page_entry(struct page *page)
-{
-       pfn_t pfn = page_to_pfn_t(page);
-       return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
-}
-
 static bool dax_is_locked(void *entry)
 {
        return xa_to_value(entry) & DAX_LOCKED;
@@ -116,12 +110,12 @@ static unsigned int dax_entry_order(void *entry)
        return 0;
 }
 
-static int dax_is_pmd_entry(void *entry)
+static unsigned long dax_is_pmd_entry(void *entry)
 {
        return xa_to_value(entry) & DAX_PMD;
 }
 
-static int dax_is_pte_entry(void *entry)
+static bool dax_is_pte_entry(void *entry)
 {
        return !(xa_to_value(entry) & DAX_PMD);
 }
@@ -222,9 +216,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
        ewait.wait.func = wake_exceptional_entry_func;
 
        for (;;) {
-               entry = xas_load(xas);
-               if (!entry || xa_is_internal(entry) ||
-                               WARN_ON_ONCE(!xa_is_value(entry)) ||
+               entry = xas_find_conflict(xas);
+               if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
                                !dax_is_locked(entry))
                        return entry;
 
@@ -239,6 +232,32 @@ static void *get_unlocked_entry(struct xa_state *xas)
        }
 }
 
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages)
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+{
+       struct wait_exceptional_entry_queue ewait;
+       wait_queue_head_t *wq;
+
+       init_wait(&ewait.wait);
+       ewait.wait.func = wake_exceptional_entry_func;
+
+       wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+       /*
+        * Unlike get_unlocked_entry() there is no guarantee that this
+        * path ever successfully retrieves an unlocked entry before an
+        * inode dies. Perform a non-exclusive wait in case this path
+        * never successfully performs its own wake up.
+        */
+       prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+       xas_unlock_irq(xas);
+       schedule();
+       finish_wait(wq, &ewait.wait);
+}
+
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
        /* If we were the only waiter woken, wake the next one */
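
[Commentary, not part of the diff: wait_entry_unlocked() deliberately does less than get_unlocked_entry(). It performs a single, non-exclusive wait and gives up any claim on the XArray, because the inode that owns it may be freed while we sleep. A hedged sketch of the caller-side pattern, mirroring how dax_lock_page() below uses it; the surrounding loop and RCU usage are taken from that function:

	/* caller holds rcu_read_lock() and xas' xa_lock (irq-disabled) */
	entry = xas_load(&xas);
	if (dax_is_locked(entry)) {
		rcu_read_unlock();			/* drop what pins the mapping */
		wait_entry_unlocked(&xas, entry);	/* releases the xa_lock and sleeps */
		rcu_read_lock();
		continue;				/* restart and re-read page->mapping */
	}
]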
@@ -255,6 +274,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
 {
        void *old;
 
+       BUG_ON(dax_is_locked(entry));
        xas_reset(xas);
        xas_lock_irq(xas);
        old = xas_store(xas, entry);
@@ -352,16 +372,27 @@ static struct page *dax_busy_page(void *entry)
        return NULL;
 }
 
-bool dax_lock_mapping_entry(struct page *page)
+/*
+ * dax_lock_page - Lock the DAX entry corresponding to a page
+ * @page: The page whose entry we want to lock
+ *
+ * Context: Process context.
+ * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * not be locked.
+ */
+dax_entry_t dax_lock_page(struct page *page)
 {
        XA_STATE(xas, NULL, 0);
        void *entry;
 
+       /* Ensure page->mapping isn't freed while we look at it */
+       rcu_read_lock();
        for (;;) {
                struct address_space *mapping = READ_ONCE(page->mapping);
 
-               if (!dax_mapping(mapping))
-                       return false;
+               entry = NULL;
+               if (!mapping || !dax_mapping(mapping))
+                       break;
 
                /*
                 * In the device-dax case there's no need to lock, a
@@ -370,8 +401,9 @@ bool dax_lock_mapping_entry(struct page *page)
                 * otherwise we would not have a valid pfn_to_page()
                 * translation.
                 */
+               entry = (void *)~0UL;
                if (S_ISCHR(mapping->host->i_mode))
-                       return true;
+                       break;
 
                xas.xa = &mapping->i_pages;
                xas_lock_irq(&xas);
@@ -382,20 +414,20 @@ bool dax_lock_mapping_entry(struct page *page)
                xas_set(&xas, page->index);
                entry = xas_load(&xas);
                if (dax_is_locked(entry)) {
-                       entry = get_unlocked_entry(&xas);
-                       /* Did the page move while we slept? */
-                       if (dax_to_pfn(entry) != page_to_pfn(page)) {
-                               xas_unlock_irq(&xas);
-                               continue;
-                       }
+                       rcu_read_unlock();
+                       wait_entry_unlocked(&xas, entry);
+                       rcu_read_lock();
+                       continue;
                }
                dax_lock_entry(&xas, entry);
                xas_unlock_irq(&xas);
-               return true;
+               break;
        }
+       rcu_read_unlock();
+       return (dax_entry_t)entry;
 }
 
-void dax_unlock_mapping_entry(struct page *page)
+void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
        struct address_space *mapping = page->mapping;
        XA_STATE(xas, &mapping->i_pages, page->index);
@@ -403,7 +435,7 @@ void dax_unlock_mapping_entry(struct page *page)
        if (S_ISCHR(mapping->host->i_mode))
                return;
 
-       dax_unlock_entry(&xas, dax_make_page_entry(page));
+       dax_unlock_entry(&xas, (void *)cookie);
 }
 
 /*
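
[A hedged usage sketch of the reworked locking API (not part of this diff): callers treat the dax_entry_t return value as an opaque cookie, a zero cookie means the entry could not be locked, and the same cookie must be passed back to dax_unlock_page(). The error code below is illustrative; as far as I can tell the in-tree caller this was written for is the memory-failure path:

	dax_entry_t cookie;

	cookie = dax_lock_page(page);
	if (!cookie)
		return -EBUSY;		/* illustrative error code only */
	/*
	 * page->mapping and page->index stay valid while we hold the
	 * cookie; do the work that needs the page pinned to its mapping.
	 */
	dax_unlock_page(page, cookie);
]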
@@ -445,11 +477,9 @@ static void *grab_mapping_entry(struct xa_state *xas,
 retry:
        xas_lock_irq(xas);
        entry = get_unlocked_entry(xas);
-       if (xa_is_internal(entry))
-               goto fallback;
 
        if (entry) {
-               if (WARN_ON_ONCE(!xa_is_value(entry))) {
+               if (!xa_is_value(entry)) {
                        xas_set_err(xas, EIO);
                        goto out_unlock;
                }
@@ -747,7 +777,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
 
        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
-               unsigned long address, start, end;
+               struct mmu_notifier_range range;
+               unsigned long address;
 
                cond_resched();
 
@@ -761,7 +792,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
                 * call mmu_notifier_invalidate_range_start() on our behalf
                 * before taking any lock.
                 */
-               if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
+               if (follow_pte_pmd(vma->vm_mm, address, &range,
+                                  &ptep, &pmdp, &ptl))
                        continue;
 
                /*
@@ -803,7 +835,7 @@ unlock_pte:
                        pte_unmap_unlock(ptep, ptl);
                }
 
-               mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+               mmu_notifier_invalidate_range_end(&range);
        }
        i_mmap_unlock_read(mapping);
 }
@@ -1628,8 +1660,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
        /* Did we race with someone splitting entry or so? */
        if (!entry ||
            (order == 0 && !dax_is_pte_entry(entry)) ||
-           (order == PMD_ORDER && (xa_is_internal(entry) ||
-                                   !dax_is_pmd_entry(entry)))) {
+           (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
                put_unlocked_entry(&xas, entry);
                xas_unlock_irq(&xas);
                trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,