Merge branch 'prep-for-5level'
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 10 Mar 2017 16:59:07 +0000 (08:59 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 10 Mar 2017 16:59:07 +0000 (08:59 -0800)
Merge 5-level page table prep from Kirill Shutemov:
 "Here's relatively low-risk part of 5-level paging patchset. Merging it
  now will make x86 5-level paging enabling in v4.12 easier.

  The first patch is actually x86-specific: detect 5-level paging
  support. It boils down to a single define.

  The rest of the patchset converts the Linux MMU abstraction from
  4- to 5-level paging.

  Enabling the new abstraction in most cases requires adding a single
  line to arch-specific code. The rest is taken care of by asm-generic/.

  Changes to mm/ code are mostly mechanical: add support for the new
  page table level -- p4d_t -- wherever we deal with pud_t now.

  v2:
   - fix build on microblaze (Michal);
   - comment for __ARCH_HAS_5LEVEL_HACK in kasan_populate_zero_shadow();
   - acks from Michal"

* emailed patches from Kirill A Shutemov <kirill.shutemov@linux.intel.com>:
  mm: introduce __p4d_alloc()
  mm: convert generic code to 5-level paging
  asm-generic: introduce <asm-generic/pgtable-nop4d.h>
  arch, mm: convert all architectures to use 5level-fixup.h
  asm-generic: introduce __ARCH_USE_5LEVEL_HACK
  asm-generic: introduce 5level-fixup.h
  x86/cpufeature: Add 5-level paging detection

98 files changed:
Documentation/admin-guide/kernel-parameters.txt
Documentation/dev-tools/kcov.rst
Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
Documentation/vm/userfaultfd.txt
arch/cris/arch-v32/drivers/cryptocop.c
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/sysdev/axonram.c
arch/sh/boards/mach-cayman/setup.c
arch/x86/kernel/ftrace.c
arch/x86/mm/gup.c
arch/x86/pci/xen.c
block/blk-core.c
block/blk-mq-sysfs.c
block/blk-mq.c
block/blk-mq.h
block/genhd.c
block/sed-opal.c
drivers/block/paride/pcd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/paride/pg.c
drivers/block/paride/pt.c
drivers/block/zram/zram_drv.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/ux500/cryp/cryp.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
drivers/hv/channel.c
drivers/isdn/hisax/st5481_b.c
drivers/md/bcache/util.h
drivers/media/dvb-frontends/drx39xyj/drx_driver.h
drivers/media/platform/vsp1/vsp1_drm.c
drivers/media/rc/lirc_dev.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/rc-main.c
drivers/media/rc/serial_ir.c
drivers/media/usb/dvb-usb/dw2102.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/pci/dwc/pci-exynos.c
drivers/pci/pcie/aspm.c
drivers/pci/quirks.c
drivers/scsi/aic7xxx/aic79xx_core.c
drivers/scsi/sd.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/host/xhci.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/fat/inode.c
fs/iomap.c
fs/userfaultfd.c
fs/xfs/kmem.c
fs/xfs/kmem.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap_btree.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_reflink.c
fs/xfs/xfs_reflink.h
fs/xfs/xfs_super.c
include/dt-bindings/sound/cs42l42.h
include/linux/blkdev.h
include/linux/fs.h
include/linux/genhd.h
include/linux/regulator/machine.h
include/linux/userfaultfd_k.h
include/linux/vm_event_item.h
include/media/vsp1.h
include/net/irda/timer.h
include/uapi/linux/userfaultfd.h
kernel/cgroup/cgroup.c
kernel/events/core.c
kernel/exit.c
kernel/sched/cpufreq_schedutil.c
kernel/trace/trace_stack.c
mm/backing-dev.c
mm/huge_memory.c
mm/kasan/quarantine.c
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/mlock.c
mm/rmap.c
mm/vmstat.c
scripts/gcc-plugins/sancov_plugin.c
scripts/spelling.txt
sound/soc/amd/acp-pcm-dma.c
tools/lguest/lguest.c
tools/lib/bpf/Makefile
tools/lib/traceevent/Makefile
tools/lib/traceevent/event-parse.h
tools/testing/selftests/vm/Makefile

index 9adcc803b9a54cbd51a63f024cc83cf5cf99a788..2ba45caabada3e88339e88ebdfaaedbb322085c8 100644 (file)
        cpuidle.off=1   [CPU_IDLE]
                        disable the cpuidle sub-system
 
+       cpufreq.off=1   [CPU_FREQ]
+                       disable the cpufreq sub-system
+
        cpu_init_udelay=N
                        [X86] Delay for N microsec between assert and de-assert
                        of APIC INIT to start processors.  This delay occurs
index 2c41b713841fd497a95b57054ed9d6998625b71e..44886c91e112d4d21a41e0c4d1a96f37a584aa68 100644 (file)
@@ -10,7 +10,7 @@ Note that kcov does not aim to collect as much coverage as possible. It aims
 to collect more or less stable coverage that is function of syscall inputs.
 To achieve this goal it does not collect coverage in soft/hard interrupts
 and instrumentation of some inherently non-deterministic parts of kernel is
-disbled (e.g. scheduler, locking).
+disabled (e.g. scheduler, locking).
 
 Usage
 -----
index c3f6546ebac777421b467b0008f7f78f06e8e5c4..6a23ad9ac53a4cabc85a6bc592a873f38c7c144b 100644 (file)
@@ -45,7 +45,7 @@ Required Properties:
 Optional Properties:
 - reg-names: In addition to the required properties, the following are optional
   - "efuse-address"    - Contains efuse base address used to pick up ABB info.
-  - "ldo-address"      - Contains address of ABB LDO overide register address.
+  - "ldo-address"      - Contains address of ABB LDO override register.
        "efuse-address" is required for this.
 - ti,ldovbb-vset-mask  - Required if ldo-address is set, mask for LDO override
        register to provide override vset value.
index 0e5543a920e5b2595f4d194462ba722ce25bd80d..bb2f945f87ab6a2e59f2e7503b7500c08c5427d9 100644 (file)
@@ -172,10 +172,6 @@ the same read(2) protocol as for the page fault notifications. The
 manager has to explicitly enable these events by setting appropriate
 bits in uffdio_api.features passed to UFFDIO_API ioctl:
 
-UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
-non-cooperative process. When the monitored process exits, the uffd
-manager will get UFFD_EVENT_EXIT.
-
 UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
 this feature is enabled, the userfaultfd context of the parent process
 is duplicated into the newly created process. The manager receives
index ae6903d7fdbe08c25a7fa23439d228b345d1a874..14970f11bbf2b60cc1d9e7ce26adffa1bf15f63e 100644 (file)
@@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
                dma_in_cfg.en = regk_dma_no;
                REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
 
-               /* Disble the cryptocop. */
+               /* Disable the cryptocop. */
                rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
                rw_cfg.en = 0;
                REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
index 13c39b6d5d64cb2ed3511861782d2184ed74c0c2..8f4d41936e5a90986c679876f66bfd15339de102 100644 (file)
@@ -350,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
        __r;                                                    \
 })
 
+static inline int __pte_write(pte_t pte)
+{
+       return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+       /*
+        * Saved write ptes are prot none ptes that don't have the
+        * privileged bit set. We mark prot none as one which has the
+        * present and privileged bits set and RWX cleared. To mark
+        * protnone which used to have _PAGE_WRITE set we clear
+        * the privileged bit.
+        */
+       return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+}
+#else
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+       return false;
+}
+#endif
+
+static inline int pte_write(pte_t pte)
+{
+       return __pte_write(pte) || pte_savedwrite(pte);
+}
+
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
 {
-       if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
-               return;
-
-       pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+       if (__pte_write(*ptep))
+               pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+       else if (unlikely(pte_savedwrite(*ptep)))
+               pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
 }
 
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
 {
-       if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
-               return;
-
-       pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+       /*
+        * We should not find protnone for hugetlb, but this completes the
+        * interface.
+        */
+       if (__pte_write(*ptep))
+               pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+       else if (unlikely(pte_savedwrite(*ptep)))
+               pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -400,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
        pte_update(mm, addr, ptep, ~0UL, 0, 0);
 }
 
-static inline int pte_write(pte_t pte)
-{
-       return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
-}
-
 static inline int pte_dirty(pte_t pte)
 {
        return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
@@ -468,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
        VM_BUG_ON(!pte_protnone(pte));
        return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
 }
-
-#define pte_savedwrite pte_savedwrite
-static inline bool pte_savedwrite(pte_t pte)
+#else
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
 {
-       /*
-        * Saved write ptes are prot none ptes that doesn't have
-        * privileged bit sit. We mark prot none as one which has
-        * present and pviliged bit set and RWX cleared. To mark
-        * protnone which used to have _PAGE_WRITE set we clear
-        * the privileged bit.
-        */
-       VM_BUG_ON(!pte_protnone(pte));
-       return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+       VM_WARN_ON(1);
+       return __pte(pte_val(pte) & ~_PAGE_WRITE);
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
@@ -509,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte)
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {
+       if (unlikely(pte_savedwrite(pte)))
+               return pte_clear_savedwrite(pte);
        return __pte(pte_val(pte) & ~_PAGE_WRITE);
 }
 
@@ -929,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
+#define __pmd_write(pmd)       __pte_write(pmd_pte(pmd))
 #define pmd_savedwrite(pmd)    pte_savedwrite(pmd_pte(pmd))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -985,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
 static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pmd_t *pmdp)
 {
-
-       if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
-               return;
-
-       pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+       if (__pmd_write((*pmdp)))
+               pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+       else if (unlikely(pmd_savedwrite(*pmdp)))
+               pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
 }
 
 static inline int pmd_trans_huge(pmd_t pmd)
index f3158fb16de34b69acdb593ce39de7d88437e094..8c68145ba1bd35f4e86f0a3da729ee5387a7c781 100644 (file)
@@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                                         hva, NULL, NULL);
                        if (ptep) {
                                pte = kvmppc_read_update_linux_pte(ptep, 1);
-                               if (pte_write(pte))
+                               if (__pte_write(pte))
                                        write_ok = 1;
                        }
                        local_irq_restore(flags);
index 6fca970373ee90eee718912c48d34a3ebab3ff37..ce6f2121fffe46857bf4b250c06ad3916ac24aeb 100644 (file)
@@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                }
                pte = kvmppc_read_update_linux_pte(ptep, writing);
                if (pte_present(pte) && !pte_protnone(pte)) {
-                       if (writing && !pte_write(pte))
+                       if (writing && !__pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_ci = pte_ci(pte);
index ada29eaed6e280c08f6d3ee5671c58da9eb06e38..f523ac88315070873eede1c978312569d48953a7 100644 (file)
@@ -274,7 +274,9 @@ failed:
                        if (bank->disk->major > 0)
                                unregister_blkdev(bank->disk->major,
                                                bank->disk->disk_name);
-                       del_gendisk(bank->disk);
+                       if (bank->disk->flags & GENHD_FL_UP)
+                               del_gendisk(bank->disk);
+                       put_disk(bank->disk);
                }
                device->dev.platform_data = NULL;
                if (bank->io_addr != 0)
@@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
        device_remove_file(&device->dev, &dev_attr_ecc);
        free_irq(bank->irq_id, device);
        del_gendisk(bank->disk);
+       put_disk(bank->disk);
        iounmap((void __iomem *) bank->io_addr);
        kfree(bank);
 
index 340fd40b381dc348b2bf8e86e48a2f8ee52efdaa..9c292c27e0d7114768a7bf8379df7be3fc157257 100644 (file)
@@ -128,7 +128,6 @@ static int __init smsc_superio_setup(void)
        SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
        SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
 
-#ifdef CONFIG_IDE
        /*
         * Only IDE1 exists on the Cayman
         */
@@ -158,7 +157,6 @@ static int __init smsc_superio_setup(void)
        SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
        SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
        SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
-#endif
 
        /* Exit the configuration state */
        outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
index 8639bb2ae05868ab65d88e44683f44c8651121f3..8f3d9cf26ff9f70ce6b93af12c3a0ff65785d5fa 100644 (file)
@@ -535,7 +535,7 @@ static void run_sync(void)
 {
        int enable_irqs = irqs_disabled();
 
-       /* We may be called with interrupts disbled (on bootup). */
+       /* We may be called with interrupts disabled (on bootup). */
        if (enable_irqs)
                local_irq_enable();
        on_each_cpu(do_sync_core, NULL, 1);
index 99c7805a96937c17fffa7b92eb72a8b8c776ccbb..1f3b6ef105cda5732146fa6121c35f75ada9c0f5 100644 (file)
@@ -106,32 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
 {
        struct dev_pagemap *pgmap = NULL;
-       int nr_start = *nr;
-       pte_t *ptep;
+       int nr_start = *nr, ret = 0;
+       pte_t *ptep, *ptem;
 
-       ptep = pte_offset_map(&pmd, addr);
+       /*
+        * Keep the original mapped PTE value (ptem) around since we
+        * might increment ptep off the end of the page when finishing
+        * our loop iteration.
+        */
+       ptem = ptep = pte_offset_map(&pmd, addr);
        do {
                pte_t pte = gup_get_pte(ptep);
                struct page *page;
 
                /* Similar to the PMD case, NUMA hinting must take slow path */
-               if (pte_protnone(pte)) {
-                       pte_unmap(ptep);
-                       return 0;
-               }
+               if (pte_protnone(pte))
+                       break;
+
+               if (!pte_allows_gup(pte_val(pte), write))
+                       break;
 
                if (pte_devmap(pte)) {
                        pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
                        if (unlikely(!pgmap)) {
                                undo_dev_pagemap(nr, nr_start, pages);
-                               pte_unmap(ptep);
-                               return 0;
+                               break;
                        }
-               } else if (!pte_allows_gup(pte_val(pte), write) ||
-                          pte_special(pte)) {
-                       pte_unmap(ptep);
-                       return 0;
-               }
+               } else if (pte_special(pte))
+                       break;
+
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                get_page(page);
@@ -141,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                (*nr)++;
 
        } while (ptep++, addr += PAGE_SIZE, addr != end);
-       pte_unmap(ptep - 1);
+       if (addr == end)
+               ret = 1;
+       pte_unmap(ptem);
 
-       return 1;
+       return ret;
 }
 
 static inline void get_head_page_multiple(struct page *page, int nr)
index e1fb269c87af7b39f1445e01734e76f431982f58..292ab0364a89af9aa6bc93a2ad79a88d00fbad9d 100644 (file)
@@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                return 1;
 
        for_each_pci_msi_entry(msidesc, dev) {
-               __pci_read_msi_msg(msidesc, &msg);
-               pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
-                       ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
-               if (msg.data != XEN_PIRQ_MSI_DATA ||
-                   xen_irq_from_pirq(pirq) < 0) {
-                       pirq = xen_allocate_pirq_msi(dev, msidesc);
-                       if (pirq < 0) {
-                               irq = -ENODEV;
-                               goto error;
-                       }
-                       xen_msi_compose_msg(dev, pirq, &msg);
-                       __pci_write_msi_msg(msidesc, &msg);
-                       dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
-               } else {
-                       dev_dbg(&dev->dev,
-                               "xen: msi already bound to pirq=%d\n", pirq);
+               pirq = xen_allocate_pirq_msi(dev, msidesc);
+               if (pirq < 0) {
+                       irq = -ENODEV;
+                       goto error;
                }
+               xen_msi_compose_msg(dev, pirq, &msg);
+               __pci_write_msi_msg(msidesc, &msg);
+               dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
                irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
                                               (type == PCI_CAP_ID_MSI) ? nvec : 1,
                                               (type == PCI_CAP_ID_MSIX) ?
index 1086dac8724c995b85bf3c843a4aae080ab938a1..0eeb99ef654f4ad6874cf579883a263c9894ca31 100644 (file)
@@ -578,8 +578,6 @@ void blk_cleanup_queue(struct request_queue *q)
                q->queue_lock = &q->__queue_lock;
        spin_unlock_irq(lock);
 
-       put_disk_devt(q->disk_devt);
-
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
@@ -2017,17 +2015,34 @@ blk_qc_t generic_make_request(struct bio *bio)
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
                if (likely(blk_queue_enter(q, false) == 0)) {
+                       struct bio_list hold;
+                       struct bio_list lower, same;
+
+                       /* Create a fresh bio_list for all subordinate requests */
+                       hold = bio_list_on_stack;
+                       bio_list_init(&bio_list_on_stack);
                        ret = q->make_request_fn(q, bio);
 
                        blk_queue_exit(q);
 
-                       bio = bio_list_pop(current->bio_list);
+                       /* sort new bios into those for a lower level
+                        * and those for the same level
+                        */
+                       bio_list_init(&lower);
+                       bio_list_init(&same);
+                       while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
+                               if (q == bdev_get_queue(bio->bi_bdev))
+                                       bio_list_add(&same, bio);
+                               else
+                                       bio_list_add(&lower, bio);
+                       /* now assemble so we handle the lowest level first */
+                       bio_list_merge(&bio_list_on_stack, &lower);
+                       bio_list_merge(&bio_list_on_stack, &same);
+                       bio_list_merge(&bio_list_on_stack, &hold);
                } else {
-                       struct bio *bio_next = bio_list_pop(current->bio_list);
-
                        bio_io_error(bio);
-                       bio = bio_next;
                }
+               bio = bio_list_pop(current->bio_list);
        } while (bio);
        current->bio_list = NULL; /* deactivate */
 
index 295e69670c39343d058cbf7f67fc076f7249e94e..d745ab81033afa8510f6b2d8dbbbdc187693901a 100644 (file)
@@ -17,6 +17,15 @@ static void blk_mq_sysfs_release(struct kobject *kobj)
 {
 }
 
+static void blk_mq_hw_sysfs_release(struct kobject *kobj)
+{
+       struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
+                                                 kobj);
+       free_cpumask_var(hctx->cpumask);
+       kfree(hctx->ctxs);
+       kfree(hctx);
+}
+
 struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
@@ -200,7 +209,7 @@ static struct kobj_type blk_mq_ctx_ktype = {
 static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
-       .release        = blk_mq_sysfs_release,
+       .release        = blk_mq_hw_sysfs_release,
 };
 
 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -242,24 +251,15 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
 {
        struct blk_mq_hw_ctx *hctx;
-       struct blk_mq_ctx *ctx;
-       int i, j;
+       int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
+       queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
 
-               hctx_for_each_ctx(hctx, ctx, j)
-                       kobject_put(&ctx->kobj);
-
-               kobject_put(&hctx->kobj);
-       }
-
        blk_mq_debugfs_unregister_hctxs(q);
 
        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
-       kobject_put(&q->mq_kobj);
-
        kobject_put(&dev->kobj);
 
        q->mq_sysfs_init_done = false;
@@ -277,7 +277,19 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
 }
 
-static void blk_mq_sysfs_init(struct request_queue *q)
+void blk_mq_sysfs_deinit(struct request_queue *q)
+{
+       struct blk_mq_ctx *ctx;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               ctx = per_cpu_ptr(q->queue_ctx, cpu);
+               kobject_put(&ctx->kobj);
+       }
+       kobject_put(&q->mq_kobj);
+}
+
+void blk_mq_sysfs_init(struct request_queue *q)
 {
        struct blk_mq_ctx *ctx;
        int cpu;
@@ -297,8 +309,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
 
        blk_mq_disable_hotplug();
 
-       blk_mq_sysfs_init(q);
-
        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;
index b2fd175e84d79af071b28768e74de06cb673407b..159187a28d66521b4ab0109d3db38e6225ac71b3 100644 (file)
@@ -1955,16 +1955,6 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
        }
 }
 
-static void blk_mq_free_hw_queues(struct request_queue *q,
-               struct blk_mq_tag_set *set)
-{
-       struct blk_mq_hw_ctx *hctx;
-       unsigned int i;
-
-       queue_for_each_hw_ctx(q, hctx, i)
-               free_cpumask_var(hctx->cpumask);
-}
-
 static int blk_mq_init_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
@@ -2045,7 +2035,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
                struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
                struct blk_mq_hw_ctx *hctx;
 
-               memset(__ctx, 0, sizeof(*__ctx));
                __ctx->cpu = i;
                spin_lock_init(&__ctx->lock);
                INIT_LIST_HEAD(&__ctx->rq_list);
@@ -2257,15 +2246,19 @@ void blk_mq_release(struct request_queue *q)
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx)
                        continue;
-               kfree(hctx->ctxs);
-               kfree(hctx);
+               kobject_put(&hctx->kobj);
        }
 
        q->mq_map = NULL;
 
        kfree(q->queue_hw_ctx);
 
-       /* ctx kobj stays in queue_ctx */
+       /*
+        * Release .mq_kobj and the sw queues' kobjects now because
+        * both share their lifetime with the request queue.
+        */
+       blk_mq_sysfs_deinit(q);
+
        free_percpu(q->queue_ctx);
 }
 
@@ -2330,10 +2323,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                        if (hctx->tags)
                                blk_mq_free_map_and_requests(set, j);
                        blk_mq_exit_hctx(q, set, hctx, j);
-                       free_cpumask_var(hctx->cpumask);
                        kobject_put(&hctx->kobj);
-                       kfree(hctx->ctxs);
-                       kfree(hctx);
                        hctxs[j] = NULL;
 
                }
@@ -2352,6 +2342,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        if (!q->queue_ctx)
                goto err_exit;
 
+       /* init q->mq_kobj and sw queues' kobjects */
+       blk_mq_sysfs_init(q);
+
        q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
                                                GFP_KERNEL, set->numa_node);
        if (!q->queue_hw_ctx)
@@ -2442,7 +2435,6 @@ void blk_mq_free_queue(struct request_queue *q)
        blk_mq_del_queue_tag_set(q);
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
-       blk_mq_free_hw_queues(q, set);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
index 088ced003c13d7282712b423ade0521c16aeebdc..b79f9a7d8cf62010dd9a91d3b271e5d2474cb836 100644 (file)
@@ -77,6 +77,8 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 /*
  * sysfs helpers
  */
+extern void blk_mq_sysfs_init(struct request_queue *q);
+extern void blk_mq_sysfs_deinit(struct request_queue *q);
 extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
index b26a5ea115d00b51f20c8e59f09a420b2dbadc9f..a9c516a8b37dbceca9f46a74b7ccb7b4df35639c 100644 (file)
@@ -572,20 +572,6 @@ exit:
        disk_part_iter_exit(&piter);
 }
 
-void put_disk_devt(struct disk_devt *disk_devt)
-{
-       if (disk_devt && atomic_dec_and_test(&disk_devt->count))
-               disk_devt->release(disk_devt);
-}
-EXPORT_SYMBOL(put_disk_devt);
-
-void get_disk_devt(struct disk_devt *disk_devt)
-{
-       if (disk_devt)
-               atomic_inc(&disk_devt->count);
-}
-EXPORT_SYMBOL(get_disk_devt);
-
 /**
  * device_add_disk - add partitioning information to kernel list
  * @parent: parent device for the disk
@@ -626,13 +612,6 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
 
        disk_alloc_events(disk);
 
-       /*
-        * Take a reference on the devt and assign it to queue since it
-        * must not be reallocated while the bdi is registered
-        */
-       disk->queue->disk_devt = disk->disk_devt;
-       get_disk_devt(disk->disk_devt);
-
        /* Register BDI before referencing it from bdev */
        bdi = disk->queue->backing_dev_info;
        bdi_register_owner(bdi, disk_to_dev(disk));
@@ -681,12 +660,16 @@ void del_gendisk(struct gendisk *disk)
        disk->flags &= ~GENHD_FL_UP;
 
        sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
-       /*
-        * Unregister bdi before releasing device numbers (as they can get
-        * reused and we'd get clashes in sysfs).
-        */
-       bdi_unregister(disk->queue->backing_dev_info);
-       blk_unregister_queue(disk);
+       if (disk->queue) {
+               /*
+                * Unregister bdi before releasing device numbers (as they can
+                * get reused and we'd get clashes in sysfs).
+                */
+               bdi_unregister(disk->queue->backing_dev_info);
+               blk_unregister_queue(disk);
+       } else {
+               WARN_ON(1);
+       }
        blk_unregister_region(disk_devt(disk), disk->minors);
 
        part_stat_set_all(&disk->part0, 0);
index 1e18dca360fc501033762d4c505c2e32c4674ee6..14035f826b5e350dbec1710d60aca560f2c1066b 100644 (file)
@@ -1023,7 +1023,6 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont)
 
 static int gen_key(struct opal_dev *dev, void *data)
 {
-       const u8 *method;
        u8 uid[OPAL_UID_LENGTH];
        int err = 0;
 
@@ -1031,7 +1030,6 @@ static int gen_key(struct opal_dev *dev, void *data)
        set_comid(dev, dev->comid);
 
        memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len));
-       method = opalmethod[OPAL_GENKEY];
        kfree(dev->prev_data);
        dev->prev_data = NULL;
 
@@ -1669,7 +1667,6 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
 static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
 {
        u8 lr_buffer[OPAL_UID_LENGTH];
-       const u8 *method;
        struct opal_lock_unlock *lkul = data;
        u8 read_locked = 1, write_locked = 1;
        int err = 0;
@@ -1677,7 +1674,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
        clear_opal_cmd(dev);
        set_comid(dev, dev->comid);
 
-       method = opalmethod[OPAL_SET];
        if (build_locking_range(lr_buffer, sizeof(lr_buffer),
                                lkul->session.opal_key.lr) < 0)
                return -ERANGE;
@@ -1733,14 +1729,12 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data)
 {
        u8 lr_buffer[OPAL_UID_LENGTH];
        u8 read_locked = 1, write_locked = 1;
-       const u8 *method;
        struct opal_lock_unlock *lkul = data;
        int ret;
 
        clear_opal_cmd(dev);
        set_comid(dev, dev->comid);
 
-       method = opalmethod[OPAL_SET];
        if (build_locking_range(lr_buffer, sizeof(lr_buffer),
                                lkul->session.opal_key.lr) < 0)
                return -ERANGE;
@@ -2133,7 +2127,7 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
                pr_err("Locking state was not RO or RW\n");
                return -EINVAL;
        }
-       if (lk_unlk->session.who < OPAL_USER1 &&
+       if (lk_unlk->session.who < OPAL_USER1 ||
            lk_unlk->session.who > OPAL_USER9) {
                pr_err("Authority was not within the range of users: %d\n",
                       lk_unlk->session.who);
@@ -2316,7 +2310,7 @@ static int opal_activate_user(struct opal_dev *dev,
        int ret;
 
        /* We can't activate Admin1 it's active as manufactured */
-       if (opal_session->who < OPAL_USER1 &&
+       if (opal_session->who < OPAL_USER1 ||
            opal_session->who > OPAL_USER9) {
                pr_err("Who was not a valid user: %d\n", opal_session->who);
                return -EINVAL;
index 10aed84244f51854305ff7e0c59277731ab826f2..939641d6e2625e80babab415331c1ac187d88251 100644 (file)
@@ -50,7 +50,7 @@
                         the slower the port i/o.  In some cases, setting
                         this to zero will speed up the device. (default -1)
                         
-            major       You may use this parameter to overide the
+            major       You may use this parameter to override the
                         default major number (46) that this driver
                         will use.  Be sure to change the device
                         name as well.
index 644ba0888bd41bb5e54f4ab58345b6af9519e0c2..9cfd2e06a64917a99a3f70da9753cbc8bd27401c 100644 (file)
@@ -61,7 +61,7 @@
                         first drive found.
                        
 
-            major       You may use this parameter to overide the
+            major       You may use this parameter to override the
                         default major number (45) that this driver
                         will use.  Be sure to change the device
                         name as well.
index ed93e8badf5684d513ef78a8c03f74ccc4531ecd..14c5d32f5d8bc067532ba6ea95070d5c2a76db73 100644 (file)
@@ -59,7 +59,7 @@
                         the slower the port i/o.  In some cases, setting
                         this to zero will speed up the device. (default -1)
 
-           major       You may use this parameter to overide the
+           major       You may use this parameter to override the
                        default major number (47) that this driver
                        will use.  Be sure to change the device
                        name as well.
index 5db955fe3a949018e353ebaa1b98a4a1f17b86ef..3b5882bfb7364e33ab3f7b8355219ee2c977c4fa 100644 (file)
@@ -84,7 +84,7 @@
                        the slower the port i/o.  In some cases, setting
                        this to zero will speed up the device. (default -1)
 
-           major       You may use this parameter to overide the
+           major       You may use this parameter to override the
                        default major number (97) that this driver
                        will use.  Be sure to change the device
                        name as well.
index 61fc6824299ac13c762e84dde6cae8baf8411e37..e815312a00add6b96651f2a956dc84d14d90adc7 100644 (file)
@@ -61,7 +61,7 @@
                         the slower the port i/o.  In some cases, setting
                         this to zero will speed up the device. (default -1)
 
-           major       You may use this parameter to overide the
+           major       You may use this parameter to override the
                        default major number (96) that this driver
                        will use.  Be sure to change the device
                        name as well.
index e27d89a36c34170d1c894b60f43ab3903a5fbf70..dceb5edd1e5455f4c1b101e8ad3ce4dba46ac22f 100644 (file)
@@ -1189,6 +1189,8 @@ static int zram_add(void)
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
+       zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
+       zram->disk->queue->limits.chunk_sectors = 0;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
        /*
         * zram_bio_discard() will clear all logical blocks if logical block
index a475432818642fee4547699011ba4cf5aa619f3a..38b9fdf854a49a7e4ba9950e365904d18b64caf5 100644 (file)
@@ -2532,4 +2532,5 @@ static int __init cpufreq_core_init(void)
 
        return 0;
 }
+module_param(off, int, 0444);
 core_initcall(cpufreq_core_init);
index b1fbaa30ae0415c330b9b1069e17900b99a48868..3d37219a0dd7afc3108b017f1d2960868efb7903 100644 (file)
@@ -377,6 +377,7 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
        intel_pstate_init_limits(limits);
        limits->min_perf_pct = 100;
        limits->min_perf = int_ext_tofp(1);
+       limits->min_sysfs_pct = 100;
 }
 
 static DEFINE_MUTEX(intel_pstate_driver_lock);
@@ -968,11 +969,20 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
 }
 
 static void intel_pstate_update_policies(void)
+       __releases(&intel_pstate_limits_lock)
+       __acquires(&intel_pstate_limits_lock)
 {
+       struct perf_limits *saved_limits = limits;
        int cpu;
 
+       mutex_unlock(&intel_pstate_limits_lock);
+
        for_each_possible_cpu(cpu)
                cpufreq_update_policy(cpu);
+
+       mutex_lock(&intel_pstate_limits_lock);
+
+       limits = saved_limits;
 }
 
 /************************** debugfs begin ************************/
@@ -1180,10 +1190,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 
        limits->no_turbo = clamp_t(int, input, 0, 1);
 
-       mutex_unlock(&intel_pstate_limits_lock);
-
        intel_pstate_update_policies();
 
+       mutex_unlock(&intel_pstate_limits_lock);
+
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
@@ -1217,10 +1227,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                   limits->max_perf_pct);
        limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
 
-       mutex_unlock(&intel_pstate_limits_lock);
-
        intel_pstate_update_policies();
 
+       mutex_unlock(&intel_pstate_limits_lock);
+
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
@@ -1254,10 +1264,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                   limits->min_perf_pct);
        limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
 
-       mutex_unlock(&intel_pstate_limits_lock);
-
        intel_pstate_update_policies();
 
+       mutex_unlock(&intel_pstate_limits_lock);
+
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
@@ -1874,13 +1884,11 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 
        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
        pstate = clamp_t(int, pstate, min_perf, max_perf);
-       trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
        return pstate;
 }
 
 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
 {
-       pstate = intel_pstate_prepare_request(cpu, pstate);
        if (pstate == cpu->pstate.current_pstate)
                return;
 
@@ -1900,6 +1908,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 
        update_turbo_state();
 
+       target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+       trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
        intel_pstate_update_pstate(cpu, target_pstate);
 
        sample = &cpu->sample;
@@ -2132,16 +2142,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        mutex_lock(&intel_pstate_limits_lock);
 
        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+               pr_debug("set performance\n");
                if (!perf_limits) {
                        limits = &performance_limits;
                        perf_limits = limits;
                }
-               if (policy->max >= policy->cpuinfo.max_freq &&
-                   !limits->no_turbo) {
-                       pr_debug("set performance\n");
-                       intel_pstate_set_performance_limits(perf_limits);
-                       goto out;
-               }
        } else {
                pr_debug("set powersave\n");
                if (!perf_limits) {
@@ -2152,7 +2157,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        }
 
        intel_pstate_update_perf_limits(policy, perf_limits);
- out:
+
        if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
                /*
                 * NOHZ_FULL CPUs need this as the governor callback may not
@@ -2198,9 +2203,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
                unsigned int max_freq, min_freq;
 
                max_freq = policy->cpuinfo.max_freq *
-                                               limits->max_sysfs_pct / 100;
+                                       perf_limits->max_sysfs_pct / 100;
                min_freq = policy->cpuinfo.max_freq *
-                                               limits->min_sysfs_pct / 100;
+                                       perf_limits->min_sysfs_pct / 100;
                cpufreq_verify_within_limits(policy, min_freq, max_freq);
        }
 
@@ -2243,13 +2248,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
        cpu = all_cpu_data[policy->cpu];
 
-       /*
-        * We need sane value in the cpu->perf_limits, so inherit from global
-        * perf_limits limits, which are seeded with values based on the
-        * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up.
-        */
        if (per_cpu_limits)
-               memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));
+               intel_pstate_init_limits(cpu->perf_limits);
 
        policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2301,7 +2301,6 @@ static struct cpufreq_driver intel_pstate = {
 static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu = all_cpu_data[policy->cpu];
-       struct perf_limits *perf_limits = limits;
 
        update_turbo_state();
        policy->cpuinfo.max_freq = limits->turbo_disabled ?
@@ -2309,15 +2308,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 
        cpufreq_verify_within_cpu_limits(policy);
 
-       if (per_cpu_limits)
-               perf_limits = cpu->perf_limits;
-
-       mutex_lock(&intel_pstate_limits_lock);
-
-       intel_pstate_update_perf_limits(policy, perf_limits);
-
-       mutex_unlock(&intel_pstate_limits_lock);
-
        return 0;
 }
 
@@ -2370,6 +2360,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
                wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
                              pstate_funcs.get_val(cpu, target_pstate));
        }
+       freqs.new = target_pstate * cpu->pstate.scaling;
        cpufreq_freq_transition_end(policy, &freqs, false);
 
        return 0;
@@ -2383,8 +2374,9 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 
        target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
        target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+       target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
        intel_pstate_update_pstate(cpu, target_pstate);
-       return target_freq;
+       return target_pstate * cpu->pstate.scaling;
 }
 
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -2437,8 +2429,11 @@ static int intel_pstate_register_driver(void)
 
        intel_pstate_init_limits(&powersave_limits);
        intel_pstate_set_performance_limits(&performance_limits);
-       limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ?
-                       &performance_limits : &powersave_limits;
+       if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
+           intel_pstate_driver == &intel_pstate)
+               limits = &performance_limits;
+       else
+               limits = &powersave_limits;
 
        ret = cpufreq_register_driver(intel_pstate_driver);
        if (ret) {
index 43a0c8a26ab0c56c25b56f425d9ce8192780e54b..00a16ab601cb07d4b525a89a8ca6bf3a5393c94b 100644 (file)
@@ -82,7 +82,7 @@ void cryp_activity(struct cryp_device_data *device_data,
 void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
 {
        /*
-        * We always need to disble the hardware before trying to flush the
+        * We always need to disable the hardware before trying to flush the
         * FIFO. This is something that isn't written in the design
         * specification, but we have been informed by the hardware designers
         * that this must be done.
index 31375bdde6f1769ec674082141c77fb7383e302a..011800f621c6ce5574f740e85188aec215e1f2e5 100644 (file)
@@ -788,7 +788,7 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       /* disble sdma engine before programing it */
+       /* disable sdma engine before programing it */
        sdma_v3_0_ctx_switch_enable(adev, false);
        sdma_v3_0_enable(adev, false);
 
index b5bfbe50bd87167a7b28f528a74a034b1f68a738..b0ff304ce3dc4a9ac18f359a73498efede18cbd4 100644 (file)
@@ -32,6 +32,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
 {
        const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
        struct rcar_du_device *rcdu = crtc->group->dev;
+       struct vsp1_du_lif_config cfg = {
+               .width = mode->hdisplay,
+               .height = mode->vdisplay,
+       };
        struct rcar_du_plane_state state = {
                .state = {
                        .crtc = &crtc->crtc,
@@ -66,12 +70,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
         */
        crtc->group->need_restart = true;
 
-       vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay);
+       vsp1_du_setup_lif(crtc->vsp->vsp, &cfg);
 }
 
 void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
 {
-       vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0);
+       vsp1_du_setup_lif(crtc->vsp->vsp, NULL);
 }
 
 void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
index 81a80c82f1bd2b6a55df393a3df55376d709adfd..bd0d1988feb2ad85f94faaebb939167119c1cca0 100644 (file)
@@ -543,7 +543,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
        /*
         * In case a device driver's probe() fails (e.g.,
         * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
-        * rescinded later (e.g., we dynamically disble an Integrated Service
+        * rescinded later (e.g., we dynamically disable an Integrated Service
         * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
         * here we should skip most of the below cleanup work.
         */
index 409849165838fba631eeb74983c7fc7e8e68b152..f64a36007800cf91132b015773a78436cf488227 100644 (file)
@@ -239,7 +239,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
                        }
                }
        } else {
-               // Disble B channel interrupts
+               // Disable B channel interrupts
                st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL);
 
                // Disable B channel FIFOs
index a126919ed102763e9d86da2a9ce615ff0b8a2001..5d13930f0f22fc42e40228cee8eee1ecb7cfa8e5 100644 (file)
@@ -4,7 +4,6 @@
 
 #include <linux/blkdev.h>
 #include <linux/errno.h>
-#include <linux/blkdev.h>
 #include <linux/kernel.h>
 #include <linux/sched/clock.h>
 #include <linux/llist.h>
index 7a681d8202c7ee9e9eed6dd2dbb4bf118d8f9508..4442e478db72a2420207efc2deca49d56c92c30c 100644 (file)
@@ -256,8 +256,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 *
 * The actual DAP implementation may be restricted to only one of the modes.
 * A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the mode defined below.
-*
+* overrides or cannot handle the mode defined below.
 */
 #ifndef DRXDAP_SINGLE_MASTER
 #define DRXDAP_SINGLE_MASTER 1
@@ -272,7 +271,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 *
 * This maximum size may be restricted by the actual DAP implementation.
 * A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the chunksize defined below.
+* overrides or cannot handle the chunksize defined below.
 *
 * Beware that the DAP uses  DRXDAP_MAX_WCHUNKSIZE to create a temporary data
 * buffer. Do not undefine or choose too large, unless your system is able to
@@ -292,8 +291,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 *
 * This maximum size may be restricted by the actual DAP implementation.
 * A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the chunksize defined below.
-*
+* overrides or cannot handle the chunksize defined below.
 */
 #ifndef DRXDAP_MAX_RCHUNKSIZE
 #define  DRXDAP_MAX_RCHUNKSIZE 60
index b4b583f7137a54eb86f8592724603b296afb9347..b4c0f10fc3b0f12eb9f114ac063ac5b0a85ecb1a 100644 (file)
@@ -54,12 +54,11 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
 /**
  * vsp1_du_setup_lif - Setup the output part of the VSP pipeline
  * @dev: the VSP device
- * @width: output frame width in pixels
- * @height: output frame height in pixels
+ * @cfg: the LIF configuration
  *
- * Configure the output part of VSP DRM pipeline for the given frame @width and
- * @height. This sets up formats on the BRU source pad, the WPF0 sink and source
- * pads, and the LIF sink pad.
+ * Configure the output part of VSP DRM pipeline for the given frame @cfg.width
+ * and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink
+ * and source pads, and the LIF sink pad.
  *
  * As the media bus code on the BRU source pad is conditioned by the
  * configuration of the BRU sink 0 pad, we also set up the formats on all BRU
@@ -69,8 +68,7 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
  *
  * Return 0 on success or a negative error code on failure.
  */
-int vsp1_du_setup_lif(struct device *dev, unsigned int width,
-                     unsigned int height)
+int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
 {
        struct vsp1_device *vsp1 = dev_get_drvdata(dev);
        struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
@@ -79,11 +77,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
        unsigned int i;
        int ret;
 
-       dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
-               __func__, width, height);
-
-       if (width == 0 || height == 0) {
-               /* Zero width or height means the CRTC is being disabled, stop
+       if (!cfg) {
+               /* NULL configuration means the CRTC is being disabled, stop
                 * the pipeline and turn the light off.
                 */
                ret = vsp1_pipeline_stop(pipe);
@@ -108,6 +103,9 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
                return 0;
        }
 
+       dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
+               __func__, cfg->width, cfg->height);
+
        /* Configure the format at the BRU sinks and propagate it through the
         * pipeline.
         */
@@ -117,8 +115,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
        for (i = 0; i < bru->entity.source_pad; ++i) {
                format.pad = i;
 
-               format.format.width = width;
-               format.format.height = height;
+               format.format.width = cfg->width;
+               format.format.height = cfg->height;
                format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
                format.format.field = V4L2_FIELD_NONE;
 
@@ -133,8 +131,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
        }
 
        format.pad = bru->entity.source_pad;
-       format.format.width = width;
-       format.format.height = height;
+       format.format.width = cfg->width;
+       format.format.height = cfg->height;
        format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
        format.format.field = V4L2_FIELD_NONE;
 
@@ -180,7 +178,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
        /* Verify that the format at the output of the pipeline matches the
         * requested frame size and media bus code.
         */
-       if (format.format.width != width || format.format.height != height ||
+       if (format.format.width != cfg->width ||
+           format.format.height != cfg->height ||
            format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) {
                dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__);
                return -EPIPE;
index 393dccaabdd02ac83744faf049ef1a08675fe7ae..1688893a65bb57d2d2ff0d667f82d27fbd88dc37 100644 (file)
@@ -436,6 +436,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
                return -ERESTARTSYS;
 
        ir = irctls[iminor(inode)];
+       mutex_unlock(&lirc_dev_lock);
+
        if (!ir) {
                retval = -ENODEV;
                goto error;
@@ -476,8 +478,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
        }
 
 error:
-       mutex_unlock(&lirc_dev_lock);
-
        nonseekable_open(inode, file);
 
        return retval;
index b109f8246b968d99cacde9b6ee73719f554a4bfd..ec4b25bd2ec29912f062ae1b654a5ac05434b6f7 100644 (file)
@@ -176,12 +176,13 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
 {
        u8 tolerance, config;
        struct nvt_dev *nvt = dev->priv;
+       unsigned long flags;
        int i;
 
        /* hardcode the tolerance to 10% */
        tolerance = DIV_ROUND_UP(count, 10);
 
-       spin_lock(&nvt->lock);
+       spin_lock_irqsave(&nvt->lock, flags);
 
        nvt_clear_cir_wake_fifo(nvt);
        nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
@@ -203,7 +204,7 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
 
        nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
 
-       spin_unlock(&nvt->lock);
+       spin_unlock_irqrestore(&nvt->lock, flags);
 }
 
 static ssize_t wakeup_data_show(struct device *dev,
index 2424946740e64fb602f55a30d5f158a212cc88ce..d84533699668d20e1797bc7feef1693f74e87be5 100644 (file)
@@ -1663,6 +1663,7 @@ static int rc_setup_rx_device(struct rc_dev *dev)
 {
        int rc;
        struct rc_map *rc_map;
+       u64 rc_type;
 
        if (!dev->map_name)
                return -EINVAL;
@@ -1677,15 +1678,18 @@ static int rc_setup_rx_device(struct rc_dev *dev)
        if (rc)
                return rc;
 
-       if (dev->change_protocol) {
-               u64 rc_type = (1ll << rc_map->rc_type);
+       rc_type = BIT_ULL(rc_map->rc_type);
 
+       if (dev->change_protocol) {
                rc = dev->change_protocol(dev, &rc_type);
                if (rc < 0)
                        goto out_table;
                dev->enabled_protocols = rc_type;
        }
 
+       if (dev->driver_type == RC_DRIVER_IR_RAW)
+               ir_raw_load_modules(&rc_type);
+
        set_bit(EV_KEY, dev->input_dev->evbit);
        set_bit(EV_REP, dev->input_dev->evbit);
        set_bit(EV_MSC, dev->input_dev->evbit);
@@ -1777,12 +1781,6 @@ int rc_register_device(struct rc_dev *dev)
                dev->input_name ?: "Unspecified device", path ?: "N/A");
        kfree(path);
 
-       if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
-               rc = rc_setup_rx_device(dev);
-               if (rc)
-                       goto out_dev;
-       }
-
        if (dev->driver_type == RC_DRIVER_IR_RAW ||
            dev->driver_type == RC_DRIVER_IR_RAW_TX) {
                if (!raw_init) {
@@ -1791,7 +1789,13 @@ int rc_register_device(struct rc_dev *dev)
                }
                rc = ir_raw_event_register(dev);
                if (rc < 0)
-                       goto out_rx;
+                       goto out_dev;
+       }
+
+       if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+               rc = rc_setup_rx_device(dev);
+               if (rc)
+                       goto out_raw;
        }
 
        /* Allow the RC sysfs nodes to be accessible */
@@ -1803,8 +1807,8 @@ int rc_register_device(struct rc_dev *dev)
 
        return 0;
 
-out_rx:
-       rc_free_rx_device(dev);
+out_raw:
+       ir_raw_event_unregister(dev);
 out_dev:
        device_del(&dev->dev);
 out_unlock:
index 923fb2299553cb96c0db87368a322ea875da4652..41b54e40176c2393b846a1fb59f6e2cacf187c74 100644 (file)
@@ -487,10 +487,69 @@ static void serial_ir_timeout(unsigned long arg)
        ir_raw_event_handle(serial_ir.rcdev);
 }
 
+/* Needed by serial_ir_probe() */
+static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+                       unsigned int count);
+static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle);
+static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier);
+static int serial_ir_open(struct rc_dev *rcdev);
+static void serial_ir_close(struct rc_dev *rcdev);
+
 static int serial_ir_probe(struct platform_device *dev)
 {
+       struct rc_dev *rcdev;
        int i, nlow, nhigh, result;
 
+       rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW);
+       if (!rcdev)
+               return -ENOMEM;
+
+       if (hardware[type].send_pulse && hardware[type].send_space)
+               rcdev->tx_ir = serial_ir_tx;
+       if (hardware[type].set_send_carrier)
+               rcdev->s_tx_carrier = serial_ir_tx_carrier;
+       if (hardware[type].set_duty_cycle)
+               rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
+
+       switch (type) {
+       case IR_HOMEBREW:
+               rcdev->input_name = "Serial IR type home-brew";
+               break;
+       case IR_IRDEO:
+               rcdev->input_name = "Serial IR type IRdeo";
+               break;
+       case IR_IRDEO_REMOTE:
+               rcdev->input_name = "Serial IR type IRdeo remote";
+               break;
+       case IR_ANIMAX:
+               rcdev->input_name = "Serial IR type AnimaX";
+               break;
+       case IR_IGOR:
+               rcdev->input_name = "Serial IR type IgorPlug";
+               break;
+       }
+
+       rcdev->input_phys = KBUILD_MODNAME "/input0";
+       rcdev->input_id.bustype = BUS_HOST;
+       rcdev->input_id.vendor = 0x0001;
+       rcdev->input_id.product = 0x0001;
+       rcdev->input_id.version = 0x0100;
+       rcdev->open = serial_ir_open;
+       rcdev->close = serial_ir_close;
+       rcdev->dev.parent = &serial_ir.pdev->dev;
+       rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+       rcdev->driver_name = KBUILD_MODNAME;
+       rcdev->map_name = RC_MAP_RC6_MCE;
+       rcdev->min_timeout = 1;
+       rcdev->timeout = IR_DEFAULT_TIMEOUT;
+       rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
+       rcdev->rx_resolution = 250000;
+
+       serial_ir.rcdev = rcdev;
+
+       setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
+                   (unsigned long)&serial_ir);
+
        result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
                                  share_irq ? IRQF_SHARED : 0,
                                  KBUILD_MODNAME, &hardware);
@@ -516,9 +575,6 @@ static int serial_ir_probe(struct platform_device *dev)
                return -EBUSY;
        }
 
-       setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
-                   (unsigned long)&serial_ir);
-
        result = hardware_init_port();
        if (result < 0)
                return result;
@@ -552,7 +608,8 @@ static int serial_ir_probe(struct platform_device *dev)
                         sense ? "low" : "high");
 
        dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
-       return 0;
+
+       return devm_rc_register_device(&dev->dev, rcdev);
 }
 
 static int serial_ir_open(struct rc_dev *rcdev)
@@ -723,7 +780,6 @@ static void serial_ir_exit(void)
 
 static int __init serial_ir_init_module(void)
 {
-       struct rc_dev *rcdev;
        int result;
 
        switch (type) {
@@ -754,63 +810,9 @@ static int __init serial_ir_init_module(void)
                sense = !!sense;
 
        result = serial_ir_init();
-       if (result)
-               return result;
-
-       rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev, RC_DRIVER_IR_RAW);
-       if (!rcdev) {
-               result = -ENOMEM;
-               goto serial_cleanup;
-       }
-
-       if (hardware[type].send_pulse && hardware[type].send_space)
-               rcdev->tx_ir = serial_ir_tx;
-       if (hardware[type].set_send_carrier)
-               rcdev->s_tx_carrier = serial_ir_tx_carrier;
-       if (hardware[type].set_duty_cycle)
-               rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
-
-       switch (type) {
-       case IR_HOMEBREW:
-               rcdev->input_name = "Serial IR type home-brew";
-               break;
-       case IR_IRDEO:
-               rcdev->input_name = "Serial IR type IRdeo";
-               break;
-       case IR_IRDEO_REMOTE:
-               rcdev->input_name = "Serial IR type IRdeo remote";
-               break;
-       case IR_ANIMAX:
-               rcdev->input_name = "Serial IR type AnimaX";
-               break;
-       case IR_IGOR:
-               rcdev->input_name = "Serial IR type IgorPlug";
-               break;
-       }
-
-       rcdev->input_phys = KBUILD_MODNAME "/input0";
-       rcdev->input_id.bustype = BUS_HOST;
-       rcdev->input_id.vendor = 0x0001;
-       rcdev->input_id.product = 0x0001;
-       rcdev->input_id.version = 0x0100;
-       rcdev->open = serial_ir_open;
-       rcdev->close = serial_ir_close;
-       rcdev->dev.parent = &serial_ir.pdev->dev;
-       rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
-       rcdev->driver_name = KBUILD_MODNAME;
-       rcdev->map_name = RC_MAP_RC6_MCE;
-       rcdev->min_timeout = 1;
-       rcdev->timeout = IR_DEFAULT_TIMEOUT;
-       rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
-       rcdev->rx_resolution = 250000;
-
-       serial_ir.rcdev = rcdev;
-
-       result = rc_register_device(rcdev);
-
        if (!result)
                return 0;
-serial_cleanup:
+
        serial_ir_exit();
        return result;
 }
@@ -818,7 +820,6 @@ serial_cleanup:
 static void __exit serial_ir_exit_module(void)
 {
        del_timer_sync(&serial_ir.timeout_timer);
-       rc_unregister_device(serial_ir.rcdev);
        serial_ir_exit();
 }
 
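The serial_ir.c rework moves the rc_dev over to the device-managed API: devm_rc_allocate_device() ties the allocation to the platform device, and devm_rc_register_device() queues the unregister for unbind time, which is why serial_ir_exit_module() can drop its explicit rc_unregister_device() call. Reduced to its skeleton, the probe path looks like this (fields trimmed):

    /* Sketch: devm-managed rc_dev, no manual unregister on module exit. */
    static int demo_probe(struct platform_device *pdev)
    {
            struct rc_dev *rcdev;

            rcdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
            if (!rcdev)
                    return -ENOMEM;

            rcdev->driver_name = KBUILD_MODNAME;
            /* ...remaining fields as in serial_ir_probe()... */

            /* Freed and unregistered automatically on device unbind. */
            return devm_rc_register_device(&pdev->dev, rcdev);
    }
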
index 6ca502d834b4f2cfcc0e6c6a3699bdaaea04d293..4f42d57f81d9541d25f02af65086f6465af90728 100644 (file)
@@ -68,6 +68,7 @@
 struct dw2102_state {
        u8 initialized;
        u8 last_lock;
+       u8 data[MAX_XFER_SIZE + 4];
        struct i2c_client *i2c_client_demod;
        struct i2c_client *i2c_client_tuner;
 
@@ -661,62 +662,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                                                                int num)
 {
        struct dvb_usb_device *d = i2c_get_adapdata(adap);
-       u8 obuf[0x40], ibuf[0x40];
+       struct dw2102_state *state;
 
        if (!d)
                return -ENODEV;
+
+       state = d->priv;
+
        if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
                return -EAGAIN;
+       if (mutex_lock_interruptible(&d->data_mutex) < 0) {
+               mutex_unlock(&d->i2c_mutex);
+               return -EAGAIN;
+       }
 
        switch (num) {
        case 1:
                switch (msg[0].addr) {
                case SU3000_STREAM_CTRL:
-                       obuf[0] = msg[0].buf[0] + 0x36;
-                       obuf[1] = 3;
-                       obuf[2] = 0;
-                       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0)
+                       state->data[0] = msg[0].buf[0] + 0x36;
+                       state->data[1] = 3;
+                       state->data[2] = 0;
+                       if (dvb_usb_generic_rw(d, state->data, 3,
+                                       state->data, 0, 0) < 0)
                                err("i2c transfer failed.");
                        break;
                case DW2102_RC_QUERY:
-                       obuf[0] = 0x10;
-                       if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0)
+                       state->data[0] = 0x10;
+                       if (dvb_usb_generic_rw(d, state->data, 1,
+                                       state->data, 2, 0) < 0)
                                err("i2c transfer failed.");
-                       msg[0].buf[1] = ibuf[0];
-                       msg[0].buf[0] = ibuf[1];
+                       msg[0].buf[1] = state->data[0];
+                       msg[0].buf[0] = state->data[1];
                        break;
                default:
                        /* always i2c write*/
-                       obuf[0] = 0x08;
-                       obuf[1] = msg[0].addr;
-                       obuf[2] = msg[0].len;
+                       state->data[0] = 0x08;
+                       state->data[1] = msg[0].addr;
+                       state->data[2] = msg[0].len;
 
-                       memcpy(&obuf[3], msg[0].buf, msg[0].len);
+                       memcpy(&state->data[3], msg[0].buf, msg[0].len);
 
-                       if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3,
-                                               ibuf, 1, 0) < 0)
+                       if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
+                                               state->data, 1, 0) < 0)
                                err("i2c transfer failed.");
 
                }
                break;
        case 2:
                /* always i2c read */
-               obuf[0] = 0x09;
-               obuf[1] = msg[0].len;
-               obuf[2] = msg[1].len;
-               obuf[3] = msg[0].addr;
-               memcpy(&obuf[4], msg[0].buf, msg[0].len);
-
-               if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4,
-                                       ibuf, msg[1].len + 1, 0) < 0)
+               state->data[0] = 0x09;
+               state->data[1] = msg[0].len;
+               state->data[2] = msg[1].len;
+               state->data[3] = msg[0].addr;
+               memcpy(&state->data[4], msg[0].buf, msg[0].len);
+
+               if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+                                       state->data, msg[1].len + 1, 0) < 0)
                        err("i2c transfer failed.");
 
-               memcpy(msg[1].buf, &ibuf[1], msg[1].len);
+               memcpy(msg[1].buf, &state->data[1], msg[1].len);
                break;
        default:
                warn("more than 2 i2c messages at a time is not handled yet.");
                break;
        }
+       mutex_unlock(&d->data_mutex);
        mutex_unlock(&d->i2c_mutex);
        return num;
 }
@@ -844,17 +855,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
 static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
 {
        struct dw2102_state *state = (struct dw2102_state *)d->priv;
-       u8 obuf[] = {0xde, 0};
+       int ret = 0;
 
        info("%s: %d, initialized %d", __func__, i, state->initialized);
 
        if (i && !state->initialized) {
+               mutex_lock(&d->data_mutex);
+
+               state->data[0] = 0xde;
+               state->data[1] = 0;
+
                state->initialized = 1;
                /* reset board */
-               return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+               ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
+               mutex_unlock(&d->data_mutex);
        }
 
-       return 0;
+       return ret;
 }
 
 static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
@@ -1309,49 +1326,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
        return 0;
 }
 
-static int su3000_frontend_attach(struct dvb_usb_adapter *d)
+static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
 {
-       u8 obuf[3] = { 0xe, 0x80, 0 };
-       u8 ibuf[] = { 0 };
+       struct dvb_usb_device *d = adap->dev;
+       struct dw2102_state *state = d->priv;
+
+       mutex_lock(&d->data_mutex);
+
+       state->data[0] = 0xe;
+       state->data[1] = 0x80;
+       state->data[2] = 0;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x02;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x02;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
        msleep(300);
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x83;
-       obuf[2] = 0;
+       state->data[0] = 0xe;
+       state->data[1] = 0x83;
+       state->data[2] = 0;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x83;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x83;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0x51;
+       state->data[0] = 0x51;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
                err("command 0x51 transfer failed.");
 
-       d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
-                                       &d->dev->i2c_adap);
-       if (d->fe_adap[0].fe == NULL)
+       mutex_unlock(&d->data_mutex);
+
+       adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
+                                       &d->i2c_adap);
+       if (adap->fe_adap[0].fe == NULL)
                return -EIO;
 
-       if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+       if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
                                &dw2104_ts2020_config,
-                               &d->dev->i2c_adap)) {
+                               &d->i2c_adap)) {
                info("Attached DS3000/TS2020!");
                return 0;
        }
@@ -1360,47 +1385,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
        return -EIO;
 }
 
-static int t220_frontend_attach(struct dvb_usb_adapter *d)
+static int t220_frontend_attach(struct dvb_usb_adapter *adap)
 {
-       u8 obuf[3] = { 0xe, 0x87, 0 };
-       u8 ibuf[] = { 0 };
+       struct dvb_usb_device *d = adap->dev;
+       struct dw2102_state *state = d->priv;
+
+       mutex_lock(&d->data_mutex);
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       state->data[0] = 0xe;
+       state->data[1] = 0x87;
+       state->data[2] = 0x0;
+
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x86;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x86;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x80;
-       obuf[2] = 0;
+       state->data[0] = 0xe;
+       state->data[1] = 0x80;
+       state->data[2] = 0;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
        msleep(50);
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x80;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x80;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0x51;
+       state->data[0] = 0x51;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
                err("command 0x51 transfer failed.");
 
-       d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
-                                       &d->dev->i2c_adap, NULL);
-       if (d->fe_adap[0].fe != NULL) {
-               if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
-                                       &d->dev->i2c_adap, &tda18271_config)) {
+       mutex_unlock(&d->data_mutex);
+
+       adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
+                                       &d->i2c_adap, NULL);
+       if (adap->fe_adap[0].fe != NULL) {
+               if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
+                                       &d->i2c_adap, &tda18271_config)) {
                        info("Attached TDA18271HD/CXD2820R!");
                        return 0;
                }
@@ -1410,23 +1443,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d)
        return -EIO;
 }
 
-static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
 {
-       u8 obuf[] = { 0x51 };
-       u8 ibuf[] = { 0 };
+       struct dvb_usb_device *d = adap->dev;
+       struct dw2102_state *state = d->priv;
+
+       mutex_lock(&d->data_mutex);
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+       state->data[0] = 0x51;
+
+       if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
                err("command 0x51 transfer failed.");
 
-       d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
-                                       &d->dev->i2c_adap);
+       mutex_unlock(&d->data_mutex);
 
-       if (d->fe_adap[0].fe == NULL)
+       adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
+                                       &s421_m88rs2000_config,
+                                       &d->i2c_adap);
+
+       if (adap->fe_adap[0].fe == NULL)
                return -EIO;
 
-       if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+       if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
                                &dw2104_ts2020_config,
-                               &d->dev->i2c_adap)) {
+                               &d->i2c_adap)) {
                info("Attached RS2000/TS2020!");
                return 0;
        }
@@ -1439,44 +1479,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
 {
        struct dvb_usb_device *d = adap->dev;
        struct dw2102_state *state = d->priv;
-       u8 obuf[3] = { 0xe, 0x80, 0 };
-       u8 ibuf[] = { 0 };
        struct i2c_adapter *i2c_adapter;
        struct i2c_client *client;
        struct i2c_board_info board_info;
        struct m88ds3103_platform_data m88ds3103_pdata = {};
        struct ts2020_config ts2020_config = {};
 
-       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+       mutex_lock(&d->data_mutex);
+
+       state->data[0] = 0xe;
+       state->data[1] = 0x80;
+       state->data[2] = 0x0;
+
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x02;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x02;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
        msleep(300);
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x83;
-       obuf[2] = 0;
+       state->data[0] = 0xe;
+       state->data[1] = 0x83;
+       state->data[2] = 0;
 
-       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x83;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x83;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0x51;
+       state->data[0] = 0x51;
 
-       if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
                err("command 0x51 transfer failed.");
 
+       mutex_unlock(&d->data_mutex);
+
        /* attach demod */
        m88ds3103_pdata.clk = 27000000;
        m88ds3103_pdata.i2c_wr_max = 33;
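
All of the dw2102.c hunks are one fix: the on-stack obuf/ibuf arrays are replaced by a single buffer embedded in struct dw2102_state, because buffers handed to the USB transfer helpers may be DMA-mapped and DMA to stack memory is not allowed; d->data_mutex serializes the buffer's users. A condensed sketch of the pattern with hypothetical demo_* names:

    /* Sketch: one DMA-safe per-device buffer, guarded by a mutex. */
    struct demo_state {
            u8 data[64];    /* lives in kmalloc'ed state, so DMA-safe */
    };

    static int demo_cmd(struct demo_device *d, u8 cmd)
    {
            struct demo_state *st = d->priv;
            int ret;

            mutex_lock(&d->data_mutex);     /* serialize buffer users */
            st->data[0] = cmd;
            ret = demo_usb_rw(d, st->data, 1, st->data, 1);
            mutex_unlock(&d->data_mutex);
            return ret;
    }
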
index 1ae872bfc3ba5be342993f4d176b5a2d4390da57..747645c74134de4cd620a284e0f26ca8a61d4991 100644 (file)
@@ -186,7 +186,7 @@ static inline int write_enable(struct spi_nor *nor)
 }
 
 /*
- * Send write disble instruction to the chip.
+ * Send write disable instruction to the chip.
  */
 static inline int write_disable(struct spi_nor *nor)
 {
index 6d31f92ef2b6340642eca02039266aace70aefdd..84ac50f92c9c5167adfc5e295139a7a2d42a1eb3 100644 (file)
@@ -1162,8 +1162,8 @@ struct ob_mac_tso_iocb_rsp {
 struct ib_mac_iocb_rsp {
        u8 opcode;              /* 0x20 */
        u8 flags1;
-#define IB_MAC_IOCB_RSP_OI     0x01    /* Overide intr delay */
-#define IB_MAC_IOCB_RSP_I      0x02    /* Disble Intr Generation */
+#define IB_MAC_IOCB_RSP_OI     0x01    /* Override intr delay */
+#define IB_MAC_IOCB_RSP_I      0x02    /* Disable Intr Generation */
 #define IB_MAC_CSUM_ERR_MASK 0x1c      /* A mask to use for csum errs */
 #define IB_MAC_IOCB_RSP_TE     0x04    /* Checksum error */
 #define IB_MAC_IOCB_RSP_NU     0x08    /* No checksum rcvd */
index 993b650ef2759cbffc56c0bc086a8d2172ef4fcf..44f774c12fb25e7ab6f98df5edf8a09638971eca 100644 (file)
@@ -132,10 +132,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
        struct device *dev = pci->dev;
        struct resource *res;
 
-       /* If using the PHY framework, doesn't need to get other resource */
-       if (ep->using_phy)
-               return 0;
-
        ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
        if (!ep->mem_res)
                return -ENOMEM;
@@ -145,6 +141,10 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
        if (IS_ERR(ep->mem_res->elbi_base))
                return PTR_ERR(ep->mem_res->elbi_base);
 
+       /* If using the PHY framework, we don't need the other resources */
+       if (ep->using_phy)
+               return 0;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        ep->mem_res->phy_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(ep->mem_res->phy_base))
index 973472c23d89045000cf1119a09867c921f2fdf8..1dfa10cc566bebed005c2fe11a72c85a37036c32 100644 (file)
@@ -478,7 +478,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
 
 static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
 {
-       struct pci_dev *child, *parent = link->pdev;
+       struct pci_dev *child = link->downstream, *parent = link->pdev;
        struct pci_bus *linkbus = parent->subordinate;
        struct aspm_register_info upreg, dwreg;
 
@@ -491,9 +491,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
 
        /* Get upstream/downstream components' register state */
        pcie_get_aspm_reg(parent, &upreg);
-       child = pci_function_0(linkbus);
        pcie_get_aspm_reg(child, &dwreg);
-       link->downstream = child;
 
        /*
         * If ASPM not supported, don't mess with the clocks and link,
@@ -800,6 +798,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
        INIT_LIST_HEAD(&link->children);
        INIT_LIST_HEAD(&link->link);
        link->pdev = pdev;
+       link->downstream = pci_function_0(pdev->subordinate);
 
        /*
         * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
index f754453fe754e985361cb49cee0bddf54d752443..673683660b5c70567d7c49cd091c5c8ecf088655 100644 (file)
@@ -2174,6 +2174,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
                quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
 
 /*
  * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
index 109e2c99e6c162e01a4b569292bad7b4e68fd3dc..95d8f25cbccab7056dc4c7967814cd5932fd3507 100644 (file)
@@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
                 * does not disable its parity logic prior to
                 * the start of the reset.  This may cause a
                 * parity error to be detected and thus a
-                * spurious SERR or PERR assertion.  Disble
+                * spurious SERR or PERR assertion.  Disable
                 * PERR and SERR responses during the CHIPRST.
                 */
                mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
index c7839f6c35ccc479c8f7a044407b35f203cd3102..d277e8620e3e39794584ac29dc55cc3ce476a03a 100644 (file)
@@ -3075,23 +3075,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
        put_device(&sdkp->dev);
 }
 
-struct sd_devt {
-       int idx;
-       struct disk_devt disk_devt;
-};
-
-static void sd_devt_release(struct disk_devt *disk_devt)
-{
-       struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
-                       disk_devt);
-
-       spin_lock(&sd_index_lock);
-       ida_remove(&sd_index_ida, sd_devt->idx);
-       spin_unlock(&sd_index_lock);
-
-       kfree(sd_devt);
-}
-
 /**
  *     sd_probe - called during driver initialization and whenever a
  *     new scsi device is attached to the system. It is called once
@@ -3113,7 +3096,6 @@ static void sd_devt_release(struct disk_devt *disk_devt)
 static int sd_probe(struct device *dev)
 {
        struct scsi_device *sdp = to_scsi_device(dev);
-       struct sd_devt *sd_devt;
        struct scsi_disk *sdkp;
        struct gendisk *gd;
        int index;
@@ -3139,13 +3121,9 @@ static int sd_probe(struct device *dev)
        if (!sdkp)
                goto out;
 
-       sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
-       if (!sd_devt)
-               goto out_free;
-
        gd = alloc_disk(SD_MINORS);
        if (!gd)
-               goto out_free_devt;
+               goto out_free;
 
        do {
                if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3161,11 +3139,6 @@ static int sd_probe(struct device *dev)
                goto out_put;
        }
 
-       atomic_set(&sd_devt->disk_devt.count, 1);
-       sd_devt->disk_devt.release = sd_devt_release;
-       sd_devt->idx = index;
-       gd->disk_devt = &sd_devt->disk_devt;
-
        error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
        if (error) {
                sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3205,12 +3178,11 @@ static int sd_probe(struct device *dev)
        return 0;
 
  out_free_index:
-       put_disk_devt(&sd_devt->disk_devt);
-       sd_devt = NULL;
+       spin_lock(&sd_index_lock);
+       ida_remove(&sd_index_ida, index);
+       spin_unlock(&sd_index_lock);
  out_put:
        put_disk(gd);
- out_free_devt:
-       kfree(sd_devt);
  out_free:
        kfree(sdkp);
  out:
@@ -3271,7 +3243,10 @@ static void scsi_disk_release(struct device *dev)
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct gendisk *disk = sdkp->disk;
        
-       put_disk_devt(disk->disk_devt);
+       spin_lock(&sd_index_lock);
+       ida_remove(&sd_index_ida, sdkp->index);
+       spin_unlock(&sd_index_lock);
+
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
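
With the short-lived disk_devt refcounting reverted, sd.c releases the disk index directly again: ida_remove() under sd_index_lock in both the probe error path and scsi_disk_release(). The ida_pre_get()/ida_get_new() pair in the existing retry loop is the pre-ida_simple_get() API of this era; schematically:

    /* Sketch: old-style IDA allocation and release under a spinlock. */
    static DEFINE_IDA(demo_ida);
    static DEFINE_SPINLOCK(demo_lock);

    static int demo_get_index(int *index)
    {
            int error;

            do {
                    if (!ida_pre_get(&demo_ida, GFP_KERNEL))
                            return -ENOMEM; /* preload outside the lock */
                    spin_lock(&demo_lock);
                    error = ida_get_new(&demo_ida, index);
                    spin_unlock(&demo_lock);
            } while (error == -EAGAIN);     /* preload raced away, retry */

            return error;
    }

    static void demo_put_index(int index)
    {
            spin_lock(&demo_lock);
            ida_remove(&demo_ida, index);
            spin_unlock(&demo_lock);
    }
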
index a2615d64d07c1967d7cd2c25ab2e046747f6bd7d..79a2d8fba6b60622e7ba94570ce1c07265fc8ade 100644 (file)
@@ -84,8 +84,7 @@ static int ep_open(struct inode *, struct file *);
 
 /* /dev/gadget/$CHIP represents ep0 and the whole device */
 enum ep0_state {
-       /* DISBLED is the initial state.
-        */
+       /* DISABLED is the initial state. */
        STATE_DEV_DISABLED = 0,
 
        /* Only one open() of /dev/gadget/$CHIP; only one file tracks
index 6d6c46000e56cc76895a34f9d3980c949030b8a2..50aee8b7718b30dc86938bba6a5e540e179ecdb9 100644 (file)
@@ -868,7 +868,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
 
        spin_lock_irqsave(&xhci->lock, flags);
 
-       /* disble usb3 ports Wake bits*/
+       /* disable usb3 ports Wake bits */
        port_index = xhci->num_usb3_ports;
        port_array = xhci->usb3_ports;
        while (port_index--) {
@@ -879,7 +879,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
                        writel(t2, port_array[port_index]);
        }
 
-       /* disble usb2 ports Wake bits*/
+       /* disable usb2 ports Wake bits */
        port_index = xhci->num_usb2_ports;
        port_array = xhci->usb2_ports;
        while (port_index--) {
index 4d343eed08f51e1a3d2a0628dccb256b95858fae..1f4733b80c877426fa337e67eebf708b5fb9b41c 100644 (file)
@@ -55,7 +55,6 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
-#include <linux/init.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
index 338d2f73eb29c8f1691a22a162e5929875bbf8cf..a2c05f2ada6dd86576df1dede141c05248126187 100644 (file)
@@ -1359,6 +1359,16 @@ out:
        return 0;
 }
 
+static void fat_dummy_inode_init(struct inode *inode)
+{
+       /* Initialize this dummy inode to work as a no-op. */
+       MSDOS_I(inode)->mmu_private = 0;
+       MSDOS_I(inode)->i_start = 0;
+       MSDOS_I(inode)->i_logstart = 0;
+       MSDOS_I(inode)->i_attrs = 0;
+       MSDOS_I(inode)->i_pos = 0;
+}
+
 static int fat_read_root(struct inode *inode)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1803,12 +1813,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
        fat_inode = new_inode(sb);
        if (!fat_inode)
                goto out_fail;
-       MSDOS_I(fat_inode)->i_pos = 0;
+       fat_dummy_inode_init(fat_inode);
        sbi->fat_inode = fat_inode;
 
        fsinfo_inode = new_inode(sb);
        if (!fsinfo_inode)
                goto out_fail;
+       fat_dummy_inode_init(fsinfo_inode);
        fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
        sbi->fsinfo_inode = fsinfo_inode;
        insert_inode_hash(fsinfo_inode);
index 3ca1a8e44135ed757bc309cd750899d51f093970..141c3cd55a8b2d974f431d7710fbe4de58f78355 100644 (file)
@@ -846,7 +846,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = file_inode(iocb->ki_filp);
        size_t count = iov_iter_count(iter);
-       loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
+       loff_t pos = iocb->ki_pos, start = pos;
+       loff_t end = iocb->ki_pos + count - 1, ret = 0;
        unsigned int flags = IOMAP_DIRECT;
        struct blk_plug plug;
        struct iomap_dio *dio;
@@ -887,12 +888,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        }
 
        if (mapping->nrpages) {
-               ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+               ret = filemap_write_and_wait_range(mapping, start, end);
                if (ret)
                        goto out_free_dio;
 
                ret = invalidate_inode_pages2_range(mapping,
-                               iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+                               start >> PAGE_SHIFT, end >> PAGE_SHIFT);
                WARN_ON_ONCE(ret);
                ret = 0;
        }
@@ -941,6 +942,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                __set_current_state(TASK_RUNNING);
        }
 
+       ret = iomap_dio_complete(dio);
+
        /*
         * Try again to invalidate clean pages which might have been cached by
         * non-direct readahead, or faulted in by get_user_pages() if the source
@@ -949,12 +952,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
         * this invalidation fails, tough, the write still worked...
         */
        if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
-               ret = invalidate_inode_pages2_range(mapping,
-                               iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
-               WARN_ON_ONCE(ret);
+               int err = invalidate_inode_pages2_range(mapping,
+                               start >> PAGE_SHIFT, end >> PAGE_SHIFT);
+               WARN_ON_ONCE(err);
        }
 
-       return iomap_dio_complete(dio);
+       return ret;
 
 out_free_dio:
        kfree(dio);
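
The iomap_dio_rw() hunks fold in two corrections: the write-back and invalidation ranges are computed from a start captured on entry, because iocb->ki_pos may have advanced by the time the post-write invalidation runs, and iomap_dio_complete() now runs before that invalidation so its return value cannot be clobbered by a failure in what is only an advisory cache flush. The return-value discipline, sketched with hypothetical helpers:

    /* Sketch: advisory cleanup must not overwrite the real I/O result. */
    static ssize_t demo_dio_rw(struct demo_io *io)
    {
            loff_t start = io->pos;         /* capture before pos can move */
            ssize_t ret;

            ret = demo_wait_for_completion(io);     /* the result we return */

            if (io->is_write) {
                    int err = demo_invalidate_pages(io->mapping, start,
                                                    io->end);

                    WARN_ON_ONCE(err);      /* log it, don't return it */
            }

            return ret;
    }
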
index 02ce3944d0f5554ac65428305dd74183247b6eb0..1d227b0fcf49ff26b40bdd726b3839fb8f353f35 100644 (file)
@@ -138,8 +138,6 @@ out:
  * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
  * context.
  * @ctx: [in] Pointer to the userfaultfd context.
- *
- * Returns: In case of success, returns not zero.
  */
 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
 {
@@ -494,7 +492,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
                         * in such case.
                         */
                        down_read(&mm->mmap_sem);
-                       ret = 0;
+                       ret = VM_FAULT_NOPAGE;
                }
        }
 
@@ -531,10 +529,11 @@ out:
        return ret;
 }
 
-static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
-                                            struct userfaultfd_wait_queue *ewq)
+static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+                                             struct userfaultfd_wait_queue *ewq)
 {
-       int ret = 0;
+       if (WARN_ON_ONCE(current->flags & PF_EXITING))
+               goto out;
 
        ewq->ctx = ctx;
        init_waitqueue_entry(&ewq->wq, current);
@@ -551,8 +550,16 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                        break;
                if (ACCESS_ONCE(ctx->released) ||
                    fatal_signal_pending(current)) {
-                       ret = -1;
                        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
+                       if (ewq->msg.event == UFFD_EVENT_FORK) {
+                               struct userfaultfd_ctx *new;
+
+                               new = (struct userfaultfd_ctx *)
+                                       (unsigned long)
+                                       ewq->msg.arg.reserved.reserved1;
+
+                               userfaultfd_ctx_put(new);
+                       }
                        break;
                }
 
@@ -570,9 +577,8 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
         * ctx may go away after this if the userfault pseudo fd is
         * already released.
         */
-
+out:
        userfaultfd_ctx_put(ctx);
-       return ret;
 }
 
 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
@@ -630,7 +636,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
        return 0;
 }
 
-static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
+static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
 {
        struct userfaultfd_ctx *ctx = fctx->orig;
        struct userfaultfd_wait_queue ewq;
@@ -640,17 +646,15 @@ static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
        ewq.msg.event = UFFD_EVENT_FORK;
        ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
 
-       return userfaultfd_event_wait_completion(ctx, &ewq);
+       userfaultfd_event_wait_completion(ctx, &ewq);
 }
 
 void dup_userfaultfd_complete(struct list_head *fcs)
 {
-       int ret = 0;
        struct userfaultfd_fork_ctx *fctx, *n;
 
        list_for_each_entry_safe(fctx, n, fcs, list) {
-               if (!ret)
-                       ret = dup_fctx(fctx);
+               dup_fctx(fctx);
                list_del(&fctx->list);
                kfree(fctx);
        }
@@ -693,8 +697,7 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
        userfaultfd_event_wait_completion(ctx, &ewq);
 }
 
-void userfaultfd_remove(struct vm_area_struct *vma,
-                       struct vm_area_struct **prev,
+bool userfaultfd_remove(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -703,13 +706,11 @@ void userfaultfd_remove(struct vm_area_struct *vma,
 
        ctx = vma->vm_userfaultfd_ctx.ctx;
        if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
-               return;
+               return true;
 
        userfaultfd_ctx_get(ctx);
        up_read(&mm->mmap_sem);
 
-       *prev = NULL; /* We wait for ACK w/o the mmap semaphore */
-
        msg_init(&ewq.msg);
 
        ewq.msg.event = UFFD_EVENT_REMOVE;
@@ -718,7 +719,7 @@ void userfaultfd_remove(struct vm_area_struct *vma,
 
        userfaultfd_event_wait_completion(ctx, &ewq);
 
-       down_read(&mm->mmap_sem);
+       return false;
 }
 
 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
@@ -779,34 +780,6 @@ void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
        }
 }
 
-void userfaultfd_exit(struct mm_struct *mm)
-{
-       struct vm_area_struct *vma = mm->mmap;
-
-       /*
-        * We can do the vma walk without locking because the caller
-        * (exit_mm) knows it now has exclusive access
-        */
-       while (vma) {
-               struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
-
-               if (ctx && (ctx->features & UFFD_FEATURE_EVENT_EXIT)) {
-                       struct userfaultfd_wait_queue ewq;
-
-                       userfaultfd_ctx_get(ctx);
-
-                       msg_init(&ewq.msg);
-                       ewq.msg.event = UFFD_EVENT_EXIT;
-
-                       userfaultfd_event_wait_completion(ctx, &ewq);
-
-                       ctx->features &= ~UFFD_FEATURE_EVENT_EXIT;
-               }
-
-               vma = vma->vm_next;
-       }
-}
-
 static int userfaultfd_release(struct inode *inode, struct file *file)
 {
        struct userfaultfd_ctx *ctx = file->private_data;
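
The new bool return of userfaultfd_remove() encodes the locking contract instead of hiding it: true means the helper never dropped mmap_sem and the caller may keep walking, false means mmap_sem was released to wait for the userspace ACK, so the caller must re-take it and treat its VMA pointers as stale. A sketch of a conforming caller, with a hypothetical demo_walk():

    /* Sketch: restart the walk whenever the helper dropped mmap_sem. */
    static void demo_walk(struct mm_struct *mm, unsigned long start,
                          unsigned long end)
    {
            struct vm_area_struct *vma;

    again:
            down_read(&mm->mmap_sem);
            for (vma = mm->mmap; vma; vma = vma->vm_next) {
                    if (!userfaultfd_remove(vma, start, end))
                            goto again;     /* sem dropped, VMAs are stale */
            }
            up_read(&mm->mmap_sem);
    }
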
index 2dfdc62f795e63177e3f2f58306840656644492d..70a5b55e0870a0523c0dd8ce629debf2fccebe25 100644 (file)
 #include "kmem.h"
 #include "xfs_message.h"
 
-/*
- * Greedy allocation.  May fail and may return vmalloced memory.
- */
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
-{
-       void            *ptr;
-       size_t          kmsize = maxsize;
-
-       while (!(ptr = vzalloc(kmsize))) {
-               if ((kmsize >>= 1) <= minsize)
-                       kmsize = minsize;
-       }
-       if (ptr)
-               *size = kmsize;
-       return ptr;
-}
-
 void *
 kmem_alloc(size_t size, xfs_km_flags_t flags)
 {
index 689f746224e7ab8a0fbf3d2f9acb4f1dd68a9a16..f0fc84fcaac2553283f90bc3f157b924bd03d932 100644 (file)
@@ -69,8 +69,6 @@ static inline void  kmem_free(const void *ptr)
 }
 
 
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
-
 static inline void *
 kmem_zalloc(size_t size, xfs_km_flags_t flags)
 {
index a9c66d47757a757324e5fbf4224883e1d369588a..9bd104f32908962046af6d2dd4437a045fecdb36 100644 (file)
@@ -763,8 +763,8 @@ xfs_bmap_extents_to_btree(
                args.type = XFS_ALLOCTYPE_START_BNO;
                args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
        } else if (dfops->dop_low) {
-try_another_ag:
                args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
                args.fsbno = *firstblock;
        } else {
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -790,13 +790,17 @@ try_another_ag:
        if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
            args.fsbno == NULLFSBLOCK &&
            args.type == XFS_ALLOCTYPE_NEAR_BNO) {
-               dfops->dop_low = true;
+               args.type = XFS_ALLOCTYPE_FIRST_AG;
                goto try_another_ag;
        }
+       if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
+               xfs_iroot_realloc(ip, -1, whichfork);
+               xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+               return -ENOSPC;
+       }
        /*
         * Allocation can't fail, the space was reserved.
         */
-       ASSERT(args.fsbno != NULLFSBLOCK);
        ASSERT(*firstblock == NULLFSBLOCK ||
               args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
        *firstblock = cur->bc_private.b.firstblock = args.fsbno;
@@ -4150,6 +4154,19 @@ xfs_bmapi_read(
        return 0;
 }
 
+/*
+ * Add a delayed allocation extent to an inode. Blocks are reserved from the
+ * global pool and the extent inserted into the inode in-core extent tree.
+ *
+ * On entry, got refers to the first extent beyond the offset of the extent to
+ * allocate or eof is specified if no such extent exists. On return, got refers
+ * to the extent record that was inserted to the inode fork.
+ *
+ * Note that the allocated extent may have been merged with contiguous extents
+ * during insertion into the inode fork. Thus, got does not reflect the current
+ * state of the inode fork on return. If necessary, the caller can use lastx to
+ * look up the updated record in the inode fork.
+ */
 int
 xfs_bmapi_reserve_delalloc(
        struct xfs_inode        *ip,
@@ -4236,13 +4253,8 @@ xfs_bmapi_reserve_delalloc(
        got->br_startblock = nullstartblock(indlen);
        got->br_blockcount = alen;
        got->br_state = XFS_EXT_NORM;
-       xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
 
-       /*
-        * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
-        * might have merged it into one of the neighbouring ones.
-        */
-       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
+       xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
 
        /*
         * Tag the inode if blocks were preallocated. Note that COW fork
@@ -4254,10 +4266,6 @@ xfs_bmapi_reserve_delalloc(
        if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
                xfs_inode_set_cowblocks_tag(ip);
 
-       ASSERT(got->br_startoff <= aoff);
-       ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
-       ASSERT(isnullstartblock(got->br_startblock));
-       ASSERT(got->br_state == XFS_EXT_NORM);
        return 0;
 
 out_unreserve_blocks:
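
The one-line label move in xfs_bmap_extents_to_btree() matters because the retry path jumps back with a different allocation type: with try_another_ag placed after the default args.type assignment, the XFS_ALLOCTYPE_FIRST_AG set for the retry is no longer clobbered back to START_BNO, and a retry that still fails now returns -ENOSPC with the cursor torn down instead of hitting an ASSERT. Schematically, with demo_* stand-ins:

    /* Sketch: retry label below the default so the retry can override it. */
    static int demo_alloc(struct demo_args *args)
    {
            args->type = DEMO_TYPE_START_BNO;       /* default, set once */
    try_another_ag:
            demo_do_alloc(args);

            if (args->fsbno == DEMO_NULLBLOCK &&
                args->type != DEMO_TYPE_FIRST_AG) {
                    args->type = DEMO_TYPE_FIRST_AG;  /* widen on retry */
                    goto try_another_ag;              /* default stays put */
            }

            if (WARN_ON_ONCE(args->fsbno == DEMO_NULLBLOCK))
                    return -ENOSPC;         /* clean failure, no assert */

            return 0;
    }
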
index f93072b58a58323ae952d55d568a9e53384f88d3..fd55db47938562868d25d4998407ce7650c4da4f 100644 (file)
@@ -447,8 +447,8 @@ xfs_bmbt_alloc_block(
 
        if (args.fsbno == NULLFSBLOCK) {
                args.fsbno = be64_to_cpu(start->l);
-try_another_ag:
                args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
                /*
                 * Make sure there is sufficient room left in the AG to
                 * complete a full tree split for an extent insert.  If
@@ -488,8 +488,8 @@ try_another_ag:
        if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
            args.fsbno == NULLFSBLOCK &&
            args.type == XFS_ALLOCTYPE_NEAR_BNO) {
-               cur->bc_private.b.dfops->dop_low = true;
                args.fsbno = cur->bc_private.b.firstblock;
+               args.type = XFS_ALLOCTYPE_FIRST_AG;
                goto try_another_ag;
        }
 
@@ -506,7 +506,7 @@ try_another_ag:
                        goto error0;
                cur->bc_private.b.dfops->dop_low = true;
        }
-       if (args.fsbno == NULLFSBLOCK) {
+       if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
                XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
                *stat = 0;
                return 0;
index bf65a9ea864293d48e5326178336680c2eb29758..61494295d92fe1acb7d343bc3a4e1594f09027ab 100644 (file)
@@ -274,54 +274,49 @@ xfs_end_io(
        struct xfs_ioend        *ioend =
                container_of(work, struct xfs_ioend, io_work);
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
+       xfs_off_t               offset = ioend->io_offset;
+       size_t                  size = ioend->io_size;
        int                     error = ioend->io_bio->bi_error;
 
        /*
-        * Set an error if the mount has shut down and proceed with end I/O
-        * processing so it can perform whatever cleanups are necessary.
+        * Just clean up the in-memory structures if the fs has been shut down.
         */
-       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                error = -EIO;
+               goto done;
+       }
 
        /*
-        * For a CoW extent, we need to move the mapping from the CoW fork
-        * to the data fork.  If instead an error happened, just dump the
-        * new blocks.
+        * Clean up any COW blocks on an I/O error.
         */
-       if (ioend->io_type == XFS_IO_COW) {
-               if (error)
-                       goto done;
-               if (ioend->io_bio->bi_error) {
-                       error = xfs_reflink_cancel_cow_range(ip,
-                                       ioend->io_offset, ioend->io_size);
-                       goto done;
+       if (unlikely(error)) {
+               switch (ioend->io_type) {
+               case XFS_IO_COW:
+                       xfs_reflink_cancel_cow_range(ip, offset, size, true);
+                       break;
                }
-               error = xfs_reflink_end_cow(ip, ioend->io_offset,
-                               ioend->io_size);
-               if (error)
-                       goto done;
+
+               goto done;
        }
 
        /*
-        * For unwritten extents we need to issue transactions to convert a
-        * range to normal written extens after the data I/O has finished.
-        * Detecting and handling completion IO errors is done individually
-        * for each case as different cleanup operations need to be performed
-        * on error.
+        * Success:  commit the COW or unwritten blocks if needed.
         */
-       if (ioend->io_type == XFS_IO_UNWRITTEN) {
-               if (error)
-                       goto done;
-               error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
-                                                 ioend->io_size);
-       } else if (ioend->io_append_trans) {
-               error = xfs_setfilesize_ioend(ioend, error);
-       } else {
-               ASSERT(!xfs_ioend_is_append(ioend) ||
-                      ioend->io_type == XFS_IO_COW);
+       switch (ioend->io_type) {
+       case XFS_IO_COW:
+               error = xfs_reflink_end_cow(ip, offset, size);
+               break;
+       case XFS_IO_UNWRITTEN:
+               error = xfs_iomap_write_unwritten(ip, offset, size);
+               break;
+       default:
+               ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+               break;
        }
 
 done:
+       if (ioend->io_append_trans)
+               error = xfs_setfilesize_ioend(ioend, error);
        xfs_destroy_ioend(ioend, error);
 }
 
index 7234b9748c36e048b15b376e4408ef3626422cf4..3531f8f72fa5e10b83f0fa8bd37afc560b2dbf0a 100644 (file)
@@ -1608,7 +1608,7 @@ xfs_inode_free_cowblocks(
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 
-       ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+       ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
 
        xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
index edfa6a55b0646d0d444ea0b2c12e46a62a2c4474..7eaf1ef74e3c63ebb3c640e32d2db87864984a4a 100644 (file)
@@ -1615,7 +1615,7 @@ xfs_itruncate_extents(
 
        /* Remove all pending CoW reservations. */
        error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
-                       last_block);
+                       last_block, true);
        if (error)
                goto out;
 
index 41662fb14e87d8b1546c42d6a65508db5c5a76bf..288ee5b840d738116b8981e9618fac36fb24614f 100644 (file)
@@ -630,6 +630,11 @@ retry:
                goto out_unlock;
        }
 
+       /*
+        * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+        * them out if the write happens to fail.
+        */
+       iomap->flags = IOMAP_F_NEW;
        trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
 done:
        if (isnullstartblock(got.br_startblock))
@@ -1071,16 +1076,22 @@ xfs_file_iomap_end_delalloc(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  length,
-       ssize_t                 written)
+       ssize_t                 written,
+       struct iomap            *iomap)
 {
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           start_fsb;
        xfs_fileoff_t           end_fsb;
        int                     error = 0;
 
-       /* behave as if the write failed if drop writes is enabled */
-       if (xfs_mp_drop_writes(mp))
+       /*
+        * Behave as if the write failed if drop writes is enabled. Set the NEW
+        * flag to force delalloc cleanup.
+        */
+       if (xfs_mp_drop_writes(mp)) {
+               iomap->flags |= IOMAP_F_NEW;
                written = 0;
+       }
 
        /*
         * start_fsb refers to the first unused block after a short write. If
@@ -1094,14 +1105,14 @@ xfs_file_iomap_end_delalloc(
        end_fsb = XFS_B_TO_FSB(mp, offset + length);
 
        /*
-        * Trim back delalloc blocks if we didn't manage to write the whole
-        * range reserved.
+        * Trim delalloc blocks if they were allocated by this write and we
+        * didn't manage to write the whole range.
         *
         * We don't need to care about racing delalloc as we hold i_mutex
         * across the reserve/allocate/unreserve calls. If there are delalloc
         * blocks in the range, they are ours.
         */
-       if (start_fsb < end_fsb) {
+       if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
                truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
                                         XFS_FSB_TO_B(mp, end_fsb) - 1);
 
@@ -1131,7 +1142,7 @@ xfs_file_iomap_end(
 {
        if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
                return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
-                               length, written);
+                               length, written, iomap);
        return 0;
 }
 
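The IOMAP_F_NEW plumbing is a two-sided handshake: the allocation path sets the flag only when it actually created delalloc blocks, and xfs_file_iomap_end_delalloc() punches out the tail of a short write only when the flag is present, so a short write that landed on pre-existing delalloc blocks no longer truncates reservations it did not make. In outline, with hypothetical demo_* helpers:

    /* Sketch: only clean up blocks this mapping call created. */
    static void demo_iomap_begin(struct demo_iomap *map)
    {
            if (demo_hole_needs_alloc(map)) {
                    demo_alloc_delalloc(map);
                    map->flags |= DEMO_F_NEW;       /* we made these blocks */
            }
    }

    static void demo_iomap_end(struct demo_iomap *map, ssize_t written)
    {
            /* Punch only what we allocated and the write didn't consume. */
            if ((map->flags & DEMO_F_NEW) && written < map->length)
                    demo_punch_range(map, written, map->length);
    }
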
index 66e881790c17109496e21bd5e2d7d21f5ecc7fe5..2a6d9b1558e00dca550a2d46f8a5a51b9661ec3a 100644 (file)
@@ -361,7 +361,6 @@ xfs_bulkstat(
        xfs_agino_t             agino;  /* inode # in allocation group */
        xfs_agnumber_t          agno;   /* allocation group number */
        xfs_btree_cur_t         *cur;   /* btree cursor for ialloc btree */
-       size_t                  irbsize; /* size of irec buffer in bytes */
        xfs_inobt_rec_incore_t  *irbuf; /* start of irec buffer */
        int                     nirbuf; /* size of irbuf */
        int                     ubcount; /* size of user's buffer */
@@ -388,11 +387,10 @@ xfs_bulkstat(
        *ubcountp = 0;
        *done = 0;
 
-       irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+       irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
        if (!irbuf)
                return -ENOMEM;
-
-       nirbuf = irbsize / sizeof(*irbuf);
+       nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);
 
        /*
         * Loop over the allocation groups, starting from the last
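
Removing kmem_zalloc_greedy() is the real point of the kmem.c/kmem.h hunks above: its while loop kept halving the request down to minsize and then retried vzalloc() forever, so a persistent allocation failure became an unbounded spin. xfs_bulkstat() now simply requests the four-page buffer through kmem_zalloc_large(), which falls back to vmalloc internally and fails cleanly. The bounded replacement amounts to:

    /* Sketch: fixed-size, cleanly-failing replacement for the greedy loop. */
    #define DEMO_BUFSZ      (PAGE_SIZE * 4)

    static void *demo_alloc_recbuf(int *nrecs, size_t recsize)
    {
            void *buf = kmem_zalloc_large(DEMO_BUFSZ, KM_SLEEP);

            if (buf)
                    *nrecs = DEMO_BUFSZ / recsize;  /* records that fit */
            return buf;             /* NULL on failure, no retry spin */
    }
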
index 450bde68bb7528d70a47e0b38275ca75c7e757a1..688ebff1f66384a309cca74539cbe4d27172b177 100644 (file)
@@ -513,8 +513,7 @@ STATIC void
 xfs_set_inoalignment(xfs_mount_t *mp)
 {
        if (xfs_sb_version_hasalign(&mp->m_sb) &&
-           mp->m_sb.sb_inoalignmt >=
-           XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+               mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
                mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
        else
                mp->m_inoalign_mask = 0;
index da6d08fb359c8efdf42a53e283bb030780b5b064..4a84c5ea266d8f8fcec61aa55776fec339d27aaf 100644 (file)
@@ -548,14 +548,18 @@ xfs_reflink_trim_irec_to_next_cow(
 }
 
 /*
- * Cancel all pending CoW reservations for some block range of an inode.
+ * Cancel CoW reservations for some block range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
  */
 int
 xfs_reflink_cancel_cow_blocks(
        struct xfs_inode                *ip,
        struct xfs_trans                **tpp,
        xfs_fileoff_t                   offset_fsb,
-       xfs_fileoff_t                   end_fsb)
+       xfs_fileoff_t                   end_fsb,
+       bool                            cancel_real)
 {
        struct xfs_ifork                *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
        struct xfs_bmbt_irec            got, del;
@@ -579,7 +583,7 @@ xfs_reflink_cancel_cow_blocks(
                                        &idx, &got, &del);
                        if (error)
                                break;
-               } else {
+               } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
                        xfs_trans_ijoin(*tpp, ip, 0);
                        xfs_defer_init(&dfops, &firstfsb);
 
@@ -621,13 +625,17 @@ xfs_reflink_cancel_cow_blocks(
 }
 
 /*
- * Cancel all pending CoW reservations for some byte range of an inode.
+ * Cancel CoW reservations for some byte range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
  */
 int
 xfs_reflink_cancel_cow_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
-       xfs_off_t               count)
+       xfs_off_t               count,
+       bool                    cancel_real)
 {
        struct xfs_trans        *tp;
        xfs_fileoff_t           offset_fsb;
@@ -653,7 +661,8 @@ xfs_reflink_cancel_cow_range(
        xfs_trans_ijoin(tp, ip, 0);
 
        /* Scrape out the old CoW reservations */
-       error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb);
+       error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
+                       cancel_real);
        if (error)
                goto out_cancel;
 
@@ -1450,7 +1459,7 @@ next:
         * We didn't find any shared blocks so turn off the reflink flag.
         * First, get rid of any leftover CoW mappings.
         */
-       error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF);
+       error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
        if (error)
                return error;
 
index 33ac9b8db68380185ad80073b1890cc70e4e3b09..d29a7967f0290ecb8b4ca7c4d4077723262c8ba2 100644 (file)
@@ -39,9 +39,9 @@ extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
 
 extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
                struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
-               xfs_fileoff_t end_fsb);
+               xfs_fileoff_t end_fsb, bool cancel_real);
 extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
-               xfs_off_t count);
+               xfs_off_t count, bool cancel_real);
 extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
                xfs_off_t count);
 extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
index 890862f2447c193f374b4de64c58940521b203fb..685c042a120f16a8a9a8dad69d8ee7ce6f9274a0 100644 (file)
@@ -953,7 +953,7 @@ xfs_fs_destroy_inode(
        XFS_STATS_INC(ip->i_mount, vn_remove);
 
        if (xfs_is_reflink_inode(ip)) {
-               error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+               error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
                if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
                        xfs_warn(ip->i_mount,
 "Error %d while evicting CoW blocks for inode %llu.",
index 399a123aed5815f1f6d2b2784e0148794790bc43..db69d84ed7d14152626565529e0bc981c1976a33 100644 (file)
@@ -20,7 +20,7 @@
 #define CS42L42_HPOUT_LOAD_1NF         0
 #define CS42L42_HPOUT_LOAD_10NF                1
 
-/* HPOUT Clamp to GND Overide */
+/* HPOUT Clamp to GND Override */
 #define CS42L42_HPOUT_CLAMP_EN         0
 #define CS42L42_HPOUT_CLAMP_DIS                1
 
index 796016e63c1da7b64c59f8d9a1b4979d8059027b..5a7da607ca045f81a46e7b73bb31a8f1b978452a 100644 (file)
@@ -435,7 +435,6 @@ struct request_queue {
        struct delayed_work     delay_work;
 
        struct backing_dev_info *backing_dev_info;
-       struct disk_devt        *disk_devt;
 
        /*
         * The queue owner gets to use this for whatever they like.
index aad3fd0ff5f8314975c93af81d94b3ce88ddbdda..7251f7bb45e8b80b44f28c2051c5bef8e947e6bb 100644 (file)
@@ -2678,7 +2678,7 @@ static const char * const kernel_read_file_str[] = {
 
 static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
 {
-       if (id < 0 || id >= READING_MAX_ID)
+       if ((unsigned)id >= READING_MAX_ID)
                return kernel_read_file_str[READING_UNKNOWN];
 
        return kernel_read_file_str[id];
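
The kernel_read_file_id_str() tweak is the classic single-comparison bounds check: casting a possibly negative value to unsigned turns every negative input into a value far above READING_MAX_ID, so one test replaces the `id < 0 || id >= READING_MAX_ID` pair and avoids "comparison always false" warnings on enums the compiler makes unsigned. A standalone illustration:

    /* Sketch: one unsigned comparison covers both range checks. */
    #include <stdio.h>

    enum demo_id { DEMO_A, DEMO_B, DEMO_MAX };

    static const char *demo_name(enum demo_id id)
    {
            static const char * const names[] = { "a", "b" };

            if ((unsigned int)id >= DEMO_MAX)       /* catches negatives too */
                    return "unknown";
            return names[id];
    }

    int main(void)
    {
            /* -1 wraps to a huge unsigned value, so it is rejected. */
            printf("%s %s\n", demo_name(DEMO_A), demo_name((enum demo_id)-1));
            return 0;
    }
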
index a999d281a2f1e41ce6cb7613dc5ecd8e0d4797c8..76f39754e7b0299df616bc3cb909f9a35fce9ea1 100644 (file)
@@ -167,13 +167,6 @@ struct blk_integrity {
 };
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
-struct disk_devt {
-       atomic_t count;
-       void (*release)(struct disk_devt *disk_devt);
-};
-
-void put_disk_devt(struct disk_devt *disk_devt);
-void get_disk_devt(struct disk_devt *disk_devt);
 
 struct gendisk {
        /* major, first_minor and minors are input parameters only,
@@ -183,7 +176,6 @@ struct gendisk {
        int first_minor;
        int minors;                     /* maximum number of minors, =1 for
                                          * disks that can't be partitioned. */
-       struct disk_devt *disk_devt;
 
        char disk_name[DISK_NAME_LEN];  /* name of major driver */
        char *(*devnode)(struct gendisk *gd, umode_t *mode);
index ad3e5158e586dc841e9cd37492ec7104d60e7a81..c9f795e9a2ee26aaf562e9a97a2fe2f963a2f054 100644 (file)
@@ -65,7 +65,7 @@ struct regulator_state {
        int uV; /* suspend voltage */
        unsigned int mode; /* suspend regulator operating mode */
        int enabled; /* is regulator enabled in this suspend state */
-       int disabled; /* is the regulator disbled in this suspend state */
+       int disabled; /* is the regulator disabled in this suspend state */
 };
 
 /**
index 0468548acebfef5431ea7bfd6f565cfdfb73f348..48a3483dccb12360e288ffdd97a9bf8d9d9080a4 100644 (file)
@@ -61,8 +61,7 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
                                        unsigned long from, unsigned long to,
                                        unsigned long len);
 
-extern void userfaultfd_remove(struct vm_area_struct *vma,
-                              struct vm_area_struct **prev,
+extern bool userfaultfd_remove(struct vm_area_struct *vma,
                               unsigned long start,
                               unsigned long end);
 
@@ -72,8 +71,6 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
 extern void userfaultfd_unmap_complete(struct mm_struct *mm,
                                       struct list_head *uf);
 
-extern void userfaultfd_exit(struct mm_struct *mm);
-
 #else /* CONFIG_USERFAULTFD */
 
 /* mm helpers */
@@ -120,11 +117,11 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
 {
 }
 
-static inline void userfaultfd_remove(struct vm_area_struct *vma,
-                                     struct vm_area_struct **prev,
+static inline bool userfaultfd_remove(struct vm_area_struct *vma,
                                      unsigned long start,
                                      unsigned long end)
 {
+       return true;
 }
 
 static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
@@ -139,10 +136,6 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
 {
 }
 
-static inline void userfaultfd_exit(struct mm_struct *mm)
-{
-}
-
 #endif /* CONFIG_USERFAULTFD */
 
 #endif /* _LINUX_USERFAULTFD_K_H */
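
userfaultfd_remove() now reports whether it had to drop mmap_sem to deliver UFFD_EVENT_REMOVE: true means the lock is still held (hence the !CONFIG_USERFAULTFD stub returns true), false means the caller must retake the lock and revalidate its vma. A hedged sketch of the contract, mirroring the mm/madvise.c hunks further below; 'mm' stands in for current->mm:

if (!userfaultfd_remove(vma, start, end)) {
	/* mmap_sem was dropped; the old vma pointer is stale */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (!vma || start < vma->vm_start)
		return -ENOMEM;	/* a hole appeared in the range */
}
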
index 6aa1b6cb58285d92ccd4a53d8de660669f518a6b..a80b7b59cf33418811217faca1b9c6b041dad814 100644 (file)
@@ -79,6 +79,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                THP_SPLIT_PAGE_FAILED,
                THP_DEFERRED_SPLIT_PAGE,
                THP_SPLIT_PMD,
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+               THP_SPLIT_PUD,
+#endif
                THP_ZERO_PAGE_ALLOC,
                THP_ZERO_PAGE_ALLOC_FAILED,
 #endif
index 458b400373d44daf6d2fee8b2a971eb0e326d2c6..38aac554dbbab6384f1a16bd7d914b632b6a6d56 100644 (file)
@@ -20,8 +20,17 @@ struct device;
 
 int vsp1_du_init(struct device *dev);
 
-int vsp1_du_setup_lif(struct device *dev, unsigned int width,
-                     unsigned int height);
+/**
+ * struct vsp1_du_lif_config - VSP LIF configuration
+ * @width: output frame width
+ * @height: output frame height
+ */
+struct vsp1_du_lif_config {
+       unsigned int width;
+       unsigned int height;
+};
+
+int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg);
 
 struct vsp1_du_atomic_config {
        u32 pixelformat;
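
vsp1_du_setup_lif() now takes a configuration structure instead of bare width/height arguments, so the interface can grow without further signature churn. A hedged sketch of a DU-side caller, assuming (as in the rcar-du caller) that a NULL config disables the pipeline; the helper names are illustrative:

/* enable: hand the VSP a display size */
static int example_enable_lif(struct device *vsp_dev)
{
	struct vsp1_du_lif_config cfg = {
		.width = 1920,
		.height = 1080,
	};

	return vsp1_du_setup_lif(vsp_dev, &cfg);
}

/* disable: a NULL config tears the pipeline down */
static void example_disable_lif(struct device *vsp_dev)
{
	vsp1_du_setup_lif(vsp_dev, NULL);
}
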
index cb2615ccf761d68123406d3600646d147a6e2f47..d784f242cf7b4dc114e355c52b319cb11d6faca7 100644 (file)
@@ -59,7 +59,7 @@ struct lap_cb;
  *  Slot timer must never exceed 85 ms, and must always be at least 25 ms, 
  *  suggested to  75-85 msec by IrDA lite. This doesn't work with a lot of
  *  devices, and other stackes uses a lot more, so it's best we do it as well
- *  (Note : this is the default value and sysctl overides it - Jean II)
+ *  (Note : this is the default value and sysctl overrides it - Jean II)
  */
 #define SLOT_TIMEOUT            (90*HZ/1000)
 
index c055947c5c989fa7e399a7b0dcaba8640014b548..3b059530dac95fa6e5dcf736e95a84fe80eb5f35 100644 (file)
@@ -18,8 +18,7 @@
  * means the userland is reading).
  */
 #define UFFD_API ((__u64)0xAA)
-#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT |           \
-                          UFFD_FEATURE_EVENT_FORK |            \
+#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK |           \
                           UFFD_FEATURE_EVENT_REMAP |           \
                           UFFD_FEATURE_EVENT_REMOVE |  \
                           UFFD_FEATURE_EVENT_UNMAP |           \
@@ -113,7 +112,6 @@ struct uffd_msg {
 #define UFFD_EVENT_REMAP       0x14
 #define UFFD_EVENT_REMOVE      0x15
 #define UFFD_EVENT_UNMAP       0x16
-#define UFFD_EVENT_EXIT                0x17
 
 /* flags for UFFD_EVENT_PAGEFAULT */
 #define UFFD_PAGEFAULT_FLAG_WRITE      (1<<0)  /* If this was a write fault */
@@ -163,7 +161,6 @@ struct uffdio_api {
 #define UFFD_FEATURE_MISSING_HUGETLBFS         (1<<4)
 #define UFFD_FEATURE_MISSING_SHMEM             (1<<5)
 #define UFFD_FEATURE_EVENT_UNMAP               (1<<6)
-#define UFFD_FEATURE_EVENT_EXIT                        (1<<7)
        __u64 features;
 
        __u64 ioctls;
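
With UFFD_FEATURE_EVENT_EXIT gone, a manager should request only the features the running kernel still advertises during the UFFDIO_API handshake. A minimal userspace sketch, error handling trimmed; it assumes kernel headers that define the remaining feature bits:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	struct uffdio_api api = {
		.api = UFFD_API,
		/* request only events that survive this change */
		.features = UFFD_FEATURE_EVENT_REMOVE | UFFD_FEATURE_EVENT_UNMAP,
	};
	int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (fd < 0 || ioctl(fd, UFFDIO_API, &api) == -1) {
		perror("userfaultfd");
		return 1;
	}
	printf("negotiated features: 0x%llx\n",
	       (unsigned long long)api.features);
	close(fd);
	return 0;
}
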
index 0125589c742841ddbff14639c1ded5e0590b00b4..48851327a15e18e8ba151a3a45c5126c5023ddb8 100644 (file)
@@ -2669,7 +2669,7 @@ static bool css_visible(struct cgroup_subsys_state *css)
  *
  * Returns 0 on success, -errno on failure.  On failure, csses which have
  * been processed already aren't cleaned up.  The caller is responsible for
- * cleaning up with cgroup_apply_control_disble().
+ * cleaning up with cgroup_apply_control_disable().
  */
 static int cgroup_apply_control_enable(struct cgroup *cgrp)
 {
index 6f41548f2e320a98182f4fe4b10700bcab7e6b86..a17ed56c8ce1f918519cfbf96ee3c938734ecb08 100644 (file)
@@ -998,7 +998,7 @@ list_update_cgroup_event(struct perf_event *event,
  */
 #define PERF_CPU_HRTIMER (1000 / HZ)
 /*
- * function must be called with interrupts disbled
+ * function must be called with interrupts disabled
  */
 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 {
index e126ebf2400c221adfb8a73508d883ec9accd63d..516acdb0e0ec9bd48e3006a8ede165437b3e121f 100644 (file)
@@ -554,7 +554,6 @@ static void exit_mm(void)
        enter_lazy_tlb(mm, current);
        task_unlock(current);
        mm_update_next_owner(mm);
-       userfaultfd_exit(mm);
        mmput(mm);
        if (test_thread_flag(TIF_MEMDIE))
                exit_oom_victim();
index 8f8de3d4d6b7a3c71358ac1c6660f2e645b98477..cd7cd489f739817f07349e8526812eaa3e110075 100644 (file)
@@ -36,6 +36,7 @@ struct sugov_policy {
        u64 last_freq_update_time;
        s64 freq_update_delay_ns;
        unsigned int next_freq;
+       unsigned int cached_raw_freq;
 
        /* The next fields are only needed if fast switch cannot be used. */
        struct irq_work irq_work;
@@ -52,7 +53,6 @@ struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy *sg_policy;
 
-       unsigned int cached_raw_freq;
        unsigned long iowait_boost;
        unsigned long iowait_boost_max;
        u64 last_update;
@@ -116,7 +116,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
 /**
  * get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
  * @util: Current CPU utilization.
  * @max: CPU capacity.
  *
@@ -136,19 +136,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
  * next_freq (as calculated above) is returned, subject to policy min/max and
  * cpufreq driver limitations.
  */
-static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
-                                 unsigned long max)
+static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+                                 unsigned long util, unsigned long max)
 {
-       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;
 
        freq = (freq + (freq >> 2)) * util / max;
 
-       if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+       if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
                return sg_policy->next_freq;
-       sg_cpu->cached_raw_freq = freq;
+       sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
 }
 
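get_next_freq() estimates next_freq = 1.25 * max_freq * util / max; the freq + (freq >> 2) term is the 1.25 factor. Moving cached_raw_freq next to next_freq in struct sugov_policy keeps the cache coherent when several CPUs sharing one policy take turns computing the frequency. A standalone model of the arithmetic with illustrative numbers:

#include <stdio.h>

/* model of the kernel's estimate: 1.25 * max_freq * util / max */
static unsigned int next_freq(unsigned int max_freq, unsigned long util,
			      unsigned long max)
{
	unsigned int freq = max_freq;

	return (freq + (freq >> 2)) * util / max;
}

int main(void)
{
	/* 2 GHz policy at util 512/1024 -> 1.25 GHz raw estimate */
	printf("%u kHz\n", next_freq(2000000, 512, 1024));
	return 0;
}
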
@@ -213,7 +212,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
        } else {
                sugov_get_util(&util, &max);
                sugov_iowait_boost(sg_cpu, &util, &max);
-               next_f = get_next_freq(sg_cpu, util, max);
+               next_f = get_next_freq(sg_policy, util, max);
        }
        sugov_update_commit(sg_policy, time, next_f);
 }
@@ -267,7 +266,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
                sugov_iowait_boost(j_sg_cpu, &util, &max);
        }
 
-       return get_next_freq(sg_cpu, util, max);
+       return get_next_freq(sg_policy, util, max);
 }
 
 static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -580,6 +579,7 @@ static int sugov_start(struct cpufreq_policy *policy)
        sg_policy->next_freq = UINT_MAX;
        sg_policy->work_in_progress = false;
        sg_policy->need_freq_update = false;
+       sg_policy->cached_raw_freq = 0;
 
        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
@@ -590,7 +590,6 @@ static int sugov_start(struct cpufreq_policy *policy)
                        sg_cpu->max = 0;
                        sg_cpu->flags = SCHED_CPUFREQ_RT;
                        sg_cpu->last_update = 0;
-                       sg_cpu->cached_raw_freq = 0;
                        sg_cpu->iowait_boost = 0;
                        sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
                        cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
index 1d68b5b7ad4133d102a39006f575bdfe49d808ea..5fb1f2c87e6b846b7f9d32823ef3aede4b28db9e 100644 (file)
@@ -65,7 +65,7 @@ void stack_trace_print(void)
 }
 
 /*
- * When arch-specific code overides this function, the following
+ * When arch-specific code overrides this function, the following
  * data should be filled up, assuming stack_trace_max_lock is held to
  * prevent concurrent updates.
  *     stack_trace_index[]
index 6d861d090e9fc79d39e2b48f57b1d9f4bc91463f..c6f2a37028c205db8143ebe58677c790c66a0faf 100644 (file)
@@ -683,33 +683,26 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
        struct radix_tree_iter iter;
-       struct rb_node *rbn;
        void **slot;
 
        WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 
        spin_lock_irq(&cgwb_lock);
-
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
-
-       while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
-               struct bdi_writeback_congested *congested =
-                       rb_entry(rbn, struct bdi_writeback_congested, rb_node);
-
-               rb_erase(rbn, &bdi->cgwb_congested_tree);
-               congested->bdi = NULL;  /* mark @congested unlinked */
-       }
-
        spin_unlock_irq(&cgwb_lock);
 
        /*
-        * All cgwb's and their congested states must be shutdown and
-        * released before returning.  Drain the usage counter to wait for
-        * all cgwb's and cgwb_congested's ever created on @bdi.
+        * All cgwb's must be shut down and released before returning.  Drain
+        * the usage counter to wait for all cgwb's ever created on @bdi.
         */
        atomic_dec(&bdi->usage_cnt);
        wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
+       /*
+        * Grab back our reference so that we hold it when @bdi gets
+        * re-registered.
+        */
+       atomic_inc(&bdi->usage_cnt);
 }
 
 /**
@@ -749,6 +742,21 @@ void wb_blkcg_offline(struct blkcg *blkcg)
        spin_unlock_irq(&cgwb_lock);
 }
 
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
+{
+       struct rb_node *rbn;
+
+       spin_lock_irq(&cgwb_lock);
+       while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
+               struct bdi_writeback_congested *congested =
+                       rb_entry(rbn, struct bdi_writeback_congested, rb_node);
+
+               rb_erase(rbn, &bdi->cgwb_congested_tree);
+               congested->bdi = NULL;  /* mark @congested unlinked */
+       }
+       spin_unlock_irq(&cgwb_lock);
+}
+
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
 static int cgwb_bdi_init(struct backing_dev_info *bdi)
@@ -769,7 +777,9 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
        return 0;
 }
 
-static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
+static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
+
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
 {
        wb_congested_put(bdi->wb_congested);
 }
@@ -857,6 +867,8 @@ int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
                        MINOR(owner->devt));
        if (rc)
                return rc;
+       /* Leaking owner reference... */
+       WARN_ON(bdi->owner);
        bdi->owner = owner;
        get_device(owner);
        return 0;
@@ -898,6 +910,7 @@ static void bdi_exit(struct backing_dev_info *bdi)
 {
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
+       cgwb_bdi_exit(bdi);
 }
 
 static void release_bdi(struct kref *ref)
index e4766de257090c87bf1e921069f62b10ce688fee..1ebc93e179f3eab40cf469fd67a361ea43a11368 100644 (file)
@@ -1828,7 +1828,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
        VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
        VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
 
-       count_vm_event(THP_SPLIT_PMD);
+       count_vm_event(THP_SPLIT_PUD);
 
        pudp_huge_clear_flush_notify(vma, haddr, pud);
 }
index 6f1ed16308736918730ea836b5fecafc908e487b..3a8ddf8baf7dc3d52597bf0e53753c0cc17503cd 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/printk.h>
 #include <linux/shrinker.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 #include <linux/string.h>
 #include <linux/types.h>
 
@@ -103,6 +104,7 @@ static int quarantine_tail;
 /* Total size of all objects in global_quarantine across all batches. */
 static unsigned long quarantine_size;
 static DEFINE_SPINLOCK(quarantine_lock);
+DEFINE_STATIC_SRCU(remove_cache_srcu);
 
 /* Maximum size of the global queue. */
 static unsigned long quarantine_max_size;
@@ -173,17 +175,22 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
        struct qlist_head *q;
        struct qlist_head temp = QLIST_INIT;
 
+       /*
+        * Note: irq must be disabled until after we move the batch to the
+        * global quarantine. Otherwise quarantine_remove_cache() can miss
+        * some objects belonging to the cache if they are in our local temp
+        * list. quarantine_remove_cache() executes on_each_cpu() at the
+        * beginning which ensures that it either sees the objects in per-cpu
+        * lists or in the global quarantine.
+        */
        local_irq_save(flags);
 
        q = this_cpu_ptr(&cpu_quarantine);
        qlist_put(q, &info->quarantine_link, cache->size);
-       if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
+       if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
                qlist_move_all(q, &temp);
 
-       local_irq_restore(flags);
-
-       if (unlikely(!qlist_empty(&temp))) {
-               spin_lock_irqsave(&quarantine_lock, flags);
+               spin_lock(&quarantine_lock);
                WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
                qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
                if (global_quarantine[quarantine_tail].bytes >=
@@ -196,20 +203,33 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
                        if (new_tail != quarantine_head)
                                quarantine_tail = new_tail;
                }
-               spin_unlock_irqrestore(&quarantine_lock, flags);
+               spin_unlock(&quarantine_lock);
        }
+
+       local_irq_restore(flags);
 }
 
 void quarantine_reduce(void)
 {
        size_t total_size, new_quarantine_size, percpu_quarantines;
        unsigned long flags;
+       int srcu_idx;
        struct qlist_head to_free = QLIST_INIT;
 
        if (likely(READ_ONCE(quarantine_size) <=
                   READ_ONCE(quarantine_max_size)))
                return;
 
+       /*
+        * srcu critical section ensures that quarantine_remove_cache()
+        * will not miss objects belonging to the cache while they are in our
+        * local to_free list. srcu is chosen because (1) it gives us a private
+        * grace period domain that does not interfere with anything else,
+        * and (2) it allows synchronize_srcu() to return without waiting
+        * if there are no pending read critical sections (which is the
+        * expected case).
+        */
+       srcu_idx = srcu_read_lock(&remove_cache_srcu);
        spin_lock_irqsave(&quarantine_lock, flags);
 
        /*
@@ -237,6 +257,7 @@ void quarantine_reduce(void)
        spin_unlock_irqrestore(&quarantine_lock, flags);
 
        qlist_free_all(&to_free, NULL);
+       srcu_read_unlock(&remove_cache_srcu, srcu_idx);
 }
 
 static void qlist_move_cache(struct qlist_head *from,
@@ -280,12 +301,28 @@ void quarantine_remove_cache(struct kmem_cache *cache)
        unsigned long flags, i;
        struct qlist_head to_free = QLIST_INIT;
 
+       /*
+        * Must be careful to not miss any objects that are being moved from
+        * per-cpu list to the global quarantine in quarantine_put(),
+        * nor objects being freed in quarantine_reduce(). on_each_cpu()
+        * achieves the first goal, while synchronize_srcu() achieves the
+        * second.
+        */
        on_each_cpu(per_cpu_remove_cache, cache, 1);
 
        spin_lock_irqsave(&quarantine_lock, flags);
-       for (i = 0; i < QUARANTINE_BATCHES; i++)
+       for (i = 0; i < QUARANTINE_BATCHES; i++) {
+               if (qlist_empty(&global_quarantine[i]))
+                       continue;
                qlist_move_cache(&global_quarantine[i], &to_free, cache);
+               /* Scanning the whole quarantine can take a while. */
+               spin_unlock_irqrestore(&quarantine_lock, flags);
+               cond_resched();
+               spin_lock_irqsave(&quarantine_lock, flags);
+       }
        spin_unlock_irqrestore(&quarantine_lock, flags);
 
        qlist_free_all(&to_free, cache);
+
+       synchronize_srcu(&remove_cache_srcu);
 }
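
The quarantine now pairs on_each_cpu() with a private SRCU domain: per_cpu_remove_cache() flushes the per-cpu lists, while synchronize_srcu() waits for any quarantine_reduce() that still holds objects on its private to_free list. A hedged kernel-side sketch of the same reader/updater shape, with hypothetical names:

DEFINE_STATIC_SRCU(example_srcu);

static void reducer(void)	/* e.g. quarantine_reduce() */
{
	int idx = srcu_read_lock(&example_srcu);

	/* detach objects to a private list and free them; an updater
	 * must not declare the cache empty while we are in here */
	srcu_read_unlock(&example_srcu, idx);
}

static void cache_remover(void)	/* e.g. quarantine_remove_cache() */
{
	/* returns immediately when no reader section is pending */
	synchronize_srcu(&example_srcu);
}
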
index dc5927c812d3d1f9a209fbdbea3a36a61cbde17d..7a2abf0127aef7a9d4879278293d8cab766133e1 100644 (file)
@@ -513,7 +513,43 @@ static long madvise_dontneed(struct vm_area_struct *vma,
        if (!can_madv_dontneed_vma(vma))
                return -EINVAL;
 
-       userfaultfd_remove(vma, prev, start, end);
+       if (!userfaultfd_remove(vma, start, end)) {
+               *prev = NULL; /* mmap_sem has been dropped, prev is stale */
+
+               down_read(&current->mm->mmap_sem);
+               vma = find_vma(current->mm, start);
+               if (!vma)
+                       return -ENOMEM;
+               if (start < vma->vm_start) {
+                       /*
+                        * This "vma" under revalidation is the one
+                        * with the lowest vma->vm_start where start
+                        * is also < vma->vm_end. If start <
+                        * vma->vm_start it means a hole materialized
+                        * in the user address space within the
+                        * virtual range passed to MADV_DONTNEED.
+                        */
+                       return -ENOMEM;
+               }
+               if (!can_madv_dontneed_vma(vma))
+                       return -EINVAL;
+               if (end > vma->vm_end) {
+                       /*
+                        * Don't fail if end > vma->vm_end. If the old
+                        * vma was split while the mmap_sem was
+                        * released, the concurrent split does not
+                        * make the result of MADV_DONTNEED
+                        * undefined. There may be an
+                        * adjacent next vma that we'll walk
+                        * next. userfaultfd_remove() will generate an
+                        * UFFD_EVENT_REMOVE repetition on the
+                        * end-vma->vm_end range, but the manager can
+                        * handle a repetition fine.
+                        */
+                       end = vma->vm_end;
+               }
+               VM_WARN_ON(start >= end);
+       }
        zap_page_range(vma, start, end - start);
        return 0;
 }
@@ -554,8 +590,10 @@ static long madvise_remove(struct vm_area_struct *vma,
         * mmap_sem.
         */
        get_file(f);
-       userfaultfd_remove(vma, prev, start, end);
-       up_read(&current->mm->mmap_sem);
+       if (userfaultfd_remove(vma, start, end)) {
+               /* mmap_sem was not released by userfaultfd_remove() */
+               up_read(&current->mm->mmap_sem);
+       }
        error = vfs_fallocate(f,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, end - start);
index b64b47803e529a87d87f3e3f022e97f17ff606be..696f06d17c4e89b676f19c3c3a5a4c1908697caf 100644 (file)
@@ -1118,7 +1118,10 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
                }
        } while (left < right);
 
-       return min(PHYS_PFN(type->regions[right].base), max_pfn);
+       if (right == type->cnt)
+               return max_pfn;
+       else
+               return min(PHYS_PFN(type->regions[right].base), max_pfn);
 }
 
 /**
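
The fix guards the binary search's boundary case: when pfn lies beyond the last memblock region, right lands at type->cnt and regions[right] would read past the array. A standalone model of the guarded search, simplified to an array of base PFNs:

#include <stdio.h>

/* find the first region base above pfn, clamped to max_pfn; when the
 * search runs off the end, return max_pfn instead of indexing cnt */
static unsigned long next_valid_pfn(const unsigned long *region_base_pfn,
				    int cnt, unsigned long pfn,
				    unsigned long max_pfn)
{
	int left = 0, right = cnt;

	while (left < right) {
		int mid = left + (right - left) / 2;

		if (region_base_pfn[mid] <= pfn)
			left = mid + 1;
		else
			right = mid;
	}
	if (right == cnt)
		return max_pfn;
	return region_base_pfn[right] < max_pfn ?
	       region_base_pfn[right] : max_pfn;
}

int main(void)
{
	const unsigned long bases[] = { 0x100, 0x800 };

	/* pfn past the last region: previously out of bounds, now max_pfn */
	printf("%lx\n", next_valid_pfn(bases, 2, 0x900, 0x1000));
	return 0;
}
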
index c52ec893e241cf6b52764797f6aea5ed56219e23..2bd7541d7c11231431c060ca6cfe84a89f096fe3 100644 (file)
@@ -466,6 +466,8 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
        struct mem_cgroup_tree_per_node *mctz;
 
        mctz = soft_limit_tree_from_page(page);
+       if (!mctz)
+               return;
        /*
         * Necessary to update all ancestors when hierarchy is used.
         * because their event counter is not touched.
@@ -503,7 +505,8 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
        for_each_node(nid) {
                mz = mem_cgroup_nodeinfo(memcg, nid);
                mctz = soft_limit_tree_node(nid);
-               mem_cgroup_remove_exceeded(mz, mctz);
+               if (mctz)
+                       mem_cgroup_remove_exceeded(mz, mctz);
        }
 }
 
@@ -2558,7 +2561,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
         * is empty. Do it lockless to prevent lock bouncing. Races
         * are acceptable as soft limit is best effort anyway.
         */
-       if (RB_EMPTY_ROOT(&mctz->rb_root))
+       if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
                return 0;
 
        /*
@@ -4135,17 +4138,22 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
        kfree(memcg->nodeinfo[node]);
 }
 
-static void mem_cgroup_free(struct mem_cgroup *memcg)
+static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
        int node;
 
-       memcg_wb_domain_exit(memcg);
        for_each_node(node)
                free_mem_cgroup_per_node_info(memcg, node);
        free_percpu(memcg->stat);
        kfree(memcg);
 }
 
+static void mem_cgroup_free(struct mem_cgroup *memcg)
+{
+       memcg_wb_domain_exit(memcg);
+       __mem_cgroup_free(memcg);
+}
+
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
        struct mem_cgroup *memcg;
@@ -4196,7 +4204,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 fail:
        if (memcg->id.id > 0)
                idr_remove(&mem_cgroup_idr, memcg->id.id);
-       mem_cgroup_free(memcg);
+       __mem_cgroup_free(memcg);
        return NULL;
 }
 
index 945edac468101469fc4cbcbdf4de7bdc96d5722e..0dd9ca18e19ed7ddb499a480c5831c312791b10a 100644 (file)
@@ -443,7 +443,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 
        while (start < end) {
                struct page *page;
-               unsigned int page_mask;
+               unsigned int page_mask = 0;
                unsigned long page_increm;
                struct pagevec pvec;
                struct zone *zone;
@@ -457,8 +457,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                 * suits munlock very well (and if somehow an abnormal page
                 * has sneaked into the range, we won't oops here: great).
                 */
-               page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
-                               &page_mask);
+               page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
 
                if (page && !IS_ERR(page)) {
                        if (PageTransTail(page)) {
@@ -469,8 +468,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                                /*
                                 * Any THP page found by follow_page_mask() may
                                 * have gotten split before reaching
-                                * munlock_vma_page(), so we need to recompute
-                                * the page_mask here.
+                                * munlock_vma_page(), so we need to compute
+                                * the page_mask here instead.
                                 */
                                page_mask = munlock_vma_page(page);
                                unlock_page(page);
index 2984403a24247b570a5664f94b1e8514304c08a8..49ed681ccc7b01d5e2a73b48b62a1da4ac9731f2 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1321,12 +1321,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        }
 
        while (page_vma_mapped_walk(&pvmw)) {
-               subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
-               address = pvmw.address;
-
-               /* Unexpected PMD-mapped THP? */
-               VM_BUG_ON_PAGE(!pvmw.pte, page);
-
                /*
                 * If the page is mlock()d, we cannot swap it out.
                 * If it's recently referenced (perhaps page_referenced
@@ -1350,6 +1344,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                continue;
                }
 
+               /* Unexpected PMD-mapped THP? */
+               VM_BUG_ON_PAGE(!pvmw.pte, page);
+
+               subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+               address = pvmw.address;
+
                if (!(flags & TTU_IGNORE_ACCESS)) {
                        if (ptep_clear_flush_young_notify(vma, address,
                                                pvmw.pte)) {
index 69f9aff39a2eaf608d4f7cfaed8904bd3c3312c8..b1947f0cbee2f97ce7ba7e5bd4ca4ea3dc205533 100644 (file)
@@ -1065,6 +1065,9 @@ const char * const vmstat_text[] = {
        "thp_split_page_failed",
        "thp_deferred_split_page",
        "thp_split_pmd",
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+       "thp_split_pud",
+#endif
        "thp_zero_page_alloc",
        "thp_zero_page_alloc_failed",
 #endif
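
Together with the THP_SPLIT_PUD event and the __split_huge_pud_locked() counting fix above, this string exposes the counter as thp_split_pud in /proc/vmstat on kernels built with CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD. A small userspace check:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	/* print the PUD split counter, if this kernel exports it */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "thp_split_pud", 13))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
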
index 9b0b5cbc5b899be4ddbafe2ce5f3ec5ab0743b6c..0f98634c20a097697cec9849dc9e4b338cd5e5c9 100644 (file)
@@ -133,7 +133,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gc
 #if BUILDING_GCC_VERSION < 6000
        register_callback(plugin_name, PLUGIN_START_UNIT, &sancov_start_unit, NULL);
        register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_sancov);
-       register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_plugin_pass_info);
+       register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_pass_info);
 #endif
 
        return 0;
index 0458b037c8a137daa0f0fc205cabc188b18ae513..0545f5a8cabed76cb2c49cfd8c2d08f567bc4980 100644 (file)
@@ -372,6 +372,8 @@ disassocation||disassociation
 disapear||disappear
 disapeared||disappeared
 disappared||disappeared
+disble||disable
+disbled||disabled
 disconnet||disconnect
 discontinous||discontinuous
 dispertion||dispersion
@@ -732,6 +734,7 @@ oustanding||outstanding
 overaall||overall
 overhread||overhead
 overlaping||overlapping
+overide||override
 overrided||overridden
 overriden||overridden
 overun||overrun
index ec1067a679da406019bd4c98e6b6cf22fd5a4432..08b1399d1da2b818b997b752555532ebdf45312e 100644 (file)
@@ -89,7 +89,7 @@ static void acp_reg_write(u32 val, void __iomem *acp_mmio, u32 reg)
        writel(val, acp_mmio + (reg * 4));
 }
 
-/* Configure a given dma channel parameters - enable/disble,
+/* Configure a given dma channel parameters - enable/disable,
  * number of descriptors, priority
  */
 static void config_acp_dma_channel(void __iomem *acp_mmio, u8 ch_num,
index 11c8d9bc762ef0c4bde99dec4292e84ff00d3477..5d19fdf80292c226769a91ccef519a47b3788b2b 100644 (file)
@@ -1387,7 +1387,7 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
                /* Allow writing to any other BAR, or expansion ROM */
                iowrite(portoff, val, mask, &d->config_words[reg]);
                return true;
-               /* We let them overide latency timer and cacheline size */
+               /* We let them override latency timer and cacheline size */
        } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
                /* Only let them change the first two fields. */
                if (mask == 0xFFFFFFFF)
index e2efddf1023177c202d626257c8466f3cb8c40c3..1f5300e56b44dc7bca0b269261d5f7987eb564b6 100644 (file)
@@ -132,7 +132,7 @@ else
   Q = @
 endif
 
-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
 # level Makefile (perf), otherwise build Makefile will get
 # the same command line setup.
 MAKEOVERRIDES=
index 47076b15eebeaa5b54583761130b10ecef2fc0aa..9b8555ea3459c85bef282dad5166700771f0e5ed 100644 (file)
@@ -135,7 +135,7 @@ else
   Q = @
 endif
 
-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
 # level Makefile (perf), otherwise build Makefile will get
 # the same command line setup.
 MAKEOVERRIDES=
index 66342804161c80ea611b3dfa554a602fadc4213e..0c03538df74c01a1ecedc353e21b6c81083ee1e1 100644 (file)
@@ -140,7 +140,7 @@ struct pevent_plugin_option {
  *   struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = {
  *     {
  *             .name = "option-name",
- *             .plugin_alias = "overide-file-name", (optional)
+ *             .plugin_alias = "override-file-name", (optional)
  *             .description = "description of option to show users",
  *     },
  *     {
index 4cff7e7ddcc47b80ef30a06a779ea45dae5a5f3e..41642ba5e318a153d805720e47475436817be53e 100644 (file)
@@ -1,5 +1,9 @@
 # Makefile for vm selftests
 
+ifndef OUTPUT
+  OUTPUT := $(shell pwd)
+endif
+
 CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
 LDLIBS = -lrt
 TEST_GEN_FILES = compaction_test