Merge commit 'linus/master' into x86/urgent
author	H. Peter Anvin <hpa@zytor.com>
Fri, 11 Dec 2009 18:57:42 +0000 (10:57 -0800)
committer	H. Peter Anvin <hpa@zytor.com>
Fri, 11 Dec 2009 18:57:42 +0000 (10:57 -0800)
1  2 
arch/x86/kernel/amd_iommu.c
arch/x86/mm/kmmio.c

index a83185080e912b3a9245e70b574a06fb9a24af57,1c0fb4d4ad556c5daf51f623151b051577f3a966..b990b5cc95418df45f1c94f1b748cb5d59d45e1c
@@@ -166,43 -166,6 +166,43 @@@ static void iommu_uninit_device(struct 
  {
        kfree(dev->archdata.iommu);
  }
 +
 +void __init amd_iommu_uninit_devices(void)
 +{
 +      struct pci_dev *pdev = NULL;
 +
 +      for_each_pci_dev(pdev) {
 +
 +              if (!check_device(&pdev->dev))
 +                      continue;
 +
 +              iommu_uninit_device(&pdev->dev);
 +      }
 +}
 +
 +int __init amd_iommu_init_devices(void)
 +{
 +      struct pci_dev *pdev = NULL;
 +      int ret = 0;
 +
 +      for_each_pci_dev(pdev) {
 +
 +              if (!check_device(&pdev->dev))
 +                      continue;
 +
 +              ret = iommu_init_device(&pdev->dev);
 +              if (ret)
 +                      goto out_free;
 +      }
 +
 +      return 0;
 +
 +out_free:
 +
 +      amd_iommu_uninit_devices();
 +
 +      return ret;
 +}
  #ifdef CONFIG_AMD_IOMMU_STATS
  
  /*
@@@ -1624,11 -1587,6 +1624,11 @@@ static struct notifier_block device_nb 
        .notifier_call = device_change_notifier,
  };
  
 +void amd_iommu_init_notifier(void)
 +{
 +      bus_register_notifier(&pci_bus_type, &device_nb);
 +}
 +
  /*****************************************************************************
   *
   * The next functions belong to the dma_ops mapping/unmapping code.
@@@ -1825,7 -1783,7 +1825,7 @@@ retry
                        goto out;
  
                /*
-                * aperture was sucessfully enlarged by 128 MB, try
+                * aperture was successfully enlarged by 128 MB, try
                 * allocation again
                 */
                goto retry;
@@@ -2187,6 -2145,8 +2187,6 @@@ static void prealloc_protection_domains
                if (!check_device(&dev->dev))
                        continue;
  
 -              iommu_init_device(&dev->dev);
 -
                /* Is there already any domain for it? */
                if (domain_for_device(&dev->dev))
                        continue;
@@@ -2255,6 -2215,8 +2255,6 @@@ int __init amd_iommu_init_dma_ops(void
  
        register_iommu(&amd_iommu_ops);
  
 -      bus_register_notifier(&pci_bus_type, &device_nb);
 -
        amd_iommu_stats_init();
  
        return 0;
@@@ -2528,7 -2490,7 +2528,7 @@@ int __init amd_iommu_init_passthrough(v
        struct pci_dev *dev = NULL;
        u16 devid;
  
-       /* allocate passthroug domain */
+       /* allocate passthrough domain */
        pt_domain = protection_domain_alloc();
        if (!pt_domain)
                return -ENOMEM;
diff --combined arch/x86/mm/kmmio.c
index 68c3e89af5c2699b3b1c9b19cf35769ab2d57285,07bcc309cfdac8f48bbad39a1bc863c5e4d5dd78..c0f6198565eb63592a3cbebaf7c8e538e987b157
@@@ -5,8 -5,6 +5,8 @@@
   *     2008 Pekka Paalanen <pq@iki.fi>
   */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/list.h>
  #include <linux/rculist.h>
  #include <linux/spinlock.h>
@@@ -138,7 -136,7 +138,7 @@@ static int clear_page_presence(struct k
        pte_t *pte = lookup_address(f->page, &level);
  
        if (!pte) {
 -              pr_err("kmmio: no pte for page 0x%08lx\n", f->page);
 +              pr_err("no pte for page 0x%08lx\n", f->page);
                return -1;
        }
  
                clear_pte_presence(pte, clear, &f->old_presence);
                break;
        default:
 -              pr_err("kmmio: unexpected page level 0x%x.\n", level);
 +              pr_err("unexpected page level 0x%x.\n", level);
                return -1;
        }
  
  static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
  {
        int ret;
 -      WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
 +      WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
        if (f->armed) {
 -              pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
 -                                      f->page, f->count, !!f->old_presence);
 +              pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
 +                         f->page, f->count, !!f->old_presence);
        }
        ret = clear_page_presence(f, true);
 -      WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
 +      WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
 +                f->page);
        f->armed = true;
        return ret;
  }
@@@ -206,7 -203,7 +206,7 @@@ static void disarm_kmmio_fault_page(str
   */
  /*
   * Interrupts are disabled on entry as trap3 is an interrupt gate
-  * and they remain disabled thorough out this function.
+  * and they remain disabled throughout this function.
   */
  int kmmio_handler(struct pt_regs *regs, unsigned long addr)
  {
                         * condition needs handling by do_page_fault(), the
                         * page really not being present is the most common.
                         */
 -                      pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
 -                                      addr, smp_processor_id());
 +                      pr_debug("secondary hit for 0x%08lx CPU %d.\n",
 +                               addr, smp_processor_id());
  
                        if (!faultpage->old_presence)
 -                              pr_info("kmmio: unexpected secondary hit for "
 -                                      "address 0x%08lx on CPU %d.\n", addr,
 -                                      smp_processor_id());
 +                              pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
 +                                      addr, smp_processor_id());
                } else {
                        /*
                         * Prevent overwriting already in-flight context.
                         * This should not happen, let's hope disarming at
                         * least prevents a panic.
                         */
 -                      pr_emerg("kmmio: recursive probe hit on CPU %d, "
 -                                      "for address 0x%08lx. Ignoring.\n",
 -                                      smp_processor_id(), addr);
 -                      pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
 -                                              ctx->addr);
 +                      pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
 +                               smp_processor_id(), addr);
 +                      pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
                        disarm_kmmio_fault_page(faultpage);
                }
                goto no_kmmio_ctx;
@@@ -302,7 -302,7 +302,7 @@@ no_kmmio
  
  /*
   * Interrupts are disabled on entry as trap1 is an interrupt gate
-  * and they remain disabled thorough out this function.
+  * and they remain disabled throughout this function.
   * This must always get called as the pair to kmmio_handler().
   */
  static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
                 * something external causing them (f.e. using a debugger while
                 * mmio tracing enabled), or erroneous behaviour
                 */
 -              pr_warning("kmmio: unexpected debug trap on CPU %d.\n",
 -                                                      smp_processor_id());
 +              pr_warning("unexpected debug trap on CPU %d.\n",
 +                         smp_processor_id());
                goto out;
        }
  
@@@ -425,7 -425,7 +425,7 @@@ int register_kmmio_probe(struct kmmio_p
        list_add_rcu(&p->list, &kmmio_probes);
        while (size < size_lim) {
                if (add_kmmio_fault_page(p->addr + size))
 -                      pr_err("kmmio: Unable to set page fault.\n");
 +                      pr_err("Unable to set page fault.\n");
                size += PAGE_SIZE;
        }
  out:
@@@ -490,7 -490,7 +490,7 @@@ static void remove_kmmio_fault_pages(st
   * 2. remove_kmmio_fault_pages()
   *    Remove the pages from kmmio_page_table.
   * 3. rcu_free_kmmio_fault_pages()
 - *    Actally free the kmmio_fault_page structs as with RCU.
 + *    Actually free the kmmio_fault_page structs as with RCU.
   */
  void unregister_kmmio_probe(struct kmmio_probe *p)
  {
  
        drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
        if (!drelease) {
 -              pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
 +              pr_crit("leaking kmmio_fault_page objects.\n");
                return;
        }
        drelease->release_list = release_list;