Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator
author	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 6 Mar 2012 16:23:30 +0000 (08:23 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 6 Mar 2012 16:23:30 +0000 (08:23 -0800)
Pull regulator updates from Mark Brown:
 "A simple fix that's obvious from inspection.  There's no mainline
  users of this driver yet (there's some i.MX platforms which will use
  it)."

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator:
  regulator: Fix mask parameter in da9052_reg_update calls

106 files changed:
Documentation/kernel-parameters.txt
MAINTAINERS
arch/alpha/include/asm/futex.h
arch/mips/alchemy/common/time.c
arch/mips/ath79/dev-wmac.c
arch/mips/configs/nlm_xlp_defconfig
arch/mips/configs/nlm_xlr_defconfig
arch/mips/configs/powertv_defconfig
arch/mips/include/asm/mach-au1x00/gpio-au1300.h
arch/mips/include/asm/page.h
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/traps.c
arch/mips/kernel/vmlinux.lds.S
arch/mips/mm/fault.c
arch/mips/pci/pci.c
arch/mips/pmc-sierra/yosemite/ht-irq.c
arch/mips/txx9/generic/7segled.c
arch/x86/ia32/ia32_aout.c
arch/x86/pci/acpi.c
drivers/block/floppy.c
drivers/gpu/drm/gma500/cdv_device.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/gtt.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/usbhid/hid-quirks.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/mfd/ab8500-core.c
drivers/mfd/mfd-core.c
drivers/mfd/s5m-core.c
drivers/mfd/tps65910.c
drivers/mfd/tps65912-core.c
drivers/mfd/wm8350-irq.c
drivers/mfd/wm8994-core.c
drivers/mfd/wm8994-regmap.c
drivers/misc/c2port/core.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/mmci.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
drivers/net/ethernet/packetengines/Kconfig
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/pps/pps.c
drivers/rapidio/devices/tsi721.c
drivers/rtc/rtc-r9701.c
drivers/scsi/sd_dif.c
drivers/tty/Kconfig
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-fsl.h
fs/aio.c
fs/binfmt_aout.c
fs/dcache.c
fs/exec.c
include/linux/dcache.h
include/linux/kmsg_dump.h
include/linux/memcontrol.h
include/linux/percpu.h
include/linux/sched.h
include/linux/tcp.h
include/net/tcp.h
kernel/fork.c
kernel/hung_task.c
kernel/kprobes.c
kernel/printk.c
lib/debugobjects.c
lib/vsprintf.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/memcontrol.c
mm/migrate.c
mm/mmap.c
mm/page_cgroup.c
mm/percpu-vm.c
mm/swap.c
mm/swap_state.c
net/bridge/br_multicast.c
net/bridge/br_stp.c
net/bridge/netfilter/ebtables.c
net/core/rtnetlink.c
net/ipv4/tcp_input.c
net/mac80211/iface.c
net/mac80211/rate.c
tools/perf/builtin-record.c
tools/perf/builtin-top.c
tools/perf/perf.h
tools/perf/util/top.h
tools/perf/util/util.c

index 033d4e69b43b107d9780ec959105823e6cefd07c..d99fd9c0ec0e16900663e1378eac9911d4afc845 100644 (file)
@@ -2211,6 +2211,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
                        default: off.
 
+       printk.always_kmsg_dump=
+                       Trigger kmsg_dump for cases other than kernel oops or
+                       panics
+                       Format: <bool>  (1/Y/y=enable, 0/N/n=disable)
+                       default: disabled
+
        printk.time=    Show timing data prefixed to each printk message line
                        Format: <bool>  (1/Y/y=enable, 0/N/n=disable)
 
index 4e41d5255d7235662633f1e16af2b82a744dc85c..b087b3bca2901b5df233b805a5fb037bcfa9c74c 100644 (file)
@@ -1310,7 +1310,7 @@ F:        drivers/atm/
 F:     include/linux/atm*
 
 ATMEL AT91 MCI DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@atmel.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.atmel.com/products/AT91/
 W:     http://www.at91.com/
@@ -1318,7 +1318,7 @@ S:        Maintained
 F:     drivers/mmc/host/at91_mci.c
 
 ATMEL AT91 / AT32 MCI DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@atmel.com>
 S:     Maintained
 F:     drivers/mmc/host/atmel-mci.c
 F:     drivers/mmc/host/atmel-mci-regs.h
index e8a761aee088a9bc5a8d2ce62d6bf1fde2183a92..f939794363ac6f94ad82a192ede6b9410f7bc002 100644 (file)
@@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        "       lda     $31,3b-2b(%0)\n"
        "       .previous\n"
        :       "+r"(ret), "=&r"(prev), "=&r"(cmp)
-       :       "r"(uaddr), "r"((long)oldval), "r"(newval)
+       :       "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
        :       "memory");
 
        *uval = prev;
index 7da4d0081487b73f341cd4a51f6ef5efe33141ae..a7193ae13a5d2d6ab8df643a76c6b33cd9621421 100644 (file)
@@ -146,7 +146,7 @@ static int __init alchemy_time_init(unsigned int m2int)
        cd->shift = 32;
        cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
        cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
-       cd->min_delta_ns = clockevent_delta2ns(8, cd);  /* ~0.25ms */
+       cd->min_delta_ns = clockevent_delta2ns(9, cd);  /* ~0.28ms */
        clockevents_register_device(cd);
        setup_irq(m2int, &au1x_rtcmatch2_irqaction);
 
index 24f546985b69b5b572e60dfc60be97a92a726483..e21507052066fbfe5aaec7185286376f2eaf4d02 100644 (file)
@@ -96,7 +96,7 @@ void __init ath79_register_wmac(u8 *cal_data)
 {
        if (soc_is_ar913x())
                ar913x_wmac_setup();
-       if (soc_is_ar933x())
+       else if (soc_is_ar933x())
                ar933x_wmac_setup();
        else
                BUG();
index 4479fd669ac1877a4d84a65284c714bd68310633..28c6b276c21624a4550c2411042065936464d19c 100644 (file)
@@ -8,7 +8,7 @@ CONFIG_HIGH_RES_TIMERS=y
 # CONFIG_SECCOMP is not set
 CONFIG_USE_OF=y
 CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE="mips-linux-gnu-"
+CONFIG_CROSS_COMPILE=""
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -22,7 +22,7 @@ CONFIG_AUDIT=y
 CONFIG_CGROUPS=y
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs.xlp"
+CONFIG_INITRAMFS_SOURCE=""
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
 CONFIG_INITRAMFS_COMPRESSION_LZMA=y
index 7c68666fdd646c3354f872ffc91eeeecfa4785d3..d0b857d98c91703b01950f9d1a705c1048cd7ac0 100644 (file)
@@ -8,7 +8,7 @@ CONFIG_HIGH_RES_TIMERS=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_KEXEC=y
 CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE="mips-linux-gnu-"
+CONFIG_CROSS_COMPILE=""
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -22,7 +22,7 @@ CONFIG_AUDIT=y
 CONFIG_NAMESPACES=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs.xlr"
+CONFIG_INITRAMFS_SOURCE=""
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
 CONFIG_INITRAMFS_COMPRESSION_GZIP=y
index 3b0b6e8c85334d610099d554f5a7a72b941ffee9..7fda0ce5f692cd51ed8b06f788bffc6a14f866f7 100644 (file)
@@ -6,7 +6,7 @@ CONFIG_HZ_1000=y
 CONFIG_PREEMPT=y
 # CONFIG_SECCOMP is not set
 CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE="mips-linux-"
+CONFIG_CROSS_COMPILE=""
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=16
index 556e1be20bf63862bacf3fcb464b6b87418c50eb..fb9975c74c571a01526f42667a8f0d7cbedf235e 100644 (file)
@@ -11,6 +11,9 @@
 #include <asm/io.h>
 #include <asm/mach-au1x00/au1000.h>
 
+struct gpio;
+struct gpio_chip;
+
 /* with the current GPIC design, up to 128 GPIOs are possible.
  * The only implementation so far is in the Au1300, which has 75 externally
  * available GPIOs.
@@ -203,7 +206,22 @@ static inline int gpio_request(unsigned int gpio, const char *label)
        return 0;
 }
 
-static inline void gpio_free(unsigned int gpio)
+static inline int gpio_request_one(unsigned gpio,
+                                       unsigned long flags, const char *label)
+{
+       return 0;
+}
+
+static inline int gpio_request_array(struct gpio *array, size_t num)
+{
+       return 0;
+}
+
+static inline void gpio_free(unsigned gpio)
+{
+}
+
+static inline void gpio_free_array(struct gpio *array, size_t num)
 {
 }
 
index d41790928c648de70fb02a7404517f1fc7a94fba..da9bd7d270d18a761f74f6168653d2eb16da7118 100644 (file)
@@ -39,9 +39,6 @@
 #define HPAGE_MASK     (~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
 #else /* !CONFIG_HUGETLB_PAGE */
-# ifndef BUILD_BUG
-#  define BUILD_BUG() do { extern void __build_bug(void); __build_bug(); } while (0)
-# endif
 #define HPAGE_SHIFT    ({BUILD_BUG(); 0; })
 #define HPAGE_SIZE     ({BUILD_BUG(); 0; })
 #define HPAGE_MASK     ({BUILD_BUG(); 0; })
index 58fe71afd8797dd37e1cf0442f1b360082af63a2..d5e950ab852792b15c2226f193f55061719f0e20 100644 (file)
@@ -8,7 +8,6 @@
  * SMP support for BMIPS
  */
 
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
index cc4a3f120f54d6f036e66812cf47e5c35e7dbff9..d79ae5437b5871efda13f052c5f7511ccf9587b9 100644 (file)
@@ -1135,7 +1135,7 @@ asmlinkage void do_mt(struct pt_regs *regs)
                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
                break;
        case 5:
-               printk(KERN_DEBUG "Gating Storage Schedulier Exception\n");
+               printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
                break;
        default:
                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
index a81176f44c74d4e39be2eaf8d939ccdf1aa5157b..924da5eb7031498ea93dc050a7492aa78f5bb0ec 100644 (file)
@@ -69,7 +69,6 @@ SECTIONS
        RODATA
 
        /* writeable */
-       _sdata = .;                             /* Start of data section */
        .data : {       /* Data */
                . = . + DATAOFFSET;             /* for CONFIG_MAPPED_KERNEL */
 
index 937cf3368164c6f6d4a6db4b1867ca5866a36ed7..69ebd586d7ffbef5029b8540ae3d021135035295 100644 (file)
@@ -42,6 +42,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
        const int field = sizeof(unsigned long) * 2;
        siginfo_t info;
        int fault;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                                                (write ? FAULT_FLAG_WRITE : 0);
 
 #if 0
        printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -91,6 +93,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
+retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
@@ -144,7 +147,11 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(mm, vma, address, flags);
+
+       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+               return;
+
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
@@ -153,12 +160,27 @@ good_area:
                        goto do_sigbus;
                BUG();
        }
-       if (fault & VM_FAULT_MAJOR) {
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-               tsk->maj_flt++;
-       } else {
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-               tsk->min_flt++;
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               if (fault & VM_FAULT_MAJOR) {
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+                                                 regs, address);
+                       tsk->maj_flt++;
+               } else {
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+                                                 regs, address);
+                       tsk->min_flt++;
+               }
+               if (fault & VM_FAULT_RETRY) {
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+                       /*
+                        * No need to up_read(&mm->mmap_sem) as we would
+                        * have already released it in __lock_page_or_retry
+                        * in mm/filemap.c.
+                        */
+
+                       goto retry;
+               }
        }
 
        up_read(&mm->mmap_sem);
index aec2b111d35b0131f805e736bd80c2ca19d59cbf..15521505ebe80a4a6975f11905e4694111474a08 100644 (file)
@@ -279,7 +279,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
 {
        /* Propagate hose info into the subordinate devices.  */
 
-       struct list_head *ln;
        struct pci_dev *dev = bus->self;
 
        if (pci_probe_only && dev &&
@@ -288,9 +287,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
                pcibios_fixup_device_resources(dev, bus);
        }
 
-       for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
-               dev = pci_dev_b(ln);
-
+       list_for_each_entry(dev, &bus->devices, bus_list) {
                if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
                        pcibios_fixup_device_resources(dev, bus);
        }
index 86b98e98fb4f2f09906a8ee271550e1ab9b961c9..62ead6601c69437aeaafe5dcd3bc7c895fcee5f1 100644 (file)
  */
 void __init titan_ht_pcibios_fixup_bus(struct pci_bus *bus)
 {
-       struct pci_bus *current_bus = bus;
-       struct pci_dev *devices;
-       struct list_head *devices_link;
-
-       list_for_each(devices_link, &(current_bus->devices)) {
-               devices = pci_dev_b(devices_link);
-               if (devices == NULL)
-                       continue;
-       }
-
        /*
         * PLX and SPKT related changes go here
         */
index 8e93b21225249fe489b8666b94a4ac407b43406b..4642f56e70e54c38aca1d98df27fee53b7761402 100644 (file)
@@ -102,7 +102,7 @@ static int __init tx_7segled_init_sysfs(void)
                        break;
                }
                dev->id = i;
-               dev->dev = &tx_7segled_subsys;
+               dev->bus = &tx_7segled_subsys;
                error = device_register(dev);
                if (!error) {
                        device_create_file(dev, &dev_attr_ascii);
index fd843877e84152d87d437fca5173b858336a104a..39e49091f64841873ee1d79d768aa677832c500f 100644 (file)
@@ -315,6 +315,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        current->mm->free_area_cache = TASK_UNMAPPED_BASE;
        current->mm->cached_hole_size = 0;
 
+       retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
+       if (retval < 0) {
+               /* Someone check-me: is this error path enough? */
+               send_sig(SIGKILL, current, 0);
+               return retval;
+       }
+
        install_exec_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
 
@@ -410,13 +417,6 @@ beyond_if:
 
        set_brk(current->mm->start_brk, current->mm->brk);
 
-       retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
-       if (retval < 0) {
-               /* Someone check-me: is this error path enough? */
-               send_sig(SIGKILL, current, 0);
-               return retval;
-       }
-
        current->mm->start_stack =
                (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
        /* start thread */
index a312e76063a7c4b1320eb80718cf81c33e7361dc..49a5cb55429b6ae51696f43c5a05c63a07df5566 100644 (file)
@@ -60,6 +60,16 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
                },
        },
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */
+       {
+               .callback = set_use_crs,
+               .ident = "MSI MS-7253",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+                       DMI_MATCH(DMI_BOARD_NAME, "MS-7253"),
+                       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+               },
+       },
 
        /* Now for the blacklist.. */
 
@@ -282,9 +292,6 @@ static void add_resources(struct pci_root_info *info)
        int i;
        struct resource *res, *root, *conflict;
 
-       if (!pci_use_crs)
-               return;
-
        coalesce_windows(info, IORESOURCE_MEM);
        coalesce_windows(info, IORESOURCE_IO);
 
@@ -336,8 +343,13 @@ get_current_resources(struct acpi_device *device, int busnum,
        acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
                                &info);
 
-       add_resources(&info);
-       return;
+       if (pci_use_crs) {
+               add_resources(&info);
+
+               return;
+       }
+
+       kfree(info.name);
 
 name_alloc_fail:
        kfree(info.res);
index 9baf11e8636205c413bd5c00498baab628657b44..744f078f4dd8311caed330939afbcdc29c2da913 100644 (file)
@@ -3832,7 +3832,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
        bio.bi_size = size;
        bio.bi_bdev = bdev;
        bio.bi_sector = 0;
-       bio.bi_flags = BIO_QUIET;
+       bio.bi_flags = (1 << BIO_QUIET);
        init_completion(&complete);
        bio.bi_private = &complete;
        bio.bi_end_io = floppy_rb0_complete;
index 4a5b099c3bc5ac9430c9b24a651f326f0295c748..53404af2e748b74a6801f5d5f70079e4ce556a88 100644 (file)
@@ -321,6 +321,8 @@ static int cdv_chip_setup(struct drm_device *dev)
        cdv_get_core_freq(dev);
        gma_intel_opregion_init(dev);
        psb_intel_init_bios(dev);
+       REG_WRITE(PORT_HOTPLUG_EN, 0);
+       REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
        return 0;
 }
 
index 830dfdd6bf154a473e15357654cf4d19be8d6b81..be616735ec918502238df831ad59510513358996 100644 (file)
@@ -247,7 +247,6 @@ static struct fb_ops psbfb_roll_ops = {
        .fb_imageblit = cfb_imageblit,
        .fb_pan_display = psbfb_pan,
        .fb_mmap = psbfb_mmap,
-       .fb_sync = psbfb_sync,
        .fb_ioctl = psbfb_ioctl,
 };
 
index 5d5330f667f14031512c022e12ed3d8eb61bc530..aff194fbe9f3ee00c627e012f8d364af3caa2f8d 100644 (file)
@@ -446,10 +446,9 @@ int psb_gtt_init(struct drm_device *dev, int resume)
        pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
        gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
                                                                >> PAGE_SHIFT;
-       /* Some CDV firmware doesn't report this currently. In which case the
-          system has 64 gtt pages */
+       /* CDV doesn't report this. In which case the system has 64 gtt pages */
        if (pg->gtt_start == 0 || gtt_pages == 0) {
-               dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
+               dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
                gtt_pages = 64;
                pg->gtt_start = dev_priv->pge_ctl;
        }
@@ -461,10 +460,10 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 
        if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
                static struct resource fudge;   /* Preferably peppermint */
-               /* This can occur on CDV SDV systems. Fudge it in this case.
+               /* This can occur on CDV systems. Fudge it in this case.
                   We really don't care what imaginary space is being allocated
                   at this point */
-               dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
+               dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
                pg->gatt_start = 0x40000000;
                pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
                /* This is a little confusing but in fact the GTT is providing
index b8574cddd95352a360ef7ff67200527185b3c1fb..63552e30d0c38986308ad2a385bccd8ab6bb9218 100644 (file)
@@ -59,6 +59,9 @@
 #define USB_VENDOR_ID_AIRCABLE         0x16CA
 #define USB_DEVICE_ID_AIRCABLE1                0x1502
 
+#define USB_VENDOR_ID_AIREN            0x1a2c
+#define USB_DEVICE_ID_AIREN_SLIMPLUS   0x0002
+
 #define USB_VENDOR_ID_ALCOR            0x058f
 #define USB_DEVICE_ID_ALCOR_USBRS232   0x9720
 
index 9333d692a786b03df115ebe87d920e748b607dc6..627850a54d34ecd6da97f281324b1eff98a368fc 100644 (file)
@@ -986,8 +986,13 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
                return;
        }
 
-       /* Ignore out-of-range values as per HID specification, section 5.10 */
-       if (value < field->logical_minimum || value > field->logical_maximum) {
+       /*
+        * Ignore out-of-range values as per HID specification,
+        * section 5.10 and 6.2.25
+        */
+       if ((field->flags & HID_MAIN_ITEM_VARIABLE) &&
+           (value < field->logical_minimum ||
+            value > field->logical_maximum)) {
                dbg_hid("Ignoring out-of-range value %x\n", value);
                return;
        }
index c831af937481c66123965743f62aaf5f8a634b3c..57d4e1e1df48df061461681449f5e98f49c20a1e 100644 (file)
@@ -54,6 +54,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
 
+       { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
index a368db2431a596020a98a3e64f67685ccef9c6cc..a0b225eb4ac449b0ae4b74349be9dbb1e0874fa7 100644 (file)
@@ -624,7 +624,7 @@ int md_raid1_congested(struct mddev *mddev, int bits)
                return 1;
 
        rcu_read_lock();
-       for (i = 0; i < conf->raid_disks; i++) {
+       for (i = 0; i < conf->raid_disks * 2; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);
index 6e8aa213f0d5208d917b8222a56980ab3582b4d6..58c44d6453a0bfa0fb40f155c0357e4392731999 100644 (file)
@@ -67,6 +67,7 @@ static int max_queued_requests = 1024;
 
 static void allow_barrier(struct r10conf *conf);
 static void lower_barrier(struct r10conf *conf);
+static int enough(struct r10conf *conf, int ignore);
 
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
@@ -347,6 +348,19 @@ static void raid10_end_read_request(struct bio *bio, int error)
                 * wait for the 'master' bio.
                 */
                set_bit(R10BIO_Uptodate, &r10_bio->state);
+       } else {
+               /* If all other devices that store this block have
+                * failed, we want to return the error upwards rather
+                * than fail the last device.  Here we redefine
+                * "uptodate" to mean "Don't want to retry"
+                */
+               unsigned long flags;
+               spin_lock_irqsave(&conf->device_lock, flags);
+               if (!enough(conf, rdev->raid_disk))
+                       uptodate = 1;
+               spin_unlock_irqrestore(&conf->device_lock, flags);
+       }
+       if (uptodate) {
                raid_end_bio_io(r10_bio);
                rdev_dec_pending(rdev, conf->mddev);
        } else {
@@ -2052,6 +2066,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                       "md/raid10:%s: %s: Failing raid device\n",
                       mdname(mddev), b);
                md_error(mddev, conf->mirrors[d].rdev);
+               r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
                return;
        }
 
@@ -2105,8 +2120,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                                    rdev,
                                    r10_bio->devs[r10_bio->read_slot].addr
                                    + sect,
-                                   s, 0))
+                                   s, 0)) {
                                md_error(mddev, rdev);
+                               r10_bio->devs[r10_bio->read_slot].bio
+                                       = IO_BLOCKED;
+                       }
                        break;
                }
 
@@ -2299,17 +2317,20 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
         * This is all done synchronously while the array is
         * frozen.
         */
+       bio = r10_bio->devs[slot].bio;
+       bdevname(bio->bi_bdev, b);
+       bio_put(bio);
+       r10_bio->devs[slot].bio = NULL;
+
        if (mddev->ro == 0) {
                freeze_array(conf);
                fix_read_error(conf, mddev, r10_bio);
                unfreeze_array(conf);
-       }
+       } else
+               r10_bio->devs[slot].bio = IO_BLOCKED;
+
        rdev_dec_pending(rdev, mddev);
 
-       bio = r10_bio->devs[slot].bio;
-       bdevname(bio->bi_bdev, b);
-       r10_bio->devs[slot].bio =
-               mddev->ro ? IO_BLOCKED : NULL;
 read_more:
        rdev = read_balance(conf, r10_bio, &max_sectors);
        if (rdev == NULL) {
@@ -2318,13 +2339,10 @@ read_more:
                       mdname(mddev), b,
                       (unsigned long long)r10_bio->sector);
                raid_end_bio_io(r10_bio);
-               bio_put(bio);
                return;
        }
 
        do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
-       if (bio)
-               bio_put(bio);
        slot = r10_bio->read_slot;
        printk_ratelimited(
                KERN_ERR
@@ -2360,7 +2378,6 @@ read_more:
                        mbio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
                generic_make_request(bio);
-               bio = NULL;
 
                r10_bio = mempool_alloc(conf->r10bio_pool,
                                        GFP_NOIO);
@@ -3243,7 +3260,6 @@ static int run(struct mddev *mddev)
                        disk->rdev = rdev;
                }
 
-               disk->rdev = rdev;
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
index 53e2a80f42facb931f56c4adcde9221f5b5c69e1..d295941c9a3db63ab75408470ac8edfefbdb2cb2 100644 (file)
@@ -956,11 +956,12 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
        return ret;
 
 out_freeirq:
-       if (ab8500->irq_base) {
+       if (ab8500->irq_base)
                free_irq(ab8500->irq, ab8500);
 out_removeirq:
+       if (ab8500->irq_base)
                ab8500_irq_remove(ab8500);
-       }
+
        return ret;
 }
 
index 0f5922812bffdee8e3e892cab978052548d92114..411f523d4878bd1cec6f8783ca6c99d6b57b669e 100644 (file)
@@ -123,7 +123,7 @@ static int mfd_add_device(struct device *parent, int id,
                }
 
                if (!cell->ignore_resource_conflicts) {
-                       ret = acpi_check_resource_conflict(res);
+                       ret = acpi_check_resource_conflict(&res[r]);
                        if (ret)
                                goto fail_res;
                }
index e075c113eec6f7d77a67b5ca1eb145abe8eebbdf..caadabeed8e94d59ae55fd8dbde84b37014769bb 100644 (file)
@@ -105,7 +105,7 @@ static int s5m87xx_i2c_probe(struct i2c_client *i2c,
        s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
        i2c_set_clientdata(s5m87xx->rtc, s5m87xx);
 
-       if (pdata->cfg_pmic_irq)
+       if (pdata && pdata->cfg_pmic_irq)
                pdata->cfg_pmic_irq();
 
        s5m_irq_init(s5m87xx);
index 01cf5012a08fb26c3b561fde99341d6ab84749e2..4392f6bca156e20a71d7acb8cce0b9316db42753 100644 (file)
@@ -168,7 +168,7 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
                goto err;
 
        init_data->irq = pmic_plat_data->irq;
-       init_data->irq_base = pmic_plat_data->irq;
+       init_data->irq_base = pmic_plat_data->irq_base;
 
        tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
 
index 5fec23a9ac039f34eaf6cf902acbfb6521aa7ab4..74fd8cb5f37224e576f58398c20fda9f4884d9e7 100644 (file)
@@ -151,7 +151,7 @@ int tps65912_device_init(struct tps65912 *tps65912)
                goto err;
 
        init_data->irq = pmic_plat_data->irq;
-       init_data->irq_base = pmic_plat_data->irq;
+       init_data->irq_base = pmic_plat_data->irq_base;
        ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
        if (ret < 0)
                goto err;
index 8a1fafd0bf7d129184e8910b73a0848954dcf42c..9fd01bf63c510eafab2c1efc987728576aa54f5e 100644 (file)
@@ -496,7 +496,6 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
 
        mutex_init(&wm8350->irq_lock);
        wm8350->chip_irq = irq;
-       wm8350->irq_base = pdata->irq_base;
 
        if (pdata && pdata->irq_base > 0)
                irq_base = pdata->irq_base;
index f117e7fb932194fc174ed52439dadcd57e81684c..a04b3c108c8ca38a06f54ac946f77ce4ec920f0c 100644 (file)
@@ -256,6 +256,20 @@ static int wm8994_suspend(struct device *dev)
                break;
        }
 
+       switch (wm8994->type) {
+       case WM1811:
+               ret = wm8994_reg_read(wm8994, WM8994_ANTIPOP_2);
+               if (ret < 0) {
+                       dev_err(dev, "Failed to read jackdet: %d\n", ret);
+               } else if (ret & WM1811_JACKDET_MODE_MASK) {
+                       dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+                       return 0;
+               }
+               break;
+       default:
+               break;
+       }
+
        /* Disable LDO pulldowns while the device is suspended if we
         * don't know that something will be driving them. */
        if (!wm8994->ldo_ena_always_driven)
index c598ae69b8ff052584c94e75b44db9f07ea8d694..bc0c5096539a5bdbd874ef99babdd60dbf17a0c0 100644 (file)
@@ -806,6 +806,7 @@ static bool wm1811_readable_register(struct device *dev, unsigned int reg)
        case WM8994_DC_SERVO_2:
        case WM8994_DC_SERVO_READBACK:
        case WM8994_DC_SERVO_4:
+       case WM8994_DC_SERVO_4E:
        case WM8994_ANALOGUE_HP_1:
        case WM8958_MIC_DETECT_1:
        case WM8958_MIC_DETECT_2:
index 19fc7c1cb428c47f822254c2d748b7fe572b2da3..f428d86bfc103256ccd48f50436d23d6bd4d48a7 100644 (file)
@@ -984,9 +984,9 @@ static int __init c2port_init(void)
                " - (C) 2007 Rodolfo Giometti\n");
 
        c2port_class = class_create(THIS_MODULE, "c2port");
-       if (!c2port_class) {
+       if (IS_ERR(c2port_class)) {
                printk(KERN_ERR "c2port: failed to allocate class\n");
-               return -ENOMEM;
+               return PTR_ERR(c2port_class);
        }
        c2port_class->dev_attrs = c2port_attrs;
 
index 690255c7d4dcc8c407e7b0852216edab75f4ef31..132378b89d76a0072fe31b7f436975525d04e353 100644 (file)
@@ -2068,6 +2068,9 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
         */
        mmc_hw_reset_for_init(host);
 
+       /* Initialization should be done at 3.3 V I/O voltage. */
+       mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
        /*
         * sdio_reset sends CMD52 to reset card.  Since we do not know
         * if the card is being re-initialized, just send it.  CMD52
index 30055f2b0d445b3e0c08ed131f939146fef68626..c3704e293a7b30d52c9cc1893da88a0e4844131b 100644 (file)
@@ -238,10 +238,10 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
        /* Hold MCI clock for 8 cycles by default */
        host->clk_delay = 8;
        /*
-        * Default clock gating delay is 200ms.
+        * Default clock gating delay is 0ms to avoid wasting power.
         * This value can be tuned by writing into sysfs entry.
         */
-       host->clkgate_delay = 200;
+       host->clkgate_delay = 0;
        host->clk_gated = false;
        INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
        spin_lock_init(&host->clk_lock);
index a48066344fa87316997b0bcca501d0842ea7e8a1..2b9ed1401dc439bf1dd29e4aa751f74e0175bc93 100644 (file)
@@ -816,6 +816,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
        if (!mmc_host_is_spi(host))
                mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
 
+       /* Initialization should be done at 3.3 V I/O voltage. */
+       mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
        /*
         * Since we're changing the OCR value, we seem to
         * need to tell some cards to go back to the idle
index 5017f9354ce28af6a67418d1ce0a2c438ed07edd..c272c6868ecf6d11a39c3cf2be99f1257fccd026 100644 (file)
@@ -911,6 +911,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
        BUG_ON(!host);
        WARN_ON(!host->claimed);
 
+       /* The initialization should be done at 3.3 V I/O voltage. */
+       mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
        err = mmc_sd_get_cid(host, ocr, cid, &rocr);
        if (err)
                return err;
@@ -1156,11 +1159,6 @@ int mmc_attach_sd(struct mmc_host *host)
        BUG_ON(!host);
        WARN_ON(!host->claimed);
 
-       /* Make sure we are at 3.3V signalling voltage */
-       err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
-       if (err)
-               return err;
-
        /* Disable preset value enable if already set since last time */
        if (host->ops->enable_preset_value) {
                mmc_host_clk_hold(host);
index 12cde6ee17f50732ac5cd05f6843928abf87d0c9..2c7c83f832d289c25eaf83cf4d7ef32d420fb002 100644 (file)
@@ -585,6 +585,9 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
         * Inform the card of the voltage
         */
        if (!powered_resume) {
+               /* The initialization should be done at 3.3 V I/O voltage. */
+               mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
                err = mmc_send_io_op_cond(host, host->ocr, &ocr);
                if (err)
                        goto err;
@@ -996,6 +999,11 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
         * With these steps taken, mmc_select_voltage() is also required to
         * restore the correct voltage setting of the card.
         */
+
+       /* The initialization should be done at 3.3 V I/O voltage. */
+       if (!mmc_card_keep_power(host))
+               mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
        sdio_reset(host);
        mmc_go_idle(host);
        mmc_send_if_cond(host, host->ocr_avail);
index 6985cdb0bb26e641fb340c0218d331e48844614a..e4449a54ae8f9fc68c3d9f719fe3a09e3e028adf 100644 (file)
@@ -1948,12 +1948,12 @@ static bool atmci_filter(struct dma_chan *chan, void *slave)
        }
 }
 
-static void atmci_configure_dma(struct atmel_mci *host)
+static bool atmci_configure_dma(struct atmel_mci *host)
 {
        struct mci_platform_data        *pdata;
 
        if (host == NULL)
-               return;
+               return false;
 
        pdata = host->pdev->dev.platform_data;
 
@@ -1970,12 +1970,15 @@ static void atmci_configure_dma(struct atmel_mci *host)
                host->dma.chan =
                        dma_request_channel(mask, atmci_filter, pdata->dma_slave);
        }
-       if (!host->dma.chan)
-               dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
-       else
+       if (!host->dma.chan) {
+               dev_warn(&host->pdev->dev, "no DMA channel available\n");
+               return false;
+       } else {
                dev_info(&host->pdev->dev,
                                        "Using %s for DMA transfers\n",
                                        dma_chan_name(host->dma.chan));
+               return true;
+       }
 }
 
 static inline unsigned int atmci_get_version(struct atmel_mci *host)
@@ -2085,8 +2088,7 @@ static int __init atmci_probe(struct platform_device *pdev)
 
        /* Get MCI capabilities and set operations according to it */
        atmci_get_cap(host);
-       if (host->caps.has_dma) {
-               dev_info(&pdev->dev, "using DMA\n");
+       if (host->caps.has_dma && atmci_configure_dma(host)) {
                host->prepare_data = &atmci_prepare_data_dma;
                host->submit_data = &atmci_submit_data_dma;
                host->stop_transfer = &atmci_stop_transfer_dma;
@@ -2096,15 +2098,12 @@ static int __init atmci_probe(struct platform_device *pdev)
                host->submit_data = &atmci_submit_data_pdc;
                host->stop_transfer = &atmci_stop_transfer_pdc;
        } else {
-               dev_info(&pdev->dev, "no DMA, no PDC\n");
+               dev_info(&pdev->dev, "using PIO\n");
                host->prepare_data = &atmci_prepare_data;
                host->submit_data = &atmci_submit_data;
                host->stop_transfer = &atmci_stop_transfer;
        }
 
-       if (host->caps.has_dma)
-               atmci_configure_dma(host);
-
        platform_set_drvdata(pdev, host);
 
        /* We need at least one slot to succeed */
index 0d955ffaf44e2c3ec5961f966687da3e675819d9..11e589cd8233e5f7438f7240d14c8f7897fbf8d7 100644 (file)
@@ -1271,12 +1271,13 @@ static int __devinit mmci_probe(struct amba_device *dev,
        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
-       mmc->max_blk_size = 2048;
+       mmc->max_blk_size = 1 << 11;
 
        /*
-        * No limit on the number of blocks transferred.
+        * Limit the number of blocks transferred so that we don't overflow
+        * the maximum request size.
         */
-       mmc->max_blk_count = mmc->max_req_size;
+       mmc->max_blk_count = mmc->max_req_size >> 11;
 
        spin_lock_init(&host->lock);
 
index d601e41af282105ad5c3a86d30d569dfef3def8c..0be4e2013632f97ce4dddc489212c2ed6f7c4985 100644 (file)
@@ -269,8 +269,9 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
                imx_data->scratchpad = val;
                return;
        case SDHCI_COMMAND:
-               if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
-                       && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+               if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
+                    host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
+                   (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
                        val |= SDHCI_CMD_ABORTCMD;
 
                if (is_imx6q_usdhc(imx_data)) {
index a1f2e0fed78bc2b23b7caa2c1457255e6eae9b89..423d0235a87804dc8f6d105fcb74cff8db985205 100644 (file)
@@ -7886,10 +7886,8 @@ static int tg3_chip_reset(struct tg3 *tp)
        return 0;
 }
 
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
-                                                struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
-                                               struct tg3_ethtool_stats *);
+static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
+static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
 
 /* tp->lock is held. */
 static int tg3_halt(struct tg3 *tp, int kind, int silent)
@@ -7910,7 +7908,7 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
 
        if (tp->hw_stats) {
                /* Save the stats across chip resets... */
-               tg3_get_stats64(tp->dev, &tp->net_stats_prev),
+               tg3_get_nstats(tp, &tp->net_stats_prev),
                tg3_get_estats(tp, &tp->estats_prev);
 
                /* And make sure the next sample is new data */
@@ -9847,7 +9845,7 @@ static inline u64 get_stat64(tg3_stat64_t *val)
        return ((u64)val->high << 32) | ((u64)val->low);
 }
 
-static u64 calc_crc_errors(struct tg3 *tp)
+static u64 tg3_calc_crc_errors(struct tg3 *tp)
 {
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
@@ -9856,14 +9854,12 @@ static u64 calc_crc_errors(struct tg3 *tp)
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;
 
-               spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
                } else
                        val = 0;
-               spin_unlock_bh(&tp->lock);
 
                tp->phy_crc_errors += val;
 
@@ -9877,14 +9873,13 @@ static u64 calc_crc_errors(struct tg3 *tp)
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)
 
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
-                                              struct tg3_ethtool_stats *estats)
+static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
 {
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
        if (!hw_stats)
-               return old_estats;
+               return;
 
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
@@ -9963,20 +9958,13 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
        ESTAT_ADD(nic_tx_threshold_hit);
 
        ESTAT_ADD(mbuf_lwm_thresh_hit);
-
-       return estats;
 }
 
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
-                                                struct rtnl_link_stats64 *stats)
+static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
 {
-       struct tg3 *tp = netdev_priv(dev);
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
-       if (!hw_stats)
-               return old_stats;
-
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
@@ -10019,15 +10007,13 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
                get_stat64(&hw_stats->tx_carrier_sense_errors);
 
        stats->rx_crc_errors = old_stats->rx_crc_errors +
-               calc_crc_errors(tp);
+               tg3_calc_crc_errors(tp);
 
        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);
 
        stats->rx_dropped = tp->rx_dropped;
        stats->tx_dropped = tp->tx_dropped;
-
-       return stats;
 }
 
 static inline u32 calc_crc(unsigned char *buf, int len)
@@ -15409,6 +15395,21 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
        }
 }
 
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+                                               struct rtnl_link_stats64 *stats)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (!tp->hw_stats)
+               return &tp->net_stats_prev;
+
+       spin_lock_bh(&tp->lock);
+       tg3_get_nstats(tp, stats);
+       spin_unlock_bh(&tp->lock);
+
+       return stats;
+}
+
 static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
index ee93a2087fe6c0cfd116dcf3f4536185480ba362..c52295cd05ef009b368555e8b716bc082ad6caf5 100644 (file)
@@ -94,7 +94,7 @@ struct enic {
        u32 rx_coalesce_usecs;
        u32 tx_coalesce_usecs;
 #ifdef CONFIG_PCI_IOV
-       u32 num_vfs;
+       u16 num_vfs;
 #endif
        struct enic_port_profile *pp;
 
index ab3f67f980d8c3f7554d19bf3fdbaf275deefed3..0e4edd3b6bee0275cf3ca0c01ffd1f1f60ec9d49 100644 (file)
@@ -2370,7 +2370,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (pos) {
                pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
-                       (u16 *)&enic->num_vfs);
+                       &enic->num_vfs);
                if (enic->num_vfs) {
                        err = pci_enable_sriov(pdev, enic->num_vfs);
                        if (err) {
index 9cb5f912e4891f5b832bb941bce6916c5248bdb6..29e23bec809c495104b08d009943ba2b10155660 100644 (file)
@@ -321,10 +321,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
                        pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
                        hw->phy.autoneg_advertised = opt.def;
                } else {
-                       hw->phy.autoneg_advertised = AutoNeg;
-                       pch_gbe_validate_option(
-                               (int *)(&hw->phy.autoneg_advertised),
-                               &opt, adapter);
+                       int tmp = AutoNeg;
+
+                       pch_gbe_validate_option(&tmp, &opt, adapter);
+                       hw->phy.autoneg_advertised = tmp;
                }
        }
 
@@ -495,9 +495,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
                        .arg  = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
                                         .p = fc_list } }
                };
-               hw->mac.fc = FlowControl;
-               pch_gbe_validate_option((int *)(&hw->mac.fc),
-                                               &opt, adapter);
+               int tmp = FlowControl;
+
+               pch_gbe_validate_option(&tmp, &opt, adapter);
+               hw->mac.fc = tmp;
        }
 
        pch_gbe_check_copper_options(adapter);
index b97132d9dff0c09fdd804ad63335e10a8225343d..8f29feb35548273c56d6ea2247031214eb820a22 100644 (file)
@@ -4,6 +4,7 @@
 
 config NET_PACKET_ENGINE
        bool "Packet Engine devices"
+       default y
        depends on PCI
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
index 7931531c3a40be12f1d4f838fd854878c16b9470..e61560e16385e855164a2062aa56ae254212721d 100644 (file)
@@ -3017,7 +3017,6 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
                (void __iomem *)port_regs;
        u32 delay = 10;
        int status = 0;
-       unsigned long hw_flags = 0;
 
        if (ql_mii_setup(qdev))
                return -1;
@@ -3228,9 +3227,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
                value = ql_read_page0_reg(qdev, &port_regs->portStatus);
                if (value & PORT_STATUS_IC)
                        break;
-               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               spin_unlock_irq(&qdev->hw_lock);
                msleep(500);
-               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+               spin_lock_irq(&qdev->hw_lock);
        } while (--delay);
 
        if (delay == 0) {
index 7a0c800b50adc90051ed008548a754f1af6b5695..1adf17757cea3f2f21e2c0fc77be6e84d53b473c 100644 (file)
@@ -3781,12 +3781,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
 
 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       RTL_W8(Cfg9346, Cfg9346_Unlock);
        rtl_generic_op(tp, tp->jumbo_ops.enable);
+       RTL_W8(Cfg9346, Cfg9346_Lock);
 }
 
 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       RTL_W8(Cfg9346, Cfg9346_Unlock);
        rtl_generic_op(tp, tp->jumbo_ops.disable);
+       RTL_W8(Cfg9346, Cfg9346_Lock);
 }
 
 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
index 3dcd3857a36cb8346c82c8131c1131da40a53063..756c0f5565a5b185250668c5617b93ee357ff335 100644 (file)
@@ -830,13 +830,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                                        ctx->l4_hdr_size = ((struct tcphdr *)
                                           skb_transport_header(skb))->doff * 4;
                                else if (iph->protocol == IPPROTO_UDP)
-                                       /*
-                                        * Use tcp header size so that bytes to
-                                        * be copied are more than required by
-                                        * the device.
-                                        */
                                        ctx->l4_hdr_size =
-                                                       sizeof(struct tcphdr);
+                                                       sizeof(struct udphdr);
                                else
                                        ctx->l4_hdr_size = 0;
                        } else {
index ed54797db1916d717fe3141559bf95bca2013547..fc46a81ad5384969b1670114765e3df12799a256 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.1.18.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.1.29.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01011200
+#define VMXNET3_DRIVER_VERSION_NUM      0x01011D00
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index f901a17f76baf6da3a2ad3e55880a8aa25df35e8..86a891f93fc9dc70939088e70080af874343f4b4 100644 (file)
@@ -489,8 +489,6 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
        ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
        ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
        ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
-       ATH_ALLOC_BANK(ah->addac5416_21,
-                      ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
        ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
 
        return 0;
@@ -519,7 +517,6 @@ static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
        ATH_FREE_BANK(ah->analogBank6Data);
        ATH_FREE_BANK(ah->analogBank6TPCData);
        ATH_FREE_BANK(ah->analogBank7Data);
-       ATH_FREE_BANK(ah->addac5416_21);
        ATH_FREE_BANK(ah->bank6Temp);
 
 #undef ATH_FREE_BANK
@@ -805,27 +802,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
        if (ah->eep_ops->set_addac)
                ah->eep_ops->set_addac(ah, chan);
 
-       if (AR_SREV_5416_22_OR_LATER(ah)) {
-               REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
-       } else {
-               struct ar5416IniArray temp;
-               u32 addacSize =
-                       sizeof(u32) * ah->iniAddac.ia_rows *
-                       ah->iniAddac.ia_columns;
-
-               /* For AR5416 2.0/2.1 */
-               memcpy(ah->addac5416_21,
-                      ah->iniAddac.ia_array, addacSize);
-
-               /* override CLKDRV value at [row, column] = [31, 1] */
-               (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
-
-               temp.ia_array = ah->addac5416_21;
-               temp.ia_columns = ah->iniAddac.ia_columns;
-               temp.ia_rows = ah->iniAddac.ia_rows;
-               REG_WRITE_ARRAY(&temp, 1, regWrites);
-       }
-
+       REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
        REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
 
        ENABLE_REGWRITE_BUFFER(ah);
index 11f192a1ceb72d332ccde32cf98f0ef7c3ff41c3..d190411ac8f51e5003fb2c639b7818be8df34b4c 100644 (file)
@@ -180,6 +180,25 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
                INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
                               ARRAY_SIZE(ar5416Addac), 2);
        }
+
+       /* iniAddac needs to be modified for these chips */
+       if (AR_SREV_9160(ah) || !AR_SREV_5416_22_OR_LATER(ah)) {
+               struct ar5416IniArray *addac = &ah->iniAddac;
+               u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns;
+               u32 *data;
+
+               data = kmalloc(size, GFP_KERNEL);
+               if (!data)
+                       return;
+
+               memcpy(data, addac->ia_array, size);
+               addac->ia_array = data;
+
+               if (!AR_SREV_5416_22_OR_LATER(ah)) {
+                       /* override CLKDRV value */
+                       INI_RA(addac, 31,1) = 0;
+               }
+       }
 }
 
 /* Support for Japan ch.14 (2484) spread */
index 6a29004a71b0864afe06f6c5a4b82b77f8c3d050..c8261d4fc780aa3b4db12586e2738a1065376604 100644 (file)
@@ -940,7 +940,6 @@ struct ath_hw {
        u32 *analogBank6Data;
        u32 *analogBank6TPCData;
        u32 *analogBank7Data;
-       u32 *addac5416_21;
        u32 *bank6Temp;
 
        u8 txpower_limit;
index 90911eec0cf55948fddecffb225eb1543412cc6a..30b58870b1b68455b09adecfd127b354dbcc975c 100644 (file)
@@ -1051,17 +1051,13 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
                }
                /* either retransmit or send bar if ack not recd */
                if (!ack_recd) {
-                       struct ieee80211_tx_rate *txrate =
-                           tx_info->status.rates;
-                       if (retry && (txrate[0].count < (int)retry_limit)) {
+                       if (retry && (ini->txretry[index] < (int)retry_limit)) {
                                ini->txretry[index]++;
                                ini->tx_in_transit--;
                                /*
                                 * Use high prededence for retransmit to
                                 * give some punch
                                 */
-                               /* brcms_c_txq_enq(wlc, scb, p,
-                                * BRCMS_PRIO_TO_PREC(tid)); */
                                brcms_c_txq_enq(wlc, scb, p,
                                                BRCMS_PRIO_TO_HI_PREC(tid));
                        } else {
@@ -1074,9 +1070,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
                                    IEEE80211_TX_STAT_AMPDU_NO_BACK;
                                skb_pull(p, D11_PHY_HDR_LEN);
                                skb_pull(p, D11_TXH_LEN);
-                               wiphy_err(wiphy, "%s: BA Timeout, seq %d, in_"
-                                       "transit %d\n", "AMPDU status", seq,
-                                       ini->tx_in_transit);
+                               BCMMSG(wiphy,
+                                      "BA Timeout, seq %d, in_transit %d\n",
+                                      seq, ini->tx_in_transit);
                                ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
                                                            p);
                        }
index 7353826095f110a8766d6c1a4425a71c49ba8a91..e483cfa8d14e175aac3432f77951d3641989f116 100644 (file)
@@ -1187,6 +1187,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
        unsigned long flags;
        struct iwl_addsta_cmd sta_cmd;
        u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
+       __le16 key_flags;
 
        /* if station isn't there, neither is the key */
        if (sta_id == IWL_INVALID_STATION)
@@ -1212,7 +1213,14 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
                IWL_ERR(priv, "offset %d not used in uCode key table.\n",
                        keyconf->hw_key_idx);
 
-       sta_cmd.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+       key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC |
+                    STA_KEY_FLG_INVALID;
+
+       if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       sta_cmd.key.key_flags = key_flags;
        sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
        sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
        sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
index c3e1aa7c1a8057b7eb19270f8aacb3f718b86577..d2a1ea98d0f23a144653ed7fceefeb62a2191e75 100644 (file)
@@ -1220,7 +1220,8 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
                cancel_work_sync(&rt2x00dev->rxdone_work);
                cancel_work_sync(&rt2x00dev->txdone_work);
        }
-       destroy_workqueue(rt2x00dev->workqueue);
+       if (rt2x00dev->workqueue)
+               destroy_workqueue(rt2x00dev->workqueue);
 
        /*
         * Free the tx status fifo.
index 2baadd21b7a664dea71cce5a237d66b0b8e36a25..98fbe62694d4eddbead50fde0de22fe834ce67a4 100644 (file)
@@ -369,9 +369,9 @@ static int __init pps_init(void)
        int err;
 
        pps_class = class_create(THIS_MODULE, "pps");
-       if (!pps_class) {
+       if (IS_ERR(pps_class)) {
                pr_err("failed to allocate class\n");
-               return -ENOMEM;
+               return PTR_ERR(pps_class);
        }
        pps_class->dev_attrs = pps_attrs;
 
index 691b1ab1a3d0499d85815bd03551e9d22518e494..30d2072f480b72947c74401d5522fbb9d697d313 100644 (file)
@@ -410,13 +410,14 @@ static void tsi721_db_dpc(struct work_struct *work)
         */
        mport = priv->mport;
 
-       wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
-       rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+       wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+       rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
 
        while (wr_ptr != rd_ptr) {
                idb_entry = (u64 *)(priv->idb_base +
                                        (TSI721_IDB_ENTRY_SIZE * rd_ptr));
                rd_ptr++;
+               rd_ptr %= IDB_QSIZE;
                idb.msg = *idb_entry;
                *idb_entry = 0;
 
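
The doorbell read/write pointers are raw hardware counters into a fixed-size ring, so both are reduced modulo IDB_QSIZE before the drain loop and the read pointer wraps after every entry; otherwise the while (wr_ptr != rd_ptr) walk can run past the queue. A minimal user-space sketch of the same wrap-around drain, with an invented queue size and entry array:

	#include <stdint.h>
	#include <stdio.h>

	#define QSIZE 8                         /* stand-in for IDB_QSIZE */

	static uint64_t queue[QSIZE];

	/* Drain entries between rd and wr, keeping both indices inside the ring. */
	static unsigned int drain(uint32_t wr_raw, uint32_t rd_raw)
	{
		uint32_t wr = wr_raw % QSIZE;       /* raw counters may exceed QSIZE */
		uint32_t rd = rd_raw % QSIZE;
		unsigned int handled = 0;

		while (wr != rd) {
			uint64_t entry = queue[rd];
			queue[rd] = 0;                  /* mark slot consumed */
			rd = (rd + 1) % QSIZE;          /* wrap instead of running off the end */
			handled++;
			(void)entry;
		}
		return handled;
	}

	int main(void)
	{
		/* Writer is two entries ahead, but its raw counter already wrapped. */
		printf("drained %u entries\n", drain(10, 8));
		return 0;
	}
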
index 9beba49c3c5b48c2abcca65c916eabc0d782a49a..2853c2a6f10f4a055d2aadb8d3fac0380bff7f97 100644 (file)
@@ -125,6 +125,13 @@ static int __devinit r9701_probe(struct spi_device *spi)
        unsigned char tmp;
        int res;
 
+       tmp = R100CNT;
+       res = read_regs(&spi->dev, &tmp, 1);
+       if (res || tmp != 0x20) {
+               dev_err(&spi->dev, "cannot read RTC register\n");
+               return -ENODEV;
+       }
+
        rtc = rtc_device_register("r9701",
                                &spi->dev, &r9701_rtc_ops, THIS_MODULE);
        if (IS_ERR(rtc))
@@ -132,13 +139,6 @@ static int __devinit r9701_probe(struct spi_device *spi)
 
        dev_set_drvdata(&spi->dev, rtc);
 
-       tmp = R100CNT;
-       res = read_regs(&spi->dev, &tmp, 1);
-       if (res || tmp != 0x20) {
-               rtc_device_unregister(rtc);
-               return res;
-       }
-
        return 0;
 }
 
index 0cb39ff21171b323c3df916db675285420822136..f8fb2d691c0a69e6aed9385ba1a81d2cb893967c 100644 (file)
@@ -408,7 +408,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
                        kunmap_atomic(sdt, KM_USER0);
                }
 
-               bio->bi_flags |= BIO_MAPPED_INTEGRITY;
+               bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
        }
 
        return 0;
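
BIO_MAPPED_INTEGRITY is a bit number, not a bit mask, so ORing it into bi_flags directly sets the wrong bits; the fix shifts it into a mask first. A tiny illustration of the difference, using a made-up flag layout rather than the real enum bio_flags values:

	#include <stdio.h>

	enum {                          /* bit numbers, as in enum bio_flags */
		FLAG_UPTODATE = 0,
		FLAG_MAPPED_INTEGRITY = 10,
	};

	int main(void)
	{
		unsigned long wrong = 0, right = 0;

		wrong |= FLAG_MAPPED_INTEGRITY;          /* ORs in the value 10: bits 1 and 3 */
		right |= (1UL << FLAG_MAPPED_INTEGRITY); /* sets bit 10, as intended */

		printf("wrong = 0x%lx, right = 0x%lx\n", wrong, right);
		return 0;
	}
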
index b3d17416d86a393d75bc518b9198fa7277b165d8..830cd62d84925e548cd7480b2d6fb3a4e582a08d 100644 (file)
@@ -365,7 +365,7 @@ config PPC_EPAPR_HV_BYTECHAN
 
 config PPC_EARLY_DEBUG_EHV_BC
        bool "Early console (udbg) support for ePAPR hypervisors"
-       depends on PPC_EPAPR_HV_BYTECHAN
+       depends on PPC_EPAPR_HV_BYTECHAN=y
        help
          Select this option to enable early console (a.k.a. "udbg") support
          via an ePAPR byte channel.  You also need to choose the byte channel
index c26a82e83f6e6d11b78f002dc3982df9a3b0a27b..b556a72264d1084b5e6809ea5b4a3f59c5583b82 100644 (file)
@@ -239,7 +239,7 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
        ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
 }
 
-static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
+static void ehci_fsl_usb_setup(struct ehci_hcd *ehci)
 {
        struct usb_hcd *hcd = ehci_to_hcd(ehci);
        struct fsl_usb2_platform_data *pdata;
@@ -299,19 +299,12 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
 #endif
                out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
        }
-
-       if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & CTRL_PHY_CLK_VALID)) {
-               printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
-               return -ENODEV;
-       }
-       return 0;
 }
 
 /* called after powerup, by probe or system-pm "wakeup" */
 static int ehci_fsl_reinit(struct ehci_hcd *ehci)
 {
-       if (ehci_fsl_usb_setup(ehci))
-               return -ENODEV;
+       ehci_fsl_usb_setup(ehci);
        ehci_port_power(ehci, 0);
 
        return 0;
index bdf43e2adc51a4fdb4f12ca4e70b1e5c42c62f2c..4918062211656c276f5904cc50d45605076ed026 100644 (file)
@@ -45,6 +45,5 @@
 #define FSL_SOC_USB_PRICTRL    0x40c   /* NOTE: big-endian */
 #define FSL_SOC_USB_SICTRL     0x410   /* NOTE: big-endian */
 #define FSL_SOC_USB_CTRL       0x500   /* NOTE: big-endian */
-#define CTRL_PHY_CLK_VALID     (1 << 17)
 #define SNOOP_SIZE_2GB         0x1e
 #endif                         /* _EHCI_FSL_H */
index 969beb0e22311a4f7e7f4155524709d5dd8269bf..67e4b9047cc944eb63beca468f77e1194956f931 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -490,6 +490,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
                kmem_cache_free(kiocb_cachep, req);
                ctx->reqs_active--;
        }
+       if (unlikely(!ctx->reqs_active && ctx->dead))
+               wake_up_all(&ctx->wait);
        spin_unlock_irq(&ctx->ctx_lock);
 }
 
index a6395bdb26aeb13b7b98c74df4f77780c1c95412..1ff94054d35aba8a5c0748585006641010a322af 100644 (file)
@@ -259,6 +259,13 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
        current->mm->free_area_cache = current->mm->mmap_base;
        current->mm->cached_hole_size = 0;
 
+       retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+       if (retval < 0) {
+               /* Someone check-me: is this error path enough? */
+               send_sig(SIGKILL, current, 0);
+               return retval;
+       }
+
        install_exec_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
 
@@ -352,13 +359,6 @@ beyond_if:
                return retval;
        }
 
-       retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
-       if (retval < 0) { 
-               /* Someone check-me: is this error path enough? */ 
-               send_sig(SIGKILL, current, 0); 
-               return retval;
-       }
-
        current->mm->start_stack =
                (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
 #ifdef __alpha__
index 138be96e25b674b6e6c92f1fe926f2c6fc24f605..bcbdb33fcc205aad37110be50752131aae894062 100644 (file)
@@ -137,6 +137,26 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
 }
 #endif
 
+/*
+ * Compare 2 name strings, return 0 if they match, otherwise non-zero.
+ * The strings are both count bytes long, and count is non-zero.
+ */
+static inline int dentry_cmp(const unsigned char *cs, size_t scount,
+                               const unsigned char *ct, size_t tcount)
+{
+       if (scount != tcount)
+               return 1;
+
+       do {
+               if (*cs != *ct)
+                       return 1;
+               cs++;
+               ct++;
+               tcount--;
+       } while (tcount);
+       return 0;
+}
+
 static void __d_free(struct rcu_head *head)
 {
        struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
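
dentry_cmp() moves from the header into fs/dcache.c (its only user); the helper itself is unchanged. Its contract: both names are count bytes long, count is non-zero, 0 means match, non-zero means differ. A user-space copy, purely to show the behaviour:

	#include <stdio.h>

	/* User-space copy of dentry_cmp() for illustration only. */
	static int dentry_cmp(const unsigned char *cs, size_t scount,
			      const unsigned char *ct, size_t tcount)
	{
		if (scount != tcount)
			return 1;
		do {
			if (*cs != *ct)
				return 1;
			cs++;
			ct++;
			tcount--;
		} while (tcount);
		return 0;
	}

	int main(void)
	{
		const unsigned char *a = (const unsigned char *)"config";
		const unsigned char *b = (const unsigned char *)"configs";

		printf("%d\n", dentry_cmp(a, 6, a, 6));   /* 0: same name */
		printf("%d\n", dentry_cmp(a, 6, b, 7));   /* 1: lengths differ */
		return 0;
	}
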
index 92ce83a11e90acbb0bceda45b682a83535089342..153dee14fe559b9180a2f79b4b1f8534e25c76f5 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1915,7 +1915,6 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 {
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
-       struct completion *vfork_done;
        int core_waiters = -EBUSY;
 
        init_completion(&core_state->startup);
@@ -1927,22 +1926,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
                core_waiters = zap_threads(tsk, mm, core_state, exit_code);
        up_write(&mm->mmap_sem);
 
-       if (unlikely(core_waiters < 0))
-               goto fail;
-
-       /*
-        * Make sure nobody is waiting for us to release the VM,
-        * otherwise we can deadlock when we wait on each other
-        */
-       vfork_done = tsk->vfork_done;
-       if (vfork_done) {
-               tsk->vfork_done = NULL;
-               complete(vfork_done);
-       }
-
-       if (core_waiters)
+       if (core_waiters > 0)
                wait_for_completion(&core_state->startup);
-fail:
+
        return core_waiters;
 }
 
index 4270bedd230849c48a1e5387eb2d8a6b6e530a80..ff5f5256d175c8dde1345394f8fedd4e701b03e0 100644 (file)
@@ -47,26 +47,6 @@ struct dentry_stat_t {
 };
 extern struct dentry_stat_t dentry_stat;
 
-/*
- * Compare 2 name strings, return 0 if they match, otherwise non-zero.
- * The strings are both count bytes long, and count is non-zero.
- */
-static inline int dentry_cmp(const unsigned char *cs, size_t scount,
-                               const unsigned char *ct, size_t tcount)
-{
-       if (scount != tcount)
-               return 1;
-
-       do {
-               if (*cs != *ct)
-                       return 1;
-               cs++;
-               ct++;
-               tcount--;
-       } while (tcount);
-       return 0;
-}
-
 /* Name hashing routines. Initial hash value */
 /* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
 #define init_name_hash()               0
index fee66317e071547a4a75e0b90b61399b97249ed2..35f7237ec972bef4ce1ba24cd617571a193ce806 100644 (file)
 #include <linux/errno.h>
 #include <linux/list.h>
 
+/*
+ * Keep this list arranged in rough order of priority. Anything listed after
+ * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump
+ * is passed to the kernel.
+ */
 enum kmsg_dump_reason {
-       KMSG_DUMP_OOPS,
        KMSG_DUMP_PANIC,
+       KMSG_DUMP_OOPS,
+       KMSG_DUMP_EMERG,
        KMSG_DUMP_RESTART,
        KMSG_DUMP_HALT,
        KMSG_DUMP_POWEROFF,
-       KMSG_DUMP_EMERG,
 };
 
 /**
index 4d34356fe644ee5dca447071829ae06ab68eeb60..b80de520670b64a67dc2ceb8b4e4e11c1d623071 100644 (file)
@@ -129,7 +129,6 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);
 
-extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -392,10 +391,6 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                struct page *newpage)
 {
 }
-
-static inline void mem_cgroup_reset_owner(struct page *page)
-{
-}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
index 32cd1f67462e9b4a80f602474878c6ff632d6976..21638ae14e07c71a005d6cfcea61958e8cd8f970 100644 (file)
@@ -348,9 +348,9 @@ do {                                                                        \
 #define _this_cpu_generic_to_op(pcp, val, op)                          \
 do {                                                                   \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        *__this_cpu_ptr(&(pcp)) op val;                                 \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
 } while (0)
 
 #ifndef this_cpu_write
@@ -449,10 +449,10 @@ do {                                                                      \
 ({                                                                     \
        typeof(pcp) ret__;                                              \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        __this_cpu_add(pcp, val);                                       \
        ret__ = __this_cpu_read(pcp);                                   \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
 })
 
@@ -479,10 +479,10 @@ do {                                                                      \
 #define _this_cpu_generic_xchg(pcp, nval)                              \
 ({     typeof(pcp) ret__;                                              \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        ret__ = __this_cpu_read(pcp);                                   \
        __this_cpu_write(pcp, nval);                                    \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
 })
 
@@ -507,11 +507,11 @@ do {                                                                      \
 ({                                                                     \
        typeof(pcp) ret__;                                              \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        ret__ = __this_cpu_read(pcp);                                   \
        if (ret__ == (oval))                                            \
                __this_cpu_write(pcp, nval);                            \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
 })
 
@@ -544,10 +544,10 @@ do {                                                                      \
 ({                                                                     \
        int ret__;                                                      \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,           \
                        oval1, oval2, nval1, nval2);                    \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
 })
 
@@ -718,12 +718,13 @@ do {                                                                      \
 # ifndef __this_cpu_add_return_8
 #  define __this_cpu_add_return_8(pcp, val)    __this_cpu_generic_add_return(pcp, val)
 # endif
-# define __this_cpu_add_return(pcp, val)       __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+# define __this_cpu_add_return(pcp, val)       \
+       __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
 #endif
 
-#define __this_cpu_sub_return(pcp, val)        this_cpu_add_return(pcp, -(val))
-#define __this_cpu_inc_return(pcp)     this_cpu_add_return(pcp, 1)
-#define __this_cpu_dec_return(pcp)     this_cpu_add_return(pcp, -1)
+#define __this_cpu_sub_return(pcp, val)        __this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)     __this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)     __this_cpu_add_return(pcp, -1)
 
 #define __this_cpu_generic_xchg(pcp, nval)                             \
 ({     typeof(pcp) ret__;                                              \
index 7d379a6bfd886679fcfefee2ed90fb636d6cd028..0657368bd78fb9207369b484fa2aa294f90ce5dc 100644 (file)
@@ -1777,7 +1777,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_STARTING    0x00000002      /* being created */
 #define PF_EXITING     0x00000004      /* getting shut down */
 #define PF_EXITPIDONE  0x00000008      /* pi exit done on shut down */
 #define PF_VCPU                0x00000010      /* I'm a virtual CPU */
@@ -2371,7 +2370,7 @@ static inline int thread_group_empty(struct task_struct *p)
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
  * pins the final release of task.io_context.  Also protects ->cpuset and
- * ->cgroup.subsys[].
+ * ->cgroup.subsys[]. And ->vfork_done.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
index 46a85c9e1f25bebddd3a1887b30fb182dcf5b5e4..3c7ffdb40dc64ba62cd8f9586fd04d668c4a469c 100644 (file)
@@ -412,7 +412,8 @@ struct tcp_sock {
 
        struct tcp_sack_block recv_sack_cache[4];
 
-       struct sk_buff *highest_sack;   /* highest skb with SACK received
+       struct sk_buff *highest_sack;   /* skb just after the highest
+                                        * skb with SACKed bit set
                                         * (validity guaranteed only if
                                         * sacked_out > 0)
                                         */
index 42c29bfbcee34d8d6ada5d18533850d82973f352..2d80c291fffbad01828648c46df1da0f69763e49 100644 (file)
@@ -1364,8 +1364,9 @@ static inline void tcp_push_pending_frames(struct sock *sk)
        }
 }
 
-/* Start sequence of the highest skb with SACKed bit, valid only if
- * sacked > 0 or when the caller has ensured validity by itself.
+/* Start sequence of the skb just after the highest skb with SACKed
+ * bit, valid only if sacked_out > 0 or when the caller has ensured
+ * validity by itself.
  */
 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
 {
index e2cd3e2a5ae8b73dac244bd4ee98fbe70d5d5364..26a7a6707fa738e181a6b397d8ba9bea47269deb 100644 (file)
@@ -668,6 +668,38 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
        return mm;
 }
 
+static void complete_vfork_done(struct task_struct *tsk)
+{
+       struct completion *vfork;
+
+       task_lock(tsk);
+       vfork = tsk->vfork_done;
+       if (likely(vfork)) {
+               tsk->vfork_done = NULL;
+               complete(vfork);
+       }
+       task_unlock(tsk);
+}
+
+static int wait_for_vfork_done(struct task_struct *child,
+                               struct completion *vfork)
+{
+       int killed;
+
+       freezer_do_not_count();
+       killed = wait_for_completion_killable(vfork);
+       freezer_count();
+
+       if (killed) {
+               task_lock(child);
+               child->vfork_done = NULL;
+               task_unlock(child);
+       }
+
+       put_task_struct(child);
+       return killed;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
@@ -683,8 +715,6 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
  */
 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
-       struct completion *vfork_done = tsk->vfork_done;
-
        /* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
        if (unlikely(tsk->robust_list)) {
@@ -704,17 +734,15 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);
 
-       /* notify parent sleeping on vfork() */
-       if (vfork_done) {
-               tsk->vfork_done = NULL;
-               complete(vfork_done);
-       }
+       if (tsk->vfork_done)
+               complete_vfork_done(tsk);
 
        /*
         * If we're exiting normally, clear a user-space tid field if
         * requested.  We leave this alone when dying by signal, to leave
         * the value intact in a core dump, and to save the unnecessary
-        * trouble otherwise.  Userland only wants this done for a sys_exit.
+        * trouble, say, a killed vfork parent shouldn't touch this mm.
+        * Userland only wants this done for a sys_exit.
         */
        if (tsk->clear_child_tid) {
                if (!(tsk->flags & PF_SIGNALED) &&
@@ -1018,7 +1046,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 
        new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
        new_flags |= PF_FORKNOEXEC;
-       new_flags |= PF_STARTING;
        p->flags = new_flags;
 }
 
@@ -1548,16 +1575,9 @@ long do_fork(unsigned long clone_flags,
                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
+                       get_task_struct(p);
                }
 
-               /*
-                * We set PF_STARTING at creation in case tracing wants to
-                * use this to distinguish a fully live task from one that
-                * hasn't finished SIGSTOP raising yet.  Now we clear it
-                * and set the child going.
-                */
-               p->flags &= ~PF_STARTING;
-
                wake_up_new_task(p);
 
                /* forking complete and child started to run, tell ptracer */
@@ -1565,10 +1585,8 @@ long do_fork(unsigned long clone_flags,
                        ptrace_event(trace, nr);
 
                if (clone_flags & CLONE_VFORK) {
-                       freezer_do_not_count();
-                       wait_for_completion(&vfork);
-                       freezer_count();
-                       ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+                       if (!wait_for_vfork_done(p, &vfork))
+                               ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
                }
        } else {
                nr = PTR_ERR(p);
index 2e48ec0c2e91cf099642f8bf43e90476fb6457a3..c21449f85a2a4061bd9a34fc4844b5a3240850b3 100644 (file)
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
  * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+       bool can_cont;
+
        get_task_struct(g);
        get_task_struct(t);
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
+       can_cont = pid_alive(g) && pid_alive(t);
        put_task_struct(t);
        put_task_struct(g);
+
+       return can_cont;
 }
 
 /*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
                        goto unlock;
                if (!--batch_count) {
                        batch_count = HUNG_TASK_BATCHING;
-                       rcu_lock_break(g, t);
-                       /* Exit if t or g was unhashed during refresh. */
-                       if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+                       if (!rcu_lock_break(g, t))
                                goto unlock;
                }
                /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
index 9788c0ec6f4378f19cd80e873d42a0aec7171548..c62b8546cc90e0b39de0e824b0f7ed7179d8ba92 100644 (file)
@@ -1334,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr) ||
            ftrace_text_reserved(p->addr, p->addr) ||
-           jump_label_text_reserved(p->addr, p->addr))
-               goto fail_with_jump_label;
+           jump_label_text_reserved(p->addr, p->addr)) {
+               ret = -EINVAL;
+               goto cannot_probe;
+       }
 
        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
        p->flags &= KPROBE_FLAG_DISABLED;
@@ -1352,7 +1354,7 @@ int __kprobes register_kprobe(struct kprobe *p)
                 * its code to prohibit unexpected unloading.
                 */
                if (unlikely(!try_module_get(probed_mod)))
-                       goto fail_with_jump_label;
+                       goto cannot_probe;
 
                /*
                 * If the module freed .init.text, we couldn't insert
@@ -1361,7 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
                if (within_module_init((unsigned long)p->addr, probed_mod) &&
                    probed_mod->state != MODULE_STATE_COMING) {
                        module_put(probed_mod);
-                       goto fail_with_jump_label;
+                       goto cannot_probe;
                }
                /* ret will be updated by following code */
        }
@@ -1409,7 +1411,7 @@ out:
 
        return ret;
 
-fail_with_jump_label:
+cannot_probe:
        preempt_enable();
        jump_label_unlock();
        return ret;
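
The old label fell through to "return ret;" with ret still holding its earlier value, so the text-reservation checks could refuse the probe yet report success; the fix sets ret = -EINVAL before the goto and renames the label. A small user-space sketch of that bug class, with a hypothetical validation step standing in for the reservation checks:

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the text-reservation checks. */
	static int address_is_reserved(long addr) { return addr == 0xdead; }

	static int register_thing(long addr)
	{
		int ret = 0;                /* "no error yet" */

		if (address_is_reserved(addr)) {
			ret = -EINVAL;          /* without this, the goto would leak ret == 0 */
			goto cannot_probe;
		}

		/* ... the rest of registration would go here ... */
		return 0;

	cannot_probe:
		/* undo any partial setup, then report the real error */
		return ret;
	}

	int main(void)
	{
		printf("register_thing(0xdead) = %d\n", register_thing(0xdead));
		return 0;
	}
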
index 13c0a1143f49438f7cc97718e61ecf5bc5b23747..32690a0b7a1838e1bd9b8bcf8473654955807dae 100644 (file)
@@ -702,6 +702,9 @@ static bool printk_time = 0;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static bool always_kmsg_dump;
+module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
+
 /* Check if we have any console registered that can be called early in boot. */
 static int have_callable_console(void)
 {
@@ -1732,6 +1735,9 @@ void kmsg_dump(enum kmsg_dump_reason reason)
        unsigned long l1, l2;
        unsigned long flags;
 
+       if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
+               return;
+
        /* Theoretically, the log could move on after we do this, but
           there's not a lot we can do about that. The new messages
           will overwrite the start of what we dump. */
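
Together with the enum reordering in kmsg_dump.h above, this makes KMSG_DUMP_PANIC and KMSG_DUMP_OOPS the only reasons dumped by default; anything ordered after OOPS is skipped unless printk.always_kmsg_dump is set. A compact user-space sketch of the gate, mirroring the reordered reason values:

	#include <stdbool.h>
	#include <stdio.h>

	/* Mirrors the reordered enum: entries after OOPS are "routine" reasons. */
	enum kmsg_dump_reason {
		KMSG_DUMP_PANIC,
		KMSG_DUMP_OOPS,
		KMSG_DUMP_EMERG,
		KMSG_DUMP_RESTART,
		KMSG_DUMP_HALT,
		KMSG_DUMP_POWEROFF,
	};

	static bool always_kmsg_dump;       /* module parameter in the real code */

	static bool should_dump(enum kmsg_dump_reason reason)
	{
		/* Only panic/oops dump by default; the rest need always_kmsg_dump. */
		return reason <= KMSG_DUMP_OOPS || always_kmsg_dump;
	}

	int main(void)
	{
		printf("oops:    %d\n", should_dump(KMSG_DUMP_OOPS));     /* 1 */
		printf("restart: %d\n", should_dump(KMSG_DUMP_RESTART));  /* 0 */
		always_kmsg_dump = true;
		printf("restart: %d\n", should_dump(KMSG_DUMP_RESTART));  /* 1 now */
		return 0;
	}
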
index 77cb245f8e7bdee634b98f6c275ae221c7c90715..0ab9ae8057f00e8545deba43ff31fab7873a619c 100644 (file)
@@ -818,17 +818,9 @@ static int __init fixup_activate(void *addr, enum debug_obj_state state)
                if (obj->static_init == 1) {
                        debug_object_init(obj, &descr_type_test);
                        debug_object_activate(obj, &descr_type_test);
-                       /*
-                        * Real code should return 0 here ! This is
-                        * not a fixup of some bad behaviour. We
-                        * merily call the debug_init function to keep
-                        * track of the object.
-                        */
-                       return 1;
-               } else {
-                       /* Real code needs to emit a warning here */
+                       return 0;
                }
-               return 0;
+               return 1;
 
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
@@ -967,7 +959,7 @@ static void __init debug_objects_selftest(void)
 
        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
-       if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
+       if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
index 8e75003d62f632c52b8c0ea56ca62bdc72e560f0..38e612e66da5993f7b1eb92065ebfec45216d83c 100644 (file)
@@ -891,9 +891,15 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
        case 'U':
                return uuid_string(buf, end, ptr, spec, fmt);
        case 'V':
-               return buf + vsnprintf(buf, end > buf ? end - buf : 0,
-                                      ((struct va_format *)ptr)->fmt,
-                                      *(((struct va_format *)ptr)->va));
+               {
+                       va_list va;
+
+                       va_copy(va, *((struct va_format *)ptr)->va);
+                       buf += vsnprintf(buf, end > buf ? end - buf : 0,
+                                        ((struct va_format *)ptr)->fmt, va);
+                       va_end(va);
+                       return buf;
+               }
        case 'K':
                /*
                 * %pK cannot be used in IRQ context because its test
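
%pV hands the caller's va_list to a nested vsnprintf(), and a va_list may only be traversed once, so the fix copies it with va_copy() first. The same rule applies in user space whenever one vararg list feeds two formatting calls; a minimal example:

	#include <stdarg.h>
	#include <stdio.h>

	/* Format the same arguments twice: once to size the output, once to print.
	 * Each consumer gets its own va_copy, never the caller's list directly. */
	static int print_twice(const char *fmt, ...)
	{
		va_list ap, ap2;
		int len;

		va_start(ap, fmt);
		va_copy(ap2, ap);

		len = vsnprintf(NULL, 0, fmt, ap);  /* consumes ap */
		vprintf(fmt, ap2);                  /* ap2 is still pristine */

		va_end(ap2);
		va_end(ap);
		return len;
	}

	int main(void)
	{
		int n = print_twice("seq %d, in_transit %d\n", 42, 3);

		printf("formatted length: %d\n", n);
		return 0;
	}
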
index 91d3efb25d15472332e1f2d9c382a5732629ce69..8f7fc394f63672954cc4444da0f2158a42470b2c 100644 (file)
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                set_pmd_at(mm, haddr, pmd, entry);
                prepare_pmd_huge_pte(pgtable, mm);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+               mm->nr_ptes++;
                spin_unlock(&mm->page_table_lock);
        }
 
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
        prepare_pmd_huge_pte(pgtable, dst_mm);
+       dst_mm->nr_ptes++;
 
        ret = 0;
 out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        }
        kfree(pages);
 
-       mm->nr_ptes++;
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        VM_BUG_ON(page_mapcount(page) < 0);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                        VM_BUG_ON(!PageHead(page));
+                       tlb->mm->nr_ptes--;
                        spin_unlock(&tlb->mm->page_table_lock);
                        tlb_remove_page(tlb, page);
                        pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
                        pte_unmap(pte);
                }
 
-               mm->nr_ptes++;
                smp_wmb(); /* make pte visible before pmd */
                /*
                 * Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache(vma, address, _pmd);
        prepare_pmd_huge_pte(pgtable, mm);
-       mm->nr_ptes--;
        spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
index 5f34bd8dda34bbc8224303ad080103f224fa5ad6..a876871f6be56d15b06d1da56a9f3dbbd61cda91 100644 (file)
@@ -2277,8 +2277,8 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                        set_page_dirty(page);
                list_add(&page->lru, &page_list);
        }
-       spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
+       spin_unlock(&mm->page_table_lock);
        mmu_notifier_invalidate_range_end(mm, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                page_remove_rmap(page);
index 1925ffbfb27f00ac3d3d262ce0ea1c8aaa3a117f..310544a379ae9c7b886b3b50815e5f3d5a991ba8 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,7 +28,6 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
-               /*
-                * The memcg-specific accounting when moving
-                * pages around the LRU lists relies on the
-                * page's owner (memcg) to be valid.  Usually,
-                * pages are assigned to a new owner before
-                * being put on the LRU list, but since this
-                * is not the case here, the stale owner from
-                * a previous allocation cycle must be reset.
-                */
-               mem_cgroup_reset_owner(new_page);
                copy_user_highpage(new_page, page, address, vma);
 
                SetPageDirty(new_page);
index 228d6461c12ade8941b37416ca867d3df3277acd..5585dc3d36466189b0d89adf8e86f424372bdd23 100644 (file)
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
        pc = lookup_page_cgroup(page);
        memcg = pc->mem_cgroup;
+
+       /*
+        * Surreptitiously switch any uncharged page to root:
+        * an uncharged page off lru does nothing to secure
+        * its former mem_cgroup from sudden removal.
+        *
+        * Our caller holds lru_lock, and PageCgroupUsed is updated
+        * under page_cgroup lock: between them, they make all uses
+        * of pc->mem_cgroup safe.
+        */
+       if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+               pc->mem_cgroup = memcg = root_mem_cgroup;
+
        mz = page_cgroup_zoneinfo(memcg, page);
        /* compound_order() is stabilized through lru_lock */
        MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2421,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                                       struct page *page,
                                       unsigned int nr_pages,
                                       struct page_cgroup *pc,
-                                      enum charge_type ctype)
+                                      enum charge_type ctype,
+                                      bool lrucare)
 {
+       struct zone *uninitialized_var(zone);
+       bool was_on_lru = false;
+
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
@@ -2420,6 +2437,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
         * we don't need page_cgroup_lock about tail pages, becase they are not
         * accessed by any other context at this point.
         */
+
+       /*
+        * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+        * may already be on some other mem_cgroup's LRU.  Take care of it.
+        */
+       if (lrucare) {
+               zone = page_zone(page);
+               spin_lock_irq(&zone->lru_lock);
+               if (PageLRU(page)) {
+                       ClearPageLRU(page);
+                       del_page_from_lru_list(zone, page, page_lru(page));
+                       was_on_lru = true;
+               }
+       }
+
        pc->mem_cgroup = memcg;
        /*
         * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2475,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                break;
        }
 
+       if (lrucare) {
+               if (was_on_lru) {
+                       VM_BUG_ON(PageLRU(page));
+                       SetPageLRU(page);
+                       add_page_to_lru_list(zone, page, page_lru(page));
+               }
+               spin_unlock_irq(&zone->lru_lock);
+       }
+
        mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
        unlock_page_cgroup(pc);
-       WARN_ON_ONCE(PageLRU(page));
+
        /*
         * "charge_statistics" updated event counter. Then, check it.
         * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2684,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
        if (ret == -ENOMEM)
                return ret;
-       __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+       __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
        return 0;
 }
 
@@ -2663,35 +2704,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                                        enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-                                       enum charge_type ctype)
-{
-       struct page_cgroup *pc = lookup_page_cgroup(page);
-       struct zone *zone = page_zone(page);
-       unsigned long flags;
-       bool removed = false;
-
-       /*
-        * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-        * is already on LRU. It means the page may on some other page_cgroup's
-        * LRU. Take care of it.
-        */
-       spin_lock_irqsave(&zone->lru_lock, flags);
-       if (PageLRU(page)) {
-               del_page_from_lru_list(zone, page, page_lru(page));
-               ClearPageLRU(page);
-               removed = true;
-       }
-       __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-       if (removed) {
-               add_page_to_lru_list(zone, page, page_lru(page));
-               SetPageLRU(page);
-       }
-       spin_unlock_irqrestore(&zone->lru_lock, flags);
-       return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
@@ -2769,13 +2781,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
                                        enum charge_type ctype)
 {
+       struct page_cgroup *pc;
+
        if (mem_cgroup_disabled())
                return;
        if (!memcg)
                return;
        cgroup_exclude_rmdir(&memcg->css);
 
-       __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+       pc = lookup_page_cgroup(page);
+       __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
        /*
         * Now swap is on-memory. This means this page may be
         * counted both as mem and swap....double count.
@@ -3027,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
        batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-       struct page_cgroup *pc;
-
-       if (mem_cgroup_disabled())
-               return;
-
-       pc = lookup_page_cgroup(newpage);
-       VM_BUG_ON(PageCgroupUsed(pc));
-       pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3246,7 @@ int mem_cgroup_prepare_migration(struct page *page,
                ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        else
                ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
        return ret;
 }
 
@@ -3332,7 +3330,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
         * the newpage may be on LRU(or pagevec for LRU) already. We lock
         * LRU while we overwrite pc->mem_cgroup.
         */
-       __mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -5077,7 +5075,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
                return NULL;
        if (PageAnon(page)) {
                /* we don't move shared anon */
-               if (!move_anon() || page_mapcount(page) > 2)
+               if (!move_anon() || page_mapcount(page) > 1)
                        return NULL;
        } else if (!move_file())
                /* we ignore mapcount for file pages */
index df141f60289eef61c7d278080e8290c3dfc717b9..1503b6b54ecbd1817e6e92968d40c404a6e90f75 100644 (file)
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        if (!newpage)
                return -ENOMEM;
 
-       mem_cgroup_reset_owner(newpage);
-
        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                goto out;
index 3f758c7f4c815c2b0edf494526b2823b41fbc142..22e1a0b2f70c3f08428035d74e4ca9088924ba54 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1293,6 +1293,8 @@ munmap_back:
                pgoff = vma->vm_pgoff;
                vm_flags = vma->vm_flags;
        } else if (vm_flags & VM_SHARED) {
+               if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+                       goto free_vma;
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
index de1616aa9b1e2b3fac054ce4077073cbb05a3c62..1ccbd714059cdd7ddb9d90cb3ca92438f2cb5d34 100644 (file)
@@ -379,13 +379,15 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
        pgoff_t offset = swp_offset(ent);
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
+       struct swap_cgroup *sc;
 
        ctrl = &swap_cgroup_ctrl[swp_type(ent)];
        if (ctrlp)
                *ctrlp = ctrl;
 
        mappage = ctrl->map[offset / SC_PER_PAGE];
-       return page_address(mappage) + offset % SC_PER_PAGE;
+       sc = page_address(mappage);
+       return sc + offset % SC_PER_PAGE;
 }
 
 /**
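
page_address() returns void *, so adding the entry offset to it advanced by bytes (GCC treats void * arithmetic as byte-sized) instead of by sizeof(struct swap_cgroup); assigning to a typed pointer first makes the addition scale by the element size. A quick user-space demonstration of the difference, with a cut-down stand-in struct:

	#include <stdio.h>

	struct swap_cgroup {            /* stand-in: the real struct holds an id */
		unsigned short id;
	};

	int main(void)
	{
		struct swap_cgroup table[4];
		void *base = table;
		unsigned int offset = 3;

		/* Byte-sized arithmetic, what the old void * expression did... */
		struct swap_cgroup *wrong = (struct swap_cgroup *)((char *)base + offset);
		/* ...versus typed arithmetic, which advances by whole elements. */
		struct swap_cgroup *right = (struct swap_cgroup *)base + offset;

		printf("wrong skips %td bytes, right skips %td bytes\n",
		       (char *)wrong - (char *)base, (char *)right - (char *)base);
		return 0;
	}
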
index 12a48a88c0d8cb00dd55b2adcea3e536493dee1a..405d331804c3da95c52347878387a807eb5e6c60 100644 (file)
@@ -184,8 +184,7 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                                   page_end - page_start);
        }
 
-       for (i = page_start; i < page_end; i++)
-               __clear_bit(i, populated);
+       bitmap_clear(populated, page_start, page_end - page_start);
 }
 
 /**
index fff1ff7fb9ada36fca1be10d33dec593486f50a6..14380e9fbe3380b927cb58d75c5a950a5ec14f77 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct zone* zone,
                       struct page *page, struct page *page_tail)
 {
-       int active;
+       int uninitialized_var(active);
        enum lru_list lru;
        const int file = 0;
 
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
                        active = 0;
                        lru = LRU_INACTIVE_ANON;
                }
-               update_page_reclaim_stat(zone, page_tail, file, active);
        } else {
                SetPageUnevictable(page_tail);
                lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
                list_head = page_tail->lru.prev;
                list_move_tail(&page_tail->lru, list_head);
        }
+
+       if (!PageUnevictable(page))
+               update_page_reclaim_stat(zone, page_tail, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
        SetPageLRU(page);
        if (active)
                SetPageActive(page);
-       update_page_reclaim_stat(zone, page, file, active);
        add_page_to_lru_list(zone, page, lru);
+       update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
index 470038a9187383790751817cda61a07cb50f9512..ea6b32d61873185f59db1eb9ae303488ec6de471 100644 (file)
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
-                       /*
-                        * The memcg-specific accounting when moving
-                        * pages around the LRU lists relies on the
-                        * page's owner (memcg) to be valid.  Usually,
-                        * pages are assigned to a new owner before
-                        * being put on the LRU list, but since this
-                        * is not the case here, the stale owner from
-                        * a previous allocation cycle must be reset.
-                        */
-                       mem_cgroup_reset_owner(new_page);
                }
 
                /*
index 568d5bf175341b4f84ef284173b5d6da623ceafb..702a1ae9220b79bd9e60cc25732201fe8750dcd2 100644 (file)
@@ -446,8 +446,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        ip6h->nexthdr = IPPROTO_HOPOPTS;
        ip6h->hop_limit = 1;
        ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
-       ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
-                          &ip6h->saddr);
+       if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+                              &ip6h->saddr)) {
+               kfree_skb(skb);
+               return NULL;
+       }
        ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
        hopopt = (u8 *)(ip6h + 1);
index dd147d78a5889ab6c2139712199c3672f9e087c6..6751ed4e0c074a8bd0f692ccdf5df0d001e0d3dc 100644 (file)
@@ -17,9 +17,9 @@
 #include "br_private_stp.h"
 
 /* since time values in bpdu are in jiffies and then scaled (1/256)
- * before sending, make sure that is at least one.
+ * before sending, make sure that is at least one STP tick.
  */
-#define MESSAGE_AGE_INCR       ((HZ < 256) ? 1 : (HZ/256))
+#define MESSAGE_AGE_INCR       ((HZ / 256) + 1)
 
 static const char *const br_port_state_names[] = {
        [BR_STATE_DISABLED] = "disabled",
@@ -186,7 +186,7 @@ static void br_record_config_information(struct net_bridge_port *p,
        p->designated_cost = bpdu->root_path_cost;
        p->designated_bridge = bpdu->bridge_id;
        p->designated_port = bpdu->port_id;
-       p->designated_age = jiffies + bpdu->message_age;
+       p->designated_age = jiffies - bpdu->message_age;
 
        mod_timer(&p->message_age_timer, jiffies
                  + (p->br->max_age - bpdu->message_age));
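
The new macro guarantees at least one STP tick (1/256 s) of message-age increment for any HZ, and designated_age is now recorded as jiffies minus the BPDU's message age, i.e. roughly when the information originated, rather than plus. A purely illustrative comparison of the old and new increment for a few HZ values:

	#include <stdio.h>

	/* Old vs new per-hop message-age increment, in jiffies. */
	#define OLD_INCR(hz) (((hz) < 256) ? 1 : ((hz) / 256))
	#define NEW_INCR(hz) (((hz) / 256) + 1)

	int main(void)
	{
		int hz_values[] = { 100, 250, 256, 300, 1000 };

		for (unsigned int i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
			int hz = hz_values[i];
			/* After the 1/256 scaling done before sending, the increment
			 * should still amount to at least one tick on the wire. */
			printf("HZ=%4d  old=%d  new=%d\n", hz, OLD_INCR(hz), NEW_INCR(hz));
		}
		return 0;
	}
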
index 5864cc49136993d9de12f4c0f9110ab2dbcf1025..8aa4ad0e06af9a95e691e47e628198f11c39f069 100644 (file)
@@ -1893,10 +1893,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 
        switch (compat_mwt) {
        case EBT_COMPAT_MATCH:
-               match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
-                                               name, 0), "ebt_%s", name);
-               if (match == NULL)
-                       return -ENOENT;
+               match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
                if (IS_ERR(match))
                        return PTR_ERR(match);
 
@@ -1915,10 +1912,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
                break;
        case EBT_COMPAT_WATCHER: /* fallthrough */
        case EBT_COMPAT_TARGET:
-               wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
-                                               name, 0), "ebt_%s", name);
-               if (wt == NULL)
-                       return -ENOENT;
+               wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
                if (IS_ERR(wt))
                        return PTR_ERR(wt);
                off = xt_compat_target_offset(wt);
index 606a6e8f3671defcc1eab21a471f0468f54f3359..f965dce6f20f83c404f7133ab6a00a84a938d2b4 100644 (file)
@@ -1060,11 +1060,12 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
        cb->seq = net->dev_base_seq;
 
-       nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
-                   ifla_policy);
+       if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+                       ifla_policy) >= 0) {
 
-       if (tb[IFLA_EXT_MASK])
-               ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+               if (tb[IFLA_EXT_MASK])
+                       ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+       }
 
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
@@ -1900,10 +1901,11 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
        u32 ext_filter_mask = 0;
        u16 min_ifinfo_dump_size = 0;
 
-       nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy);
-
-       if (tb[IFLA_EXT_MASK])
-               ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+       if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+                       ifla_policy) >= 0) {
+               if (tb[IFLA_EXT_MASK])
+                       ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+       }
 
        if (!ext_filter_mask)
                return NLMSG_GOODSIZE;
index 53c8ce4046b207cf6dc65735a8a030932095734d..d9b83d198c3d3abb1cc59d4aa20f0f4415170eaa 100644 (file)
@@ -1403,8 +1403,16 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
        BUG_ON(!pcount);
 
-       /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
-       if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
+       /* Adjust counters and hints for the newly sacked sequence
+        * range but discard the return value since prev is already
+        * marked. We must tag the range first because the seq
+        * advancement below implicitly advances
+        * tcp_highest_sack_seq() when skb is highest_sack.
+        */
+       tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+                       start_seq, end_seq, dup_sack, pcount);
+
+       if (skb == tp->lost_skb_hint)
                tp->lost_cnt_hint += pcount;
 
        TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1430,12 +1438,6 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                skb_shinfo(skb)->gso_type = 0;
        }
 
-       /* Adjust counters and hints for the newly sacked sequence range but
-        * discard the return value since prev is already marked.
-        */
-       tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
-                       start_seq, end_seq, dup_sack, pcount);
-
        /* Difference in this won't matter, both ACKed by the same cumul. ACK */
        TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
 
@@ -2567,6 +2569,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 
                if (cnt > packets) {
                        if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+                           (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
                            (oldcnt >= packets))
                                break;
 
index 01a21c2f6ab37df336f5f69c55f33de4e0a698f3..8e2137bd87e2435d45294ffa192f34688ab61097 100644 (file)
@@ -1332,6 +1332,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
                hw_roc = true;
 
        list_for_each_entry(sdata, &local->interfaces, list) {
+               if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                       continue;
                if (sdata->old_idle == sdata->vif.bss_conf.idle)
                        continue;
                if (!ieee80211_sdata_running(sdata))
index ad64f4d5271a58121f93cf7f62806c93687d1193..f9b8e819ca63c0bfba25028c7d27b8cf42540157 100644 (file)
@@ -344,7 +344,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
        for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
                info->control.rates[i].idx = -1;
                info->control.rates[i].flags = 0;
-               info->control.rates[i].count = 1;
+               info->control.rates[i].count = 0;
        }
 
        if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
index 0abfb18b911fb93f022f0e6ac1e7b4dda4fb1e4d..227b6ae99785118066a7184cc4cb66f27d38c841 100644 (file)
@@ -204,6 +204,9 @@ static void perf_record__open(struct perf_record *rec)
 
                if (opts->group && pos != first)
                        group_fd = first->fd;
+fallback_missing_features:
+               if (opts->exclude_guest_missing)
+                       attr->exclude_guest = attr->exclude_host = 0;
 retry_sample_id:
                attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
 try_again:
@@ -217,15 +220,23 @@ try_again:
                        } else if (err ==  ENODEV && opts->cpu_list) {
                                die("No such device - did you specify"
                                        " an out-of-range profile CPU?\n");
-                       } else if (err == EINVAL && opts->sample_id_all_avail) {
-                               /*
-                                * Old kernel, no attr->sample_id_type_all field
-                                */
-                               opts->sample_id_all_avail = false;
-                               if (!opts->sample_time && !opts->raw_samples && !time_needed)
-                                       attr->sample_type &= ~PERF_SAMPLE_TIME;
-
-                               goto retry_sample_id;
+                       } else if (err == EINVAL) {
+                               if (!opts->exclude_guest_missing &&
+                                   (attr->exclude_guest || attr->exclude_host)) {
+                                       pr_debug("Old kernel, cannot exclude "
+                                                "guest or host samples.\n");
+                                       opts->exclude_guest_missing = true;
+                                       goto fallback_missing_features;
+                               } else if (opts->sample_id_all_avail) {
+                                       /*
+                                        * Old kernel, no attr->sample_id_type_all field
+                                        */
+                                       opts->sample_id_all_avail = false;
+                                       if (!opts->sample_time && !opts->raw_samples && !time_needed)
+                                               attr->sample_type &= ~PERF_SAMPLE_TIME;
+
+                                       goto retry_sample_id;
+                               }
                        }
 
                        /*
@@ -503,9 +514,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
                        return err;
        }
 
-       if (!!rec->no_buildid
+       if (!rec->no_buildid
            && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
-               pr_err("Couldn't generating buildids. "
+               pr_err("Couldn't generate buildids. "
                       "Use --no-buildid to profile anyway.\n");
                return -1;
        }
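
Both perf record and perf top now retry event setup in stages: if the kernel rejects the exclude_guest/exclude_host attribute bits with EINVAL, those bits are cleared and the open is retried before falling back further to dropping sample_id_all. A hedged user-space sketch of that staged-fallback loop, with a hypothetical try_open() standing in for perf_event_open() and a cut-down attr struct:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct attr {
		bool exclude_guest;
		bool sample_id_all;
	};

	/* Hypothetical stand-in: pretend this kernel predates both features. */
	static int try_open(const struct attr *a)
	{
		if (a->exclude_guest || a->sample_id_all)
			return -EINVAL;
		return 3;                           /* fake fd */
	}

	static int open_with_fallbacks(struct attr *a)
	{
		bool exclude_guest_missing = false;
		bool sample_id_all_avail = true;

		for (;;) {
			if (exclude_guest_missing)
				a->exclude_guest = false;   /* first fallback stage */
			a->sample_id_all = sample_id_all_avail;

			int fd = try_open(a);
			if (fd >= 0)
				return fd;

			if (fd == -EINVAL && !exclude_guest_missing && a->exclude_guest) {
				exclude_guest_missing = true;   /* retry without guest filtering */
				continue;
			}
			if (fd == -EINVAL && sample_id_all_avail) {
				sample_id_all_avail = false;    /* retry without sample_id_all */
				continue;
			}
			return fd;                          /* genuinely unsupported */
		}
	}

	int main(void)
	{
		struct attr a = { .exclude_guest = true, .sample_id_all = true };

		printf("fd = %d\n", open_with_fallbacks(&a));
		return 0;
	}
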
index dd162aa24baad2f44c5cbb1b4176fb2b2cc7b26d..ecff31257eb3771cf9db12acc49065e40e2c4d18 100644 (file)
@@ -857,6 +857,9 @@ static void perf_top__start_counters(struct perf_top *top)
                attr->mmap = 1;
                attr->comm = 1;
                attr->inherit = top->inherit;
+fallback_missing_features:
+               if (top->exclude_guest_missing)
+                       attr->exclude_guest = attr->exclude_host = 0;
 retry_sample_id:
                attr->sample_id_all = top->sample_id_all_avail ? 1 : 0;
 try_again:
@@ -868,12 +871,20 @@ try_again:
                        if (err == EPERM || err == EACCES) {
                                ui__error_paranoid();
                                goto out_err;
-                       } else if (err == EINVAL && top->sample_id_all_avail) {
-                               /*
-                                * Old kernel, no attr->sample_id_type_all field
-                                */
-                               top->sample_id_all_avail = false;
-                               goto retry_sample_id;
+                       } else if (err == EINVAL) {
+                               if (!top->exclude_guest_missing &&
+                                   (attr->exclude_guest || attr->exclude_host)) {
+                                       pr_debug("Old kernel, cannot exclude "
+                                                "guest or host samples.\n");
+                                       top->exclude_guest_missing = true;
+                                       goto fallback_missing_features;
+                               } else if (top->sample_id_all_avail) {
+                                       /*
+                                        * Old kernel, no attr->sample_id_type_all field
+                                        */
+                                       top->sample_id_all_avail = false;
+                                       goto retry_sample_id;
+                               }
                        }
                        /*
                         * If it's cycles then fall back to hrtimer
index 64f8bee31ced8d7eb3586788e4a69f872c33e57a..16e7d20eee8321110797f9f77c928c1e15e013ea 100644 (file)
@@ -199,6 +199,7 @@ struct perf_record_opts {
        bool         sample_address;
        bool         sample_time;
        bool         sample_id_all_avail;
+       bool         exclude_guest_missing;
        bool         system_wide;
        bool         period;
        unsigned int freq;
index a248f3c2c60d9bb730f5603e1b87fcb04160cb6a..f2eab81435ae1b4bf3ffaaedb3734e7e953b492b 100644 (file)
@@ -34,6 +34,7 @@ struct perf_top {
        bool               inherit;
        bool               group;
        bool               sample_id_all_avail;
+       bool               exclude_guest_missing;
        bool               dump_symtab;
        const char         *cpu_list;
        struct hist_entry  *sym_filter_entry;
index 813141047fc22fcfd61ae377444f7ff2a97f06eb..fb25d132921882c3b432ece1bc07bd2700800bc2 100644 (file)
@@ -6,7 +6,7 @@
  * XXX We need to find a better place for these things...
  */
 bool perf_host  = true;
-bool perf_guest = true;
+bool perf_guest = false;
 
 void event_attr_init(struct perf_event_attr *attr)
 {