Merge branch 'for-next' into for-linus
author Jiri Kosina <jkosina@suse.cz>
Mon, 8 Mar 2010 15:55:37 +0000 (16:55 +0100)
committer Jiri Kosina <jkosina@suse.cz>
Mon, 8 Mar 2010 15:55:37 +0000 (16:55 +0100)
Conflicts:
Documentation/filesystems/proc.txt
arch/arm/mach-u300/include/mach/debug-macro.S
drivers/net/qlge/qlge_ethtool.c
drivers/net/qlge/qlge_main.c
drivers/net/typhoon.c

101 files changed:
Documentation/filesystems/proc.txt
Documentation/power/runtime_pm.txt
arch/arm/common/clkdev.c
arch/arm/mach-davinci/include/mach/i2c.h
arch/arm/mach-omap2/board-3630sdp.c
arch/arm/mach-omap2/board-zoom-peripherals.c
arch/arm/mach-u300/core.c
arch/arm/mach-u300/include/mach/debug-macro.S
arch/ia64/sn/kernel/setup.c
arch/s390/kernel/sclp.S
arch/sparc/kernel/perf_event.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/tsc.c
arch/x86/kernel/vmiclock_32.c
crypto/Kconfig
drivers/acpi/dock.c
drivers/ata/libata-core.c
drivers/ata/libata-sff.c
drivers/ata/pata_acpi.c
drivers/ata/pata_pcmcia.c
drivers/char/agp/intel-agp.c
drivers/char/hvc_iseries.c
drivers/char/serial167.c
drivers/char/tty_io.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/radeon/radeon_state.c
drivers/input/serio/i8042.c
drivers/media/video/gspca/ov519.c
drivers/mfd/sm501.c
drivers/mmc/card/sdio_uart.c
drivers/net/chelsio/sge.c
drivers/net/cs89x0.c
drivers/net/cxgb3/sge.c
drivers/net/davinci_emac.c
drivers/net/e1000e/82571.c
drivers/net/e1000e/lib.c
drivers/net/igb/igb_main.c
drivers/net/ks8851.c
drivers/net/qlge/qlge_ethtool.c
drivers/net/qlge/qlge_main.c
drivers/net/smsc9420.c
drivers/net/spider_net.c
drivers/net/sungem.c
drivers/net/tehuti.c
drivers/net/tokenring/tms380tr.c
drivers/net/tun.c
drivers/net/typhoon.c
drivers/net/ucc_geth.c
drivers/net/wimax/i2400m/fw.c
drivers/net/wireless/ath/ar9170/main.c
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00debug.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/platform/x86/thinkpad_acpi.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/sd.c
drivers/usb/musb/musb_regs.h
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/opticon.c
drivers/usb/serial/symbolserial.c
fs/binfmt_elf_fdpic.c
fs/bio.c
fs/cifs/cifssmb.c
fs/ext3/super.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/gfs2/ops_fstype.c
fs/jbd/transaction.c
fs/locks.c
fs/namei.c
fs/nfsd/nfs4xdr.c
fs/ocfs2/dlmglue.c
fs/ocfs2/extent_map.c
fs/reiserfs/bitmap.c
fs/udf/inode.c
include/linux/mmzone.h
include/linux/sched.h
kernel/irq/chip.c
kernel/ksysfs.c
kernel/params.c
kernel/sched_cpupri.c
kernel/trace/ring_buffer.c
kernel/trace/trace.h
mm/slub.c
net/ipv4/tcp_timer.c
net/mac80211/mesh_plink.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/xt_hashlimit.c
security/selinux/avc.c
sound/pci/rme9652/hdspm.c
sound/soc/codecs/wm8990.c
tools/perf/perf.c

index 96a44dd95e03f2a856a9fdd23cb9afbee76804b2,bb314f60a76a58f66269b0adb9ee60ee727b6c4e..a4f30faa4f1f73ad4622b333007525f9134c21c4
@@@ -164,7 -164,6 +164,7 @@@ read the file /proc/PID/status
    VmExe:        68 kB
    VmLib:      1412 kB
   VmPTE:        20 kB
 +  VmSwap:        0 kB
    Threads:        1
    SigQ:   0/28578
    SigPnd: 0000000000000000
@@@ -189,13 -188,7 +189,13 @@@ memory usage. Its seven fields are expl
  contains detailed information about the process itself.  Its fields are
  explained in Table 1-4.
  
- Table 1-2: Contents of the statm files (as of 2.6.30-rc7)
 +(for SMP CONFIG users)
 +To make accounting scalable, RSS-related information is handled in an
 +asynchronous manner and the value may not be very precise. To see a precise
 +snapshot of a moment, you can read the /proc/<pid>/smaps file and scan the
 +page table. It's slow but very precise.
 +
+ Table 1-2: Contents of the status files (as of 2.6.30-rc7)
  ..............................................................................
   Field                       Content
   Name                        filename of the executable
   VmExe                       size of text segment
   VmLib                       size of shared library code
   VmPTE                       size of page table entries
 + VmSwap                      size of swap usage (the number of referred swapents)
   Threads                     number of threads
   SigQ                        number of signals queued/max. number for queue
   SigPnd                      bitmap of pending signals for the thread
@@@ -438,7 -430,6 +438,7 @@@ Table 1-5: Kernel info in /pro
   modules     List of loaded modules                            
   mounts      Mounted filesystems                               
   net         Networking info (see text)                        
 + pagetypeinfo Additional page allocator information (see text)  (2.5)
   partitions  Table of partitions known to the system           
   pci       Deprecated info of PCI bus (new way -> /proc/bus/pci/,
               decoupled by lspci                                       (2.4)
@@@ -593,7 -584,7 +593,7 @@@ Node 0, zone      DMA      0      
  Node 0, zone   Normal      1      0      0      1    101      8 ...
  Node 0, zone  HighMem      2      0      0      1      1      0 ...
  
 -Memory fragmentation is a problem under some workloads, and buddyinfo is a 
 +External fragmentation is a problem under some workloads, and buddyinfo is a
  useful tool for helping diagnose these problems.  Buddyinfo will give you a 
  clue as to how big an area you can safely allocate, or why a previous
  allocation failed.
@@@ -603,48 -594,6 +603,48 @@@ available.  In this case, there are 0 c
  ZONE_DMA, 4 chunks of 2^1*PAGE_SIZE in ZONE_DMA, 101 chunks of 2^4*PAGE_SIZE 
  available in ZONE_NORMAL, etc... 
  
 +More information relevant to external fragmentation can be found in
 +pagetypeinfo.
 +
 +> cat /proc/pagetypeinfo
 +Page block order: 9
 +Pages per block:  512
 +
 +Free pages count per migrate type at order       0      1      2      3      4      5      6      7      8      9     10
 +Node    0, zone      DMA, type    Unmovable      0      0      0      1      1      1      1      1      1      1      0
 +Node    0, zone      DMA, type  Reclaimable      0      0      0      0      0      0      0      0      0      0      0
 +Node    0, zone      DMA, type      Movable      1      1      2      1      2      1      1      0      1      0      2
 +Node    0, zone      DMA, type      Reserve      0      0      0      0      0      0      0      0      0      1      0
 +Node    0, zone      DMA, type      Isolate      0      0      0      0      0      0      0      0      0      0      0
 +Node    0, zone    DMA32, type    Unmovable    103     54     77      1      1      1     11      8      7      1      9
 +Node    0, zone    DMA32, type  Reclaimable      0      0      2      1      0      0      0      0      1      0      0
 +Node    0, zone    DMA32, type      Movable    169    152    113     91     77     54     39     13      6      1    452
 +Node    0, zone    DMA32, type      Reserve      1      2      2      2      2      0      1      1      1      1      0
 +Node    0, zone    DMA32, type      Isolate      0      0      0      0      0      0      0      0      0      0      0
 +
 +Number of blocks type     Unmovable  Reclaimable      Movable      Reserve      Isolate
 +Node 0, zone      DMA            2            0            5            1            0
 +Node 0, zone    DMA32           41            6          967            2            0
 +
 +Fragmentation avoidance in the kernel works by grouping pages of different
 +migrate types into the same contiguous regions of memory called page blocks.
 +A page block is typically the size of the default hugepage, e.g. 2MB on
 +x86-64. By keeping pages grouped based on their ability to move, the kernel
 +can reclaim pages within a page block to satisfy a high-order allocation.
 +
 +The pagetypeinfo begins with information on the size of a page block. It
 +then gives the same type of information as buddyinfo except broken down
 +by migrate-type and finishes with details on how many page blocks of each
 +type exist.
 +
 +If min_free_kbytes has been tuned correctly (recommendations made by hugeadm
 +from libhugetlbfs http://sourceforge.net/projects/libhugetlbfs/), one can
 +make an estimate of the likely number of huge pages that can be allocated
 +at a given point in time. All the "Movable" blocks should be allocatable
 +unless memory has been mlock()'d. Some of the Reclaimable blocks should
 +also be allocatable although a lot of filesystem metadata may have to be
 +reclaimed to achieve this.
 +
  ..............................................................................
  
  meminfo:
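
[Aside, not part of this merge: a minimal userspace sketch that reads the
VmSwap: field added by the hunk above from /proc/<pid>/status; only the
field name is taken from the patch, everything else is illustrative.]

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	FILE *f;

	/* default to the calling process when no PID is given */
	snprintf(path, sizeof(path), "/proc/%s/status",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "VmSwap:", 7) == 0)
			fputs(line, stdout);	/* e.g. "VmSwap:        0 kB" */
	fclose(f);
	return 0;
}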
index ab00eeddecafc0e5cadfa0e6afa2bc758ccf53b9,8602e15212d6bae8ef20af83b344edf200164ee8..55b859b3bc723267db79d1b274bf820c37c7469a
@@@ -224,12 -224,6 +224,12 @@@ defined in include/linux/pm.h
        RPM_SUSPENDED, which means that each device is initially regarded by the
        PM core as 'suspended', regardless of its real hardware status
  
 +  unsigned int runtime_auto;
 +    - if set, indicates that the user space has allowed the device driver to
 +      power manage the device at run time via the /sys/devices/.../power/control
 +      interface; it may only be modified with the help of the pm_runtime_allow()
 +      and pm_runtime_forbid() helper functions
 +
  All of the above fields are members of the 'power' member of 'struct device'.
  
  4. Run-time PM Device Helper Functions
@@@ -256,7 -250,7 +256,7 @@@ drivers/base/power/runtime.c and includ
        to suspend the device again in future
  
    int pm_runtime_resume(struct device *dev);
-     - execute the subsystem-leve resume callback for the device; returns 0 on
+     - execute the subsystem-level resume callback for the device; returns 0 on
        success, 1 if the device's run-time PM status was already 'active' or
        error code on failure, where -EAGAIN means it may be safe to attempt to
        resume the device again in future, but 'power.runtime_error' should be
        'power.runtime_error' is set or 'power.disable_depth' is greater than
        zero)
  
 +  bool pm_runtime_suspended(struct device *dev);
 +    - return true if the device's runtime PM status is 'suspended', or false
 +      otherwise
 +
 +  void pm_runtime_allow(struct device *dev);
 +    - set the power.runtime_auto flag for the device and decrease its usage
 +      counter (used by the /sys/devices/.../power/control interface to
 +      effectively allow the device to be power managed at run time)
 +
 +  void pm_runtime_forbid(struct device *dev);
 +    - unset the power.runtime_auto flag for the device and increase its usage
 +      counter (used by the /sys/devices/.../power/control interface to
 +      effectively prevent the device from being power managed at run time)
 +
  It is safe to execute the following helper functions from interrupt context:
  
  pm_request_idle()
@@@ -402,18 -382,6 +402,18 @@@ may be desirable to suspend the device 
  finished, so the PM core uses pm_runtime_idle_sync() to invoke the
  subsystem-level idle callback for the device at that time.
  
 +The user space can effectively prevent the driver of the device from power
 +managing it at run time by changing the value of its
 +/sys/devices/.../power/control attribute to "on", which causes
 +pm_runtime_forbid() to be called.  In principle, this mechanism may also be
 +used by the driver to effectively turn off the run-time power management of
 +the device until the user space turns it on.  Namely, during initialization
 +the driver can make sure that the run-time PM status of the device is
 +'active' and call pm_runtime_forbid().  It should be
 +noted, however, that if the user space has already intentionally changed the
 +value of /sys/devices/.../power/control to "auto" to allow the driver to power
 +manage the device at run time, the driver may confuse it by using
 +pm_runtime_forbid() this way.
 +
  6. Run-time PM and System Sleep
  
  Run-time PM and system sleep (i.e., system suspend and hibernation, also known
@@@ -463,64 -431,3 +463,64 @@@ The PM core always increments the run-t
  ->prepare() callback and decrements it after calling the ->complete() callback.
  Hence disabling run-time PM temporarily like this will not cause any run-time
  suspend callbacks to be lost.
 +
 +7. Generic subsystem callbacks
 +
 +Subsystems may wish to conserve code space by using the set of generic power
 +management callbacks provided by the PM core, defined in
 +drivers/base/power/generic_ops.c:
 +
 +  int pm_generic_runtime_idle(struct device *dev);
 +    - invoke the ->runtime_idle() callback provided by the driver of this
 +      device, if defined, and call pm_runtime_suspend() for this device if the
 +      return value is 0 or the callback is not defined
 +
 +  int pm_generic_runtime_suspend(struct device *dev);
 +    - invoke the ->runtime_suspend() callback provided by the driver of this
 +      device and return its result, or return -EINVAL if not defined
 +
 +  int pm_generic_runtime_resume(struct device *dev);
 +    - invoke the ->runtime_resume() callback provided by the driver of this
 +      device and return its result, or return -EINVAL if not defined
 +
 +  int pm_generic_suspend(struct device *dev);
 +    - if the device has not been suspended at run time, invoke the ->suspend()
 +      callback provided by its driver and return its result, or return 0 if not
 +      defined
 +
 +  int pm_generic_resume(struct device *dev);
 +    - invoke the ->resume() callback provided by the driver of this device and,
 +      if successful, change the device's runtime PM status to 'active'
 +
 +  int pm_generic_freeze(struct device *dev);
 +    - if the device has not been suspended at run time, invoke the ->freeze()
 +      callback provided by its driver and return its result, or return 0 if not
 +      defined
 +
 +  int pm_generic_thaw(struct device *dev);
 +    - if the device has not been suspended at run time, invoke the ->thaw()
 +      callback provided by its driver and return its result, or return 0 if not
 +      defined
 +
 +  int pm_generic_poweroff(struct device *dev);
 +    - if the device has not been suspended at run time, invoke the ->poweroff()
 +      callback provided by its driver and return its result, or return 0 if not
 +      defined
 +
 +  int pm_generic_restore(struct device *dev);
 +    - invoke the ->restore() callback provided by the driver of this device and,
 +      if successful, change the device's runtime PM status to 'active'
 +
 +These functions can be assigned to the ->runtime_idle(), ->runtime_suspend(),
 +->runtime_resume(), ->suspend(), ->resume(), ->freeze(), ->thaw(), ->poweroff(),
 +or ->restore() callback pointers in the subsystem-level dev_pm_ops structures.
 +
 +If a subsystem wishes to use all of them at the same time, it can simply assign
 +the GENERIC_SUBSYS_PM_OPS macro, defined in include/linux/pm.h, to its
 +dev_pm_ops structure pointer.
 +
 +Device drivers that wish to use the same function as a system suspend, freeze,
 +poweroff and run-time suspend callback, and similarly for system resume, thaw,
 +restore, and run-time resume, can achieve this with the help of the
 +UNIVERSAL_DEV_PM_OPS macro defined in include/linux/pm.h (possibly setting its
 +last argument to NULL).
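
[Aside, not part of this merge: a minimal sketch of a driver using the
UNIVERSAL_DEV_PM_OPS macro described above; the foo_* names and the choice
of the platform bus are hypothetical.]

#include <linux/pm.h>
#include <linux/platform_device.h>

/* one routine shared by system suspend, freeze, poweroff and run-time suspend */
static int foo_suspend(struct device *dev)
{
	/* quiesce the (hypothetical) hardware here */
	return 0;
}

/* one routine shared by system resume, thaw, restore and run-time resume */
static int foo_resume(struct device *dev)
{
	/* reinitialize the (hypothetical) hardware here */
	return 0;
}

/* the last argument is the ->runtime_idle() callback; NULL as suggested above */
static UNIVERSAL_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume, NULL);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};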
diff --combined arch/arm/common/clkdev.c
index 446b696196e363858e66d12983b2bca21126d22d,6f29b5ccea4de49c7289a46883a9d30fa6d19966..6416d5b5020d221514b7d8d8eca6bbb3ac2be991
@@@ -32,7 -32,7 +32,7 @@@ static DEFINE_MUTEX(clocks_mutex)
   *  If an entry has a device ID, it must match
   *  If an entry has a connection ID, it must match
   * Then we take the most specific entry - with the following
-  * order of precidence: dev+con > dev only > con only.
+  * order of precedence: dev+con > dev only > con only.
   */
  static struct clk *clk_find(const char *dev_id, const char *con_id)
  {
@@@ -99,16 -99,6 +99,16 @@@ void clkdev_add(struct clk_lookup *cl
  }
  EXPORT_SYMBOL(clkdev_add);
  
 +void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
 +{
 +      mutex_lock(&clocks_mutex);
 +      while (num--) {
 +              list_add_tail(&cl->node, &clocks);
 +              cl++;
 +      }
 +      mutex_unlock(&clocks_mutex);
 +}
 +
  #define MAX_DEV_ID    20
  #define MAX_CON_ID    16
  
index 39fdceac8414202a6dfa3327bc2f5fd535d3f0bb,44bdea13cc8c937b3ea8dca2dfe9c4e51dec479b..2312d197dfb7468a0bf22df2e69d8158597f1433
@@@ -1,5 -1,5 +1,5 @@@
  /*
-  * DaVinci I2C controller platfrom_device info
+  * DaVinci I2C controller platform_device info
   *
   * Author: Vladimir Barinov, MontaVista Software, Inc. <source@mvista.com>
   *
@@@ -16,8 -16,6 +16,8 @@@
  struct davinci_i2c_platform_data {
        unsigned int    bus_freq;       /* standard bus frequency (kHz) */
        unsigned int    bus_delay;      /* post-transaction delay (usec) */
 +      unsigned int    sda_pin;        /* GPIO pin ID to use for SDA */
 +      unsigned int    scl_pin;        /* GPIO pin ID to use for SCL */
  };
  
  /* for board setup code */
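
[Aside, not part of this merge: a board-code sketch showing how the two new
fields might be filled in; the bus frequency and GPIO numbers are
hypothetical, and davinci_init_i2c() is the board-setup entry point this
header declares.]

#include <linux/init.h>
#include <mach/i2c.h>

static struct davinci_i2c_platform_data board_i2c_pdata = {
	.bus_freq	= 100,	/* standard mode, kHz */
	.bus_delay	= 0,	/* usec */
	.sda_pin	= 44,	/* hypothetical GPIO used to recover SDA */
	.scl_pin	= 43,	/* hypothetical GPIO used to recover SCL */
};

static void __init board_i2c_init(void)
{
	davinci_init_i2c(&board_i2c_pdata);
}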
index 4386d2b4a785dcceaaecacb245024442312464d0,7390596328116b2972c1e3ceb6c232c283003d1f..4386d2b4a785dcceaaecacb245024442312464d0
mode 100755,100644..100644
@@@ -68,8 -68,8 +68,8 @@@ static struct ehci_hcd_omap_platform_da
  
  static void __init omap_sdp_map_io(void)
  {
 -      omap2_set_globals_343x();
 -      omap2_map_common_io();
 +      omap2_set_globals_36xx();
 +      omap34xx_map_common_io();
  }
  
  static struct omap_board_config_kernel sdp_config[] __initdata = {
index ca95d8d64136d903036ef069785b61810a82c54e,1e3dfb652acc08c22d3da12b5aa335c85ad51e89..ca95d8d64136d903036ef069785b61810a82c54e
mode 100755,100644..100644
@@@ -24,8 -24,7 +24,8 @@@
  #include <plat/common.h>
  #include <plat/usb.h>
  
 -#include "mmc-twl4030.h"
 +#include "mux.h"
 +#include "hsmmc.h"
  
  /* Zoom2 has Qwerty keyboard*/
  static int board_keymap[] = {
@@@ -151,7 -150,7 +151,7 @@@ static struct regulator_init_data zoom_
        .consumer_supplies      = &zoom_vsim_supply,
  };
  
 -static struct twl4030_hsmmc_info mmc[] __initdata = {
 +static struct omap2_hsmmc_info mmc[] __initdata = {
        {
                .name           = "external",
                .mmc            = 1,
@@@ -176,7 -175,7 +176,7 @@@ static int zoom_twl_gpio_setup(struct d
  {
        /* gpio + 0 is "mmc0_cd" (input/IRQ) */
        mmc[0].gpio_cd = gpio + 0;
 -      twl4030_mmc_init(mmc);
 +      omap2_hsmmc_init(mmc);
  
        /* link regulators to MMC adapters ... we "know" the
         * regulators will be set up only *after* we return.
@@@ -264,23 -263,9 +264,23 @@@ static int __init omap_i2c_init(void
        return 0;
  }
  
 +static struct omap_musb_board_data musb_board_data = {
 +      .interface_type         = MUSB_INTERFACE_ULPI,
 +      .mode                   = MUSB_OTG,
 +      .power                  = 100,
 +};
 +
 +static void enable_board_wakeup_source(void)
 +{
 +      /* T2 interrupt line (keypad) */
 +      omap_mux_init_signal("sys_nirq",
 +              OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
 +}
 +
  void __init zoom_peripherals_init(void)
  {
        omap_i2c_init();
        omap_serial_init();
 -      usb_musb_init();
 +      usb_musb_init(&musb_board_data);
 +      enable_board_wakeup_source();
  }
index 01b50313914cf99fd8738d288495eb18bc2919c9,d0cb5e94077619aa6ce5dff6c39edebdafb598ba..5f34eb674d68ec4ebbc3a763d01b97e10285b844
@@@ -3,7 -3,7 +3,7 @@@
   * arch/arm/mach-u300/core.c
   *
   *
 - * Copyright (C) 2007-2009 ST-Ericsson AB
 + * Copyright (C) 2007-2010 ST-Ericsson AB
   * License terms: GNU General Public License (GPL) version 2
   * Core platform support, IRQ handling and device definitions.
   * Author: Linus Walleij <linus.walleij@stericsson.com>
@@@ -19,7 -19,6 +19,7 @@@
  #include <linux/amba/bus.h>
  #include <linux/platform_device.h>
  #include <linux/gpio.h>
 +#include <mach/coh901318.h>
  
  #include <asm/types.h>
  #include <asm/setup.h>
@@@ -30,7 -29,6 +30,7 @@@
  
  #include <mach/hardware.h>
  #include <mach/syscon.h>
 +#include <mach/dma_channels.h>
  
  #include "clock.h"
  #include "mmc.h"
@@@ -358,7 -356,7 +358,7 @@@ static struct resource ave_resources[] 
        /*
         * The AVE3e requires two regions of 256MB that it considers
         * "invisible". The hardware will not be able to access these
-        * adresses, so they should never point to system RAM.
+        * addresses, so they should never point to system RAM.
         */
        {
                .name  = "AVE3e Reserved 0",
        },
  };
  
 +static struct resource dma_resource[] = {
 +      {
 +              .start = U300_DMAC_BASE,
 +              .end = U300_DMAC_BASE + PAGE_SIZE - 1,
 +              .flags =  IORESOURCE_MEM,
 +      },
 +      {
 +              .start = IRQ_U300_DMA,
 +              .end = IRQ_U300_DMA,
 +              .flags =  IORESOURCE_IRQ,
 +      }
 +};
 +
 +#ifdef CONFIG_MACH_U300_BS335
 +/* Lists all DMA slave channels.
 + * Syntax is [A1, B1, A2, B2, ...., -1, -1]:
 + * select all channels from A to B; the end of the list is marked with -1, -1.
 + */
 +static int dma_slave_channels[] = {
 +      U300_DMA_MSL_TX_0, U300_DMA_SPI_RX,
 +      U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1};
 +
 +/* Lists all DMA memcpy channels. */
 +static int dma_memcpy_channels[] = {
 +      U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1};
 +
 +#else /* CONFIG_MACH_U300_BS335 */
 +
 +static int dma_slave_channels[] = {U300_DMA_MSL_TX_0, U300_DMA_SPI_RX, -1, -1};
 +static int dma_memcpy_channels[] = {
 +      U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_10, -1, -1};
 +
 +#endif
 +
 +/** Register DMA for memory access
 + *
 + * active  1 means the DMA intends to access memory
 + *         0 means the DMA won't access memory
 + */
 +static void coh901318_access_memory_state(struct device *dev, bool active)
 +{
 +}
 +
 +#define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \
 +                      COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \
 +                      COH901318_CX_CFG_LCR_DISABLE | \
 +                      COH901318_CX_CFG_TC_IRQ_ENABLE | \
 +                      COH901318_CX_CFG_BE_IRQ_ENABLE)
 +#define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \
 +                      COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
 +                      COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
 +                      COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
 +                      COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
 +                      COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
 +                      COH901318_CX_CTRL_MASTER_MODE_M1RW | \
 +                      COH901318_CX_CTRL_TCP_DISABLE | \
 +                      COH901318_CX_CTRL_TC_IRQ_DISABLE | \
 +                      COH901318_CX_CTRL_HSP_DISABLE | \
 +                      COH901318_CX_CTRL_HSS_DISABLE | \
 +                      COH901318_CX_CTRL_DDMA_LEGACY | \
 +                      COH901318_CX_CTRL_PRDD_SOURCE)
 +#define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \
 +                      COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
 +                      COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
 +                      COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
 +                      COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
 +                      COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
 +                      COH901318_CX_CTRL_MASTER_MODE_M1RW | \
 +                      COH901318_CX_CTRL_TCP_DISABLE | \
 +                      COH901318_CX_CTRL_TC_IRQ_DISABLE | \
 +                      COH901318_CX_CTRL_HSP_DISABLE | \
 +                      COH901318_CX_CTRL_HSS_DISABLE | \
 +                      COH901318_CX_CTRL_DDMA_LEGACY | \
 +                      COH901318_CX_CTRL_PRDD_SOURCE)
 +#define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \
 +                      COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
 +                      COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
 +                      COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
 +                      COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
 +                      COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
 +                      COH901318_CX_CTRL_MASTER_MODE_M1RW | \
 +                      COH901318_CX_CTRL_TCP_DISABLE | \
 +                      COH901318_CX_CTRL_TC_IRQ_ENABLE | \
 +                      COH901318_CX_CTRL_HSP_DISABLE | \
 +                      COH901318_CX_CTRL_HSS_DISABLE | \
 +                      COH901318_CX_CTRL_DDMA_LEGACY | \
 +                      COH901318_CX_CTRL_PRDD_SOURCE)
 +
 +const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
 +      {
 +              .number = U300_DMA_MSL_TX_0,
 +              .name = "MSL TX 0",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 0 * 0x40 + 0x20,
 +      },
 +      {
 +              .number = U300_DMA_MSL_TX_1,
 +              .name = "MSL TX 1",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 1 * 0x40 + 0x20,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +      },
 +      {
 +              .number = U300_DMA_MSL_TX_2,
 +              .name = "MSL TX 2",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 2 * 0x40 + 0x20,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .desc_nbr_max = 10,
 +      },
 +      {
 +              .number = U300_DMA_MSL_TX_3,
 +              .name = "MSL TX 3",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 3 * 0x40 + 0x20,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +      },
 +      {
 +              .number = U300_DMA_MSL_TX_4,
 +              .name = "MSL TX 4",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 4 * 0x40 + 0x20,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +      },
 +      {
 +              .number = U300_DMA_MSL_TX_5,
 +              .name = "MSL TX 5",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 5 * 0x40 + 0x20,
 +      },
 +      {
 +              .number = U300_DMA_MSL_TX_6,
 +              .name = "MSL TX 6",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 6 * 0x40 + 0x20,
 +      },
 +      {
 +              .number = U300_DMA_MSL_RX_0,
 +              .name = "MSL RX 0",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 0 * 0x40 + 0x220,
 +      },
 +      {
 +              .number = U300_DMA_MSL_RX_1,
 +              .name = "MSL RX 1",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 1 * 0x40 + 0x220,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli = 0,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +      },
 +      {
 +              .number = U300_DMA_MSL_RX_2,
 +              .name = "MSL RX 2",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 2 * 0x40 + 0x220,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +      },
 +      {
 +              .number = U300_DMA_MSL_RX_3,
 +              .name = "MSL RX 3",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 3 * 0x40 + 0x220,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +      },
 +      {
 +              .number = U300_DMA_MSL_RX_4,
 +              .name = "MSL RX 4",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 4 * 0x40 + 0x220,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +      },
 +      {
 +              .number = U300_DMA_MSL_RX_5,
 +              .name = "MSL RX 5",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 5 * 0x40 + 0x220,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +      },
 +      {
 +              .number = U300_DMA_MSL_RX_6,
 +              .name = "MSL RX 6",
 +              .priority_high = 0,
 +              .dev_addr = U300_MSL_BASE + 6 * 0x40 + 0x220,
 +      },
 +      {
 +              .number = U300_DMA_MMCSD_RX_TX,
 +              .name = "MMCSD RX TX",
 +              .priority_high = 0,
 +              .dev_addr =  U300_MMCSD_BASE + 0x080,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY,
 +
 +      },
 +      {
 +              .number = U300_DMA_MSPRO_TX,
 +              .name = "MSPRO TX",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_MSPRO_RX,
 +              .name = "MSPRO RX",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_UART0_TX,
 +              .name = "UART0 TX",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_UART0_RX,
 +              .name = "UART0 RX",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_APEX_TX,
 +              .name = "APEX TX",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_APEX_RX,
 +              .name = "APEX RX",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_PCM_I2S0_TX,
 +              .name = "PCM I2S0 TX",
 +              .priority_high = 1,
 +              .dev_addr = U300_PCM_I2S0_BASE + 0x14,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +      },
 +      {
 +              .number = U300_DMA_PCM_I2S0_RX,
 +              .name = "PCM I2S0 RX",
 +              .priority_high = 1,
 +              .dev_addr = U300_PCM_I2S0_BASE + 0x10,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +      },
 +      {
 +              .number = U300_DMA_PCM_I2S1_TX,
 +              .name = "PCM I2S1 TX",
 +              .priority_high = 1,
 +              .dev_addr = U300_PCM_I2S1_BASE + 0x14,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_SOURCE,
 +      },
 +      {
 +              .number = U300_DMA_PCM_I2S1_RX,
 +              .name = "PCM I2S1 RX",
 +              .priority_high = 1,
 +              .dev_addr = U300_PCM_I2S1_BASE + 0x10,
 +              .param.config = COH901318_CX_CFG_CH_DISABLE |
 +                              COH901318_CX_CFG_LCR_DISABLE |
 +                              COH901318_CX_CFG_TC_IRQ_ENABLE |
 +                              COH901318_CX_CFG_BE_IRQ_ENABLE,
 +              .param.ctrl_lli_chained = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_DISABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_DISABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +              .param.ctrl_lli_last = 0 |
 +                              COH901318_CX_CTRL_TC_ENABLE |
 +                              COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
 +                              COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
 +                              COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 +                              COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
 +                              COH901318_CX_CTRL_MASTER_MODE_M1RW |
 +                              COH901318_CX_CTRL_TCP_ENABLE |
 +                              COH901318_CX_CTRL_TC_IRQ_ENABLE |
 +                              COH901318_CX_CTRL_HSP_ENABLE |
 +                              COH901318_CX_CTRL_HSS_DISABLE |
 +                              COH901318_CX_CTRL_DDMA_LEGACY |
 +                              COH901318_CX_CTRL_PRDD_DEST,
 +      },
 +      {
 +              .number = U300_DMA_XGAM_CDI,
 +              .name = "XGAM CDI",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_XGAM_PDI,
 +              .name = "XGAM PDI",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_SPI_TX,
 +              .name = "SPI TX",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_SPI_RX,
 +              .name = "SPI RX",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_0,
 +              .name = "GENERAL 00",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_1,
 +              .name = "GENERAL 01",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_2,
 +              .name = "GENERAL 02",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_3,
 +              .name = "GENERAL 03",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_4,
 +              .name = "GENERAL 04",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_5,
 +              .name = "GENERAL 05",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_6,
 +              .name = "GENERAL 06",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_7,
 +              .name = "GENERAL 07",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_8,
 +              .name = "GENERAL 08",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +#ifdef CONFIG_MACH_U300_BS335
 +      {
 +              .number = U300_DMA_UART1_TX,
 +              .name = "UART1 TX",
 +              .priority_high = 0,
 +      },
 +      {
 +              .number = U300_DMA_UART1_RX,
 +              .name = "UART1 RX",
 +              .priority_high = 0,
 +      }
 +#else
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_9,
 +              .name = "GENERAL 09",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      },
 +      {
 +              .number = U300_DMA_GENERAL_PURPOSE_10,
 +              .name = "GENERAL 10",
 +              .priority_high = 0,
 +
 +              .param.config = flags_memcpy_config,
 +              .param.ctrl_lli_chained = flags_memcpy_lli_chained,
 +              .param.ctrl_lli = flags_memcpy_lli,
 +              .param.ctrl_lli_last = flags_memcpy_lli_last,
 +      }
 +#endif
 +};
 +
 +
 +static struct coh901318_platform coh901318_platform = {
 +      .chans_slave = dma_slave_channels,
 +      .chans_memcpy = dma_memcpy_channels,
 +      .access_memory_state = coh901318_access_memory_state,
 +      .chan_conf = chan_config,
 +      .max_channels = U300_DMA_CHANNELS,
 +};
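
A minimal sketch of how the coh901318 driver on the other side of this
boundary would pick the data up -- the probe function below is
illustrative, not the driver's actual code; only coh901318_platform and
the platform_data pointer come from the patch itself:

    static int coh901318_probe(struct platform_device *pdev)
    {
            struct coh901318_platform *pdata = pdev->dev.platform_data;

            if (!pdata)
                    return -ENODEV;
            /* pdata->chan_conf[n] holds the per-channel flags built above */
            return 0;
    }
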
 +
  static struct platform_device wdog_device = {
 -      .name = "wdog",
 +      .name = "coh901327_wdog",
        .id = -1,
        .num_resources = ARRAY_SIZE(wdog_resources),
        .resource = wdog_resources,
@@@ -1441,23 -428,11 +1441,23 @@@ static struct platform_device ave_devic
        .resource = ave_resources,
  };
  
 +static struct platform_device dma_device = {
 +      .name           = "coh901318",
 +      .id             = -1,
 +      .resource       = dma_resource,
 +      .num_resources  = ARRAY_SIZE(dma_resource),
 +      .dev = {
 +              .platform_data = &coh901318_platform,
 +              .coherent_dma_mask = ~0,
 +      },
 +};
 +
  /*
   * Notice that AMBA devices are initialized before platform devices.
   *
   */
  static struct platform_device *platform_devs[] __initdata = {
 +      &dma_device,
        &i2c0_device,
        &i2c1_device,
        &keypad_device,
@@@ -1596,7 -571,7 +1596,7 @@@ static void __init u300_init_check_chip
  /*
   * Some devices and their resources require reserved physical memory from
   * the end of the available RAM. This function traverses the list of devices
-  * and assigns actual adresses to these.
+  * and assigns actual addresses to these.
   */
  static void __init u300_assign_physmem(void)
  {
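
The function body falls outside the hunk; a hedged sketch of the idea the
comment describes, with every name below invented for illustration:

    /* Illustrative only: hand each entry a slice carved downward
     * from the top of the available RAM. */
    static void assign_physmem_sketch(struct physmem_entry *list, int n,
                                      unsigned long ram_end)
    {
            int i;

            for (i = 0; i < n; i++) {
                    ram_end -= list[i].size;        /* reserve from the end */
                    list[i].start = ram_end;
            }
    }
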
index ca4a028c26613097811977bf2f2d14be592aee18,d591fe13ed130870069cd9af5569d8166e132315..92c12420256ffdc518d8a4102198e42365039032
@@@ -10,8 -10,8 +10,8 @@@
   */
  #include <mach/hardware.h>
  
 -      .macro  addruart,rx
 +      .macro  addruart, rx, tmp
-       /* If we move the adress using MMU, use this. */
+       /* If we move the address using MMU, use this. */
        mrc     p15, 0, \rx, c1, c0
        tst     \rx, #1                 @ MMU enabled?
        ldreq   \rx,      = U300_SLOW_PER_PHYS_BASE @ MMU off, physical address
index e456f062f2419deb5f2bfd9d0e968d88b1c79ac5,c6c6d9381126ddd3870fe895a60c252b87b441e2..d00dfc180021cb85a84a8565f48595545d9a6615
@@@ -71,7 -71,7 +71,7 @@@ EXPORT_SYMBOL(sn_rtc_cycles_per_second)
  DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
  EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
  
 -DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
 +DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
  EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
  
  DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
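
The hunk above tracks a kernel-wide cleanup of per-CPU array declarations:
the array dimension moves out of the type argument and onto the name, so
the macro's first parameter is always a plain type. In isolation:

    /* old (removed):  DEFINE_PER_CPU(short [MAX_COMPACT_NODES], var);
     * new (added):    DEFINE_PER_CPU(short, var[MAX_COMPACT_NODES]);
     * access is unchanged (sketch; index assumed in range): */
    short nasid = __get_cpu_var(__sn_cnodeid_to_nasid)[cnode];
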
@@@ -241,7 -241,7 +241,7 @@@ static void __cpuinit sn_check_for_wars
   * Note:  This stuff is duped here because Altix requires the PCDP to
   * locate a usable VGA device due to lack of proper ACPI support.  Structures
   * could be used from drivers/firmware/pcdp.h, but it was decided that moving
-  * this file to a more public location just for Altix use was undesireable.
+  * this file to a more public location just for Altix use was undesirable.
   */
  
  struct hcdp_uart_desc {
diff --combined arch/s390/kernel/sclp.S
index 27af3bf3a0098722ca6d0bee52562a1abd27d812,27c1a2e236d1793b9820cb96baf0fffdf31a8fb0..2e82fdd89320d438fad923fabdbdd8398a9bf8be
@@@ -9,10 -9,8 +9,10 @@@
   */
  
  LC_EXT_NEW_PSW                = 0x58                  # addr of ext int handler
 +LC_EXT_NEW_PSW_64     = 0x1b0                 # addr of ext int handler 64 bit
  LC_EXT_INT_PARAM      = 0x80                  # addr of ext int parameter
  LC_EXT_INT_CODE               = 0x86                  # addr of ext int code
 +LC_AR_MODE_ID         = 0xa3
  
  #
  # Subroutine which waits synchronously until either an external interruption
@@@ -32,16 -30,8 +32,16 @@@ _sclp_wait_int
  .LbaseS1:
        ahi     %r15,-96                        # create stack frame
        la      %r8,LC_EXT_NEW_PSW              # register int handler
 -      mvc     .LoldpswS1-.LbaseS1(8,%r13),0(%r8)
 -      mvc     0(8,%r8),.LextpswS1-.LbaseS1(%r13)
 +      la      %r9,.LextpswS1-.LbaseS1(%r13)
 +#ifdef CONFIG_64BIT
 +      tm      LC_AR_MODE_ID,1
 +      jno     .Lesa1
 +      la      %r8,LC_EXT_NEW_PSW_64           # register int handler 64 bit
 +      la      %r9,.LextpswS1_64-.LbaseS1(%r13)
 +.Lesa1:
 +#endif
 +      mvc     .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
 +      mvc     0(16,%r8),0(%r9)
        lhi     %r6,0x0200                      # cr mask for ext int (cr0.54)
        ltr     %r2,%r2
        jz      .LsetctS1
  .LtimeoutS1:
        lctl    %c0,%c0,.LctlS1-.LbaseS1(%r13)  # restore interrupt setting
        # restore old handler
 -      mvc     0(8,%r8),.LoldpswS1-.LbaseS1(%r13)
 +      mvc     0(16,%r8),.LoldpswS1-.LbaseS1(%r13)
        lm      %r6,%r15,120(%r15)              # restore registers
        br      %r14                            # return to caller
  
        .align  8
  .LoldpswS1:
 -      .long   0, 0                            # old ext int PSW
 +      .long   0, 0, 0, 0                      # old ext int PSW
  .LextpswS1:
        .long   0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
 +#ifdef CONFIG_64BIT
 +.LextpswS1_64:
 +      .quad   0x0000000180000000, .LwaitS1    # PSW to handle ext int, 64 bit
 +#endif
  .LwaitpswS1:
        .long   0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
  .LtimeS1:
@@@ -235,7 -221,7 +235,7 @@@ _sclp_print
        lh      %r9,0(%r8)                      # update sccb length
        ar      %r9,%r6
        sth     %r9,0(%r8)
-       ar      %r7,%r6                         # update current mto adress
+       ar      %r7,%r6                         # update current mto address
        ltr     %r0,%r0                         # more characters?
        jnz     .LinitmtoS4
        l       %r2,.LwritedataS4-.LbaseS4(%r13)# write data
  _sclp_print_early:
        stm     %r6,%r15,24(%r15)               # save registers
        ahi     %r15,-96                        # create stack frame
 +#ifdef CONFIG_64BIT
 +      tm      LC_AR_MODE_ID,1
 +      jno     .Lesa2
 +      ahi     %r15,-80
 +      stmh    %r6,%r15,96(%r15)               # store upper register halves
 +.Lesa2:
 +#endif
        lr      %r10,%r2                        # save string pointer
        lhi     %r2,0
        bras    %r14,_sclp_setup                # enable console
        lhi     %r2,1
        bras    %r14,_sclp_setup                # disable console
  .LendS5:
 +#ifdef CONFIG_64BIT
 +      tm      LC_AR_MODE_ID,1
 +      jno     .Lesa3
 +      lmh     %r6,%r15,96(%r15)               # store upper register halves
 +      ahi     %r15,80
 +.Lesa3:
 +#endif
        lm      %r6,%r15,120(%r15)              # restore registers
        br      %r14
  
index 9f2b2bac8b2be58f86486343037c36e457e8c5ed,a565ee5146eb43041c8fb23a9e93af2236282916..b867ab3353b490b6298b2c6cba2b444ac9918388
@@@ -980,10 -980,10 +980,10 @@@ static int collect_events(struct perf_e
        return n;
  }
  
 -static void event_sched_in(struct perf_event *event, int cpu)
 +static void event_sched_in(struct perf_event *event)
  {
        event->state = PERF_EVENT_STATE_ACTIVE;
 -      event->oncpu = cpu;
 +      event->oncpu = smp_processor_id();
        event->tstamp_running += event->ctx->time - event->tstamp_stopped;
        if (is_software_event(event))
                event->pmu->enable(event);
  
  int hw_perf_group_sched_in(struct perf_event *group_leader,
                           struct perf_cpu_context *cpuctx,
 -                         struct perf_event_context *ctx, int cpu)
 +                         struct perf_event_context *ctx)
  {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *sub;
  
        cpuctx->active_oncpu += n;
        n = 1;
 -      event_sched_in(group_leader, cpu);
 +      event_sched_in(group_leader);
        list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
                if (sub->state != PERF_EVENT_STATE_OFF) {
 -                      event_sched_in(sub, cpu);
 +                      event_sched_in(sub);
                        n++;
                }
        }
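
The sparc change drops the cpu parameter in favour of asking at the point
of use; a toy reduction of that pattern (names invented), valid only
because the perf core calls in with preemption disabled:

    static void mark_running_here(struct toy_event *ev)
    {
            /* smp_processor_id() is stable here only while
             * preemption is off */
            ev->oncpu = smp_processor_id();
    }
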
@@@ -1353,7 -1353,7 +1353,7 @@@ static void perf_callchain_user_32(stru
  }
  
  /* Like powerpc we can't get PMU interrupts within the PMU handler,
-  * so no need for seperate NMI and IRQ chains as on x86.
+  * so no need for separate NMI and IRQ chains as on x86.
   */
  static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
  
index 1aa966c565f92dcccaebf83c55f5ee9c71ac928d,eec33a7d96a0f363c89bb9be88219aeccfea11a1..a4ac764a6880fdaa7b06fc984b5d1976b2b2ee0f
@@@ -38,7 -38,7 +38,7 @@@ int iommu_detected __read_mostly = 0
   * This variable becomes 1 if iommu=pt is passed on the kernel command line.
   * If this variable is 1, IOMMU implementations do no DMA translation for
   * devices and allow every device to access to whole physical memory. This is
-  * useful if a user want to use an IOMMU only for KVM device assignment to
+  * useful if a user wants to use an IOMMU only for KVM device assignment to
   * guests and not for driver dma translation.
   */
  int iommu_pass_through __read_mostly;
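
For reference, roughly how such a boot flag gets wired up -- the real
parser is iommu_setup() elsewhere in this file and handles far more than
"pt"; the reduced form below is a sketch with an invented name:

    static __init int iommu_parse(char *p)      /* illustrative */
    {
            if (p && !strncmp(p, "pt", 2))
                    iommu_pass_through = 1;
            return 0;
    }
    early_param("iommu", iommu_parse);
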
@@@ -65,7 -65,7 +65,7 @@@ int dma_set_mask(struct device *dev, u6
  }
  EXPORT_SYMBOL(dma_set_mask);
  
 -#ifdef CONFIG_X86_64
 +#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
  static __initdata void *dma32_bootmem_ptr;
  static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
  
@@@ -116,21 -116,14 +116,21 @@@ static void __init dma32_free_bootmem(v
        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
  }
 +#else
 +void __init dma32_reserve_bootmem(void)
 +{
 +}
 +static void __init dma32_free_bootmem(void)
 +{
 +}
 +
  #endif
  
  void __init pci_iommu_alloc(void)
  {
 -#ifdef CONFIG_X86_64
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
 -#endif
 +
        if (pci_swiotlb_detect())
                goto out;
  
diff --combined arch/x86/kernel/ptrace.c
index 2d96aab82a4887881e20cbc7336eafb732bace39,118428085ea28629389f6256d7caf76a8efea556..a503b1fd04e51048c25fca9b675add81cac37304
@@@ -48,7 -48,6 +48,7 @@@ enum x86_regset 
        REGSET_FP,
        REGSET_XFP,
        REGSET_IOPERM64 = REGSET_XFP,
 +      REGSET_XSTATE,
        REGSET_TLS,
        REGSET_IOPERM32,
  };
@@@ -141,6 -140,30 +141,6 @@@ static const int arg_offs_table[] = 
  #endif
  };
  
 -/**
 - * regs_get_argument_nth() - get Nth argument at function call
 - * @regs:     pt_regs which contains registers at function entry.
 - * @n:                argument number.
 - *
 - * regs_get_argument_nth() returns @n th argument of a function call.
 - * Since usually the kernel stack will be changed right after function entry,
 - * you must use this at function entry. If the @n th entry is NOT in the
 - * kernel stack or pt_regs, this returns 0.
 - */
 -unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n)
 -{
 -      if (n < ARRAY_SIZE(arg_offs_table))
 -              return *(unsigned long *)((char *)regs + arg_offs_table[n]);
 -      else {
 -              /*
 -               * The typical case: arg n is on the stack.
 -               * (Note: stack[0] = return address, so skip it)
 -               */
 -              n -= ARRAY_SIZE(arg_offs_table);
 -              return regs_get_kernel_stack_nth(regs, 1 + n);
 -      }
 -}
 -
  /*
   * does not yet catch signals sent when the child dies.
   * in exit.c or in signal.c.
@@@ -581,7 -604,7 +581,7 @@@ ptrace_modify_breakpoint(struct perf_ev
        struct perf_event_attr attr;
  
        /*
-        * We shoud have at least an inactive breakpoint at this
+        * We should have at least an inactive breakpoint at this
         * slot. It means the user is writing dr7 without having
         * written the address register first
         */
@@@ -679,7 -702,7 +679,7 @@@ static unsigned long ptrace_get_debugre
        } else if (n == 6) {
                val = thread->debugreg6;
         } else if (n == 7) {
 -              val = ptrace_get_dr7(thread->ptrace_bps);
 +              val = thread->ptrace_dr7;
        }
        return val;
  }
@@@ -755,11 -778,8 +755,11 @@@ int ptrace_set_debugreg(struct task_str
                        return rc;
        }
        /* All that's left is DR7 */
 -      if (n == 7)
 +      if (n == 7) {
                rc = ptrace_write_dr7(tsk, val);
 +              if (!rc)
 +                      thread->ptrace_dr7 = val;
 +      }
  
  ret_path:
        return rc;
@@@ -1564,7 -1584,7 +1564,7 @@@ long compat_arch_ptrace(struct task_str
  
  #ifdef CONFIG_X86_64
  
 -static const struct user_regset x86_64_regsets[] = {
 +static struct user_regset x86_64_regsets[] __read_mostly = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_regs_struct) / sizeof(long),
                .size = sizeof(long), .align = sizeof(long),
                .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
        },
 +      [REGSET_XSTATE] = {
 +              .core_note_type = NT_X86_XSTATE,
 +              .size = sizeof(u64), .align = sizeof(u64),
 +              .active = xstateregs_active, .get = xstateregs_get,
 +              .set = xstateregs_set
 +      },
        [REGSET_IOPERM64] = {
                .core_note_type = NT_386_IOPERM,
                .n = IO_BITMAP_LONGS,
@@@ -1608,7 -1622,7 +1608,7 @@@ static const struct user_regset_view us
  #endif        /* CONFIG_X86_64 */
  
  #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
 -static const struct user_regset x86_32_regsets[] = {
 +static struct user_regset x86_32_regsets[] __read_mostly = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_regs_struct32) / sizeof(u32),
                .size = sizeof(u32), .align = sizeof(u32),
                .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
        },
 +      [REGSET_XSTATE] = {
 +              .core_note_type = NT_X86_XSTATE,
 +              .size = sizeof(u64), .align = sizeof(u64),
 +              .active = xstateregs_active, .get = xstateregs_get,
 +              .set = xstateregs_set
 +      },
        [REGSET_TLS] = {
                .core_note_type = NT_386_TLS,
                .n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
@@@ -1655,23 -1663,6 +1655,23 @@@ static const struct user_regset_view us
  };
  #endif
  
 +/*
 + * This represents bytes 464..511 in the memory layout exported through
 + * the REGSET_XSTATE interface.
 + */
 +u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
 +
 +void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
 +{
 +#ifdef CONFIG_X86_64
 +      x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
 +#endif
 +#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
 +      x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
 +#endif
 +      xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
 +}
 +
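
update_regset_xstate_info() exists so the regset size can track the CPU's
actual XSAVE area. The call site is outside this hunk; presumably the
XSAVE setup path issues a single call once the area size and feature mask
are known (both identifiers below are assumptions):

    update_regset_xstate_info(xstate_size, pcntxt_mask);
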
  const struct user_regset_view *task_user_regset_view(struct task_struct *task)
  {
  #ifdef CONFIG_IA32_EMULATION
diff --combined arch/x86/kernel/tsc.c
index 208a857c679f99a9dd7e1902f35b8b56d16a0fca,dec8f68e3eda6d204f33a115e631a2353a3fcf87..9faf91ae1841e3530f611b136ad058a0efbcfa67
@@@ -50,7 -50,7 +50,7 @@@ u64 native_sched_clock(void
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
-        *   can achive it. )
+        *   can achieve it. )
         */
        if (unlikely(tsc_disabled)) {
                /* No locking but a rare wrong value is not a big deal: */
@@@ -740,7 -740,7 +740,7 @@@ static cycle_t __vsyscall_fn vread_tsc(
  }
  #endif
  
 -static void resume_tsc(void)
 +static void resume_tsc(struct clocksource *cs)
  {
        clocksource_tsc.cycle_last = 0;
  }
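
resume_tsc() now receives the clocksource being resumed, matching the
updated clocksource API. A sketch of how such a callback is attached,
assuming the unchanged code wires it up through the .resume member (the
initializer below is illustrative; the real clocksource_tsc definition is
outside the hunk):

    static struct clocksource clocksource_tsc = {
            .name   = "tsc",
            /* ... other fields elided ... */
            .resume = resume_tsc,   /* now void (*)(struct clocksource *) */
    };
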
@@@ -806,7 -806,7 +806,7 @@@ static void __init check_system_tsc_rel
        unsigned long res_low, res_high;
  
        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
 -      /* Geode_LX - the OLPC CPU has a possibly a very reliable TSC */
 +      /* Geode_LX - the OLPC CPU has a very reliable TSC */
        if (res_low & RTSC_SUSP)
                tsc_clocksource_reliable = 1;
  #endif
index 2f1ca56142928af1a98e6a9358bd758cfd282513,25bbb9bfc312835bd870a625baf04732058f9a1f..5e1ff66ecd73f0e2a8e92062e3db76294926ac43
@@@ -79,7 -79,11 +79,7 @@@ unsigned long vmi_tsc_khz(void
  
  static inline unsigned int vmi_get_timer_vector(void)
  {
 -#ifdef CONFIG_X86_IO_APIC
 -      return FIRST_DEVICE_VECTOR;
 -#else
 -      return FIRST_EXTERNAL_VECTOR;
 -#endif
 +      return IRQ0_VECTOR;
  }
  
  /** vmi clockchip */
@@@ -167,7 -171,7 +167,7 @@@ static int vmi_timer_next_event(unsigne
  {
        /* Unfortunately, set_next_event interface only passes relative
         * expiry, but we want absolute expiry.  It'd be better if we
-        * were passed an aboslute expiry, since a bunch of time may
+        * were passed an absolute expiry, since a bunch of time may
         * have been stolen between the time the delta is computed and
         * when we set the alarm below. */
        cycle_t now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_ONESHOT));
diff --combined crypto/Kconfig
index 6a2e295ee2277bc2c88c25c9bbe5a7114940fbb9,755ab90294da3ce6198b96bf874b77ffa6b88bef..403857ad06d4f6c229a63c4b703b391b2417a788
@@@ -114,16 -114,6 +114,16 @@@ config CRYPTO_NUL
        help
          These are 'Null' algorithms, used by IPsec, which do nothing.
  
 +config CRYPTO_PCRYPT
 +      tristate "Parallel crypto engine (EXPERIMENTAL)"
 +      depends on SMP && EXPERIMENTAL
 +      select PADATA
 +      select CRYPTO_MANAGER
 +      select CRYPTO_AEAD
 +      help
 +        This converts an arbitrary crypto algorithm into a parallel
 +        algorithm that executes in kernel threads.
 +
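
pcrypt is consumed as an algorithm template wrapped around an existing
AEAD name; a minimal sketch of requesting it through the crypto API
(error handling elided, algorithm string chosen only as an example):

    struct crypto_aead *tfm =
            crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))", 0, 0);
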
  config CRYPTO_WORKQUEUE
         tristate
  
@@@ -826,8 -816,8 +826,8 @@@ config CRYPTO_ANSI_CPRN
        help
          This option enables the generic pseudo random number generator
          for cryptographic modules.  Uses the Algorithm specified in
-         ANSI X9.31 A.2.4. Not this option must be enabled if CRYPTO_FIPS 
-         is selected
+         ANSI X9.31 A.2.4. Note that this option must be enabled if
+         CRYPTO_FIPS is selected.
  
  source "drivers/crypto/Kconfig"
  
diff --combined drivers/acpi/dock.c
index b2586f57e1f5fc1347757cbf24044acbac922a8d,d7f363f9435f5887b717cb391159aed287ae8d19..d9a85f1ddde6ca62ac1d529f4023df10b140714d
@@@ -605,7 -605,7 +605,7 @@@ register_hotplug_dock_device(acpi_handl
        list_for_each_entry(dock_station, &dock_stations, sibling) {
                /*
                 * An ATA bay can be in a dock and itself can be ejected
-                * seperately, so there are two 'dock stations' which need the
+                * separately, so there are two 'dock stations' which need the
                 * ops
                 */
                dd = find_dock_dependent_device(dock_station, handle);
@@@ -935,7 -935,6 +935,7 @@@ static int dock_add(acpi_handle handle
        struct platform_device *dd;
  
        id = dock_station_count;
 +      memset(&ds, 0, sizeof(ds));
        dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
        if (IS_ERR(dd))
                return PTR_ERR(dd);
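
The added memset matters because platform_device_register_data() copies
the structure byte-for-byte -- padding included -- so an uninitialised
stack blob would leak garbage into the registered copy. The idiom in
isolation:

    struct dock_station ds;

    memset(&ds, 0, sizeof(ds)); /* or: struct dock_station ds = { 0 }; */
    dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
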
index 9c77b0d1a9d0fe2e50e7c7a9409e241cc35aa499,698ef474767ed1ac227e9753c1a6792742539e9b..4a28420efff22a2059c50bbe6a15804d79c73dc1
@@@ -2232,7 -2232,7 +2232,7 @@@ retry
                 * Some drives were very specific about that exact sequence.
                 *
                 * Note that ATA4 says lba is mandatory so the second check
-                * shoud never trigger.
+                * should never trigger.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(dev, id[3], id[6]);
@@@ -3211,7 -3211,6 +3211,7 @@@ const struct ata_timing *ata_timing_fin
  int ata_timing_compute(struct ata_device *adev, unsigned short speed,
                       struct ata_timing *t, int T, int UT)
  {
 +      const u16 *id = adev->id;
        const struct ata_timing *s;
        struct ata_timing p;
  
         * PIO/MW_DMA cycle timing.
         */
  
 -      if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
 +      if (id[ATA_ID_FIELD_VALID] & 2) {       /* EIDE drive */
                memset(&p, 0, sizeof(p));
 +
                if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
 -                      if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
 -                                          else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
 -              } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
 -                      p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
 -              }
 +                      if (speed <= XFER_PIO_2)
 +                              p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
 +                      else if ((speed <= XFER_PIO_4) ||
 +                               (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
 +                              p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
 +              } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
 +                      p.cycle = id[ATA_ID_EIDE_DMA_MIN];
 +
                ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
        }
  
diff --combined drivers/ata/libata-sff.c
index 02441fd57e9ef631afa04708423044569c463716,7f2c94a07c003099617e88a205bfd9073589c53b..561dec2481cb2da91bed31b037edc31e2301b57b
@@@ -893,9 -893,6 +893,9 @@@ static void ata_pio_sector(struct ata_q
                                       do_write);
        }
  
 +      if (!do_write)
 +              flush_dcache_page(page);
 +
        qc->curbytes += qc->sect_size;
        qc->cursg_ofs += qc->sect_size;
  
@@@ -1763,50 -1760,24 +1763,50 @@@ irqreturn_t ata_sff_interrupt(int irq, 
  {
        struct ata_host *host = dev_instance;
        unsigned int i;
 -      unsigned int handled = 0;
 +      unsigned int handled = 0, polling = 0;
        unsigned long flags;
  
        /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
        spin_lock_irqsave(&host->lock, flags);
  
        for (i = 0; i < host->n_ports; i++) {
 -              struct ata_port *ap;
 +              struct ata_port *ap = host->ports[i];
 +              struct ata_queued_cmd *qc;
  
 -              ap = host->ports[i];
 -              if (ap &&
 -                  !(ap->flags & ATA_FLAG_DISABLED)) {
 -                      struct ata_queued_cmd *qc;
 +              if (unlikely(ap->flags & ATA_FLAG_DISABLED))
 +                      continue;
  
 -                      qc = ata_qc_from_tag(ap, ap->link.active_tag);
 -                      if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
 -                          (qc->flags & ATA_QCFLAG_ACTIVE))
 +              qc = ata_qc_from_tag(ap, ap->link.active_tag);
 +              if (qc) {
 +                      if (!(qc->tf.flags & ATA_TFLAG_POLLING))
                                handled |= ata_sff_host_intr(ap, qc);
 +                      else
 +                              polling |= 1 << i;
 +              }
 +      }
 +
 +      /*
 +       * If no port was expecting IRQ but the controller is actually
 +       * asserting IRQ line, a "nobody cared" warning will ensue.  Check IRQ
 +       * pending status if available and clear spurious IRQ.
 +       */
 +      if (!handled) {
 +              for (i = 0; i < host->n_ports; i++) {
 +                      struct ata_port *ap = host->ports[i];
 +
 +                      if (polling & (1 << i))
 +                              continue;
 +
 +                      if (!ap->ops->sff_irq_check ||
 +                          !ap->ops->sff_irq_check(ap))
 +                              continue;
 +
 +                      if (printk_ratelimit())
 +                              ata_port_printk(ap, KERN_INFO,
 +                                              "clearing spurious IRQ\n");
 +
 +                      ap->ops->sff_check_status(ap);
 +                      ap->ops->sff_irq_clear(ap);
                }
        }
  
@@@ -2287,7 -2258,7 +2287,7 @@@ EXPORT_SYMBOL_GPL(ata_sff_postreset)
   *    @qc: command
   *
   *    Drain the FIFO and device of any stuck data following a command
-  *    failing to complete. In some cases this is neccessary before a
+  *    failing to complete. In some cases this is necessary before a
   *    reset will recover the device.
   *
   */
@@@ -3037,7 -3008,6 +3037,7 @@@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_
   *    @ppi: array of port_info, must be enough for two ports
   *    @sht: scsi_host_template to use when registering the host
   *    @host_priv: host private_data
 + *    @hflag: host flags
   *
   *    This is a helper function which can be called from a driver's
   *    xxx_init_one() probe function if the hardware uses traditional
   *    Zero on success, negative on errno-based value on error.
   */
  int ata_pci_sff_init_one(struct pci_dev *pdev,
 -                       const struct ata_port_info * const *ppi,
 -                       struct scsi_host_template *sht, void *host_priv)
 +               const struct ata_port_info * const *ppi,
 +               struct scsi_host_template *sht, void *host_priv, int hflag)
  {
        struct device *dev = &pdev->dev;
        const struct ata_port_info *pi = NULL;
        if (rc)
                goto out;
        host->private_data = host_priv;
 +      host->flags |= hflag;
  
        pci_set_master(pdev);
        rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
diff --combined drivers/ata/pata_acpi.c
index 294f3020a78ad26e42773a6d0bb396d45ca7f13e,9e33da9565d97ed3dd94ccd39557d37920ef7a56..8e5e13210426d9effb029058453f2c2cb9ae7504
@@@ -161,7 -161,7 +161,7 @@@ static void pacpi_set_dmamode(struct at
   *
   *    Called when the libata layer is about to issue a command. We wrap
   *    this interface so that we can load the correct ATA timings if
-  *    neccessary.
+  *    necessary.
   */
  
  static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
@@@ -259,7 -259,7 +259,7 @@@ static int pacpi_init_one (struct pci_d
                        return rc;
                pcim_pin_device(pdev);
        }
 -      return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL);
 +      return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
  }
  
  static const struct pci_device_id pacpi_pci_tbl[] = {
index 36103531feeb0a4e5636ee3a4388b4722cd54d21,416aebb8b913b5dc07d21250fe4a8cc6c4c4942a..147de2fd66d2f67358058a26baa024a08db28d4b
@@@ -131,12 -131,12 +131,12 @@@ static unsigned int ata_data_xfer_8bit(
   *    @qc: command
   *
   *    Drain the FIFO and device of any stuck data following a command
-  *    failing to complete. In some cases this is neccessary before a
+  *    failing to complete. In some cases this is necessary before a
   *    reset will recover the device.
   *
   */
   
 -void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
 +static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
  {
        int count;
        struct ata_port *ap;
index 919a28558d362fa152ae3e12d94babb1cecbfd60,45a22f9bfec21080374d01b7f405ba58fc3ea51e..a3e10dc7cc25ff375674a79c510effbcdfc452f9
@@@ -8,12 -8,8 +8,12 @@@
  #include <linux/kernel.h>
  #include <linux/pagemap.h>
  #include <linux/agp_backend.h>
 +#include <asm/smp.h>
  #include "agp.h"
  
 +int intel_agp_enabled;
 +EXPORT_SYMBOL(intel_agp_enabled);
 +
  /*
   * If we have Intel graphics, we're not going to have anything other than
   * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
  #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB        0x0062
  #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
  #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG         0x0046
 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB  0x0100
 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG  0x0102
 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB  0x0104
 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG  0x0106
  
  /* cover 915 and 945 variants */
  #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
 -              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
 +              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
 +              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
 +              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
  
  extern int agp_memory_reserved;
  
  #define INTEL_I7505_AGPCTRL   0x70
  #define INTEL_I7505_MCHCFG    0x50
  
 +#define SNB_GMCH_CTRL 0x50
 +#define SNB_GMCH_GMS_STOLEN_MASK      0xF8
 +#define SNB_GMCH_GMS_STOLEN_32M               (1 << 3)
 +#define SNB_GMCH_GMS_STOLEN_64M               (2 << 3)
 +#define SNB_GMCH_GMS_STOLEN_96M               (3 << 3)
 +#define SNB_GMCH_GMS_STOLEN_128M      (4 << 3)
 +#define SNB_GMCH_GMS_STOLEN_160M      (5 << 3)
 +#define SNB_GMCH_GMS_STOLEN_192M      (6 << 3)
 +#define SNB_GMCH_GMS_STOLEN_224M      (7 << 3)
 +#define SNB_GMCH_GMS_STOLEN_256M      (8 << 3)
 +#define SNB_GMCH_GMS_STOLEN_288M      (9 << 3)
 +#define SNB_GMCH_GMS_STOLEN_320M      (0xa << 3)
 +#define SNB_GMCH_GMS_STOLEN_352M      (0xb << 3)
 +#define SNB_GMCH_GMS_STOLEN_384M      (0xc << 3)
 +#define SNB_GMCH_GMS_STOLEN_416M      (0xd << 3)
 +#define SNB_GMCH_GMS_STOLEN_448M      (0xe << 3)
 +#define SNB_GMCH_GMS_STOLEN_480M      (0xf << 3)
 +#define SNB_GMCH_GMS_STOLEN_512M      (0x10 << 3)
 +
  static const struct aper_size_info_fixed intel_i810_sizes[] =
  {
        {64, 16384, 4},
@@@ -298,7 -269,7 +298,7 @@@ static void intel_agp_insert_sg_entries
                        j++;
                }
        } else {
-               /* sg may merge pages, but we have to seperate
+               /* sg may merge pages, but we have to separate
                 * per-page addr for GTT */
                unsigned int len, m;
  
@@@ -322,13 -293,6 +322,13 @@@ static void intel_agp_insert_sg_entries
                                        off_t pg_start, int mask_type)
  {
        int i, j;
 +      u32 cache_bits = 0;
 +
 +      if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
 +          agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
 +              cache_bits = I830_PTE_SYSTEM_CACHED;
 +      }
  
        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(agp_bridge->driver->mask_memory(agp_bridge,
@@@ -649,7 -613,7 +649,7 @@@ static struct aper_size_info_fixed inte
  static void intel_i830_init_gtt_entries(void)
  {
        u16 gmch_ctrl;
 -      int gtt_entries;
 +      int gtt_entries = 0;
        u8 rdct;
        int local = 0;
        static const int ddt[4] = { 0, 16, 32, 64 };
                        gtt_entries = 0;
                        break;
                }
 +      } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
 +                 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
 +              /*
 +               * SandyBridge has new memory control reg at 0x50.w
 +               */
 +              u16 snb_gmch_ctl;
 +              pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 +              switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
 +              case SNB_GMCH_GMS_STOLEN_32M:
 +                      gtt_entries = MB(32) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_64M:
 +                      gtt_entries = MB(64) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_96M:
 +                      gtt_entries = MB(96) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_128M:
 +                      gtt_entries = MB(128) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_160M:
 +                      gtt_entries = MB(160) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_192M:
 +                      gtt_entries = MB(192) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_224M:
 +                      gtt_entries = MB(224) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_256M:
 +                      gtt_entries = MB(256) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_288M:
 +                      gtt_entries = MB(288) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_320M:
 +                      gtt_entries = MB(320) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_352M:
 +                      gtt_entries = MB(352) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_384M:
 +                      gtt_entries = MB(384) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_416M:
 +                      gtt_entries = MB(416) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_448M:
 +                      gtt_entries = MB(448) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_480M:
 +                      gtt_entries = MB(480) - KB(size);
 +                      break;
 +              case SNB_GMCH_GMS_STOLEN_512M:
 +                      gtt_entries = MB(512) - KB(size);
 +                      break;
 +              }
        } else {
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
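
An aside on the SandyBridge switch above: the sixteen stolen-memory cases
are linear. Per the SNB_GMCH_GMS_STOLEN_* encodings (n << 3 for
n = 1..16), the same result could be computed arithmetically; the long
switch is a stylistic choice, not a necessity:

    /* equivalent arithmetic form (illustrative, not in the patch) */
    unsigned int n = (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) >> 3;

    gtt_entries = MB(32 * n) - KB(size);
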
@@@ -908,6 -815,12 +908,6 @@@ static void intel_i830_setup_flush(void
                intel_i830_fini_flush();
  }
  
 -static void
 -do_wbinvd(void *null)
 -{
 -      wbinvd();
 -}
 -
  /* The chipset_flush interface needs to get data that has already been
   * flushed out of the CPU all the way out to main memory, because the GPU
   * doesn't snoop those buffers.
@@@ -924,10 -837,12 +924,10 @@@ static void intel_i830_chipset_flush(st
  
        memset(pg, 0, 1024);
  
 -      if (cpu_has_clflush) {
 +      if (cpu_has_clflush)
                clflush_cache_range(pg, 1024);
 -      } else {
 -              if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
 -                      printk(KERN_ERR "Timed out waiting for cache flush.\n");
 -      }
 +      else if (wbinvd_on_all_cpus() != 0)
 +              printk(KERN_ERR "Timed out waiting for cache flush.\n");
  }
  
  /* The intel i830 automatically initializes the agp aperture during POST.
@@@ -1449,8 -1364,6 +1449,8 @@@ static void intel_i965_get_gtt_range(in
        case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
        case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
        case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
 +      case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
 +      case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
                *gtt_offset = *gtt_size = MB(2);
                break;
        default:
@@@ -2432,9 -2345,9 +2432,9 @@@ static const struct intel_driver_descri
                NULL, &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
                NULL, &intel_g33_driver },
 -      { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
 +      { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
                NULL, &intel_g33_driver },
 -      { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
 +      { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
                NULL, &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
            "GM45", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
            "G41", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
 -          "Ironlake/D", NULL, &intel_i965_driver },
 +          "HD Graphics", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
 -          "Ironlake/M", NULL, &intel_i965_driver },
 +          "HD Graphics", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
 -          "Ironlake/MA", NULL, &intel_i965_driver },
 +          "HD Graphics", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
 -          "Ironlake/MC2", NULL, &intel_i965_driver },
 +          "HD Graphics", NULL, &intel_i965_driver },
 +      { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
 +          "Sandybridge", NULL, &intel_i965_driver },
 +      { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
 +          "Sandybridge", NULL, &intel_i965_driver },
        { 0, 0, 0, NULL, NULL, NULL }
  };
  
@@@ -2469,7 -2378,7 +2469,7 @@@ static int __devinit agp_intel_probe(st
        struct agp_bridge_data *bridge;
        u8 cap_ptr = 0;
        struct resource *r;
 -      int i;
 +      int i, err;
  
        cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
  
        }
  
        pci_set_drvdata(pdev, bridge);
 -      return agp_add_bridge(bridge);
 +      err = agp_add_bridge(bridge);
 +      if (!err)
 +              intel_agp_enabled = 1;
 +      return err;
  }
  
  static void __devexit agp_intel_remove(struct pci_dev *pdev)
@@@ -2669,8 -2575,6 +2669,8 @@@ static struct pci_device_id agp_intel_p
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
 +      ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
 +      ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
        { }
  };
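
The agp_intel_probe() change above defers the return of agp_add_bridge() so a module-wide flag is raised only on success. A minimal standalone sketch of that pattern, assuming nothing from this driver (my_register(), my_probe() and my_enabled are invented stand-ins):

#include <stdio.h>

static int my_enabled;                  /* set only once registration worked */

static int my_register(void)            /* stand-in for agp_add_bridge() */
{
        return 0;                       /* 0 means success, as in the kernel */
}

static int my_probe(void)
{
        int err = my_register();
        if (!err)
                my_enabled = 1;         /* consumers may now test this flag */
        return err;                     /* propagate the result unchanged */
}

int main(void)
{
        printf("probe=%d enabled=%d\n", my_probe(), my_enabled);
        return 0;
}
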
  
index fd0242676a2a4530a24470f78e8609369ee0873b,0794925d8042d517b383972f59f526b514637cbf..21c54955084e743c17bfcbb817ac5464fe717e36
@@@ -197,7 -197,7 +197,7 @@@ done
        return sent;
  }
  
 -static struct hv_ops hvc_get_put_ops = {
 +static const struct hv_ops hvc_get_put_ops = {
        .get_chars = get_chars,
        .put_chars = put_chars,
        .notifier_add = notifier_add_irq,
@@@ -353,7 -353,7 +353,7 @@@ static void hvc_close_event(struct HvLp
  
        if (!hvlpevent_is_int(event)) {
                printk(KERN_WARNING
-                       "hvc: got unexpected close acknowlegement\n");
+                       "hvc: got unexpected close acknowledgement\n");
                return;
        }
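
The hvc_get_put_ops hunk above adds const so the callback table lands in read-only data and cannot be patched at run time. A freestanding sketch of the same idiom; struct my_ops and its callbacks are illustrative, not the hvc API:

#include <stdio.h>

struct my_ops {
        int (*get_chars)(char *buf, int count);
        int (*put_chars)(const char *buf, int count);
};

static int my_get(char *buf, int count) { (void)buf; return count; }
static int my_put(const char *buf, int count) { (void)buf; return count; }

/* const places the table in .rodata; the pointers cannot be overwritten */
static const struct my_ops ops = {
        .get_chars = my_get,
        .put_chars = my_put,
};

int main(void)
{
        char buf[8];
        printf("%d %d\n", ops.get_chars(buf, 8), ops.put_chars(buf, 8));
        return 0;
}
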
  
diff --combined drivers/char/serial167.c
index 986aa606a6b6fe9084762162243f991a05565bab,aee3c0d1759f9623697aac49a63e75433aac7ba6..1ec3d5cd748f5e602fbf7813ca7a23266102daeb
@@@ -658,7 -658,8 +658,7 @@@ static irqreturn_t cd2401_rx_interrupt(
                        info->mon.char_max = char_count;
                info->mon.char_last = char_count;
  #endif
 -              len = tty_buffer_request_room(tty, char_count);
 -              while (len--) {
 +              while (char_count--) {
                        data = base_addr[CyRDR];
                        tty_insert_flip_char(tty, data, TTY_NORMAL);
  #ifdef CYCLOM_16Y_HACK
@@@ -1989,7 -1990,7 +1989,7 @@@ void mvme167_serial_console_setup(int c
        /*
         * Attempt to set up all channels to something reasonable, and
         * bang out an INIT_CHAN command.  We should then be able to limit
-        * the ammount of fiddling we have to do in normal running.
+        * the amount of fiddling we have to do in normal running.
         */
  
        for (ch = 3; ch >= 0; ch--) {
diff --combined drivers/char/tty_io.c
index dcb9083ecde0982dd061720be8e2e324d3bde5ed,56b11c1c7aeb8eebd7487902896a4fb3ea960f2a..a42c466f7092e2f519f8428be2669ab964a56575
@@@ -1951,10 -1951,8 +1951,10 @@@ static int tty_fasync(int fd, struct fi
                        pid = task_pid(current);
                        type = PIDTYPE_PID;
                }
 -              retval = __f_setown(filp, pid, type, 0);
 +              get_pid(pid);
                spin_unlock_irqrestore(&tty->ctrl_lock, flags);
 +              retval = __f_setown(filp, pid, type, 0);
 +              put_pid(pid);
                if (retval)
                        goto out;
        } else {
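
The tty_fasync() hunk above pins the struct pid with get_pid() before the spinlock is dropped, so __f_setown() runs outside the lock without the pid vanishing underneath it. A userspace-flavoured sketch of the same take-a-reference-then-unlock ordering; the names are invented and the refcount ops are left unlocked for brevity:

#include <pthread.h>
#include <stdio.h>

struct obj { int refs; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj shared = { .refs = 1 };

static void get_obj(struct obj *o) { o->refs++; }  /* called under lock */
static void put_obj(struct obj *o) { o->refs--; }

static void slow_call(struct obj *o) { (void)o; }  /* may block or sleep */

int main(void)
{
        pthread_mutex_lock(&lock);
        struct obj *o = &shared;
        get_obj(o);                    /* pin before dropping the lock */
        pthread_mutex_unlock(&lock);
        slow_call(o);                  /* safe: our reference keeps o alive */
        put_obj(o);                    /* balance the get */
        printf("refs=%d\n", shared.refs);
        return 0;
}
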
@@@ -2028,7 -2026,7 +2028,7 @@@ static int tiocgwinsz(struct tty_struc
   *    @rows: rows (character)
   *    @cols: cols (character)
   *
-  *    Update the termios variables and send the neccessary signals to
+  *    Update the termios variables and send the necessary signals to
   *    perform a terminal resize correctly
   */
  
index 71247da17da5cd4e820dbfe953b61f524cce52c4,52fb371784e170f55b1fdf8b394339eb07a8450b..75bceee76044e8ab25b6035e690e09fa3e29f850
@@@ -311,11 -311,11 +311,11 @@@ valid_reg(struct nvbios *bios, uint32_
  
        /* C51 has misaligned regs on purpose. Marvellous */
        if (reg & 0x2 ||
 -          (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
 +          (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
                NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
  
        /* warn on C51 regs that haven't been verified accessible in tracing */
 -      if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
 +      if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
            reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
                NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
                        reg);
@@@ -420,7 -420,7 +420,7 @@@ bios_wr32(struct nvbios *bios, uint32_
        LOG_OLD_VALUE(bios_rd32(bios, reg));
        BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
  
 -      if (dev_priv->VBIOS.execute) {
 +      if (dev_priv->vbios.execute) {
                still_alive();
                nv_wr32(bios->dev, reg, data);
        }
@@@ -647,7 -647,7 +647,7 @@@ nv50_pll_set(struct drm_device *dev, ui
        reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
        reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
  
 -      if (dev_priv->VBIOS.execute) {
 +      if (dev_priv->vbios.execute) {
                still_alive();
                nv_wr32(dev, reg + 4, reg1);
                nv_wr32(dev, reg + 0, reg0);
@@@ -689,7 -689,7 +689,7 @@@ setPLL(struct nvbios *bios, uint32_t re
  static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
  
        /*
         * For the results of this function to be correct, CR44 must have been
  
        uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
  
 -      if (dcb_entry > bios->bdcb.dcb.entries) {
 +      if (dcb_entry > bios->dcb.entries) {
                NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
                                "(%02X)\n", dcb_entry);
                dcb_entry = 0x7f;       /* unused / invalid marker */
@@@ -713,26 -713,25 +713,26 @@@ static struct nouveau_i2c_chan 
  init_i2c_device_find(struct drm_device *dev, int i2c_index)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
 +      struct dcb_table *dcb = &dev_priv->vbios.dcb;
  
        if (i2c_index == 0xff) {
                /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
                int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
 -              int default_indices = bdcb->i2c_default_indices;
 +              int default_indices = dcb->i2c_default_indices;
  
 -              if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
 +              if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
                        shift = 4;
  
                i2c_index = (default_indices >> shift) & 0xf;
        }
        if (i2c_index == 0x80)  /* g80+ */
 -              i2c_index = bdcb->i2c_default_indices & 0xf;
 +              i2c_index = dcb->i2c_default_indices & 0xf;
  
        return nouveau_i2c_find(dev, i2c_index);
  }
  
 -static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
 +static uint32_t
 +get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
  {
        /*
         * For mlv < 0x80, it is an index into a table of TMDS base addresses.
         */
  
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 +      struct nvbios *bios = &dev_priv->vbios;
        const int pramdac_offset[13] = {
                0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
        const uint32_t pramdac_table[4] = {
                dcb_entry = dcb_entry_idx_from_crtchead(dev);
                if (dcb_entry == 0x7f)
                        return 0;
 -              dacoffset = pramdac_offset[
 -                              dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
 +              dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
                if (mlv == 0x81)
                        dacoffset ^= 8;
                return 0x6808b0 + dacoffset;
        } else {
 -              if (mlv > ARRAY_SIZE(pramdac_table)) {
 +              if (mlv >= ARRAY_SIZE(pramdac_table)) {
                        NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
                                                                        mlv);
                        return 0;
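
The mlv test above tightens > to >= because an index equal to ARRAY_SIZE() already points one past the last element. A small self-contained demonstration of the corrected bound:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int table[4] = { 1, 2, 3, 4 };

static int lookup(unsigned int idx)
{
        if (idx >= ARRAY_SIZE(table))  /* '>' would let idx == 4 through */
                return -1;             /* reject the out-of-range index */
        return (int)table[idx];
}

int main(void)
{
        printf("%d %d\n", lookup(3), lookup(4));  /* prints "4 -1" */
        return 0;
}
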
@@@ -1866,7 -1865,7 +1866,7 @@@ init_compute_mem(struct nvbios *bios, u
  
        struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
  
 -      if (dev_priv->card_type >= NV_50)
 +      if (dev_priv->card_type >= NV_40)
                return 1;
  
        /*
@@@ -2575,19 -2574,19 +2575,19 @@@ init_gpio(struct nvbios *bios, uint16_
  
        const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
        const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
 -      const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
 +      const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr];
        const uint8_t *gpio_entry;
        int i;
  
        if (!iexec->execute)
                return 1;
  
 -      if (bios->bdcb.version != 0x40) {
 +      if (bios->dcb.version != 0x40) {
                NV_ERROR(bios->dev, "DCB table not version 4.0\n");
                return 0;
        }
  
 -      if (!bios->bdcb.gpio_table_ptr) {
 +      if (!bios->dcb.gpio_table_ptr) {
                NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
                return 0;
        }
@@@ -3124,7 -3123,7 +3124,7 @@@ run_digital_op_script(struct drm_devic
                      struct dcb_entry *dcbent, int head, bool dl)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        struct init_exec iexec = {true, false};
  
        NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
  static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
        uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
  
@@@ -3195,7 -3194,7 +3195,7 @@@ static int run_lvds_table(struct drm_de
         * of a list of pxclks and script pointers.
         */
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
        uint16_t scriptptr = 0, clktable;
        uint8_t clktableptr = 0;
@@@ -3262,7 -3261,7 +3262,7 @@@ int call_lvds_script(struct drm_device 
         */
  
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
        uint32_t sel_clk_binding, sel_clk;
        int ret;
@@@ -3396,7 -3395,7 +3396,7 @@@ static int parse_fp_mode_table(struct d
  #ifndef __powerpc__
                NV_ERROR(dev, "Pointer to flat panel table invalid\n");
  #endif
 -              bios->pub.digital_min_front_porch = 0x4b;
 +              bios->digital_min_front_porch = 0x4b;
                return 0;
        }
  
                 * fptable[4] is the minimum
                 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
                 */
 -              bios->pub.digital_min_front_porch = fptable[4];
 +              bios->digital_min_front_porch = fptable[4];
                ofs = -7;
                break;
        default:
  
        /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
        if (lth.lvds_ver > 0x10)
 -              bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
 +              bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
  
        /*
         * If either the strap or xlated fpindex value are 0xf there is no
  bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
  
        if (!mode)      /* just checking whether we can produce a mode */
@@@ -3545,7 -3544,7 +3545,7 @@@ int nouveau_bios_parse_lvds_table(struc
         * at which modes should be set up in the dual link style.
         *
         * Following the header, the BMP (ver 0xa) table has several records,
-        * indexed by a seperate xlat table, indexed in turn by the fp strap in
+        * indexed by a separate xlat table, indexed in turn by the fp strap in
         * EXTDEV_BOOT. Each record had a config byte, followed by 6 script
         * numbers for use by INIT_SUB which controlled panel init and power,
         * and finally a dword of ms to sleep between power off and on
         * until later, when this function should be called with non-zero pxclk
         */
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
        struct lvdstableheader lth;
        uint16_t lvdsofs;
 -      int ret, chip_version = bios->pub.chip_version;
 +      int ret, chip_version = bios->chip_version;
  
        ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
        if (ret)
@@@ -3683,7 -3682,7 +3683,7 @@@ bios_output_config_match(struct drm_dev
                         uint16_t record, int record_len, int record_nr)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        uint32_t entry;
        uint16_t table;
        int i, v;
@@@ -3717,7 -3716,7 +3717,7 @@@ nouveau_bios_dp_table(struct drm_devic
                      int *length)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        uint8_t *table;
  
        if (!bios->display.dp_table_ptr) {
        }
        table = &bios->data[bios->display.dp_table_ptr];
  
 -      if (table[0] != 0x21) {
 +      if (table[0] != 0x20 && table[0] != 0x21) {
                NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
                         table[0]);
                return NULL;
@@@ -3766,7 -3765,8 +3766,7 @@@ nouveau_bios_run_display_table(struct d
         */
  
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct init_exec iexec = {true, false};
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        uint8_t *table = &bios->data[bios->display.script_table_ptr];
        uint8_t *otable = NULL;
        uint16_t script;
                }
        }
  
 -      bios->display.output = dcbent;
 -
        if (pxclk == 0) {
                script = ROM16(otable[6]);
                if (!script) {
                }
  
                NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
 -              parse_init_table(bios, script, &iexec);
 +              nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk == -1) {
                script = ROM16(otable[8]);
                }
  
                NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
 -              parse_init_table(bios, script, &iexec);
 +              nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk == -2) {
                if (table[4] >= 12)
                }
  
                NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
 -              parse_init_table(bios, script, &iexec);
 +              nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk > 0) {
                script = ROM16(otable[table[4] + i*6 + 2]);
                }
  
                NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
 -              parse_init_table(bios, script, &iexec);
 +              nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk < 0) {
                script = ROM16(otable[table[4] + i*6 + 4]);
                }
  
                NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
 -              parse_init_table(bios, script, &iexec);
 +              nouveau_bios_run_init_table(dev, script, dcbent);
        }
  
        return 0;
@@@ -3919,8 -3921,8 +3919,8 @@@ int run_tmds_table(struct drm_device *d
         */
  
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 -      int cv = bios->pub.chip_version;
 +      struct nvbios *bios = &dev_priv->vbios;
 +      int cv = bios->chip_version;
        uint16_t clktable = 0, scriptptr;
        uint32_t sel_clk_binding, sel_clk;
  
@@@ -3979,8 -3981,8 +3979,8 @@@ int get_pll_limits(struct drm_device *d
         */
  
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 -      int cv = bios->pub.chip_version, pllindex = 0;
 +      struct nvbios *bios = &dev_priv->vbios;
 +      int cv = bios->chip_version, pllindex = 0;
        uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
        uint32_t crystal_strap_mask, crystal_straps;
  
@@@ -4333,7 -4335,7 +4333,7 @@@ static void parse_bios_version(struct d
         */
  
        bios->major_version = bios->data[offset + 3];
 -      bios->pub.chip_version = bios->data[offset + 2];
 +      bios->chip_version = bios->data[offset + 2];
        NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
                 bios->data[offset + 3], bios->data[offset + 2],
                 bios->data[offset + 1], bios->data[offset]);
@@@ -4403,7 -4405,7 +4403,7 @@@ static int parse_bit_A_tbl_entry(struc
        }
  
        /* First entry is normal dac, 2nd tv-out perhaps? */
 -      bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
 +      bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
  
        return 0;
  }
@@@ -4527,8 -4529,8 +4527,8 @@@ static int parse_bit_i_tbl_entry(struc
                return -ENOSYS;
        }
  
 -      bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
 -      bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
 +      bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
 +      bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
  
        return 0;
  }
@@@ -4797,11 -4799,11 +4797,11 @@@ static int parse_bmp_structure(struct d
        uint16_t legacy_scripts_offset, legacy_i2c_offset;
  
        /* load needed defaults in case we can't parse this info */
 -      bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
 -      bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
 -      bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
 -      bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
 -      bios->pub.digital_min_front_porch = 0x4b;
 +      bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
 +      bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
 +      bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
 +      bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
 +      bios->digital_min_front_porch = 0x4b;
        bios->fmaxvco = 256000;
        bios->fminvco = 128000;
        bios->fp.duallink_transition_clk = 90000;
        bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
        bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
        bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
 -      bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
 -      bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
 -      bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
 -      bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
 +      bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
 +      bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
 +      bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
 +      bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
  
        if (bmplength > 74) {
                bios->fmaxvco = ROM32(bmp[67]);
@@@ -4985,8 -4987,7 +4985,8 @@@ read_dcb_i2c_entry(struct drm_device *d
                else
                        NV_WARN(dev,
                                "DCB I2C table has more entries than indexable "
 -                              "(%d entries, max index 15)\n", i2ctable[2]);
 +                              "(%d entries, max %d)\n", i2ctable[2],
 +                              DCB_MAX_NUM_I2C_ENTRIES);
                entry_len = i2ctable[3];
                /* [4] is i2c_default_indices, read in parse_dcb_table() */
        }
  
        if (index == 0xf)
                return 0;
 -      if (index > i2c_entries) {
 -              NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n",
 +      if (index >= i2c_entries) {
 +              NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
                         index, i2ctable[2]);
                return -ENOENT;
        }
  static struct dcb_gpio_entry *
  new_gpio_entry(struct nvbios *bios)
  {
 -      struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
 +      struct dcb_gpio_table *gpio = &bios->dcb.gpio;
  
        return &gpio->entry[gpio->entries++];
  }
@@@ -5047,14 -5048,14 +5047,14 @@@ struct dcb_gpio_entry 
  nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        int i;
  
 -      for (i = 0; i < bios->bdcb.gpio.entries; i++) {
 -              if (bios->bdcb.gpio.entry[i].tag != tag)
 +      for (i = 0; i < bios->dcb.gpio.entries; i++) {
 +              if (bios->dcb.gpio.entry[i].tag != tag)
                        continue;
  
 -              return &bios->bdcb.gpio.entry[i];
 +              return &bios->dcb.gpio.entry[i];
        }
  
        return NULL;
@@@ -5102,7 -5103,7 +5102,7 @@@ static voi
  parse_dcb_gpio_table(struct nvbios *bios)
  {
        struct drm_device *dev = bios->dev;
 -      uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
 +      uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
        uint8_t *gpio_table = &bios->data[gpio_table_ptr];
        int header_len = gpio_table[1],
            entries = gpio_table[2],
        void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
        int i;
  
 -      if (bios->bdcb.version >= 0x40) {
 +      if (bios->dcb.version >= 0x40) {
                if (gpio_table_ptr && entry_len != 4) {
                        NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
                        return;
  
                parse_entry = parse_dcb40_gpio_entry;
  
 -      } else if (bios->bdcb.version >= 0x30) {
 +      } else if (bios->dcb.version >= 0x30) {
                if (gpio_table_ptr && entry_len != 2) {
                        NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
                        return;
  
                parse_entry = parse_dcb30_gpio_entry;
  
 -      } else if (bios->bdcb.version >= 0x22) {
 +      } else if (bios->dcb.version >= 0x22) {
                /*
                 * DCBs older than v3.0 don't really have a GPIO
                 * table, instead they keep some GPIO info at fixed
@@@ -5160,67 -5161,30 +5160,67 @@@ struct dcb_connector_table_entry 
  nouveau_bios_connector_entry(struct drm_device *dev, int index)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        struct dcb_connector_table_entry *cte;
  
 -      if (index >= bios->bdcb.connector.entries)
 +      if (index >= bios->dcb.connector.entries)
                return NULL;
  
 -      cte = &bios->bdcb.connector.entry[index];
 +      cte = &bios->dcb.connector.entry[index];
        if (cte->type == 0xff)
                return NULL;
  
        return cte;
  }
  
 +static enum dcb_connector_type
 +divine_connector_type(struct nvbios *bios, int index)
 +{
 +      struct dcb_table *dcb = &bios->dcb;
 +      unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
 +      int i;
 +
 +      for (i = 0; i < dcb->entries; i++) {
 +              if (dcb->entry[i].connector == index)
 +                      encoders |= (1 << dcb->entry[i].type);
 +      }
 +
 +      if (encoders & (1 << OUTPUT_DP)) {
 +              if (encoders & (1 << OUTPUT_TMDS))
 +                      type = DCB_CONNECTOR_DP;
 +              else
 +                      type = DCB_CONNECTOR_eDP;
 +      } else
 +      if (encoders & (1 << OUTPUT_TMDS)) {
 +              if (encoders & (1 << OUTPUT_ANALOG))
 +                      type = DCB_CONNECTOR_DVI_I;
 +              else
 +                      type = DCB_CONNECTOR_DVI_D;
 +      } else
 +      if (encoders & (1 << OUTPUT_ANALOG)) {
 +              type = DCB_CONNECTOR_VGA;
 +      } else
 +      if (encoders & (1 << OUTPUT_LVDS)) {
 +              type = DCB_CONNECTOR_LVDS;
 +      } else
 +      if (encoders & (1 << OUTPUT_TV)) {
 +              type = DCB_CONNECTOR_TV_0;
 +      }
 +
 +      return type;
 +}
 +
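
divine_connector_type() above ORs every encoder attached to a connector into a bitmask and pattern-matches the combinations to guess a connector type. The same technique in miniature, with made-up encoder and connector constants in place of the DCB ones:

#include <stdio.h>

enum enc  { ENC_ANALOG, ENC_TMDS, ENC_DP };
enum conn { CONN_NONE, CONN_VGA, CONN_DVI_D, CONN_DVI_I, CONN_DP };

static enum conn guess(const enum enc *encs, int n)
{
        unsigned int mask = 0;
        int i;

        for (i = 0; i < n; i++)
                mask |= 1u << encs[i];            /* collect encoder types */

        if (mask & (1u << ENC_DP))
                return CONN_DP;
        if (mask & (1u << ENC_TMDS))              /* digital, maybe dual */
                return (mask & (1u << ENC_ANALOG)) ? CONN_DVI_I : CONN_DVI_D;
        if (mask & (1u << ENC_ANALOG))
                return CONN_VGA;
        return CONN_NONE;
}

int main(void)
{
        enum enc dvi_i[] = { ENC_TMDS, ENC_ANALOG };
        printf("%d\n", guess(dvi_i, 2));          /* 3 == CONN_DVI_I */
        return 0;
}
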
  static void
  parse_dcb_connector_table(struct nvbios *bios)
  {
        struct drm_device *dev = bios->dev;
 -      struct dcb_connector_table *ct = &bios->bdcb.connector;
 +      struct dcb_connector_table *ct = &bios->dcb.connector;
        struct dcb_connector_table_entry *cte;
 -      uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
 +      uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
        uint8_t *entry;
        int i;
  
 -      if (!bios->bdcb.connector_table_ptr) {
 +      if (!bios->dcb.connector_table_ptr) {
                NV_DEBUG_KMS(dev, "No DCB connector table present\n");
                return;
        }
                        cte->entry = ROM16(entry[0]);
                else
                        cte->entry = ROM32(entry[0]);
 +
                cte->type  = (cte->entry & 0x000000ff) >> 0;
                cte->index = (cte->entry & 0x00000f00) >> 8;
                switch (cte->entry & 0x00033000) {
  
                NV_INFO(dev, "  %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
                        i, cte->entry, cte->type, cte->index, cte->gpio_tag);
 +
  +              /* check for known types, fall back to guessing the type
 +               * from attached encoders if we hit an unknown.
 +               */
 +              switch (cte->type) {
 +              case DCB_CONNECTOR_VGA:
 +              case DCB_CONNECTOR_TV_0:
 +              case DCB_CONNECTOR_TV_1:
 +              case DCB_CONNECTOR_TV_3:
 +              case DCB_CONNECTOR_DVI_I:
 +              case DCB_CONNECTOR_DVI_D:
 +              case DCB_CONNECTOR_LVDS:
 +              case DCB_CONNECTOR_DP:
 +              case DCB_CONNECTOR_eDP:
 +              case DCB_CONNECTOR_HDMI_0:
 +              case DCB_CONNECTOR_HDMI_1:
 +                      break;
 +              default:
 +                      cte->type = divine_connector_type(bios, cte->index);
  +                      NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
 +                      break;
 +              }
 +
        }
  }
  
 -static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
 +static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
  {
        struct dcb_entry *entry = &dcb->entry[dcb->entries];
  
        return entry;
  }
  
 -static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
 +static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
  {
        struct dcb_entry *entry = new_dcb_entry(dcb);
  
        /* "or" mostly unused in early gen crt modesetting, 0 is fine */
  }
  
 -static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
 +static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
  {
        struct dcb_entry *entry = new_dcb_entry(dcb);
  
  #endif
  }
  
 -static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
 +static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
  {
        struct dcb_entry *entry = new_dcb_entry(dcb);
  
  }
  
  static bool
 -parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
 +parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
                  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
  {
        entry->type = conn & 0xf;
        entry->i2c_index = (conn >> 4) & 0xf;
        entry->heads = (conn >> 8) & 0xf;
 -      if (bdcb->version >= 0x40)
 +      if (dcb->version >= 0x40)
                entry->connector = (conn >> 12) & 0xf;
        entry->bus = (conn >> 16) & 0xf;
        entry->location = (conn >> 20) & 0x3;
                 * Although the rest of a CRT conf dword is usually
                 * zeros, mac biosen have stuff there so we must mask
                 */
 -              entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
 +              entry->crtconf.maxfreq = (dcb->version < 0x30) ?
                                         (conf & 0xffff) * 10 :
                                         (conf & 0xff) * 10000;
                break;
                uint32_t mask;
                if (conf & 0x1)
                        entry->lvdsconf.use_straps_for_mode = true;
 -              if (bdcb->version < 0x22) {
 +              if (dcb->version < 0x22) {
                        mask = ~0xd;
                        /*
                         * The laptop in bug 14567 lies and claims to not use
                         * Until we even try to use these on G8x, it's
                         * useless reporting unknown bits.  They all are.
                         */
 -                      if (bdcb->version >= 0x40)
 +                      if (dcb->version >= 0x40)
                                break;
  
                        NV_ERROR(dev, "Unknown LVDS configuration bits, "
                }
        case OUTPUT_TV:
        {
 -              if (bdcb->version >= 0x30)
 +              if (dcb->version >= 0x30)
                        entry->tvconf.has_component_output = conf & (0x8 << 4);
                else
                        entry->tvconf.has_component_output = false;
                break;
        case 0xe:
                /* weird g80 mobile type that "nv" treats as a terminator */
 -              bdcb->dcb.entries--;
 +              dcb->entries--;
                return false;
 +      default:
 +              break;
        }
  
        /* unsure what DCB version introduces this, 3.0? */
  }
  
  static bool
 -parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
 +parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
                  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
  {
        switch (conn & 0x0000000f) {
        return true;
  }
  
 -static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
 +static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
                            uint32_t conn, uint32_t conf)
  {
 -      struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
 +      struct dcb_entry *entry = new_dcb_entry(dcb);
        bool ret;
  
 -      if (bdcb->version >= 0x20)
 -              ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
 +      if (dcb->version >= 0x20)
 +              ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
        else
 -              ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
 +              ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
        if (!ret)
                return ret;
  
 -      read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
 -                         entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
 +      read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
 +                         entry->i2c_index, &dcb->i2c[entry->i2c_index]);
  
        return true;
  }
  
  static
 -void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
 +void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
  {
        /*
         * DCB v2.0 lists each output combination separately.
@@@ -5599,7 -5537,8 +5599,7 @@@ static in
  parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct bios_parsed_dcb *bdcb = &bios->bdcb;
 -      struct parsed_dcb *dcb;
 +      struct dcb_table *dcb = &bios->dcb;
        uint16_t dcbptr = 0, i2ctabptr = 0;
        uint8_t *dcbtable;
        uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
        int recordlength = 8, confofs = 4;
        int i;
  
 -      dcb = bios->pub.dcb = &bdcb->dcb;
 -      dcb->entries = 0;
 -
        /* get the offset from 0x36 */
        if (dev_priv->card_type > NV_04) {
                dcbptr = ROM16(bios->data[0x36]);
        dcbtable = &bios->data[dcbptr];
  
        /* get DCB version */
 -      bdcb->version = dcbtable[0];
 +      dcb->version = dcbtable[0];
        NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
 -               bdcb->version >> 4, bdcb->version & 0xf);
 +               dcb->version >> 4, dcb->version & 0xf);
  
 -      if (bdcb->version >= 0x20) { /* NV17+ */
 +      if (dcb->version >= 0x20) { /* NV17+ */
                uint32_t sig;
  
 -              if (bdcb->version >= 0x30) { /* NV40+ */
 +              if (dcb->version >= 0x30) { /* NV40+ */
                        headerlen = dcbtable[1];
                        entries = dcbtable[2];
                        recordlength = dcbtable[3];
                        i2ctabptr = ROM16(dcbtable[4]);
                        sig = ROM32(dcbtable[6]);
 -                      bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
 -                      bdcb->connector_table_ptr = ROM16(dcbtable[20]);
 +                      dcb->gpio_table_ptr = ROM16(dcbtable[10]);
 +                      dcb->connector_table_ptr = ROM16(dcbtable[20]);
                } else {
                        i2ctabptr = ROM16(dcbtable[2]);
                        sig = ROM32(dcbtable[4]);
                                        "signature (%08X)\n", sig);
                        return -EINVAL;
                }
 -      } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
 +      } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
                char sig[8] = { 0 };
  
                strncpy(sig, (char *)&dcbtable[-7], 7);
        if (!i2ctabptr)
                NV_WARN(dev, "No pointer to DCB I2C port table\n");
        else {
 -              bdcb->i2c_table = &bios->data[i2ctabptr];
 -              if (bdcb->version >= 0x30)
 -                      bdcb->i2c_default_indices = bdcb->i2c_table[4];
 +              dcb->i2c_table = &bios->data[i2ctabptr];
 +              if (dcb->version >= 0x30)
 +                      dcb->i2c_default_indices = dcb->i2c_table[4];
        }
  
 -      parse_dcb_gpio_table(bios);
 -      parse_dcb_connector_table(bios);
 -
        if (entries > DCB_MAX_NUM_ENTRIES)
                entries = DCB_MAX_NUM_ENTRIES;
  
                NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
                             dcb->entries, connection, config);
  
 -              if (!parse_dcb_entry(dev, bdcb, connection, config))
 +              if (!parse_dcb_entry(dev, dcb, connection, config))
                        break;
        }
  
         * apart for v2.1+ not being known for requiring merging, this
         * guarantees dcbent->index is the index of the entry in the rom image
         */
 -      if (bdcb->version < 0x21)
 +      if (dcb->version < 0x21)
                merge_like_dcb_entries(dev, dcb);
  
 -      return dcb->entries ? 0 : -ENXIO;
 +      if (!dcb->entries)
 +              return -ENXIO;
 +
 +      parse_dcb_gpio_table(bios);
 +      parse_dcb_connector_table(bios);
 +      return 0;
  }
  
  static void
  fixup_legacy_connector(struct nvbios *bios)
  {
 -      struct bios_parsed_dcb *bdcb = &bios->bdcb;
 -      struct parsed_dcb *dcb = &bdcb->dcb;
 -      int high = 0, i;
 +      struct dcb_table *dcb = &bios->dcb;
 +      int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
  
        /*
         * DCB 3.0 also has the table in most cases, but there are some cards
         * indices are all 0.  We don't need the connector indices on pre-G80
         * chips (yet?) so limit the use to DCB 4.0 and above.
         */
 -      if (bdcb->version >= 0x40)
 +      if (dcb->version >= 0x40)
                return;
  
 +      dcb->connector.entries = 0;
 +
        /*
         * No known connector info before v3.0, so make it up.  The rule here
         * is: anything on the same i2c bus is considered to be on the same
         * its own unique connector index.
         */
        for (i = 0; i < dcb->entries; i++) {
 -              if (dcb->entry[i].i2c_index == 0xf)
 -                      continue;
 -
                /*
                 * Ignore the I2C index for on-chip TV-out, as there
                 * are cards with bogus values (nv31m in bug 23212),
                 * and it's otherwise useless.
                 */
                if (dcb->entry[i].type == OUTPUT_TV &&
 -                  dcb->entry[i].location == DCB_LOC_ON_CHIP) {
 +                  dcb->entry[i].location == DCB_LOC_ON_CHIP)
                        dcb->entry[i].i2c_index = 0xf;
 +              i2c = dcb->entry[i].i2c_index;
 +
 +              if (i2c_conn[i2c]) {
 +                      dcb->entry[i].connector = i2c_conn[i2c] - 1;
                        continue;
                }
  
 -              dcb->entry[i].connector = dcb->entry[i].i2c_index;
 -              if (dcb->entry[i].connector > high)
 -                      high = dcb->entry[i].connector;
 +              dcb->entry[i].connector = dcb->connector.entries++;
 +              if (i2c != 0xf)
 +                      i2c_conn[i2c] = dcb->connector.entries;
        }
  
 -      for (i = 0; i < dcb->entries; i++) {
 -              if (dcb->entry[i].i2c_index != 0xf)
 -                      continue;
 -
 -              dcb->entry[i].connector = ++high;
 +      /* Fake the connector table as well as just connector indices */
 +      for (i = 0; i < dcb->connector.entries; i++) {
 +              dcb->connector.entry[i].index = i;
 +              dcb->connector.entry[i].type = divine_connector_type(bios, i);
 +              dcb->connector.entry[i].gpio_tag = 0xff;
        }
  }
  
  static void
  fixup_legacy_i2c(struct nvbios *bios)
  {
 -      struct parsed_dcb *dcb = &bios->bdcb.dcb;
 +      struct dcb_table *dcb = &bios->dcb;
        int i;
  
        for (i = 0; i < dcb->entries; i++) {
@@@ -5891,7 -5829,7 +5891,7 @@@ static int load_nv17_hw_sequencer_ucode
  uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        const uint8_t edid_sig[] = {
                        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
        uint16_t offset = 0;
@@@ -5924,23 -5862,20 +5924,23 @@@ nouveau_bios_run_init_table(struct drm_
                            struct dcb_entry *dcbent)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        struct init_exec iexec = { true, false };
  
 +      mutex_lock(&bios->lock);
        bios->display.output = dcbent;
        parse_init_table(bios, table, &iexec);
        bios->display.output = NULL;
 +      mutex_unlock(&bios->lock);
  }
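
nouveau_bios_run_init_table() above now serializes on bios->lock because the table parser communicates through shared state (display.output). A compact sketch of publishing per-call context under a mutex; struct parser and its fields are illustrative, not the driver's types:

#include <pthread.h>
#include <stdio.h>

struct parser {
        pthread_mutex_t lock;
        const void *output;        /* scratch consumed by the table walk */
};

static void parse_tables(struct parser *p) { (void)p; }

static void run_init_table(struct parser *p, const void *ent)
{
        pthread_mutex_lock(&p->lock);
        p->output = ent;           /* publish context for this parse */
        parse_tables(p);
        p->output = NULL;          /* clear it before other callers run */
        pthread_mutex_unlock(&p->lock);
}

int main(void)
{
        struct parser p = { .lock = PTHREAD_MUTEX_INITIALIZER };
        int ent;

        run_init_table(&p, &ent);
        puts("ok");
        return 0;
}
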
  
  static bool NVInitVBIOS(struct drm_device *dev)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
  
        memset(bios, 0, sizeof(struct nvbios));
 +      mutex_init(&bios->lock);
        bios->dev = dev;
  
        if (!NVShadowVBIOS(dev, bios->data))
  static int nouveau_parse_vbios_struct(struct drm_device *dev)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
        const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
        int offset;
@@@ -5980,7 -5915,7 +5980,7 @@@ in
  nouveau_run_vbios_init(struct drm_device *dev)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        int i, ret = 0;
  
        NVLockVgaCrtcs(dev, false);
        }
  
        if (dev_priv->card_type >= NV_50) {
 -              for (i = 0; i < bios->bdcb.dcb.entries; i++) {
 +              for (i = 0; i < bios->dcb.entries; i++) {
                        nouveau_bios_run_display_table(dev,
 -                                                     &bios->bdcb.dcb.entry[i],
 +                                                     &bios->dcb.entry[i],
                                                       0, 0);
                }
        }
@@@ -6027,11 -5962,11 +6027,11 @@@ static voi
  nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        struct dcb_i2c_entry *entry;
        int i;
  
 -      entry = &bios->bdcb.dcb.i2c[0];
 +      entry = &bios->dcb.i2c[0];
        for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
                nouveau_i2c_fini(dev, entry);
  }
  nouveau_bios_init(struct drm_device *dev)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 -      struct nvbios *bios = &dev_priv->VBIOS;
 +      struct nvbios *bios = &dev_priv->vbios;
        uint32_t saved_nv_pextdev_boot_0;
        bool was_locked;
        int ret;
  
 -      dev_priv->vbios = &bios->pub;
 -
        if (!NVInitVBIOS(dev))
                return -ENODEV;
  
        bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
  
        ret = nouveau_run_vbios_init(dev);
 -      if (ret) {
 -              dev_priv->vbios = NULL;
 +      if (ret)
                return ret;
 -      }
  
        /* feature_byte on BMP is poor, but init always sets CR4B */
        was_locked = NVLockVgaCrtcs(dev, false);
index 5f8d987af3631a7fb06e612ae800bcff57de8bae,23664058690f7c494954f1ec43a43871f1e7fd26..4b9aaf2a8d0f0052bbafd498998e2c71b1e38434
@@@ -34,7 -34,7 +34,7 @@@
  
  #define DRIVER_MAJOR          0
  #define DRIVER_MINOR          0
 -#define DRIVER_PATCHLEVEL     15
 +#define DRIVER_PATCHLEVEL     16
  
  #define NOUVEAU_FAMILY   0x0000FFFF
  #define NOUVEAU_FLAGS    0xFFFF0000
@@@ -83,7 -83,6 +83,7 @@@ struct nouveau_bo 
        struct drm_file *reserved_by;
        struct list_head entry;
        int pbbo_index;
 +      bool validate_mapped;
  
        struct nouveau_channel *channel;
  
@@@ -240,11 -239,6 +240,11 @@@ struct nouveau_channel 
                int cur;
                int put;
                /* access via pushbuf_bo */
 +
 +              int ib_base;
 +              int ib_max;
 +              int ib_free;
 +              int ib_put;
        } dma;
  
        uint32_t sw_subchannel[8];
@@@ -539,9 -533,6 +539,9 @@@ struct drm_nouveau_private 
        struct nouveau_engine engine;
        struct nouveau_channel *channel;
  
 +      /* For PFIFO and PGRAPH. */
 +      spinlock_t context_switch_lock;
 +
        /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
        struct nouveau_gpuobj *ramht;
        uint32_t ramin_rsvd_vram;
        uint32_t ramro_offset;
        uint32_t ramro_size;
  
-       /* base physical adresses */
+       /* base physical addresses */
        uint64_t fb_phys;
        uint64_t fb_available_size;
        uint64_t fb_mappable_pages;
        uint64_t vm_end;
        struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
        int vm_vram_pt_nr;
 +      uint64_t vram_sys_base;
  
        /* the mtrr covering the FB */
        int fb_mtrr;
  
        struct list_head gpuobj_list;
  
 -      struct nvbios VBIOS;
 -      struct nouveau_bios_info *vbios;
 +      struct nvbios vbios;
  
        struct nv04_mode_state mode_reg;
        struct nv04_mode_state saved_reg;
        } susres;
  
        struct backlight_device *backlight;
 -      bool acpi_dsm;
  
        struct nouveau_channel *evo;
  
@@@ -686,11 -678,6 +686,11 @@@ extern int nouveau_reg_debug
  extern char *nouveau_vbios;
  extern int nouveau_ctxfw;
  extern int nouveau_ignorelid;
 +extern int nouveau_nofbaccel;
 +extern int nouveau_noaccel;
 +
 +extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 +extern int nouveau_pci_resume(struct pci_dev *pdev);
  
  /* nouveau_state.c */
  extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
@@@ -706,6 -693,12 +706,6 @@@ extern bool nouveau_wait_until(struct d
                               uint32_t reg, uint32_t mask, uint32_t val);
  extern bool nouveau_wait_for_idle(struct drm_device *);
  extern int  nouveau_card_init(struct drm_device *);
 -extern int  nouveau_ioctl_card_init(struct drm_device *, void *data,
 -                                  struct drm_file *);
 -extern int  nouveau_ioctl_suspend(struct drm_device *, void *data,
 -                                struct drm_file *);
 -extern int  nouveau_ioctl_resume(struct drm_device *, void *data,
 -                               struct drm_file *);
  
  /* nouveau_mem.c */
  extern int  nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@@@ -849,15 -842,21 +849,15 @@@ nouveau_debugfs_channel_fini(struct nou
  /* nouveau_dma.c */
  extern void nouveau_dma_pre_init(struct nouveau_channel *);
  extern int  nouveau_dma_init(struct nouveau_channel *);
 -extern int  nouveau_dma_wait(struct nouveau_channel *, int size);
 +extern int  nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
  
  /* nouveau_acpi.c */
 -#ifdef CONFIG_ACPI
 -extern int nouveau_hybrid_setup(struct drm_device *dev);
 -extern bool nouveau_dsm_probe(struct drm_device *dev);
 +#if defined(CONFIG_ACPI)
 +void nouveau_register_dsm_handler(void);
 +void nouveau_unregister_dsm_handler(void);
  #else
 -static inline int nouveau_hybrid_setup(struct drm_device *dev)
 -{
 -      return 0;
 -}
 -static inline bool nouveau_dsm_probe(struct drm_device *dev)
 -{
 -      return false;
 -}
 +static inline void nouveau_register_dsm_handler(void) {}
 +static inline void nouveau_unregister_dsm_handler(void) {}
  #endif
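
The ACPI block above pairs the real prototypes with empty static inline stubs, so callers compile identically whether or not CONFIG_ACPI is set. A compact sketch of the idiom behind a hypothetical HAVE_FEATURE switch:

#include <stdio.h>

/* #define HAVE_FEATURE 1 */       /* normally toggled by the build system */

#if defined(HAVE_FEATURE)
void feature_register(void);       /* real implementations live elsewhere */
void feature_unregister(void);
#else
/* stubs: callers need no #ifdef and the calls compile away entirely */
static inline void feature_register(void) {}
static inline void feature_unregister(void) {}
#endif

int main(void)
{
        feature_register();
        feature_unregister();
        puts("built without the feature; stubs were used");
        return 0;
}
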
  
  /* nouveau_backlight.c */
@@@ -1025,7 -1024,6 +1025,7 @@@ extern void nv50_graph_destroy_context(
  extern int  nv50_graph_load_context(struct nouveau_channel *);
  extern int  nv50_graph_unload_context(struct drm_device *);
  extern void nv50_graph_context_switch(struct drm_device *);
 +extern int  nv50_grctx_init(struct nouveau_grctx *);
  
  /* nouveau_grctx.c */
  extern int  nouveau_grctx_prog_load(struct drm_device *);
@@@ -1151,6 -1149,16 +1151,6 @@@ extern int nouveau_gem_ioctl_new(struc
                                 struct drm_file *);
  extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
                                     struct drm_file *);
 -extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
 -                                        struct drm_file *);
 -extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
 -                                         struct drm_file *);
 -extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
 -                               struct drm_file *);
 -extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
 -                                 struct drm_file *);
 -extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
 -                                struct drm_file *);
  extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
                                      struct drm_file *);
  extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
index 3c32f840dcd2294b3ee5d0af9407f21c9354209e,1982a87386a109b317036d898109d47c6a4ed413..40ab6d9c3736ab754f07f55201eaa5252048df1f
@@@ -29,7 -29,6 +29,7 @@@
  
  #include "drmP.h"
  #include "drm.h"
 +#include "drm_buffer.h"
  #include "drm_sarea.h"
  #include "radeon_drm.h"
  #include "radeon_drv.h"
@@@ -92,27 -91,21 +92,27 @@@ static __inline__ int radeon_check_and_
  static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
                                                     dev_priv,
                                                     struct drm_file *file_priv,
 -                                                   int id, u32 *data)
 +                                                   int id, struct drm_buffer *buf)
  {
 +      u32 *data;
        switch (id) {
  
        case RADEON_EMIT_PP_MISC:
 -              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
 -                  &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
 +              data = drm_buffer_pointer_to_dword(buf,
 +                      (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
 +
 +              if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
                        DRM_ERROR("Invalid depth buffer offset\n");
                        return -EINVAL;
                }
 +              dev_priv->have_z_offset = 1;
                break;
  
        case RADEON_EMIT_PP_CNTL:
 -              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
 -                  &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
 +              data = drm_buffer_pointer_to_dword(buf,
 +                      (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
 +
 +              if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
                        DRM_ERROR("Invalid colour buffer offset\n");
                        return -EINVAL;
                }
        case R200_EMIT_PP_TXOFFSET_3:
        case R200_EMIT_PP_TXOFFSET_4:
        case R200_EMIT_PP_TXOFFSET_5:
 -              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
 -                                                &data[0])) {
 +              data = drm_buffer_pointer_to_dword(buf, 0);
 +              if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
                        DRM_ERROR("Invalid R200 texture offset\n");
                        return -EINVAL;
                }
        case RADEON_EMIT_PP_TXFILTER_0:
        case RADEON_EMIT_PP_TXFILTER_1:
        case RADEON_EMIT_PP_TXFILTER_2:
 -              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
 -                  &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
 +              data = drm_buffer_pointer_to_dword(buf,
 +                      (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
 +              if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
                        DRM_ERROR("Invalid R100 texture offset\n");
                        return -EINVAL;
                }
        case R200_EMIT_PP_CUBIC_OFFSETS_5:{
                        int i;
                        for (i = 0; i < 5; i++) {
 +                              data = drm_buffer_pointer_to_dword(buf, i);
                                if (radeon_check_and_fixup_offset(dev_priv,
                                                                  file_priv,
 -                                                                &data[i])) {
 +                                                                data)) {
                                        DRM_ERROR
                                            ("Invalid R200 cubic texture offset\n");
                                        return -EINVAL;
        case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
                        int i;
                        for (i = 0; i < 5; i++) {
 +                              data = drm_buffer_pointer_to_dword(buf, i);
                                if (radeon_check_and_fixup_offset(dev_priv,
                                                                  file_priv,
 -                                                                &data[i])) {
 +                                                                data)) {
                                        DRM_ERROR
                                            ("Invalid R100 cubic texture offset\n");
                                        return -EINVAL;
@@@ -279,24 -269,23 +279,24 @@@ static __inline__ int radeon_check_and_
                                                     cmdbuf,
                                                     unsigned int *cmdsz)
  {
 -      u32 *cmd = (u32 *) cmdbuf->buf;
 +      u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
        u32 offset, narrays;
        int count, i, k;
  
 -      *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
 +      count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
 +      *cmdsz = 2 + count;
  
 -      if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
 +      if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
                DRM_ERROR("Not a type 3 packet\n");
                return -EINVAL;
        }
  
 -      if (4 * *cmdsz > cmdbuf->bufsz) {
 +      if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
                DRM_ERROR("Packet size larger than size of data provided\n");
                return -EINVAL;
        }
  
 -      switch(cmd[0] & 0xff00) {
 +      switch (*cmd & 0xff00) {
        /* XXX Are there old drivers needing other packets? */
  
        case RADEON_3D_DRAW_IMMD:
                break;
  
        case RADEON_3D_LOAD_VBPNTR:
 -              count = (cmd[0] >> 16) & 0x3fff;
  
                if (count > 18) { /* 12 arrays max */
                        DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
                }
  
                /* carefully check packet contents */
 -              narrays = cmd[1] & ~0xc000;
 +              cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
 +
 +              narrays = *cmd & ~0xc000;
                k = 0;
                i = 2;
                while ((k < narrays) && (i < (count + 2))) {
                        i++;            /* skip attribute field */
 +                      cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
                        if (radeon_check_and_fixup_offset(dev_priv, file_priv,
 -                                                        &cmd[i])) {
 +                                                        cmd)) {
                                DRM_ERROR
                                    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
                                     k, i);
                        if (k == narrays)
                                break;
                        /* have one more to process, they come in pairs */
 +                      cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
 +
                        if (radeon_check_and_fixup_offset(dev_priv,
 -                                                        file_priv, &cmd[i]))
 +                                                        file_priv, cmd))
                        {
                                DRM_ERROR
                                    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
                        DRM_ERROR("Invalid 3d packet for r200-class chip\n");
                        return -EINVAL;
                }
 -              if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
 +
 +              cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
 +              if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
                                DRM_ERROR("Invalid rndr_gen_indx offset\n");
                                return -EINVAL;
                }
                        DRM_ERROR("Invalid 3d packet for r100-class chip\n");
                        return -EINVAL;
                }
 -              if ((cmd[1] & 0x8000ffff) != 0x80000810) {
 -                      DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
 +
 +              cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
 +              if ((*cmd & 0x8000ffff) != 0x80000810) {
 +                      DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
                        return -EINVAL;
                }
 -              if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
 -                      DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
 +              cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
 +              if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
 +                      DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
                        return -EINVAL;
                }
                break;
        case RADEON_CNTL_PAINT_MULTI:
        case RADEON_CNTL_BITBLT_MULTI:
                /* MSB of opcode: next DWORD GUI_CNTL */
 -              if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
 +              cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
 +              if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
                              | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
 -                      offset = cmd[2] << 10;
 +                      u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
 +                      offset = *cmd2 << 10;
                        if (radeon_check_and_fixup_offset
                            (dev_priv, file_priv, &offset)) {
                                DRM_ERROR("Invalid first packet offset\n");
                                return -EINVAL;
                        }
 -                      cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
 +                      *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
                }
  
 -              if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
 -                  (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
 -                      offset = cmd[3] << 10;
 +              if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
 +                  (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
 +                      u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
 +                      offset = *cmd3 << 10;
                        if (radeon_check_and_fixup_offset
                            (dev_priv, file_priv, &offset)) {
                                DRM_ERROR("Invalid second packet offset\n");
                                return -EINVAL;
                        }
 -                      cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
 +                      *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
                }
                break;
  
        default:
 -              DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
 +              DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
                return -EINVAL;
        }
  
@@@ -899,11 -876,6 +899,11 @@@ static void radeon_cp_dispatch_clear(st
                if (tmp & RADEON_BACK)
                        flags |= RADEON_FRONT;
        }
 +      if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
 +              if (!dev_priv->have_z_offset)
 +                      printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
 +              flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
 +      }
  
        if (flags & (RADEON_FRONT | RADEON_BACK)) {
  
                                        /* judging by the first tile offset needed, could possibly
                                           directly address/clear 4x4 tiles instead of 8x2 * 4x4
                                           macro tiles, though would still need clear mask for
-                                          right/bottom if truely 4x4 granularity is desired ? */
+                                          right/bottom if truly 4x4 granularity is desired ? */
                                        OUT_RING(tileoffset * 16);
                                        /* the number of tiles to clear */
                                        OUT_RING(nrtilesx + 1);
@@@ -2639,6 -2611,7 +2639,6 @@@ static int radeon_emit_packets(drm_rade
  {
        int id = (int)header.packet.packet_id;
        int sz, reg;
 -      int *data = (int *)cmdbuf->buf;
        RING_LOCALS;
  
        if (id >= RADEON_MAX_STATE_PACKETS)
        sz = packet[id].len;
        reg = packet[id].start;
  
 -      if (sz * sizeof(int) > cmdbuf->bufsz) {
 +      if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
                DRM_ERROR("Packet size provided larger than data provided\n");
                return -EINVAL;
        }
  
 -      if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
 +      if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
 +                              cmdbuf->buffer)) {
                DRM_ERROR("Packet verification failed\n");
                return -EINVAL;
        }
  
        BEGIN_RING(sz + 1);
        OUT_RING(CP_PACKET0(reg, (sz - 1)));
 -      OUT_RING_TABLE(data, sz);
 +      OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
  
 -      cmdbuf->buf += sz * sizeof(int);
 -      cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2679,8 -2653,10 +2679,8 @@@ static __inline__ int radeon_emit_scala
        OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
        OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
        OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
 -      OUT_RING_TABLE(cmdbuf->buf, sz);
 +      OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
 -      cmdbuf->buf += sz * sizeof(int);
 -      cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2699,8 -2675,10 +2699,8 @@@ static __inline__ int radeon_emit_scala
        OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
        OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
        OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
 -      OUT_RING_TABLE(cmdbuf->buf, sz);
 +      OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
 -      cmdbuf->buf += sz * sizeof(int);
 -      cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2718,9 -2696,11 +2718,9 @@@ static __inline__ int radeon_emit_vecto
        OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
        OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
        OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
 -      OUT_RING_TABLE(cmdbuf->buf, sz);
 +      OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
  
 -      cmdbuf->buf += sz * sizeof(int);
 -      cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2734,7 -2714,7 +2734,7 @@@ static __inline__ int radeon_emit_vecli
  
          if (!sz)
                  return 0;
 -        if (sz * 4 > cmdbuf->bufsz)
 +      if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
                  return -EINVAL;
  
        BEGIN_RING(5 + sz);
        OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
        OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
        OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
 -      OUT_RING_TABLE(cmdbuf->buf, sz);
 +      OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
  
 -      cmdbuf->buf += sz * sizeof(int);
 -      cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2766,9 -2748,11 +2766,9 @@@ static int radeon_emit_packet3(struct d
        }
  
        BEGIN_RING(cmdsz);
 -      OUT_RING_TABLE(cmdbuf->buf, cmdsz);
 +      OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
        ADVANCE_RING();
  
 -      cmdbuf->buf += cmdsz * 4;
 -      cmdbuf->bufsz -= cmdsz * 4;
        return 0;
  }
  
@@@ -2821,16 -2805,16 +2821,16 @@@ static int radeon_emit_packet3_cliprect
                }
  
                BEGIN_RING(cmdsz);
 -              OUT_RING_TABLE(cmdbuf->buf, cmdsz);
 +              OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
                ADVANCE_RING();
  
        } while (++i < cmdbuf->nbox);
        if (cmdbuf->nbox == 1)
                cmdbuf->nbox = 0;
  
 +      return 0;
        out:
 -      cmdbuf->buf += cmdsz * 4;
 -      cmdbuf->bufsz -= cmdsz * 4;
 +      drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
        return 0;
  }
  
@@@ -2863,16 -2847,16 +2863,16 @@@ static int radeon_emit_wait(struct drm_
        return 0;
  }
  
 -static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
 +static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
 +              struct drm_file *file_priv)
  {
        drm_radeon_private_t *dev_priv = dev->dev_private;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf = NULL;
 +      drm_radeon_cmd_header_t stack_header;
        int idx;
        drm_radeon_kcmd_buffer_t *cmdbuf = data;
 -      drm_radeon_cmd_header_t header;
 -      int orig_nbox, orig_bufsz;
 -      char *kbuf = NULL;
 +      int orig_nbox;
  
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
         * races between checking values and using those values in other code,
         * and simply to avoid a lot of function calls to copy in data.
         */
 -      orig_bufsz = cmdbuf->bufsz;
 -      if (orig_bufsz != 0) {
 -              kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL);
 -              if (kbuf == NULL)
 -                      return -ENOMEM;
 -              if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
 -                                     cmdbuf->bufsz)) {
 -                      kfree(kbuf);
 -                      return -EFAULT;
 -              }
 -              cmdbuf->buf = kbuf;
 +      if (cmdbuf->bufsz != 0) {
 +              int rv;
 +              void __user *buffer = cmdbuf->buffer;
 +              rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
 +              if (rv)
 +                      return rv;
 +              rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
 +                                              cmdbuf->bufsz);
 +              if (rv)
 +                      return rv;
        }
  
        orig_nbox = cmdbuf->nbox;
                int temp;
                temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
  
 -              if (orig_bufsz != 0)
 -                      kfree(kbuf);
 +              if (cmdbuf->bufsz != 0)
 +                      drm_buffer_free(cmdbuf->buffer);
  
                return temp;
        }
  
        /* microcode_version != r300 */
 -      while (cmdbuf->bufsz >= sizeof(header)) {
 +      while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
  
 -              header.i = *(int *)cmdbuf->buf;
 -              cmdbuf->buf += sizeof(header);
 -              cmdbuf->bufsz -= sizeof(header);
 +              drm_radeon_cmd_header_t *header;
 +              header = drm_buffer_read_object(cmdbuf->buffer,
 +                              sizeof(stack_header), &stack_header);
  
 -              switch (header.header.cmd_type) {
 +              switch (header->header.cmd_type) {
                case RADEON_CMD_PACKET:
                        DRM_DEBUG("RADEON_CMD_PACKET\n");
                        if (radeon_emit_packets
 -                          (dev_priv, file_priv, header, cmdbuf)) {
 +                          (dev_priv, file_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_packets failed\n");
                                goto err;
                        }
  
                case RADEON_CMD_SCALARS:
                        DRM_DEBUG("RADEON_CMD_SCALARS\n");
 -                      if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
 +                      if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_scalars failed\n");
                                goto err;
                        }
  
                case RADEON_CMD_VECTORS:
                        DRM_DEBUG("RADEON_CMD_VECTORS\n");
 -                      if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
 +                      if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_vectors failed\n");
                                goto err;
                        }
  
                case RADEON_CMD_DMA_DISCARD:
                        DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
 -                      idx = header.dma.buf_idx;
 +                      idx = header->dma.buf_idx;
                        if (idx < 0 || idx >= dma->buf_count) {
                                DRM_ERROR("buffer index %d (of %d max)\n",
                                          idx, dma->buf_count - 1);
  
                case RADEON_CMD_SCALARS2:
                        DRM_DEBUG("RADEON_CMD_SCALARS2\n");
 -                      if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
 +                      if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_scalars2 failed\n");
                                goto err;
                        }
  
                case RADEON_CMD_WAIT:
                        DRM_DEBUG("RADEON_CMD_WAIT\n");
 -                      if (radeon_emit_wait(dev, header.wait.flags)) {
 +                      if (radeon_emit_wait(dev, header->wait.flags)) {
                                DRM_ERROR("radeon_emit_wait failed\n");
                                goto err;
                        }
                        break;
                case RADEON_CMD_VECLINEAR:
                        DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
 -                      if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
 +                      if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_veclinear failed\n");
                                goto err;
                        }
                        break;
  
                default:
 -                      DRM_ERROR("bad cmd_type %d at %p\n",
 -                                header.header.cmd_type,
 -                                cmdbuf->buf - sizeof(header));
 +                      DRM_ERROR("bad cmd_type %d at byte %d\n",
 +                                header->header.cmd_type,
 +                                cmdbuf->buffer->iterator);
                        goto err;
                }
        }
  
 -      if (orig_bufsz != 0)
 -              kfree(kbuf);
 +      if (cmdbuf->bufsz != 0)
 +              drm_buffer_free(cmdbuf->buffer);
  
        DRM_DEBUG("DONE\n");
        COMMIT_RING();
        return 0;
  
        err:
 -      if (orig_bufsz != 0)
 -              kfree(kbuf);
 +      if (cmdbuf->bufsz != 0)
 +              drm_buffer_free(cmdbuf->buffer);
        return -EINVAL;
  }
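
The radeon hunks above all serve one conversion: the open-coded buf/bufsz pointer arithmetic becomes the drm_buffer helpers, which keep a single internal iterator so each packet parser no longer mirrors the bookkeeping by hand. A minimal sketch of the resulting flow, using only the helper calls visible in these hunks (the function itself and its name are hypothetical, and error paths are condensed):

/* Hypothetical condensation of the new radeon_cp_cmdbuf() flow. */
static int sketch_parse_cmdbuf(drm_radeon_kcmd_buffer_t *cmdbuf,
			       void __user *user_data, int bufsz)
{
	drm_radeon_cmd_header_t stack_header, *header;
	int rv;

	/* one kernel-side copy replaces the old kmalloc + DRM_COPY_FROM_USER */
	rv = drm_buffer_alloc(&cmdbuf->buffer, bufsz);
	if (rv)
		return rv;
	rv = drm_buffer_copy_from_user(cmdbuf->buffer, user_data, bufsz);
	if (rv)
		return rv;

	while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
		/* reads one header and advances the internal iterator */
		header = drm_buffer_read_object(cmdbuf->buffer,
				sizeof(stack_header), &stack_header);
		/* ... dispatch on header->header.cmd_type as above,
		 * peeking into packets via drm_buffer_pointer_to_dword() ... */
	}

	drm_buffer_free(cmdbuf->buffer);
	return 0;
}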
  
index b54aee7cd9e352d6f1bbe7a43ce1a44b0f7e392b,33f3541aaf96d55013301e03c6e046c7ede48e9c..ff4d77c4de11f742150a7ec688c61afe222a4db2
@@@ -430,7 -430,7 +430,7 @@@ static bool i8042_filter(unsigned char 
        }
  
        if (i8042_platform_filter && i8042_platform_filter(data, str, serio)) {
-               dbg("Filtered out by platfrom filter\n");
+               dbg("Filtered out by platform filter\n");
                return true;
        }
  
@@@ -1161,17 -1161,9 +1161,17 @@@ static int i8042_pm_restore(struct devi
        return 0;
  }
  
 +static int i8042_pm_thaw(struct device *dev)
 +{
 +      i8042_interrupt(0, NULL);
 +
 +      return 0;
 +}
 +
  static const struct dev_pm_ops i8042_pm_ops = {
        .suspend        = i8042_pm_reset,
        .resume         = i8042_pm_restore,
 +      .thaw           = i8042_pm_thaw,
        .poweroff       = i8042_pm_reset,
        .restore        = i8042_pm_restore,
  };
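
The new .thaw hook fills a hole that .resume does not cover: thaw runs after the hibernation image has been written (or after a failed restore), when the controller may be holding bytes that arrived while interrupts were off, so the handler is polled once by hand to drain them. A generic sketch of the same idiom for a hypothetical driver, mirroring the irq == 0 / dev_id == NULL convention used here to mark a manual poll:

static irqreturn_t foo_interrupt(int irq, void *dev_id);

static int foo_pm_thaw(struct device *dev)
{
	foo_interrupt(0, NULL);         /* drain anything left pending */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.thaw   = foo_pm_thaw,
};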
index bc4ced6c013b1733f8b8e1ed49981ab9b035e023,e5e4c4440d3949e64dd742d5094eb52cf80a03b1..f36e11a0458d4ca7a248a68d4bc48b46daa5ce90
@@@ -38,7 -38,6 +38,7 @@@
   */
  #define MODULE_NAME "ov519"
  
 +#include <linux/input.h>
  #include "gspca.h"
  
  MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
@@@ -71,9 -70,6 +71,9 @@@ struct sd 
        char invert_led;
  #define BRIDGE_INVERT_LED     8
  
 +      char snapshot_pressed;
 +      char snapshot_needs_reset;
 +
        /* Determined by sensor type */
        __u8 sif;
  
  #define SEN_OV66308AF 5
  #define SEN_OV7610 6
  #define SEN_OV7620 7
 -#define SEN_OV7640 8
 -#define SEN_OV7670 9
 -#define SEN_OV76BE 10
 -#define SEN_OV8610 11
 +#define SEN_OV7620AE 8
 +#define SEN_OV7640 9
 +#define SEN_OV7648 10
 +#define SEN_OV7670 11
 +#define SEN_OV76BE 12
 +#define SEN_OV8610 13
  
        u8 sensor_addr;
        int sensor_width;
@@@ -145,7 -139,6 +145,7 @@@ static void setautobrightness(struct s
  static void setfreq(struct sd *sd);
  
  static const struct ctrl sd_ctrls[] = {
 +#define BRIGHTNESS_IDX 0
        {
            {
                .id      = V4L2_CID_BRIGHTNESS,
            .set = sd_setbrightness,
            .get = sd_getbrightness,
        },
 +#define CONTRAST_IDX 1
        {
            {
                .id      = V4L2_CID_CONTRAST,
            .set = sd_setcontrast,
            .get = sd_getcontrast,
        },
 +#define COLOR_IDX 2
        {
            {
                .id      = V4L2_CID_SATURATION,
@@@ -512,7 -503,7 +512,7 @@@ static const struct v4l2_pix_format ovf
  /*
   * The FX2 chip does not give us a zero length read at end of frame.
   * It does, however, give a short read at the end of a frame, if
-  * neccessary, rather than run two frames together.
+  * necessary, rather than run two frames together.
   *
   * By choosing the right bulk transfer size, we are guaranteed to always
   * get a short read for the last read of each frame.  Frame sizes are
@@@ -2563,7 -2554,7 +2563,7 @@@ static int ov7xx0_configure(struct sd *
                /* I don't know what's different about the 76BE yet. */
                if (i2c_r(sd, 0x15) & 1) {
                        PDEBUG(D_PROBE, "Sensor is an OV7620AE");
 -                      sd->sensor = SEN_OV7620;
 +                      sd->sensor = SEN_OV7620AE;
                } else {
                        PDEBUG(D_PROBE, "Sensor is an OV76BE");
                        sd->sensor = SEN_OV76BE;
                                break;
                        case 0x48:
                                PDEBUG(D_PROBE, "Sensor is an OV7648");
 -                              sd->sensor = SEN_OV7640; /* FIXME */
 +                              sd->sensor = SEN_OV7648;
                                break;
                        default:
                                PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
@@@ -2689,36 -2680,6 +2689,36 @@@ static void ov51x_led_control(struct s
        }
  }
  
 +static void sd_reset_snapshot(struct gspca_dev *gspca_dev)
 +{
 +      struct sd *sd = (struct sd *) gspca_dev;
 +
 +      if (!sd->snapshot_needs_reset)
 +              return;
 +
 +      /* Note it is important that we clear sd->snapshot_needs_reset
 +         before actually clearing the snapshot state in the bridge,
 +         otherwise we might race with the pkt_scan interrupt handler */
 +      sd->snapshot_needs_reset = 0;
 +
 +      switch (sd->bridge) {
 +      case BRIDGE_OV511:
 +      case BRIDGE_OV511PLUS:
 +              reg_w(sd, R51x_SYS_SNAP, 0x02);
 +              reg_w(sd, R51x_SYS_SNAP, 0x00);
 +              break;
 +      case BRIDGE_OV518:
 +      case BRIDGE_OV518PLUS:
 +              reg_w(sd, R51x_SYS_SNAP, 0x02); /* Reset */
 +              reg_w(sd, R51x_SYS_SNAP, 0x01); /* Enable */
 +              break;
 +      case BRIDGE_OV519:
 +              reg_w(sd, R51x_SYS_RESET, 0x40);
 +              reg_w(sd, R51x_SYS_RESET, 0x00);
 +              break;
 +      }
 +}
 +
  static int ov51x_upload_quan_tables(struct sd *sd)
  {
        const unsigned char yQuanTable511[] = {
@@@ -3154,11 -3115,7 +3154,11 @@@ static int sd_config(struct gspca_dev *
                                      (1 << OV7670_FREQ_IDX);
        }
        sd->quality = QUALITY_DEF;
 -      if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670)
 +      if (sd->sensor == SEN_OV7640 ||
 +          sd->sensor == SEN_OV7648)
 +              gspca_dev->ctrl_dis |= (1 << AUTOBRIGHT_IDX) |
 +                                     (1 << CONTRAST_IDX);
 +      if (sd->sensor == SEN_OV7670)
                gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT_IDX;
        /* OV8610 Frequency filter control should work but needs testing */
        if (sd->sensor == SEN_OV8610)
@@@ -3212,12 -3169,10 +3212,12 @@@ static int sd_init(struct gspca_dev *gs
                        return -EIO;
                break;
        case SEN_OV7620:
 +      case SEN_OV7620AE:
                if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620)))
                        return -EIO;
                break;
        case SEN_OV7640:
 +      case SEN_OV7648:
                if (write_i2c_regvals(sd, norm_7640, ARRAY_SIZE(norm_7640)))
                        return -EIO;
                break;
@@@ -3291,9 -3246,7 +3291,9 @@@ static int ov511_mode_init_regs(struct 
        /* Note once the FIXMEs in mode_init_ov_sensor_regs() are fixed
           for more sensors we need to do this for them too */
        case SEN_OV7620:
 +      case SEN_OV7620AE:
        case SEN_OV7640:
 +      case SEN_OV7648:
        case SEN_OV76BE:
                if (sd->gspca_dev.width == 320)
                        interlaced = 1;
@@@ -3424,7 -3377,7 +3424,7 @@@ static int ov518_mode_init_regs(struct 
  
        if (sd->bridge == BRIDGE_OV518PLUS) {
                switch (sd->sensor) {
 -              case SEN_OV7620:
 +              case SEN_OV7620AE:
                        if (sd->gspca_dev.width == 320) {
                                reg_w(sd, 0x20, 0x00);
                                reg_w(sd, 0x21, 0x19);
                                reg_w(sd, 0x21, 0x1f);
                        }
                        break;
 +              case SEN_OV7620:
 +                      reg_w(sd, 0x20, 0x00);
 +                      reg_w(sd, 0x21, 0x19);
 +                      break;
                default:
                        reg_w(sd, 0x21, 0x19);
                }
@@@ -3539,8 -3488,7 +3539,8 @@@ static int ov519_mode_init_regs(struct 
                if (write_regvals(sd, mode_init_519,
                                  ARRAY_SIZE(mode_init_519)))
                        return -EIO;
 -              if (sd->sensor == SEN_OV7640) {
 +              if (sd->sensor == SEN_OV7640 ||
 +                  sd->sensor == SEN_OV7648) {
                        /* Select 8-bit input mode */
                        reg_w_mask(sd, OV519_R20_DFR, 0x10, 0x10);
                }
        if (sd->sensor == SEN_OV7670 &&
            sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
                reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
 +      else if (sd->sensor == SEN_OV7648 &&
 +          sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
 +              reg_w(sd, OV519_R12_X_OFFSETL, 0x01);
        else
                reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
        reg_w(sd, OV519_R13_X_OFFSETH,  0x00);
        sd->clockdiv = 0;
        switch (sd->sensor) {
        case SEN_OV7640:
 +      case SEN_OV7648:
                switch (sd->frame_rate) {
                default:
  /*            case 30: */
@@@ -3705,7 -3649,6 +3705,7 @@@ static int mode_init_ov_sensor_regs(str
                i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
                break;
        case SEN_OV7620:
 +      case SEN_OV7620AE:
        case SEN_OV76BE:
                i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
                i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
                        i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e);
                break;
        case SEN_OV7640:
 +      case SEN_OV7648:
                i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
                i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
 -/*            i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); */
 -/*            i2c_w(sd, 0x25, qvga ? 0x30 : 0x60); */
 -/*            i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40); */
 -/*            i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0); */
 -/*            i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20); */
 +              /* Setting this undocumented bit in qvga mode removes a very
 +                 annoying vertical shaking of the image */
 +              i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
 +              /* Unknown */
 +              i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0);
 +              /* Allow higher automatic gain (to allow higher framerates) */
 +              i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
                i2c_w_mask(sd, 0x12, 0x04, 0x04); /* AWB: 1 */
                break;
        case SEN_OV7670:
@@@ -3855,13 -3795,11 +3855,13 @@@ static int set_ov_sensor_window(struct 
                }
                break;
        case SEN_OV7620:
 +      case SEN_OV7620AE:
                hwsbase = 0x2f;         /* From 7620.SET (spec is wrong) */
                hwebase = 0x2f;
                vwsbase = vwebase = 0x05;
                break;
        case SEN_OV7640:
 +      case SEN_OV7648:
                hwsbase = 0x1a;
                hwebase = 0x1a;
                vwsbase = vwebase = 0x03;
@@@ -3955,12 -3893,6 +3955,12 @@@ static int sd_start(struct gspca_dev *g
        setautobrightness(sd);
        setfreq(sd);
  
 +      /* Force clear snapshot state in case the snapshot button was
 +         pressed while we weren't streaming */
 +      sd->snapshot_needs_reset = 1;
 +      sd_reset_snapshot(gspca_dev);
 +      sd->snapshot_pressed = 0;
 +
        ret = ov51x_restart(sd);
        if (ret < 0)
                goto out;
@@@ -3987,34 -3919,6 +3987,34 @@@ static void sd_stop0(struct gspca_dev *
                w9968cf_stop0(sd);
  }
  
 +static void ov51x_handle_button(struct gspca_dev *gspca_dev, u8 state)
 +{
 +      struct sd *sd = (struct sd *) gspca_dev;
 +
 +      if (sd->snapshot_pressed != state) {
 +#ifdef CONFIG_INPUT
 +              input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
 +              input_sync(gspca_dev->input_dev);
 +#endif
 +              if (state)
 +                      sd->snapshot_needs_reset = 1;
 +
 +              sd->snapshot_pressed = state;
 +      } else {
 +              /* On the ov511 / ov519 we need to reset the button state
 +                 multiple times, as resetting does not work as long as the
 +                 button stays pressed */
 +              switch (sd->bridge) {
 +              case BRIDGE_OV511:
 +              case BRIDGE_OV511PLUS:
 +              case BRIDGE_OV519:
 +                      if (state)
 +                              sd->snapshot_needs_reset = 1;
 +                      break;
 +              }
 +      }
 +}
 +
  static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
                        u8 *in,                 /* isoc packet */
                        int len)                /* iso packet length */
         */
        if (!(in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) &&
            (in[8] & 0x08)) {
 +              ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1);
                if (in[8] & 0x80) {
                        /* Frame end */
                        if ((in[9] + 1) * 8 != gspca_dev->width ||
@@@ -4074,7 -3977,6 +4074,7 @@@ static void ov518_pkt_scan(struct gspca
        /* A false positive here is likely, until OVT gives me
         * the definitive SOF/EOF format */
        if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) {
 +              ov51x_handle_button(gspca_dev, (data[6] >> 1) & 1);
                gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
                gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
                sd->packet_nr = 0;
@@@ -4122,9 -4024,6 +4122,9 @@@ static void ov519_pkt_scan(struct gspca
        if (data[0] == 0xff && data[1] == 0xff && data[2] == 0xff) {
                switch (data[3]) {
                case 0x50:              /* start of frame */
 +                      /* Don't check the button state here, as the state
 +                         usually (always ?) changes at EOF and checking it
 +                         here leads to unnecessary snapshot state resets. */
  #define HDRSZ 16
                        data += HDRSZ;
                        len -= HDRSZ;
                                gspca_dev->last_packet_type = DISCARD_PACKET;
                        return;
                case 0x51:              /* end of frame */
 +                      ov51x_handle_button(gspca_dev, data[11] & 1);
                        if (data[9] != 0)
                                gspca_dev->last_packet_type = DISCARD_PACKET;
                        gspca_frame_add(gspca_dev, LAST_PACKET,
@@@ -4205,11 -4103,9 +4205,11 @@@ static void setbrightness(struct gspca_
        case SEN_OV6630:
        case SEN_OV66308AF:
        case SEN_OV7640:
 +      case SEN_OV7648:
                i2c_w(sd, OV7610_REG_BRT, val);
                break;
        case SEN_OV7620:
 +      case SEN_OV7620AE:
                /* 7620 doesn't like manual changes when in auto mode */
                if (!sd->autobrightness)
                        i2c_w(sd, OV7610_REG_BRT, val);
@@@ -4246,8 -4142,7 +4246,8 @@@ static void setcontrast(struct gspca_de
                i2c_w(sd, 0x64, ctab[val >> 5]);
                break;
            }
 -      case SEN_OV7620: {
 +      case SEN_OV7620:
 +      case SEN_OV7620AE: {
                static const __u8 ctab[] = {
                        0x01, 0x05, 0x09, 0x11, 0x15, 0x35, 0x37, 0x57,
                        0x5b, 0xa5, 0xa7, 0xc7, 0xc9, 0xcf, 0xef, 0xff
                i2c_w(sd, 0x64, ctab[val >> 4]);
                break;
            }
 -      case SEN_OV7640:
 -              /* Use gain control instead. */
 -              i2c_w(sd, OV7610_REG_GAIN, val >> 2);
 -              break;
        case SEN_OV7670:
                /* check that this isn't just the same as ov7610 */
                i2c_w(sd, OV7670_REG_CONTRAS, val >> 1);
@@@ -4280,7 -4179,6 +4280,7 @@@ static void setcolors(struct gspca_dev 
                i2c_w(sd, OV7610_REG_SAT, val);
                break;
        case SEN_OV7620:
 +      case SEN_OV7620AE:
                /* Use UV gamma control instead. Bits 0 & 7 are reserved. */
  /*            rc = ov_i2c_write(sd->dev, 0x62, (val >> 9) & 0x7e);
                if (rc < 0)
                i2c_w(sd, OV7610_REG_SAT, val);
                break;
        case SEN_OV7640:
 +      case SEN_OV7648:
                i2c_w(sd, OV7610_REG_SAT, val & 0xf0);
                break;
        case SEN_OV7670:
  
  static void setautobrightness(struct sd *sd)
  {
 -      if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670 ||
 +      if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7648 ||
 +          sd->sensor == SEN_OV7670 ||
            sd->sensor == SEN_OV2610 || sd->sensor == SEN_OV3610)
                return;
  
@@@ -4579,13 -4475,9 +4579,13 @@@ static const struct sd_desc sd_desc = 
        .stopN = sd_stopN,
        .stop0 = sd_stop0,
        .pkt_scan = sd_pkt_scan,
 +      .dq_callback = sd_reset_snapshot,
        .querymenu = sd_querymenu,
        .get_jcomp = sd_get_jcomp,
        .set_jcomp = sd_set_jcomp,
 +#ifdef CONFIG_INPUT
 +      .other_input = 1,
 +#endif
  };
  
  /* -- module initialisation -- */
@@@ -4602,8 -4494,7 +4602,8 @@@ static const __devinitdata struct usb_d
         .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
        {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
        {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
 -      {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
 +      {USB_DEVICE(0x054c, 0x0155),
 +       .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
        {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
        {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
        {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
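
The snapshot support added above splits the work across contexts: the pkt_scan paths only record the button edge and set snapshot_needs_reset, while the bridge register writes are deferred to sd_reset_snapshot() via the dq_callback hook, with the flag cleared before the writes so a new press seen by pkt_scan is not lost to a race. A condensed sketch of the edge handling (field names are from the hunks; the ov511/ov519 repeated-reset special case is omitted):

static void sketch_handle_button(struct gspca_dev *gspca_dev, u8 state)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->snapshot_pressed == state)
		return;                         /* report edges only */
#ifdef CONFIG_INPUT
	input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
	input_sync(gspca_dev->input_dev);
#endif
	if (state)
		sd->snapshot_needs_reset = 1;   /* reset runs in dq_callback */
	sd->snapshot_pressed = state;
}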
diff --combined drivers/mfd/sm501.c
index dc9ea95c0561bde7fed4f46e6d9f6ec140e0dc85,10491e4e305d899272536ed193075c3096409da2..ff0718efb0aeed6719ab26cd6df2391db5552613
@@@ -523,7 -523,7 +523,7 @@@ unsigned long sm501_set_clock(struct de
        unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK);
        unsigned char reg;
        unsigned int pll_reg = 0;
-       unsigned long sm501_freq; /* the actual frequency acheived */
+       unsigned long sm501_freq; /* the actual frequency achieved */
  
        struct sm501_clock to;
  
  
        switch (clksrc) {
        case SM501_CLOCK_P2XCLK:
-               /* This clock is divided in half so to achive the
+               /* This clock is divided in half so to achieve the
                 * requested frequency the value must be multiplied by
                 * 2. This clock also has an additional pre divisor */
  
                break;
  
        case SM501_CLOCK_V2XCLK:
-               /* This clock is divided in half so to achive the
+               /* This clock is divided in half so to achieve the
                 * requested frequency the value must be multiplied by 2. */
  
                sm501_freq = (sm501_select_clock(2 * req_freq, &to, 3) / 2);
@@@ -648,7 -648,7 +648,7 @@@ unsigned long sm501_find_clock(struct d
                               unsigned long req_freq)
  {
        struct sm501_devdata *sm = dev_get_drvdata(dev);
-       unsigned long sm501_freq; /* the frequency achiveable by the 501 */
+       unsigned long sm501_freq; /* the frequency achievable by the 501 */
        struct sm501_clock to;
  
        switch (clksrc) {
@@@ -1430,7 -1430,7 +1430,7 @@@ static int __devinit sm501_plat_probe(s
        }
  
        sm->regs_claim = request_mem_region(sm->io_res->start,
 -                                          0x100, "sm501");
 +                                          resource_size(sm->io_res), "sm501");
  
        if (sm->regs_claim == NULL) {
                dev_err(&dev->dev, "cannot claim registers\n");
  
        platform_set_drvdata(dev, sm);
  
 -      sm->regs = ioremap(sm->io_res->start,
 -                         (sm->io_res->end - sm->io_res->start) - 1);
 +      sm->regs = ioremap(sm->io_res->start, resource_size(sm->io_res));
  
        if (sm->regs == NULL) {
                dev_err(&dev->dev, "cannot remap registers\n");
@@@ -1644,7 -1645,7 +1644,7 @@@ static int __devinit sm501_pci_probe(st
        sm->mem_res = &dev->resource[0];
  
        sm->regs_claim = request_mem_region(sm->io_res->start,
 -                                          0x100, "sm501");
 +                                          resource_size(sm->io_res), "sm501");
        if (sm->regs_claim == NULL) {
                dev_err(&dev->dev, "cannot claim registers\n");
                err = -EBUSY;
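
Besides widening the request_mem_region() claim to the whole resource, the hunks above fix a latent sizing bug: the old ioremap() length of (end - start) - 1 under-counted by two bytes, because resource ranges are inclusive of both endpoints. resource_size() encapsulates the correct arithmetic; a sketch of what it computes (see include/linux/ioport.h for the real helper):

static inline resource_size_t sketch_resource_size(const struct resource *res)
{
	return res->end - res->start + 1;       /* [start, end] is inclusive */
}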
index 3fab78ba895296e6ab704e02edb2d03ca5a2dfdd,a4e37758be4141b5e9a9cd811ec638c405bebc87..723e50894db9c769a08235aa99cca0ce630f1e57
@@@ -37,7 -37,6 +37,7 @@@
  #include <linux/gfp.h>
  #include <linux/tty.h>
  #include <linux/tty_flip.h>
 +#include <linux/kfifo.h>
  
  #include <linux/mmc/core.h>
  #include <linux/mmc/card.h>
  #define UART_NR               8       /* Number of UARTs this driver can handle */
  
  
 -#define UART_XMIT_SIZE        PAGE_SIZE
 +#define FIFO_SIZE     PAGE_SIZE
  #define WAKEUP_CHARS  256
  
 -#define circ_empty(circ)      ((circ)->head == (circ)->tail)
 -#define circ_clear(circ)      ((circ)->head = (circ)->tail = 0)
 -
 -#define circ_chars_pending(circ) \
 -              (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
 -
 -#define circ_chars_free(circ) \
 -              (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
 -
 -
  struct uart_icount {
        __u32   cts;
        __u32   dsr;
@@@ -73,7 -82,7 +73,7 @@@ struct sdio_uart_port 
        struct mutex            func_lock;
        struct task_struct      *in_sdio_uart_irq;
        unsigned int            regs_offset;
 -      struct circ_buf         xmit;
 +      struct kfifo            xmit_fifo;
        spinlock_t              write_lock;
        struct uart_icount      icount;
        unsigned int            uartclk;
@@@ -96,8 -105,6 +96,8 @@@ static int sdio_uart_add_port(struct sd
        kref_init(&port->kref);
        mutex_init(&port->func_lock);
        spin_lock_init(&port->write_lock);
 +      if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
 +              return -ENOMEM;
  
        spin_lock(&sdio_uart_table_lock);
        for (index = 0; index < UART_NR; index++) {
@@@ -133,7 -140,6 +133,7 @@@ static void sdio_uart_port_destroy(stru
  {
        struct sdio_uart_port *port =
                container_of(kref, struct sdio_uart_port, kref);
 +      kfifo_free(&port->xmit_fifo);
        kfree(port);
  }
  
@@@ -450,11 -456,9 +450,11 @@@ static void sdio_uart_receive_chars(str
  
  static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
  {
 -      struct circ_buf *xmit = &port->xmit;
 +      struct kfifo *xmit = &port->xmit_fifo;
        int count;
        struct tty_struct *tty;
 +      u8 iobuf[16];
 +      int len;
  
        if (port->x_char) {
                sdio_out(port, UART_TX, port->x_char);
  
        tty = tty_port_tty_get(&port->port);
  
 -      if (tty == NULL || circ_empty(xmit) ||
 +      if (tty == NULL || !kfifo_len(xmit) ||
                                tty->stopped || tty->hw_stopped) {
                sdio_uart_stop_tx(port);
                tty_kref_put(tty);
                return;
        }
  
 -      count = 16;
 -      do {
 -              sdio_out(port, UART_TX, xmit->buf[xmit->tail]);
 -              xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 +      len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
 +      for (count = 0; count < len; count++) {
 +              sdio_out(port, UART_TX, iobuf[count]);
                port->icount.tx++;
 -              if (circ_empty(xmit))
 -                      break;
 -      } while (--count > 0);
 +      }
  
 -      if (circ_chars_pending(xmit) < WAKEUP_CHARS)
 +      len = kfifo_len(xmit);
 +      if (len < WAKEUP_CHARS) {
                tty_wakeup(tty);
 -
 -      if (circ_empty(xmit))
 -              sdio_uart_stop_tx(port);
 +              if (len == 0)
 +                      sdio_uart_stop_tx(port);
 +      }
        tty_kref_put(tty);
  }
  
@@@ -575,7 -581,7 +575,7 @@@ static int uart_carrier_raised(struct t
        struct sdio_uart_port *port =
                        container_of(tport, struct sdio_uart_port, port);
        unsigned int ret = sdio_uart_claim_func(port);
-       if (ret)        /* Missing hardware shoudn't block for carrier */
+       if (ret)        /* Missing hardware shouldn't block for carrier */
                return 1;
        ret = sdio_uart_get_mctrl(port);
        sdio_uart_release_func(port);
@@@ -626,6 -632,7 +626,6 @@@ static int sdio_uart_activate(struct tt
  {
        struct sdio_uart_port *port =
                        container_of(tport, struct sdio_uart_port, port);
 -      unsigned long page;
        int ret;
  
        /*
         */
        set_bit(TTY_IO_ERROR, &tty->flags);
  
 -      /* Initialise and allocate the transmit buffer. */
 -      page = __get_free_page(GFP_KERNEL);
 -      if (!page)
 -              return -ENOMEM;
 -      port->xmit.buf = (unsigned char *)page;
 -      circ_clear(&port->xmit);
 +      kfifo_reset(&port->xmit_fifo);
  
        ret = sdio_uart_claim_func(port);
        if (ret)
 -              goto err1;
 +              return ret;
        ret = sdio_enable_func(port->func);
        if (ret)
 -              goto err2;
 +              goto err1;
        ret = sdio_claim_irq(port->func, sdio_uart_irq);
        if (ret)
 -              goto err3;
 +              goto err2;
  
        /*
         * Clear the FIFO buffers and disable them.
        sdio_uart_release_func(port);
        return 0;
  
 -err3:
 -      sdio_disable_func(port->func);
  err2:
 -      sdio_uart_release_func(port);
 +      sdio_disable_func(port->func);
  err1:
 -      free_page((unsigned long)port->xmit.buf);
 +      sdio_uart_release_func(port);
        return ret;
  }
  
@@@ -713,7 -727,7 +713,7 @@@ static void sdio_uart_shutdown(struct t
  
        ret = sdio_uart_claim_func(port);
        if (ret)
 -              goto skip;
 +              return;
  
        sdio_uart_stop_rx(port);
  
        sdio_disable_func(port->func);
  
        sdio_uart_release_func(port);
 -
 -skip:
 -      /* Free the transmit buffer page. */
 -      free_page((unsigned long)port->xmit.buf);
  }
  
  /**
@@@ -804,12 -822,27 +804,12 @@@ static int sdio_uart_write(struct tty_s
                           int count)
  {
        struct sdio_uart_port *port = tty->driver_data;
 -      struct circ_buf *circ = &port->xmit;
 -      int c, ret = 0;
 +      int ret;
  
        if (!port->func)
                return -ENODEV;
  
 -      spin_lock(&port->write_lock);
 -      while (1) {
 -              c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
 -              if (count < c)
 -                      c = count;
 -              if (c <= 0)
 -                      break;
 -              memcpy(circ->buf + circ->head, buf, c);
 -              circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
 -              buf += c;
 -              count -= c;
 -              ret += c;
 -      }
 -      spin_unlock(&port->write_lock);
 -
 +      ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
        if (!(port->ier & UART_IER_THRI)) {
                int err = sdio_uart_claim_func(port);
                if (!err) {
  static int sdio_uart_write_room(struct tty_struct *tty)
  {
        struct sdio_uart_port *port = tty->driver_data;
 -      return port ? circ_chars_free(&port->xmit) : 0;
 +      return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
  }
  
  static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
  {
        struct sdio_uart_port *port = tty->driver_data;
 -      return port ? circ_chars_pending(&port->xmit) : 0;
 +      return kfifo_len(&port->xmit_fifo);
  }
  
  static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
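
The sdio_uart conversion swaps the hand-rolled circular buffer for a kfifo; the *_locked variants take the existing write_lock and do all index arithmetic internally. A self-contained sketch of the producer/consumer pairing, using only calls that appear in the hunks above (the function is hypothetical):

static int sketch_fifo_roundtrip(void)
{
	struct kfifo fifo;
	spinlock_t lock;
	u8 in[5] = "ping", out[16];
	unsigned int copied, drained;

	spin_lock_init(&lock);
	if (kfifo_alloc(&fifo, PAGE_SIZE, GFP_KERNEL))
		return -ENOMEM;

	/* producer side, cf. sdio_uart_write() */
	copied = kfifo_in_locked(&fifo, in, sizeof(in), &lock);

	/* consumer side, cf. sdio_uart_transmit_chars() */
	drained = kfifo_out_locked(&fifo, out, sizeof(out), &lock);

	kfifo_free(&fifo);
	return drained == copied ? 0 : -EIO;
}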
index 71384114a4ed7c266b71047b3f1f3bc7d028ffc3,bef02330464d7d01f32bc7c4fa3ba416eec72d13..55d99ca82f8ad5a67785aed9a1ecf684ba404a09
@@@ -248,7 -248,7 +248,7 @@@ static void restart_sched(unsigned long
   *
   * Interrupts are handled by a single CPU and it is likely that on a MP system
   * the application is migrated to another CPU. In that scenario, we try to
-  * seperate the RX(in irq context) and TX state in order to decrease memory
+  * separate the RX(in irq context) and TX state in order to decrease memory
   * contention.
   */
  struct sge {
        struct sk_buff  *espibug_skb[MAX_NPORTS];
        u32             sge_control;    /* shadow value of sge control reg */
        struct sge_intr_counts stats;
 -      struct sge_port_stats *port_stats[MAX_NPORTS];
 +      struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
        struct sched    *tx_sched;
        struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
  };
@@@ -953,7 -953,7 +953,7 @@@ int t1_sge_intr_error_handler(struct sg
                sge->stats.respQ_empty++;
        if (cause & F_RESPQ_OVERFLOW) {
                sge->stats.respQ_overflow++;
 -              CH_ALERT("%s: SGE response queue overflow\n",
 +              pr_alert("%s: SGE response queue overflow\n",
                         adapter->name);
        }
        if (cause & F_FL_EXHAUSTED) {
        }
        if (cause & F_PACKET_TOO_BIG) {
                sge->stats.pkt_too_big++;
 -              CH_ALERT("%s: SGE max packet size exceeded\n",
 +              pr_alert("%s: SGE max packet size exceeded\n",
                         adapter->name);
        }
        if (cause & F_PACKET_MISMATCH) {
                sge->stats.pkt_mismatch++;
 -              CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
 +              pr_alert("%s: SGE packet mismatch\n", adapter->name);
        }
        if (cause & SGE_INT_FATAL)
                t1_fatal_err(adapter);
@@@ -1101,7 -1101,7 +1101,7 @@@ static void unexpected_offload(struct a
  
        pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
                            pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
 -      CH_ERR("%s: unexpected offload packet, cmd %u\n",
 +      pr_err("%s: unexpected offload packet, cmd %u\n",
               adapter->name, *skb->data);
        recycle_fl_buf(fl, fl->cidx);
  }
@@@ -1687,7 -1687,7 +1687,7 @@@ static int t1_sge_tx(struct sk_buff *sk
                        netif_stop_queue(dev);
                        set_bit(dev->if_port, &sge->stopped_tx_queues);
                        sge->stats.cmdQ_full[2]++;
 -                      CH_ERR("%s: Tx ring full while queue awake!\n",
 +                      pr_err("%s: Tx ring full while queue awake!\n",
                               adapter->name);
                }
                spin_unlock(&q->lock);
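
The CH_ALERT()/CH_ERR() wrappers give way to the generic pr_alert()/pr_err() helpers. When a driver still wants a per-module prefix, the usual idiom (shown as a sketch; not necessarily what this driver does elsewhere) is to define pr_fmt before any include:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void sketch_report_overflow(const char *name)
{
	/* expands to "<modname>: %s: SGE response queue overflow\n" */
	pr_alert("%s: SGE response queue overflow\n", name);
}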
diff --combined drivers/net/cs89x0.c
index 14624019ce71b7009ef69d262aa1f385851e3c41,9b5bbc6ea2fa8180b2bc4932631b385323f09a01..b0208e474f7eabaebebc646491e76747b378ba4b
@@@ -580,7 -580,7 +580,7 @@@ cs89x0_probe1(struct net_device *dev, i
        }
  
  #ifdef CONFIG_SH_HICOSH4
-       /* truely reset the chip */
+       /* truly reset the chip */
        writeword(ioaddr, ADD_PORT, 0x0114);
        writeword(ioaddr, DATA_PORT, 0x0040);
  #endif
@@@ -1785,7 -1785,7 +1785,7 @@@ static void set_multicast_list(struct n
        {
                lp->rx_mode = RX_ALL_ACCEPT;
        }
 -      else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
 +      else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
        {
                /* The multicast-accept list is initialized to accept-all, and we
                   rely on higher-level filtering for now. */
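
This is the same accessor conversion that recurs in davinci_emac.c below: direct reads of dev->mc_list and dev->mc_count become netdev_mc_empty(), netdev_mc_count() and netdev_for_each_mc_addr(). A sketch showing the full trio together (the walking function itself is hypothetical):

static void sketch_walk_mc_list(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;

	if (netdev_mc_empty(dev))
		return;
	pr_debug("%d multicast addresses\n", netdev_mc_count(dev));
	netdev_for_each_mc_addr(mc_ptr, dev)
		pr_debug("mc addr %pM\n", mc_ptr->dmi_addr);
}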
diff --combined drivers/net/cxgb3/sge.c
index 78e265b484b64c20baf6d3ab951b3db176415d76,5dbc125822b9aaf22825a6d0a67636a006756e4e..67e61b2a8c42332b2b2d678d560111fb134a203c
@@@ -42,7 -42,6 +42,7 @@@
  #include "sge_defs.h"
  #include "t3_cpl.h"
  #include "firmware_exports.h"
 +#include "cxgb3_offload.h"
  
  #define USE_GTS 0
  
@@@ -197,13 -196,13 +197,13 @@@ static inline void refill_rspq(struct a
  /**
   *    need_skb_unmap - does the platform need unmapping of sk_buffs?
   *
-  *    Returns true if the platfrom needs sk_buff unmapping.  The compiler
+  *    Returns true if the platform needs sk_buff unmapping.  The compiler
   *    optimizes away unnecessary code if this returns true.
   */
  static inline int need_skb_unmap(void)
  {
        /*
-        * This structure is used to tell if the platfrom needs buffer
+        * This structure is used to tell if the platform needs buffer
         * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
         */
        struct dummy {
@@@ -481,7 -480,6 +481,7 @@@ static inline void ring_fl_db(struct ad
  {
        if (q->pend_cred >= q->credits / 4) {
                q->pend_cred = 0;
 +              wmb();
                t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
        }
  }
@@@ -2081,7 -2079,6 +2081,7 @@@ static void lro_add_page(struct adapte
                         struct sge_fl *fl, int len, int complete)
  {
        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 +      struct port_info *pi = netdev_priv(qs->netdev);
        struct sk_buff *skb = NULL;
        struct cpl_rx_pkt *cpl;
        struct skb_frag_struct *rx_frag;
  
        if (!nr_frags) {
                offset = 2 + sizeof(struct cpl_rx_pkt);
 -              qs->lro_va = sd->pg_chunk.va + 2;
 -      }
 -      len -= offset;
 +              cpl = qs->lro_va = sd->pg_chunk.va + 2;
  
 -      prefetch(qs->lro_va);
 +              if ((pi->rx_offload & T3_RX_CSUM) &&
 +                   cpl->csum_valid && cpl->csum == htons(0xffff)) {
 +                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +                      qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
 +              } else
 +                      skb->ip_summed = CHECKSUM_NONE;
 +      } else
 +              cpl = qs->lro_va;
 +
 +      len -= offset;
  
        rx_frag += nr_frags;
        rx_frag->page = sd->pg_chunk.page;
                return;
  
        skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
 -      skb->ip_summed = CHECKSUM_UNNECESSARY;
 -      cpl = qs->lro_va;
  
        if (unlikely(cpl->vlan_valid)) {
 -              struct net_device *dev = qs->netdev;
 -              struct port_info *pi = netdev_priv(dev);
                struct vlan_group *grp = pi->vlan_grp;
  
                if (likely(grp != NULL)) {
@@@ -2288,14 -2282,11 +2288,14 @@@ static int process_responses(struct ada
        while (likely(budget_left && is_new_response(r, q))) {
                int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
                struct sk_buff *skb = NULL;
 -              u32 len, flags = ntohl(r->flags);
 -              __be32 rss_hi = *(const __be32 *)r,
 -                     rss_lo = r->rss_hdr.rss_hash_val;
 +              u32 len, flags;
 +              __be32 rss_hi, rss_lo;
  
 +              rmb();
                eth = r->rss_hdr.opcode == CPL_RX_PKT;
 +              rss_hi = *(const __be32 *)r;
 +              rss_lo = r->rss_hdr.rss_hash_val;
 +              flags = ntohl(r->flags);
  
                if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
                        skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
@@@ -2506,10 -2497,7 +2506,10 @@@ static int process_pure_responses(struc
                        refill_rspq(adap, q, q->credits);
                        q->credits = 0;
                }
 -      } while (is_new_response(r, q) && is_pure_response(r));
 +              if (!is_new_response(r, q))
 +                      break;
 +              rmb();
 +      } while (is_pure_response(r));
  
        if (sleeping)
                check_ring_db(adap, qs, sleeping);
@@@ -2543,7 -2531,6 +2543,7 @@@ static inline int handle_responses(stru
  
        if (!is_new_response(r, q))
                return -1;
 +      rmb();
        if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
                t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
                             V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
@@@ -2842,13 -2829,8 +2842,13 @@@ void t3_sge_err_intr_handler(struct ada
        }
  
        if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
 -              CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
 -                       status & F_HIPIODRBDROPERR ? "high" : "lo");
 +              queue_work(cxgb3_wq, &adapter->db_drop_task);
 +
 +      if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
 +              queue_work(cxgb3_wq, &adapter->db_full_task);
 +
 +      if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
 +              queue_work(cxgb3_wq, &adapter->db_empty_task);
  
        t3_write_reg(adapter, A_SG_INT_CAUSE, status);
        if (status &  SGE_FATALERR)
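
The rmb()/wmb() additions pair up across the DMA interface: the producer must make ring contents globally visible before ringing the doorbell (ring_fl_db), and the consumer must not read a response body until after the is_new_response() check that validates it. A generic sketch of the pairing, with a hypothetical two-word descriptor standing in for the real SGE layout:

struct sketch_desc {
	u32 payload;
	u32 valid;              /* ownership flag, written last */
};

static void sketch_publish(struct sketch_desc *d, u32 data)
{
	d->payload = data;
	wmb();                  /* payload visible before the flag flips */
	d->valid = 1;
}

static int sketch_consume(struct sketch_desc *d, u32 *data)
{
	if (!d->valid)
		return -EAGAIN;
	rmb();                  /* no payload read ahead of the check */
	*data = d->payload;
	return 0;
}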
index 1ac9440eb3fb2daa6bf1c035d36dc37fb459279c,ea49d6ec4cbac630abe30b33f1d7b3396d4cc64f..13f9869927e3a68deee2e9b35f4b3b360077551c
  #include <linux/bitops.h>
  #include <linux/io.h>
  #include <linux/uaccess.h>
 +#include <linux/davinci_emac.h>
  
  #include <asm/irq.h>
  #include <asm/page.h>
  
 -#include <mach/emac.h>
 -
  static int debug_level;
  module_param(debug_level, int, 0);
  MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
@@@ -464,7 -465,6 +464,7 @@@ struct emac_priv 
        void __iomem *ctrl_base;
        void __iomem *emac_ctrl_ram;
        u32 ctrl_ram_size;
 +      u32 hw_ram_addr;
        struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
        struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
        u32 link; /* 1=link on, 0=link off */
        struct mii_bus *mii_bus;
        struct phy_device *phydev;
        spinlock_t lock;
 +      /* platform specific members */
 +      void (*int_enable) (void);
 +      void (*int_disable) (void);
  };
  
  /* clock frequency for EMAC */
@@@ -498,9 -495,11 +498,9 @@@ static struct clk *emac_clk
  static unsigned long emac_bus_frequency;
  static unsigned long mdio_max_freq;
  
 -/* EMAC internal utility function */
 -static inline u32 emac_virt_to_phys(void __iomem *addr)
 -{
 -      return (u32 __force) io_v2p(addr);
 -}
 +#define emac_virt_to_phys(addr, priv) \
 +      (((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \
 +      + priv->hw_ram_addr)
  
  /* Cache macros - Packet buffers would be from skb pool which is cached */
  #define EMAC_VIRT_NOCACHE(addr) (addr)
@@@ -957,18 -956,19 +957,18 @@@ static void emac_dev_mcast_set(struct n
        } else {
                mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
                if ((ndev->flags & IFF_ALLMULTI) ||
 -                  (ndev->mc_count > EMAC_DEF_MAX_MULTICAST_ADDRESSES)) {
 +                  netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
                        mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
                        emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
                }
 -              if (ndev->mc_count > 0) {
 +              if (!netdev_mc_empty(ndev)) {
                        struct dev_mc_list *mc_ptr;
                        mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
                        emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
                        /* program multicast address list into EMAC hardware */
 -                      for (mc_ptr = ndev->mc_list; mc_ptr;
 -                           mc_ptr = mc_ptr->next) {
 +                      netdev_for_each_mc_addr(mc_ptr, ndev) {
                                emac_add_mcast(priv, EMAC_MULTICAST_ADD,
 -                                             (u8 *)mc_ptr->dmi_addr);
 +                                             (u8 *) mc_ptr->dmi_addr);
                        }
                } else {
                        mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
@@@ -1002,8 -1002,6 +1002,8 @@@ static void emac_int_disable(struct ema
                emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
                emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
                /* NOTE: Rx Threshold and Misc interrupts are not disabled */
 +              if (priv->int_disable)
 +                      priv->int_disable();
  
                local_irq_restore(flags);
  
  static void emac_int_enable(struct emac_priv *priv)
  {
        if (priv->version == EMAC_VERSION_2) {
 +              if (priv->int_enable)
 +                      priv->int_enable();
 +
                emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
                emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);
  
@@@ -1307,7 -1302,7 +1307,7 @@@ static int emac_tx_bdproc(struct emac_p
        curr_bd = txch->active_queue_head;
        if (NULL == curr_bd) {
                emac_write(EMAC_TXCP(ch),
 -                         emac_virt_to_phys(txch->last_hw_bdprocessed));
 +                         emac_virt_to_phys(txch->last_hw_bdprocessed, priv));
                txch->no_active_pkts++;
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                return 0;
        while ((curr_bd) &&
              ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
              (pkts_processed < budget)) {
 -              emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd));
 +              emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv));
                txch->active_queue_head = curr_bd->next;
                if (frame_status & EMAC_CPPI_EOQ_BIT) {
                        if (curr_bd->next) {    /* misqueued packet */
@@@ -1404,7 -1399,7 +1404,7 @@@ static int emac_send(struct emac_priv *
                txch->active_queue_tail = curr_bd;
                if (1 != txch->queue_active) {
                        emac_write(EMAC_TXHDP(ch),
 -                                      emac_virt_to_phys(curr_bd));
 +                                      emac_virt_to_phys(curr_bd, priv));
                        txch->queue_active = 1;
                }
                ++txch->queue_reinit;
                tail_bd->next = curr_bd;
                txch->active_queue_tail = curr_bd;
                tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
 -              tail_bd->h_next = (int)emac_virt_to_phys(curr_bd);
 +              tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv);
                frame_status = tail_bd->mode;
                if (frame_status & EMAC_CPPI_EOQ_BIT) {
 -                      emac_write(EMAC_TXHDP(ch), emac_virt_to_phys(curr_bd));
 +                      emac_write(EMAC_TXHDP(ch),
 +                              emac_virt_to_phys(curr_bd, priv));
                        frame_status &= ~(EMAC_CPPI_EOQ_BIT);
                        tail_bd->mode = frame_status;
                        ++txch->end_of_queue_add;
@@@ -1610,8 -1604,7 +1610,8 @@@ static int emac_init_rxch(struct emac_p
                }
  
                /* populate the hardware descriptor */
 -              curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head);
 +              curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
 +                              priv);
                /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
                curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr);
                curr_bd->off_b_len = rxch->buf_size;
@@@ -1886,7 -1879,7 +1886,7 @@@ static void emac_addbd_to_rx_queue(stru
                rxch->active_queue_tail = curr_bd;
                if (0 != rxch->queue_active) {
                        emac_write(EMAC_RXHDP(ch),
 -                                 emac_virt_to_phys(rxch->active_queue_head));
 +                         emac_virt_to_phys(rxch->active_queue_head, priv));
                        rxch->queue_active = 1;
                }
        } else {
                rxch->active_queue_tail = curr_bd;
                tail_bd->next = curr_bd;
                tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
 -              tail_bd->h_next = emac_virt_to_phys(curr_bd);
 +              tail_bd->h_next = emac_virt_to_phys(curr_bd, priv);
                frame_status = tail_bd->mode;
                if (frame_status & EMAC_CPPI_EOQ_BIT) {
                        emac_write(EMAC_RXHDP(ch),
 -                                      emac_virt_to_phys(curr_bd));
 +                                      emac_virt_to_phys(curr_bd, priv));
                        frame_status &= ~(EMAC_CPPI_EOQ_BIT);
                        tail_bd->mode = frame_status;
                        ++rxch->end_of_queue_add;
@@@ -1994,7 -1987,7 +1994,7 @@@ static int emac_rx_bdproc(struct emac_p
                curr_pkt->num_bufs = 1;
                curr_pkt->pkt_length =
                        (frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
 -              emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd));
 +              emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv));
                ++rxch->processed_bd;
                last_bd = curr_bd;
                curr_bd = last_bd->next;
                        if (curr_bd) {
                                ++rxch->mis_queued_packets;
                                emac_write(EMAC_RXHDP(ch),
 -                                         emac_virt_to_phys(curr_bd));
 +                                         emac_virt_to_phys(curr_bd, priv));
                        } else {
                                ++rxch->end_of_queue;
                                rxch->queue_active = 0;
@@@ -2106,7 -2099,7 +2106,7 @@@ static int emac_hw_enable(struct emac_p
                emac_write(EMAC_RXINTMASKSET, BIT(ch));
                rxch->queue_active = 1;
                emac_write(EMAC_RXHDP(ch),
 -                         emac_virt_to_phys(rxch->active_queue_head));
 +                         emac_virt_to_phys(rxch->active_queue_head, priv));
        }
  
        /* Enable MII */
@@@ -2658,7 -2651,7 +2658,7 @@@ static int __devinit davinci_emac_probe
  
        pdata = pdev->dev.platform_data;
        if (!pdata) {
-               printk(KERN_ERR "DaVinci EMAC: No platfrom data\n");
+               printk(KERN_ERR "DaVinci EMAC: No platform data\n");
                return -ENODEV;
        }
  
        priv->phy_mask = pdata->phy_mask;
        priv->rmii_en = pdata->rmii_en;
        priv->version = pdata->version;
 +      priv->int_enable = pdata->interrupt_enable;
 +      priv->int_disable = pdata->interrupt_disable;
 +
        emac_dev = &ndev->dev;
        /* Get EMAC platform data */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
        size = res->end - res->start + 1;
        if (!request_mem_region(res->start, size, ndev->name)) {
 -              dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() \
 -                                       for regs\n");
 +              dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n");
                rc = -ENXIO;
                goto probe_quit;
        }
        priv->ctrl_ram_size = pdata->ctrl_ram_size;
        priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
  
 +      if (pdata->hw_ram_addr)
 +              priv->hw_ram_addr = pdata->hw_ram_addr;
 +      else
 +              priv->hw_ram_addr = (u32 __force)res->start +
 +                                      pdata->ctrl_ram_offset;
 +
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(emac_dev, "DaVinci EMAC: Error getting irq res\n");
diff --combined drivers/net/e1000e/82571.c
index 3c95acb3a87d0eb641ec7146111bd5a88a516ea9,dc4eb87309c56a7326abb43a8fef727e55a6446b..712ccc66ba25b5f1a6b1059a2b7e537a91aad7db
@@@ -267,14 -267,8 +267,14 @@@ static s32 e1000_init_mac_params_82571(
        }
  
        switch (hw->mac.type) {
 +      case e1000_82573:
 +              func->set_lan_id = e1000_set_lan_id_single_port;
 +              func->check_mng_mode = e1000e_check_mng_mode_generic;
 +              func->led_on = e1000e_led_on_generic;
 +              break;
        case e1000_82574:
        case e1000_82583:
 +              func->set_lan_id = e1000_set_lan_id_single_port;
                func->check_mng_mode = e1000_check_mng_mode_82574;
                func->led_on = e1000_led_on_82574;
                break;
@@@ -928,12 -922,9 +928,12 @@@ static s32 e1000_reset_hw_82571(struct 
        ew32(IMC, 0xffffffff);
        icr = er32(ICR);
  
 -      if (hw->mac.type == e1000_82571 &&
 -              hw->dev_spec.e82571.alt_mac_addr_is_present)
 -                      e1000e_set_laa_state_82571(hw, true);
 +      /* Install any alternate MAC address into RAR0 */
 +      ret_val = e1000_check_alt_mac_addr_generic(hw);
 +      if (ret_val)
 +              return ret_val;
 +
 +      e1000e_set_laa_state_82571(hw, true);
  
        /* Reinitialize the 82571 serdes link state machine */
        if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@@ -1233,6 -1224,32 +1233,6 @@@ static s32 e1000_led_on_82574(struct e1
        return 0;
  }
  
 -/**
 - *  e1000_update_mc_addr_list_82571 - Update Multicast addresses
 - *  @hw: pointer to the HW structure
 - *  @mc_addr_list: array of multicast addresses to program
 - *  @mc_addr_count: number of multicast addresses to program
 - *  @rar_used_count: the first RAR register free to program
 - *  @rar_count: total number of supported Receive Address Registers
 - *
 - *  Updates the Receive Address Registers and Multicast Table Array.
 - *  The caller must have a packed mc_addr_list of multicast addresses.
 - *  The parameter rar_count will usually be hw->mac.rar_entry_count
 - *  unless there are workarounds that change this.
 - **/
 -static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
 -                                          u8 *mc_addr_list,
 -                                          u32 mc_addr_count,
 -                                          u32 rar_used_count,
 -                                          u32 rar_count)
 -{
 -      if (e1000e_get_laa_state_82571(hw))
 -              rar_count--;
 -
 -      e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
 -                                         rar_used_count, rar_count);
 -}
 -
  /**
   *  e1000_setup_link_82571 - Setup flow control and link settings
   *  @hw: pointer to the HW structure
@@@ -1346,7 -1363,7 +1346,7 @@@ static s32 e1000_setup_fiber_serdes_lin
   *
   *  1) down
   *  2) autoneg_progress
-  *  3) autoneg_complete (the link sucessfully autonegotiated)
+  *  3) autoneg_complete (the link successfully autonegotiated)
   *  4) forced_up (the link has been forced up, it did not autonegotiate)
   *
   **/
@@@ -1603,29 -1620,6 +1603,29 @@@ static s32 e1000_fix_nvm_checksum_82571
        return 0;
  }
  
 +/**
 + *  e1000_read_mac_addr_82571 - Read device MAC address
 + *  @hw: pointer to the HW structure
 + **/
 +static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
 +{
 +      s32 ret_val = 0;
 +
 +      /*
  +       * If there's an alternate MAC address, place it in RAR0
  +       * so that it overrides the default permanent address the
  +       * Si installed.
 +       */
 +      ret_val = e1000_check_alt_mac_addr_generic(hw);
 +      if (ret_val)
 +              goto out;
 +
 +      ret_val = e1000_read_mac_addr_generic(hw);
 +
 +out:
 +      return ret_val;
 +}
 +
  /**
   * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
   * @hw: pointer to the HW structure
@@@ -1701,11 -1695,10 +1701,11 @@@ static struct e1000_mac_operations e825
        .cleanup_led            = e1000e_cleanup_led_generic,
        .clear_hw_cntrs         = e1000_clear_hw_cntrs_82571,
        .get_bus_info           = e1000e_get_bus_info_pcie,
 +      .set_lan_id             = e1000_set_lan_id_multi_port_pcie,
        /* .get_link_up_info: media type dependent */
        /* .led_on: mac type dependent */
        .led_off                = e1000e_led_off_generic,
 -      .update_mc_addr_list    = e1000_update_mc_addr_list_82571,
 +      .update_mc_addr_list    = e1000e_update_mc_addr_list_generic,
        .write_vfta             = e1000_write_vfta_generic,
        .clear_vfta             = e1000_clear_vfta_82571,
        .reset_hw               = e1000_reset_hw_82571,
        .setup_link             = e1000_setup_link_82571,
        /* .setup_physical_interface: media type dependent */
        .setup_led              = e1000e_setup_led_generic,
 +      .read_mac_addr          = e1000_read_mac_addr_82571,
  };
  
  static struct e1000_phy_operations e82_phy_ops_igp = {
diff --combined drivers/net/e1000e/lib.c
index 2425ed11d5cccae2b71e6646a0c75cd381524930,3af0b1b82832424f9ce5df4ce6472b75d876f0a7..a8b2c0de27c4084f138f510bf51a274fdd78bf80
@@@ -51,10 -51,10 +51,10 @@@ enum e1000_mng_mode 
   **/
  s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
  {
 +      struct e1000_mac_info *mac = &hw->mac;
        struct e1000_bus_info *bus = &hw->bus;
        struct e1000_adapter *adapter = hw->adapter;
 -      u32 status;
 -      u16 pcie_link_status, pci_header_type, cap_offset;
 +      u16 pcie_link_status, cap_offset;
  
        cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
        if (!cap_offset) {
                                                    PCIE_LINK_WIDTH_SHIFT);
        }
  
 -      pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
 -                           &pci_header_type);
 -      if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
 -              status = er32(STATUS);
 -              bus->func = (status & E1000_STATUS_FUNC_MASK)
 -                          >> E1000_STATUS_FUNC_SHIFT;
 -      } else {
 -              bus->func = 0;
 -      }
 +      mac->ops.set_lan_id(hw);
  
        return 0;
  }
  
 +/**
 + *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
  + *  @hw: pointer to the HW structure
  + *
  + *  Determines the LAN function id by reading memory-mapped registers.
 + **/
 +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
 +{
 +      struct e1000_bus_info *bus = &hw->bus;
 +      u32 reg;
 +
 +      /*
 +       * The status register reports the correct function number
 +       * for the device regardless of function swap state.
 +       */
 +      reg = er32(STATUS);
 +      bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
 +}
 +
 +/**
 + *  e1000_set_lan_id_single_port - Set LAN id for a single port device
 + *  @hw: pointer to the HW structure
 + *
 + *  Sets the LAN function id to zero for a single port device.
 + **/
 +void e1000_set_lan_id_single_port(struct e1000_hw *hw)
 +{
 +      struct e1000_bus_info *bus = &hw->bus;
 +
 +      bus->func = 0;
 +}
 +
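
The pair of helpers above replaces the old inline PCI header-type check
with a per-device set_lan_id callback: multi-port parts read the function
number from a status register, single-port parts simply pin it to 0. A
hedged userspace sketch of that dispatch (struct layout, field position
and register value are invented for illustration):

#include <stdio.h>

struct bus_info { unsigned func; };
struct hw { struct bus_info bus; void (*set_lan_id)(struct hw *); };

static void set_lan_id_single_port(struct hw *hw) { hw->bus.func = 0; }

static void set_lan_id_multi_port(struct hw *hw)
{
	unsigned status = 0x0004;	   /* stand-in for a STATUS read */
	hw->bus.func = (status >> 2) & 3;  /* stand-in FUNC field extract */
}

int main(void)
{
	struct hw single = { { 0 }, set_lan_id_single_port };
	struct hw multi  = { { 0 }, set_lan_id_multi_port };

	single.set_lan_id(&single);
	multi.set_lan_id(&multi);
	printf("single: func=%u, multi: func=%u\n",
	       single.bus.func, multi.bus.func);
	return 0;
}
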
  /**
   *  e1000_clear_vfta_generic - Clear VLAN filter table
   *  @hw: pointer to the HW structure
@@@ -164,68 -138,6 +164,68 @@@ void e1000e_init_rx_addrs(struct e1000_
                e1000e_rar_set(hw, mac_addr, i);
  }
  
 +/**
 + *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
 + *  @hw: pointer to the HW structure
 + *
  + *  Checks the NVM for an alternate MAC address.  An alternate MAC address
  + *  can be set up by pre-boot software; it must be treated like a permanent
  + *  address and must override the actual permanent MAC address. If an
 + *  alternate MAC address is found it is programmed into RAR0, replacing
 + *  the permanent address that was installed into RAR0 by the Si on reset.
 + *  This function will return SUCCESS unless it encounters an error while
 + *  reading the EEPROM.
 + **/
 +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
 +{
 +      u32 i;
 +      s32 ret_val = 0;
 +      u16 offset, nvm_alt_mac_addr_offset, nvm_data;
 +      u8 alt_mac_addr[ETH_ALEN];
 +
 +      ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
 +                               &nvm_alt_mac_addr_offset);
 +      if (ret_val) {
 +              e_dbg("NVM Read Error\n");
 +              goto out;
 +      }
 +
 +      if (nvm_alt_mac_addr_offset == 0xFFFF) {
 +              /* There is no Alternate MAC Address */
 +              goto out;
 +      }
 +
 +      if (hw->bus.func == E1000_FUNC_1)
 +              nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
 +      for (i = 0; i < ETH_ALEN; i += 2) {
 +              offset = nvm_alt_mac_addr_offset + (i >> 1);
 +              ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
 +              if (ret_val) {
 +                      e_dbg("NVM Read Error\n");
 +                      goto out;
 +              }
 +
 +              alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
 +              alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
 +      }
 +
 +      /* if multicast bit is set, the alternate address will not be used */
 +      if (alt_mac_addr[0] & 0x01) {
 +              e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
 +              goto out;
 +      }
 +
 +      /*
 +       * We have a valid alternate MAC address, and we want to treat it the
 +       * same as the normal permanent MAC address stored by the HW into the
 +       * RAR. Do this by mapping this address into RAR0.
 +       */
 +      e1000e_rar_set(hw, alt_mac_addr, 0);
 +
 +out:
 +      return ret_val;
 +}
 +
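
e1000_check_alt_mac_addr_generic() above unpacks the address two bytes per
16-bit NVM word, low byte first, and rejects it when the multicast (I/G)
bit of byte 0 is set. A standalone sketch of that unpacking, with made-up
word values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t nvm[3] = { 0x2100, 0x4b1b, 0x6c21 };	/* hypothetical words */
	uint8_t mac[6];
	int i;

	for (i = 0; i < 6; i += 2) {
		mac[i]     = (uint8_t)(nvm[i >> 1] & 0xFF);
		mac[i + 1] = (uint8_t)(nvm[i >> 1] >> 8);
	}

	if (mac[0] & 0x01)	/* I/G bit set: multicast, not usable */
		printf("multicast bit set - alternate address ignored\n");
	else			/* prints 00:21:1b:4b:21:6c here */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
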
  /**
   *  e1000e_rar_set - Set receive address register
   *  @hw: pointer to the HW structure
@@@ -340,34 -252,62 +340,34 @@@ static u32 e1000_hash_mc_addr(struct e1
   *  @hw: pointer to the HW structure
   *  @mc_addr_list: array of multicast addresses to program
   *  @mc_addr_count: number of multicast addresses to program
 - *  @rar_used_count: the first RAR register free to program
 - *  @rar_count: total number of supported Receive Address Registers
   *
 - *  Updates the Receive Address Registers and Multicast Table Array.
  + *  Updates the entire Multicast Table Array.
   *  The caller must have a packed mc_addr_list of multicast addresses.
 - *  The parameter rar_count will usually be hw->mac.rar_entry_count
 - *  unless there are workarounds that change this.
   **/
  void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
 -                                      u8 *mc_addr_list, u32 mc_addr_count,
 -                                      u32 rar_used_count, u32 rar_count)
 +                                      u8 *mc_addr_list, u32 mc_addr_count)
  {
 -      u32 i;
 -      u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC);
 +      u32 hash_value, hash_bit, hash_reg;
 +      int i;
  
 -      if (!mcarray) {
 -              printk(KERN_ERR "multicast array memory allocation failed\n");
 -              return;
 -      }
 +      /* clear mta_shadow */
 +      memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
  
 -      /*
 -       * Load the first set of multicast addresses into the exact
 -       * filters (RAR).  If there are not enough to fill the RAR
 -       * array, clear the filters.
 -       */
 -      for (i = rar_used_count; i < rar_count; i++) {
 -              if (mc_addr_count) {
 -                      e1000e_rar_set(hw, mc_addr_list, i);
 -                      mc_addr_count--;
 -                      mc_addr_list += ETH_ALEN;
 -              } else {
 -                      E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
 -                      e1e_flush();
 -                      E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
 -                      e1e_flush();
 -              }
 -      }
 -
 -      /* Load any remaining multicast addresses into the hash table. */
 -      for (; mc_addr_count > 0; mc_addr_count--) {
 -              u32 hash_value, hash_reg, hash_bit, mta;
 +      /* update mta_shadow from mc_addr_list */
 +      for (i = 0; (u32) i < mc_addr_count; i++) {
                hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
 -              e_dbg("Hash value = 0x%03X\n", hash_value);
 +
                hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
                hash_bit = hash_value & 0x1F;
 -              mta = (1 << hash_bit);
 -              mcarray[hash_reg] |= mta;
 -              mc_addr_list += ETH_ALEN;
 -      }
  
 -      /* write the hash table completely */
 -      for (i = 0; i < hw->mac.mta_reg_count; i++)
 -              E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]);
 +              hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
 +              mc_addr_list += (ETH_ALEN);
 +      }
  
 +      /* replace the entire MTA table */
 +      for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
 +              E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
        e1e_flush();
 -      kfree(mcarray);
  }
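
The rewritten update path maps each 12-bit multicast hash to one register
and one bit of a shadow MTA, then writes the whole table back in one pass.
A self-contained sketch of just the index math, assuming a power-of-two
register count so the mask below is valid (hash values are arbitrary
stand-ins):

#include <stdio.h>
#include <stdint.h>

#define MTA_REG_COUNT 128	/* must be a power of two for the mask */

int main(void)
{
	uint32_t mta_shadow[MTA_REG_COUNT] = { 0 };
	uint32_t hashes[] = { 0x9a3, 0x01f, 0xfff };
	unsigned i;

	for (i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++) {
		uint32_t reg = (hashes[i] >> 5) & (MTA_REG_COUNT - 1);
		uint32_t bit = hashes[i] & 0x1F;

		mta_shadow[reg] |= 1u << bit;
		printf("hash 0x%03x -> MTA[%u] bit %u\n",
		       (unsigned)hashes[i], (unsigned)reg, (unsigned)bit);
	}
	return 0;
}
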
  
  /**
@@@ -647,7 -587,7 +647,7 @@@ s32 e1000e_check_for_serdes_link(struc
                                if (!(rxcw & E1000_RXCW_IV)) {
                                        mac->serdes_has_link = true;
                                        e_dbg("SERDES: Link up - autoneg "
-                                          "completed sucessfully.\n");
+                                          "completed successfully.\n");
                                } else {
                                        mac->serdes_has_link = false;
                                        e_dbg("SERDES: Link down - invalid"
@@@ -2132,27 -2072,67 +2132,27 @@@ s32 e1000e_write_nvm_spi(struct e1000_h
  }
  
  /**
 - *  e1000e_read_mac_addr - Read device MAC address
 + *  e1000_read_mac_addr_generic - Read device MAC address
   *  @hw: pointer to the HW structure
   *
   *  Reads the device MAC address from the EEPROM and stores the value.
   *  Since devices with two ports use the same EEPROM, we increment the
   *  last bit in the MAC address for the second port.
   **/
 -s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
  {
 -      s32 ret_val;
 -      u16 offset, nvm_data, i;
 -      u16 mac_addr_offset = 0;
 -
 -      if (hw->mac.type == e1000_82571) {
 -              /* Check for an alternate MAC address.  An alternate MAC
 -               * address can be setup by pre-boot software and must be
 -               * treated like a permanent address and must override the
 -               * actual permanent MAC address.*/
 -              ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
 -                                       &mac_addr_offset);
 -              if (ret_val) {
 -                      e_dbg("NVM Read Error\n");
 -                      return ret_val;
 -              }
 -              if (mac_addr_offset == 0xFFFF)
 -                      mac_addr_offset = 0;
 -
 -              if (mac_addr_offset) {
 -                      if (hw->bus.func == E1000_FUNC_1)
 -                              mac_addr_offset += ETH_ALEN/sizeof(u16);
 -
 -                      /* make sure we have a valid mac address here
 -                      * before using it */
 -                      ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
 -                                               &nvm_data);
 -                      if (ret_val) {
 -                              e_dbg("NVM Read Error\n");
 -                              return ret_val;
 -                      }
 -                      if (nvm_data & 0x0001)
 -                              mac_addr_offset = 0;
 -              }
 +      u32 rar_high;
 +      u32 rar_low;
 +      u16 i;
  
 -              if (mac_addr_offset)
 -              hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
 -      }
 +      rar_high = er32(RAH(0));
 +      rar_low = er32(RAL(0));
  
 -      for (i = 0; i < ETH_ALEN; i += 2) {
 -              offset = mac_addr_offset + (i >> 1);
 -              ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
 -              if (ret_val) {
 -                      e_dbg("NVM Read Error\n");
 -                      return ret_val;
 -              }
 -              hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
 -              hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
 -      }
 +      for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
 +              hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
  
 -      /* Flip last bit of mac address if we're on second port */
 -      if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
 -              hw->mac.perm_addr[5] ^= 1;
 +      for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
 +              hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
  
        for (i = 0; i < ETH_ALEN; i++)
                hw->mac.addr[i] = hw->mac.perm_addr[i];
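
The generic read above pulls the address straight out of RAR0: RAL holds
MAC bytes 0-3 and the low 16 bits of RAH hold bytes 4-5, least-significant
byte first. A sketch of the byte extraction with invented register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ral = 0x4b1b2100;	/* hypothetical RAL(0) */
	uint32_t rah = 0x80006c21;	/* hypothetical RAH(0) */
	uint8_t mac[6];
	int i;

	for (i = 0; i < 4; i++)			/* bytes 0-3 from RAL */
		mac[i] = (uint8_t)(ral >> (i * 8));
	for (i = 0; i < 2; i++)			/* bytes 4-5 from RAH */
		mac[i + 4] = (uint8_t)(rah >> (i * 8));

	/* prints 00:21:1b:4b:21:6c here */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
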
diff --combined drivers/net/igb/igb_main.c
index 583a21c1def391908a04fd1f2a32d8b39ccfa8bc,e2ce8f8a21a7ceb45b2703bf73826e8cacce82df..0ed25f059a00732345f34ecbf721a167ed9c1cc2
@@@ -60,7 -60,7 +60,7 @@@ static const struct e1000_info *igb_inf
        [board_82575] = &e1000_82575_info,
  };
  
 -static struct pci_device_id igb_pci_tbl[] = {
 +static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@@ -133,12 -133,6 +133,12 @@@ static void igb_msg_task(struct igb_ada
  static void igb_vmm_control(struct igb_adapter *);
  static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
  static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
 +static int igb_ndo_set_vf_vlan(struct net_device *netdev,
 +                             int vf, u16 vlan, u8 qos);
 +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
 +static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
 +                               struct ifla_vf_info *ivi);
  
  #ifdef CONFIG_PM
  static int igb_suspend(struct pci_dev *, pm_message_t);
@@@ -318,35 -312,31 +318,35 @@@ static void igb_cache_ring_register(str
                 */
                if (adapter->vfs_allocated_count) {
                        for (; i < adapter->rss_queues; i++)
 -                              adapter->rx_ring[i].reg_idx = rbase_offset +
 -                                                            Q_IDX_82576(i);
 +                              adapter->rx_ring[i]->reg_idx = rbase_offset +
 +                                                             Q_IDX_82576(i);
                        for (; j < adapter->rss_queues; j++)
 -                              adapter->tx_ring[j].reg_idx = rbase_offset +
 -                                                            Q_IDX_82576(j);
 +                              adapter->tx_ring[j]->reg_idx = rbase_offset +
 +                                                             Q_IDX_82576(j);
                }
        case e1000_82575:
        case e1000_82580:
        default:
                for (; i < adapter->num_rx_queues; i++)
 -                      adapter->rx_ring[i].reg_idx = rbase_offset + i;
 +                      adapter->rx_ring[i]->reg_idx = rbase_offset + i;
                for (; j < adapter->num_tx_queues; j++)
 -                      adapter->tx_ring[j].reg_idx = rbase_offset + j;
 +                      adapter->tx_ring[j]->reg_idx = rbase_offset + j;
                break;
        }
  }
  
  static void igb_free_queues(struct igb_adapter *adapter)
  {
 -      kfree(adapter->tx_ring);
 -      kfree(adapter->rx_ring);
 -
 -      adapter->tx_ring = NULL;
 -      adapter->rx_ring = NULL;
 +      int i;
  
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              kfree(adapter->tx_ring[i]);
 +              adapter->tx_ring[i] = NULL;
 +      }
 +      for (i = 0; i < adapter->num_rx_queues; i++) {
 +              kfree(adapter->rx_ring[i]);
 +              adapter->rx_ring[i] = NULL;
 +      }
        adapter->num_rx_queues = 0;
        adapter->num_tx_queues = 0;
  }
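
With this change igb's rings become individually allocated objects behind
arrays of pointers, so each ring can be freed (or later resized) on its
own. A userspace sketch of the alloc/unwind shape, with toy types and an
invented queue count:

#include <stdio.h>
#include <stdlib.h>

#define NUM_QUEUES 4

struct ring { int count; int queue_index; };

static struct ring *rings[NUM_QUEUES];

static void free_queues(void)
{
	for (int i = 0; i < NUM_QUEUES; i++) {
		free(rings[i]);
		rings[i] = NULL;
	}
}

static int alloc_queues(void)
{
	for (int i = 0; i < NUM_QUEUES; i++) {
		rings[i] = calloc(1, sizeof(struct ring));
		if (!rings[i]) {
			free_queues();	/* unwind partial allocation */
			return -1;
		}
		rings[i]->queue_index = i;
	}
	return 0;
}

int main(void)
{
	if (alloc_queues())
		return 1;
	printf("allocated %d rings\n", NUM_QUEUES);
	free_queues();
	return 0;
}
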
   **/
  static int igb_alloc_queues(struct igb_adapter *adapter)
  {
 +      struct igb_ring *ring;
        int i;
  
 -      adapter->tx_ring = kcalloc(adapter->num_tx_queues,
 -                                 sizeof(struct igb_ring), GFP_KERNEL);
 -      if (!adapter->tx_ring)
 -              goto err;
 -
 -      adapter->rx_ring = kcalloc(adapter->num_rx_queues,
 -                                 sizeof(struct igb_ring), GFP_KERNEL);
 -      if (!adapter->rx_ring)
 -              goto err;
 -
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              struct igb_ring *ring = &(adapter->tx_ring[i]);
 +              ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
 +              if (!ring)
 +                      goto err;
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
 +              adapter->tx_ring[i] = ring;
        }
  
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              struct igb_ring *ring = &(adapter->rx_ring[i]);
 +              ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
 +              if (!ring)
 +                      goto err;
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
 +              adapter->rx_ring[i] = ring;
        }
  
        igb_cache_ring_register(adapter);
@@@ -428,8 -421,6 +428,8 @@@ static void igb_assign_vector(struct ig
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
 +              if (!adapter->msix_entries && msix_vector == 0)
 +                      msixbm |= E1000_EIMS_OTHER;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
                BUG();
                break;
        }
 +
 +      /* add q_vector eims value to global eims_enable_mask */
 +      adapter->eims_enable_mask |= q_vector->eims_value;
 +
 +      /* configure q_vector to set itr on first interrupt */
 +      q_vector->set_itr = 1;
  }
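
igb_assign_vector() now also folds the EIMS bookkeeping into one place; on
82575-class hardware the per-vector mask is built by shifting per-queue
cause bits by the queue index and, for the non-MSI-X case, OR-ing in the
"other" cause on vector 0. A sketch of that composition with illustrative
bit values (not the real register layout):

#include <stdio.h>
#include <stdint.h>

#define EICR_RX_QUEUE0	0x00000001u	/* illustrative bit positions */
#define EICR_TX_QUEUE0	0x00000100u
#define EIMS_OTHER	0x80000000u

int main(void)
{
	int rx_queue = 2, tx_queue = 2, msix_vector = 0, have_msix = 0;
	uint32_t msixbm = 0;

	if (rx_queue >= 0)
		msixbm = EICR_RX_QUEUE0 << rx_queue;
	if (tx_queue >= 0)
		msixbm |= EICR_TX_QUEUE0 << tx_queue;
	if (!have_msix && msix_vector == 0)
		msixbm |= EIMS_OTHER;

	printf("eims_value = 0x%08x\n", (unsigned)msixbm);
	return 0;
}
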
  
  /**
@@@ -568,8 -553,11 +568,8 @@@ static void igb_configure_msix(struct i
  
        adapter->eims_enable_mask |= adapter->eims_other;
  
 -      for (i = 0; i < adapter->num_q_vectors; i++) {
 -              struct igb_q_vector *q_vector = adapter->q_vector[i];
 -              igb_assign_vector(q_vector, vector++);
 -              adapter->eims_enable_mask |= q_vector->eims_value;
 -      }
 +      for (i = 0; i < adapter->num_q_vectors; i++)
 +              igb_assign_vector(adapter->q_vector[i], vector++);
  
        wrfl();
  }
@@@ -649,8 -637,6 +649,8 @@@ static void igb_free_q_vectors(struct i
        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
                adapter->q_vector[v_idx] = NULL;
 +              if (!q_vector)
 +                      continue;
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
@@@ -688,7 -674,7 +688,7 @@@ static void igb_set_interrupt_capabilit
        /* start with one vector for every rx queue */
        numvecs = adapter->num_rx_queues;
  
-       /* if tx handler is seperate add 1 for every tx queue */
+       /* if tx handler is separate add 1 for every tx queue */
        if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
                numvecs += adapter->num_tx_queues;
  
@@@ -762,24 -748,33 +762,24 @@@ static int igb_alloc_q_vectors(struct i
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
 -              q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
                q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
                q_vector->itr_val = IGB_START_ITR;
 -              q_vector->set_itr = 1;
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
        return 0;
  
  err_out:
 -      while (v_idx) {
 -              v_idx--;
 -              q_vector = adapter->q_vector[v_idx];
 -              netif_napi_del(&q_vector->napi);
 -              kfree(q_vector);
 -              adapter->q_vector[v_idx] = NULL;
 -      }
 +      igb_free_q_vectors(adapter);
        return -ENOMEM;
  }
  
  static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                        int ring_idx, int v_idx)
  {
 -      struct igb_q_vector *q_vector;
 +      struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
  
 -      q_vector = adapter->q_vector[v_idx];
 -      q_vector->rx_ring = &adapter->rx_ring[ring_idx];
 +      q_vector->rx_ring = adapter->rx_ring[ring_idx];
        q_vector->rx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->rx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
  static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                        int ring_idx, int v_idx)
  {
 -      struct igb_q_vector *q_vector;
 +      struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
  
 -      q_vector = adapter->q_vector[v_idx];
 -      q_vector->tx_ring = &adapter->tx_ring[ring_idx];
 +      q_vector->tx_ring = adapter->tx_ring[ring_idx];
        q_vector->tx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->tx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
@@@ -881,6 -877,7 +881,6 @@@ static int igb_request_irq(struct igb_a
  {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
 -      struct e1000_hw *hw = &adapter->hw;
        int err = 0;
  
        if (adapter->msix_entries) {
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
 -              switch (hw->mac.type) {
 -              case e1000_82575:
 -                      wr32(E1000_MSIXBM(0),
 -                           (E1000_EICR_RX_QUEUE0 |
 -                            E1000_EICR_TX_QUEUE0 |
 -                            E1000_EIMS_OTHER));
 -                      break;
 -              case e1000_82580:
 -              case e1000_82576:
 -                      wr32(E1000_IVAR0, E1000_IVAR_VALID);
 -                      break;
 -              default:
 -                      break;
 -              }
 +              igb_assign_vector(adapter->q_vector[0], 0);
        }
  
        if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@@ -1101,7 -1111,7 +1101,7 @@@ static void igb_configure(struct igb_ad
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              struct igb_ring *ring = &adapter->rx_ring[i];
 +              struct igb_ring *ring = adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }
  
        adapter->tx_queue_len = netdev->tx_queue_len;
  }
  
 +/**
 + * igb_power_up_link - Power up the phy/serdes link
 + * @adapter: address of board private structure
 + **/
 +void igb_power_up_link(struct igb_adapter *adapter)
 +{
 +      if (adapter->hw.phy.media_type == e1000_media_type_copper)
 +              igb_power_up_phy_copper(&adapter->hw);
 +      else
 +              igb_power_up_serdes_link_82575(&adapter->hw);
 +}
 +
 +/**
 + * igb_power_down_link - Power down the phy/serdes link
 + * @adapter: address of board private structure
  + **/
 +static void igb_power_down_link(struct igb_adapter *adapter)
 +{
 +      if (adapter->hw.phy.media_type == e1000_media_type_copper)
 +              igb_power_down_phy_copper_82575(&adapter->hw);
 +      else
 +              igb_shutdown_serdes_link_82575(&adapter->hw);
 +}
  
  /**
   * igb_up - Open the interface and prepare it to handle traffic
@@@ -1153,8 -1140,6 +1153,8 @@@ int igb_up(struct igb_adapter *adapter
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
 +      else
 +              igb_assign_vector(adapter->q_vector[0], 0);
  
        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
@@@ -1353,14 -1338,12 +1353,14 @@@ void igb_reset(struct igb_adapter *adap
                wr32(E1000_PCIEMISC,
                                reg & ~E1000_PCIEMISC_LX_DECISION);
        }
 +      if (!netif_running(adapter->netdev))
 +              igb_power_down_link(adapter);
 +
        igb_update_mng_vlan(adapter);
  
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
  
 -      igb_reset_adaptive(hw);
        igb_get_phy_info(hw);
  }
  
@@@ -1379,10 -1362,6 +1379,10 @@@ static const struct net_device_ops igb_
        .ndo_vlan_rx_register   = igb_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = igb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = igb_vlan_rx_kill_vid,
 +      .ndo_set_vf_mac         = igb_ndo_set_vf_mac,
 +      .ndo_set_vf_vlan        = igb_ndo_set_vf_vlan,
 +      .ndo_set_vf_tx_rate     = igb_ndo_set_vf_bw,
 +      .ndo_get_vf_config      = igb_ndo_get_vf_config,
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = igb_netpoll,
  #endif
@@@ -1503,6 -1482,7 +1503,6 @@@ static int __devinit igb_probe(struct p
        igb_get_bus_info_pcie(hw);
  
        hw->phy.autoneg_wait_to_complete = false;
 -      hw->mac.adaptive_ifs = true;
  
        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
@@@ -1736,6 -1716,9 +1736,6 @@@ static void __devexit igb_remove(struc
  
        unregister_netdev(netdev);
  
 -      if (!igb_check_reset_block(hw))
 -              igb_reset_phy(hw);
 -
        igb_clear_interrupt_scheme(adapter);
  
  #ifdef CONFIG_PCI_IOV
@@@ -2011,7 -1994,7 +2011,7 @@@ static int igb_open(struct net_device *
        if (err)
                goto err_setup_rx;
  
 -      /* e1000_power_up_phy(adapter); */
 +      igb_power_up_link(adapter);
  
        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
  
  err_req_irq:
        igb_release_hw_control(adapter);
 -      /* e1000_power_down_phy(adapter); */
 +      igb_power_down_link(adapter);
        igb_free_all_rx_resources(adapter);
  err_setup_rx:
        igb_free_all_tx_resources(adapter);
@@@ -2141,19 -2124,19 +2141,19 @@@ static int igb_setup_all_tx_resources(s
        int i, err = 0;
  
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              err = igb_setup_tx_resources(&adapter->tx_ring[i]);
 +              err = igb_setup_tx_resources(adapter->tx_ring[i]);
                if (err) {
                        dev_err(&pdev->dev,
                                "Allocation for Tx Queue %u failed\n", i);
                        for (i--; i >= 0; i--)
 -                              igb_free_tx_resources(&adapter->tx_ring[i]);
 +                              igb_free_tx_resources(adapter->tx_ring[i]);
                        break;
                }
        }
  
        for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
                int r_idx = i % adapter->num_tx_queues;
 -              adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
 +              adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
        }
        return err;
  }
@@@ -2236,7 -2219,7 +2236,7 @@@ static void igb_configure_tx(struct igb
        int i;
  
        for (i = 0; i < adapter->num_tx_queues; i++)
 -              igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
 +              igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
  }
  
  /**
@@@ -2294,12 -2277,12 +2294,12 @@@ static int igb_setup_all_rx_resources(s
        int i, err = 0;
  
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              err = igb_setup_rx_resources(&adapter->rx_ring[i]);
 +              err = igb_setup_rx_resources(adapter->rx_ring[i]);
                if (err) {
                        dev_err(&pdev->dev,
                                "Allocation for Rx Queue %u failed\n", i);
                        for (i--; i >= 0; i--)
 -                              igb_free_rx_resources(&adapter->rx_ring[i]);
 +                              igb_free_rx_resources(adapter->rx_ring[i]);
                        break;
                }
        }
@@@ -2506,8 -2489,7 +2506,8 @@@ static void igb_rlpml_set(struct igb_ad
        wr32(E1000_RLPML, max_frame_size);
  }
  
 -static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
 +static inline void igb_set_vmolr(struct igb_adapter *adapter,
 +                               int vfn, bool aupe)
  {
        struct e1000_hw *hw = &adapter->hw;
        u32 vmolr;
                return;
  
        vmolr = rd32(E1000_VMOLR(vfn));
 -      vmolr |= E1000_VMOLR_AUPE |        /* Accept untagged packets */
 -               E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
 +      vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
 +      if (aupe)
 +              vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */
 +      else
 +              vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
  
        /* clear all bits that might not be set */
        vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
@@@ -2591,14 -2570,11 +2591,14 @@@ void igb_configure_rx_ring(struct igb_a
                         E1000_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
        }
 +      /* Only set Drop Enable if we are supporting multiple queues */
 +      if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
 +              srrctl |= E1000_SRRCTL_DROP_EN;
  
        wr32(E1000_SRRCTL(reg_idx), srrctl);
  
        /* set filtering for VMDQ pools */
 -      igb_set_vmolr(adapter, reg_idx & 0x7);
 +      igb_set_vmolr(adapter, reg_idx & 0x7, true);
  
        /* enable receive descriptor fetching */
        rxdctl = rd32(E1000_RXDCTL(reg_idx));
@@@ -2630,7 -2606,7 +2630,7 @@@ static void igb_configure_rx(struct igb
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
        for (i = 0; i < adapter->num_rx_queues; i++)
 -              igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
 +              igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
  }
  
  /**
@@@ -2667,7 -2643,7 +2667,7 @@@ static void igb_free_all_tx_resources(s
        int i;
  
        for (i = 0; i < adapter->num_tx_queues; i++)
 -              igb_free_tx_resources(&adapter->tx_ring[i]);
 +              igb_free_tx_resources(adapter->tx_ring[i]);
  }
  
  void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
@@@ -2734,7 -2710,7 +2734,7 @@@ static void igb_clean_all_tx_rings(stru
        int i;
  
        for (i = 0; i < adapter->num_tx_queues; i++)
 -              igb_clean_tx_ring(&adapter->tx_ring[i]);
 +              igb_clean_tx_ring(adapter->tx_ring[i]);
  }
  
  /**
@@@ -2771,7 -2747,7 +2771,7 @@@ static void igb_free_all_rx_resources(s
        int i;
  
        for (i = 0; i < adapter->num_rx_queues; i++)
 -              igb_free_rx_resources(&adapter->rx_ring[i]);
 +              igb_free_rx_resources(adapter->rx_ring[i]);
  }
  
  /**
@@@ -2835,7 -2811,7 +2835,7 @@@ static void igb_clean_all_rx_rings(stru
        int i;
  
        for (i = 0; i < adapter->num_rx_queues; i++)
 -              igb_clean_rx_ring(&adapter->rx_ring[i]);
 +              igb_clean_rx_ring(adapter->rx_ring[i]);
  }
  
  /**
@@@ -2877,30 -2853,38 +2877,30 @@@ static int igb_write_mc_addr_list(struc
  {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 -      struct dev_mc_list *mc_ptr = netdev->mc_list;
 +      struct dev_mc_list *mc_ptr;
        u8  *mta_list;
 -      u32 vmolr = 0;
        int i;
  
 -      if (!netdev->mc_count) {
 +      if (netdev_mc_empty(netdev)) {
                /* nothing to program, so clear mc list */
                igb_update_mc_addr_list(hw, NULL, 0);
                igb_restore_vf_multicasts(adapter);
                return 0;
        }
  
 -      mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
 +      mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
        if (!mta_list)
                return -ENOMEM;
  
 -      /* set vmolr receive overflow multicast bit */
 -      vmolr |= E1000_VMOLR_ROMPE;
 -
        /* The shared function expects a packed array of only addresses. */
 -      mc_ptr = netdev->mc_list;
 +      i = 0;
 +      netdev_for_each_mc_addr(mc_ptr, netdev)
 +              memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
  
 -      for (i = 0; i < netdev->mc_count; i++) {
 -              if (!mc_ptr)
 -                      break;
 -              memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
 -              mc_ptr = mc_ptr->next;
 -      }
        igb_update_mc_addr_list(hw, mta_list, i);
        kfree(mta_list);
  
 -      return netdev->mc_count;
 +      return netdev_mc_count(netdev);
  }
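
The rewritten igb_write_mc_addr_list() packs the multicast list into one
flat buffer of 6-byte addresses before handing it to the shared code. A
sketch of that packing with toy data:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	unsigned char addrs[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	unsigned char mta_list[2 * ETH_ALEN];
	int i = 0;

	for (int n = 0; n < 2; n++)	/* stands in for the list iterator */
		memcpy(mta_list + (i++ * ETH_ALEN), addrs[n], ETH_ALEN);

	printf("packed %d addresses into %zu bytes\n", i, sizeof(mta_list));
	return 0;
}
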
  
  /**
@@@ -2921,13 -2905,12 +2921,13 @@@ static int igb_write_uc_addr_list(struc
        int count = 0;
  
        /* return ENOMEM indicating insufficient memory for addresses */
 -      if (netdev->uc.count > rar_entries)
 +      if (netdev_uc_count(netdev) > rar_entries)
                return -ENOMEM;
  
 -      if (netdev->uc.count && rar_entries) {
 +      if (!netdev_uc_empty(netdev) && rar_entries) {
                struct netdev_hw_addr *ha;
 -              list_for_each_entry(ha, &netdev->uc.list, list) {
 +
 +              netdev_for_each_uc_addr(ha, netdev) {
                        if (!rar_entries)
                                break;
                        igb_rar_set_qsel(adapter, ha->addr,
@@@ -3031,7 -3014,7 +3031,7 @@@ static void igb_update_phy_info(unsigne
   * igb_has_link - check shared code for link and determine up/down
   * @adapter: pointer to driver private info
   **/
 -static bool igb_has_link(struct igb_adapter *adapter)
 +bool igb_has_link(struct igb_adapter *adapter)
  {
        struct e1000_hw *hw = &adapter->hw;
        bool link_active = false;
@@@ -3148,9 -3131,10 +3148,9 @@@ static void igb_watchdog_task(struct wo
        }
  
        igb_update_stats(adapter);
 -      igb_update_adaptive(hw);
  
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              struct igb_ring *tx_ring = &adapter->tx_ring[i];
 +              struct igb_ring *tx_ring = adapter->tx_ring[i];
                if (!netif_carrier_ok(netdev)) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
@@@ -3251,10 -3235,6 +3251,10 @@@ static void igb_update_ring_itr(struct 
        else
                new_val = avg_wire_size / 2;
  
 +      /* when in itr mode 3 do not exceed 20K ints/sec */
 +      if (adapter->rx_itr_setting == 3 && new_val < 196)
 +              new_val = 196;
 +
  set_itr_val:
        if (new_val != q_vector->itr_val) {
                q_vector->itr_val = new_val;
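
A quick check of the 196 floor added above, assuming the ITR value counts
0.25 us units (so interval_us = new_val / 4): 196 gives a 49 us interrupt
interval, i.e. roughly 20K interrupts per second, matching the comment.

#include <stdio.h>

int main(void)
{
	double interval_us = 196 / 4.0;			/* 49 us */
	double ints_per_sec = 1e6 / interval_us;	/* ~20408 */

	printf("%.0f us -> %.0f ints/sec\n", interval_us, ints_per_sec);
	return 0;
}
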
@@@ -3350,13 -3330,13 +3350,13 @@@ static void igb_set_itr(struct igb_adap
  
        adapter->rx_itr = igb_update_itr(adapter,
                                    adapter->rx_itr,
 -                                  adapter->rx_ring->total_packets,
 -                                  adapter->rx_ring->total_bytes);
 +                                  q_vector->rx_ring->total_packets,
 +                                  q_vector->rx_ring->total_bytes);
  
        adapter->tx_itr = igb_update_itr(adapter,
                                    adapter->tx_itr,
 -                                  adapter->tx_ring->total_packets,
 -                                  adapter->tx_ring->total_bytes);
 +                                  q_vector->tx_ring->total_packets,
 +                                  q_vector->tx_ring->total_bytes);
        current_itr = max(adapter->rx_itr, adapter->tx_itr);
  
        /* conservative mode (itr 3) eliminates the lowest_latency setting */
        }
  
  set_itr_now:
 -      adapter->rx_ring->total_bytes = 0;
 -      adapter->rx_ring->total_packets = 0;
 -      adapter->tx_ring->total_bytes = 0;
 -      adapter->tx_ring->total_packets = 0;
 +      q_vector->rx_ring->total_bytes = 0;
 +      q_vector->rx_ring->total_packets = 0;
 +      q_vector->tx_ring->total_bytes = 0;
 +      q_vector->tx_ring->total_packets = 0;
  
        if (new_itr != q_vector->itr_val) {
                /* this attempts to bias the interrupt rate towards Bulk
@@@ -3422,8 -3402,8 +3422,8 @@@ static inline int igb_tso_adv(struct ig
        int err;
        struct igb_buffer *buffer_info;
        u32 info = 0, tu_cmd = 0;
 -      u32 mss_l4len_idx, l4len;
 -      *hdr_len = 0;
 +      u32 mss_l4len_idx;
 +      u8 l4len;
  
        if (skb_header_cloned(skb)) {
                err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
@@@ -3629,7 -3609,6 +3629,7 @@@ static inline int igb_tx_map_adv(struc
        }
  
        tx_ring->buffer_info[i].skb = skb;
 +      tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
        tx_ring->buffer_info[first].next_to_watch = i;
  
        return ++count;
@@@ -3643,12 -3622,14 +3643,12 @@@ dma_error
        buffer_info->length = 0;
        buffer_info->next_to_watch = 0;
        buffer_info->mapped_as_page = false;
 -      count--;
  
        /* clear timestamp and dma mappings for remaining portion of packet */
 -      while (count >= 0) {
 -              count--;
 +      while (count--) {
 +              if (i == 0)
 +                      i = tx_ring->count;
                i--;
 -              if (i < 0)
 -                      i += tx_ring->count;
                buffer_info = &tx_ring->buffer_info[i];
                igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
        }
  }
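
The reworked unwind walks count slots backwards from index i through a
ring of tx_ring->count entries, wrapping at slot 0 before the decrement
instead of juggling a signed index. A standalone check of the wrap logic:

#include <stdio.h>

int main(void)
{
	const int N = 8;	/* ring size (tx_ring->count) */
	int i = 2, count = 5;	/* start slot and slots to unwind */

	while (count--) {
		if (i == 0)
			i = N;
		i--;
		printf("unmap slot %d\n", i);	/* visits 1,0,7,6,5 */
	}
	return 0;
}
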
  
  static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
 -                                  int tx_flags, int count, u32 paylen,
 +                                  u32 tx_flags, int count, u32 paylen,
                                    u8 hdr_len)
  {
        union e1000_adv_tx_desc *tx_desc;
@@@ -3745,7 -3726,7 +3745,7 @@@ static int __igb_maybe_stop_tx(struct i
        return 0;
  }
  
 -static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
  {
        if (igb_desc_unused(tx_ring) >= size)
                return 0;
@@@ -3756,10 -3737,10 +3756,10 @@@ netdev_tx_t igb_xmit_frame_ring_adv(str
                                    struct igb_ring *tx_ring)
  {
        struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 -      unsigned int first;
 -      unsigned int tx_flags = 0;
 -      u8 hdr_len = 0;
        int tso = 0, count;
 +      u32 tx_flags = 0;
 +      u16 first;
 +      u8 hdr_len = 0;
        union skb_shared_tx *shtx = skb_tx(skb);
  
        /* need: 1 descriptor per page,
@@@ -3940,7 -3921,7 +3940,7 @@@ static int igb_change_mtu(struct net_de
        netdev->mtu = new_mtu;
  
        for (i = 0; i < adapter->num_rx_queues; i++)
 -              adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
 +              adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
  
        if (netif_running(netdev))
                igb_up(adapter);
@@@ -3962,7 -3943,7 +3962,7 @@@ void igb_update_stats(struct igb_adapte
        struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
 -      u32 rnbc;
 +      u32 rnbc, reg;
        u16 phy_tmp;
        int i;
        u64 bytes, packets;
        packets = 0;
        for (i = 0; i < adapter->num_rx_queues; i++) {
                u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
 -              adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
 +              struct igb_ring *ring = adapter->rx_ring[i];
 +              ring->rx_stats.drops += rqdpc_tmp;
                net_stats->rx_fifo_errors += rqdpc_tmp;
 -              bytes += adapter->rx_ring[i].rx_stats.bytes;
 -              packets += adapter->rx_ring[i].rx_stats.packets;
 +              bytes += ring->rx_stats.bytes;
 +              packets += ring->rx_stats.packets;
        }
  
        net_stats->rx_bytes = bytes;
        bytes = 0;
        packets = 0;
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              bytes += adapter->tx_ring[i].tx_stats.bytes;
 -              packets += adapter->tx_ring[i].tx_stats.packets;
 +              struct igb_ring *ring = adapter->tx_ring[i];
 +              bytes += ring->tx_stats.bytes;
 +              packets += ring->tx_stats.packets;
        }
        net_stats->tx_bytes = bytes;
        net_stats->tx_packets = packets;
        adapter->stats.mptc += rd32(E1000_MPTC);
        adapter->stats.bptc += rd32(E1000_BPTC);
  
 -      /* used for adaptive IFS */
 -      hw->mac.tx_packet_delta = rd32(E1000_TPT);
 -      adapter->stats.tpt += hw->mac.tx_packet_delta;
 -      hw->mac.collision_delta = rd32(E1000_COLC);
 -      adapter->stats.colc += hw->mac.collision_delta;
 +      adapter->stats.tpt += rd32(E1000_TPT);
 +      adapter->stats.colc += rd32(E1000_COLC);
  
        adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
 -      adapter->stats.rxerrc += rd32(E1000_RXERRC);
 -      adapter->stats.tncrs += rd32(E1000_TNCRS);
 +      /* read internal phy specific stats */
 +      reg = rd32(E1000_CTRL_EXT);
 +      if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
 +              adapter->stats.rxerrc += rd32(E1000_RXERRC);
 +              adapter->stats.tncrs += rd32(E1000_TNCRS);
 +      }
 +
        adapter->stats.tsctc += rd32(E1000_TSCTC);
        adapter->stats.tsctfc += rd32(E1000_TSCTFC);
  
@@@ -4128,9 -4105,6 +4128,9 @@@ static irqreturn_t igb_msix_other(int i
        u32 icr = rd32(E1000_ICR);
        /* reading ICR causes bit 31 of EICR to be cleared */
  
 +      if (icr & E1000_ICR_DRSTA)
 +              schedule_work(&adapter->reset_task);
 +
        if (icr & E1000_ICR_DOUTSYNC) {
                /* HW is reporting DMA is out of sync */
                adapter->stats.doosync++;
  
  static void igb_write_itr(struct igb_q_vector *q_vector)
  {
 +      struct igb_adapter *adapter = q_vector->adapter;
        u32 itr_val = q_vector->itr_val & 0x7FFC;
  
        if (!q_vector->set_itr)
        if (!itr_val)
                itr_val = 0x4;
  
 -      if (q_vector->itr_shift)
 -              itr_val |= itr_val << q_vector->itr_shift;
 +      if (adapter->hw.mac.type == e1000_82575)
 +              itr_val |= itr_val << 16;
        else
                itr_val |= 0x8000000;
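
The same masked interval is written either duplicated into the upper
half-word (82575) or with a high flag bit OR-ed in (later MACs); only the
bit math is shown below, with a sample interval value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t itr_val = 0x1F4 & 0x7FFC;	/* sample interval, masked */
	uint32_t v82575 = itr_val | (itr_val << 16);
	uint32_t vnewer = itr_val | 0x8000000;

	printf("82575: 0x%08x, later MACs: 0x%08x\n",
	       (unsigned)v82575, (unsigned)vnewer);
	return 0;
}
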
  
@@@ -4247,8 -4220,9 +4247,8 @@@ static void igb_setup_dca(struct igb_ad
        wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
  
        for (i = 0; i < adapter->num_q_vectors; i++) {
 -              struct igb_q_vector *q_vector = adapter->q_vector[i];
 -              q_vector->cpu = -1;
 -              igb_update_dca(q_vector);
 +              adapter->q_vector[i]->cpu = -1;
 +              igb_update_dca(adapter->q_vector[i]);
        }
  }
  
@@@ -4522,57 -4496,10 +4522,57 @@@ static s32 igb_vlvf_set(struct igb_adap
                                reg |= size;
                                wr32(E1000_VMOLR(vf), reg);
                        }
 -                      return 0;
                }
        }
 -      return -1;
 +      return 0;
 +}
 +
 +static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
 +{
 +      struct e1000_hw *hw = &adapter->hw;
 +
 +      if (vid)
 +              wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
 +      else
 +              wr32(E1000_VMVIR(vf), 0);
 +}
 +
 +static int igb_ndo_set_vf_vlan(struct net_device *netdev,
 +                             int vf, u16 vlan, u8 qos)
 +{
 +      int err = 0;
 +      struct igb_adapter *adapter = netdev_priv(netdev);
 +
 +      if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
 +              return -EINVAL;
 +      if (vlan || qos) {
 +              err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
 +              if (err)
 +                      goto out;
 +              igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
 +              igb_set_vmolr(adapter, vf, !vlan);
 +              adapter->vf_data[vf].pf_vlan = vlan;
 +              adapter->vf_data[vf].pf_qos = qos;
 +              dev_info(&adapter->pdev->dev,
 +                       "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
 +              if (test_bit(__IGB_DOWN, &adapter->state)) {
 +                      dev_warn(&adapter->pdev->dev,
 +                               "The VF VLAN has been set,"
 +                               " but the PF device is not up.\n");
 +                      dev_warn(&adapter->pdev->dev,
 +                               "Bring the PF device up before"
 +                               " attempting to use the VF device.\n");
 +              }
 +      } else {
 +              igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
 +                                 false, vf);
 +              igb_set_vmvir(adapter, vlan, vf);
 +              igb_set_vmolr(adapter, vf, true);
 +              adapter->vf_data[vf].pf_vlan = 0;
 +              adapter->vf_data[vf].pf_qos = 0;
  +      }
  +out:
  +      return err;
  }
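
igb_set_vmvir() above packs the 12-bit VLAN id into the low bits with the
3-bit priority at the 802.1Q PCP position, matching
vlan | (qos << VLAN_PRIO_SHIFT) with VLAN_PRIO_SHIFT = 13; the
E1000_VMVIR_VLANA_DEFAULT flag is left out of this sketch:

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT 13	/* PCP field position in the 802.1Q TCI */

int main(void)
{
	uint16_t vlan = 100, qos = 5;
	uint32_t vmvir = vlan | ((uint32_t)qos << VLAN_PRIO_SHIFT);

	printf("vlan %u qos %u -> 0x%04x\n", vlan, qos, (unsigned)vmvir);
	return 0;
}
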
  
  static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
  
  static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
  {
 -      /* clear all flags */
 -      adapter->vf_data[vf].flags = 0;
 +      /* clear flags */
 +      adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
        adapter->vf_data[vf].last_nack = jiffies;
  
        /* reset offloads to defaults */
 -      igb_set_vmolr(adapter, vf);
 +      igb_set_vmolr(adapter, vf, true);
  
        /* reset vlans for device */
        igb_clear_vf_vfta(adapter, vf);
 +      if (adapter->vf_data[vf].pf_vlan)
 +              igb_ndo_set_vf_vlan(adapter->netdev, vf,
 +                                  adapter->vf_data[vf].pf_vlan,
 +                                  adapter->vf_data[vf].pf_qos);
 +      else
 +              igb_clear_vf_vfta(adapter, vf);
  
        /* reset multicast table array for vf */
        adapter->vf_data[vf].num_vf_mc_hashes = 0;
@@@ -4613,8 -4534,7 +4613,8 @@@ static void igb_vf_reset_event(struct i
        unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
  
        /* generate a new mac address as we were hotplug removed/added */
 -      random_ether_addr(vf_mac);
 +      if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
 +              random_ether_addr(vf_mac);
  
        /* process remaining reset events */
        igb_vf_reset(adapter, vf);
@@@ -4727,10 -4647,7 +4727,10 @@@ static void igb_rcv_msg_from_vf(struct 
                retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
                break;
        case E1000_VF_SET_VLAN:
 -              retval = igb_set_vf_vlan(adapter, msgbuf, vf);
 +              if (adapter->vf_data[vf].pf_vlan)
 +                      retval = -1;
 +              else
 +                      retval = igb_set_vf_vlan(adapter, msgbuf, vf);
                break;
        default:
                dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
@@@ -4811,9 -4728,6 +4811,9 @@@ static irqreturn_t igb_intr_msi(int irq
  
        igb_write_itr(q_vector);
  
 +      if (icr & E1000_ICR_DRSTA)
 +              schedule_work(&adapter->reset_task);
 +
        if (icr & E1000_ICR_DOUTSYNC) {
                /* HW is reporting DMA is out of sync */
                adapter->stats.doosync++;
@@@ -4853,9 -4767,6 +4853,9 @@@ static irqreturn_t igb_intr(int irq, vo
        if (!(icr & E1000_ICR_INT_ASSERTED))
                return IRQ_NONE;
  
 +      if (icr & E1000_ICR_DRSTA)
 +              schedule_work(&adapter->reset_task);
 +
        if (icr & E1000_ICR_DOUTSYNC) {
                /* HW is reporting DMA is out of sync */
                adapter->stats.doosync++;
@@@ -5019,7 -4930,7 +5019,7 @@@ static bool igb_clean_tx_irq(struct igb
                        if (skb) {
                                unsigned int segs, bytecount;
                                /* gso_segs is currently only valid for tcp */
 -                              segs = skb_shinfo(skb)->gso_segs ?: 1;
 +                              segs = buffer_info->gso_segs;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
@@@ -5837,9 -5748,7 +5837,9 @@@ static int __igb_shutdown(struct pci_de
  
        *enable_wake = wufc || adapter->en_mng_pt;
        if (!*enable_wake)
 -              igb_shutdown_serdes_link_82575(hw);
 +              igb_power_down_link(adapter);
 +      else
 +              igb_power_up_link(adapter);
  
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
@@@ -5879,7 -5788,6 +5879,7 @@@ static int igb_resume(struct pci_dev *p
  
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
 +      pci_save_state(pdev);
  
        err = pci_enable_device_mem(pdev);
        if (err) {
                return -ENOMEM;
        }
  
 -      /* e1000_power_up_phy(adapter); */
 -
        igb_reset(adapter);
  
        /* let the f/w know that the h/w is now under the control of the
@@@ -6005,7 -5915,6 +6005,7 @@@ static pci_ers_result_t igb_io_slot_res
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
 +              pci_save_state(pdev);
  
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);
@@@ -6094,43 -6003,6 +6094,43 @@@ static int igb_set_vf_mac(struct igb_ad
        return 0;
  }
  
 +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 +{
 +      struct igb_adapter *adapter = netdev_priv(netdev);
 +      if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
 +              return -EINVAL;
 +      adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
 +      dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
 +      dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
 +                                    " change effective.");
 +      if (test_bit(__IGB_DOWN, &adapter->state)) {
 +              dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
 +                       " but the PF device is not up.\n");
 +              dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
 +                       " attempting to use the VF device.\n");
 +      }
 +      return igb_set_vf_mac(adapter, vf, mac);
 +}
 +
 +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 +{
 +      return -EOPNOTSUPP;
 +}
 +
 +static int igb_ndo_get_vf_config(struct net_device *netdev,
 +                               int vf, struct ifla_vf_info *ivi)
 +{
 +      struct igb_adapter *adapter = netdev_priv(netdev);
 +      if (vf >= adapter->vfs_allocated_count)
 +              return -EINVAL;
 +      ivi->vf = vf;
 +      memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
 +      ivi->tx_rate = 0;
 +      ivi->vlan = adapter->vf_data[vf].pf_vlan;
 +      ivi->qos = adapter->vf_data[vf].pf_qos;
 +      return 0;
 +}
 +
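
These handlers are the PF-side implementations of the VF management ops;
elsewhere in the patch (not shown in this hunk) they are wired into the
driver's net_device_ops. A sketch of that wiring, assuming the
2.6.34-era field names:

        static const struct net_device_ops igb_netdev_ops = {
                /* ... existing entries ... */
                .ndo_set_vf_mac     = igb_ndo_set_vf_mac,
                .ndo_set_vf_vlan    = igb_ndo_set_vf_vlan,
                .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
                .ndo_get_vf_config  = igb_ndo_get_vf_config,
        };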
  static void igb_vmm_control(struct igb_adapter *adapter)
  {
        struct e1000_hw *hw = &adapter->hw;
diff --combined drivers/net/ks8851.c
index b5219cce12ed8c4df5275458d078b65dd452726a,cfebe0e218fc21fdaa6d7515e5c9572750d8168a..0573e0bb4444162e5153043532023b82b7a16bb0
@@@ -407,7 -407,7 +407,7 @@@ static irqreturn_t ks8851_irq(int irq, 
   * @buff: The buffer address
   * @len: The length of the data to read
   *
-  * Issue an RXQ FIFO read command and read the @len ammount of data from
+  * Issue an RXQ FIFO read command and read the @len amount of data from
   * the FIFO into the buffer specified by @buff.
   */
  static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
@@@ -965,13 -965,14 +965,13 @@@ static void ks8851_set_rx_mode(struct n
  
                rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
                                RXCR1_RXPAFMA | RXCR1_RXMAFMA);
 -      } else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) {
 -              struct dev_mc_list *mcptr = dev->mc_list;
 +      } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
 +              struct dev_mc_list *mcptr;
                u32 crc;
 -              int i;
  
                /* accept some multicast */
  
 -              for (i = dev->mc_count; i > 0; i--) {
 +              netdev_for_each_mc_addr(mcptr, dev) {
                        crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
                        crc >>= (32 - 6);  /* get top six bits */
  
index 05b8bde9980dbbe0b571dd17a995a375c959ed1c,b8d21ab212c663f3db7c2002553ee2ba7e4fb895..7dbff87480dc6d6d844ae5bce2230c561a5ea7f9
@@@ -67,8 -67,8 +67,8 @@@ static int ql_update_ring_coalescing(st
                        status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
                                                CFG_LCQ, rx_ring->cq_id);
                        if (status) {
 -                              QPRINTK(qdev, IFUP, ERR,
 -                                      "Failed to load CQICB.\n");
 +                              netif_err(qdev, ifup, qdev->ndev,
 +                                        "Failed to load CQICB.\n");
                                goto exit;
                        }
                }
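
netif_err() and the other netif_<level>() helpers fold the QPRINTK-style
msg_enable test into the call itself; each converted
QPRINTK(qdev, IFUP, ERR, ...) behaves roughly like this sketch (not the
helper's literal definition):

        if (netif_msg_ifup(qdev))       /* qdev->msg_enable & NETIF_MSG_IFUP */
                netdev_err(qdev->ndev, "Failed to load CQICB.\n");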
@@@ -89,8 -89,8 +89,8 @@@
                        status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
                                                CFG_LCQ, rx_ring->cq_id);
                        if (status) {
 -                              QPRINTK(qdev, IFUP, ERR,
 -                                      "Failed to load CQICB.\n");
 +                              netif_err(qdev, ifup, qdev->ndev,
 +                                        "Failed to load CQICB.\n");
                                goto exit;
                        }
                }
@@@ -107,8 -107,8 +107,8 @@@ static void ql_update_stats(struct ql_a
  
        spin_lock(&qdev->stats_lock);
        if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
 -                      QPRINTK(qdev, DRV, ERR,
 -                              "Couldn't get xgmac sem.\n");
 +                      netif_err(qdev, drv, qdev->ndev,
 +                                "Couldn't get xgmac sem.\n");
                goto quit;
        }
        /*
         */
        for (i = 0x200; i < 0x280; i += 8) {
                if (ql_read_xgmac_reg64(qdev, i, &data)) {
 -                      QPRINTK(qdev, DRV, ERR,
 -                              "Error reading status register 0x%.04x.\n", i);
 +                      netif_err(qdev, drv, qdev->ndev,
 +                                "Error reading status register 0x%.04x.\n",
 +                                i);
                        goto end;
                } else
                        *iter = data;
         */
        for (i = 0x300; i < 0x3d0; i += 8) {
                if (ql_read_xgmac_reg64(qdev, i, &data)) {
 -                      QPRINTK(qdev, DRV, ERR,
 -                              "Error reading status register 0x%.04x.\n", i);
 +                      netif_err(qdev, drv, qdev->ndev,
 +                                "Error reading status register 0x%.04x.\n",
 +                                i);
                        goto end;
                } else
                        *iter = data;
         */
        for (i = 0x500; i < 0x540; i += 8) {
                if (ql_read_xgmac_reg64(qdev, i, &data)) {
 -                      QPRINTK(qdev, DRV, ERR,
 -                              "Error reading status register 0x%.04x.\n", i);
 +                      netif_err(qdev, drv, qdev->ndev,
 +                                "Error reading status register 0x%.04x.\n",
 +                                i);
                        goto end;
                } else
                        *iter = data;
         */
        for (i = 0x568; i < 0x5a8; i += 8) {
                if (ql_read_xgmac_reg64(qdev, i, &data)) {
 -                      QPRINTK(qdev, DRV, ERR,
 -                              "Error reading status register 0x%.04x.\n", i);
 +                      netif_err(qdev, drv, qdev->ndev,
 +                                "Error reading status register 0x%.04x.\n",
 +                                i);
                        goto end;
                } else
                        *iter = data;
         * Get RX NIC FIFO DROP statistics.
         */
        if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
 -              QPRINTK(qdev, DRV, ERR,
 -                      "Error reading status register 0x%.04x.\n", i);
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "Error reading status register 0x%.04x.\n", i);
                goto end;
        } else
                *iter = data;
@@@ -400,13 -396,14 +400,13 @@@ static int ql_set_wol(struct net_devic
                return -EINVAL;
        qdev->wol = wol->wolopts;
  
 -      QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
 -                       qdev->wol, ndev->name);
 +      netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
        if (!qdev->wol) {
                u32 wol = 0;
                status = ql_mb_wol_mode(qdev, wol);
 -              QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
 -                      (status == 0) ? "cleared successfully" : "clear failed",
 -                      wol, qdev->ndev->name);
 +              netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
-                         status == 0 ? "cleared sucessfully" : "clear failed",
++                        status == 0 ? "cleared successfully" : "clear failed",
 +                        wol);
        }
  
        return 0;
@@@ -503,8 -500,7 +503,8 @@@ static int ql_run_loopback_test(struct 
                        return -EPIPE;
                atomic_inc(&qdev->lb_count);
        }
 -
 +      /* Give queue time to settle before testing results. */
 +      msleep(2);
        ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
        return atomic_read(&qdev->lb_count) ? -EIO : 0;
  }
@@@ -537,13 -533,9 +537,13 @@@ static void ql_self_test(struct net_dev
                        data[0] = 0;
                }
                clear_bit(QL_SELFTEST, &qdev->flags);
 +              /* Give link time to come up after
 +               * port configuration changes.
 +               */
 +              msleep_interruptible(4 * 1000);
        } else {
 -              QPRINTK(qdev, DRV, ERR,
 -                      "%s: is down, Loopback test will fail.\n", ndev->name);
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "is down, Loopback test will fail.\n");
                eth_test->flags |= ETH_TEST_FL_FAILED;
        }
  }
index c26ec5d740f6cf8ec51730b798a6ee051cc792a9,a35845b48ea4fbf1fa3e72987640c89fcf1c6a0e..fd34f266c0a80f9cf83daf61d1b50abccdab66cd
@@@ -73,19 -73,7 +73,19 @@@ static int qlge_irq_type = MSIX_IRQ
  module_param(qlge_irq_type, int, MSIX_IRQ);
  MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
  
 -static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
 +static int qlge_mpi_coredump;
 +module_param(qlge_mpi_coredump, int, 0);
 +MODULE_PARM_DESC(qlge_mpi_coredump,
 +              "Option to enable MPI firmware dump. "
 +              "Default is OFF - Do not allocate memory.");
 +
 +static int qlge_force_coredump;
 +module_param(qlge_force_coredump, int, 0);
 +MODULE_PARM_DESC(qlge_force_coredump,
 +              "Option to allow force of firmware core dump. "
 +              "Default is OFF - Do not allow.");
 +
 +static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
        /* required last entry */
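
DEFINE_PCI_DEVICE_TABLE() declares the ID table const and annotates it
for the devinit-const section, roughly as defined in <linux/pci.h> of
this era:

        #define DEFINE_PCI_DEVICE_TABLE(_table) \
                const struct pci_device_id _table[] __devinitconst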
@@@ -128,7 -116,7 +128,7 @@@ static int ql_sem_trylock(struct ql_ada
                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
                break;
        default:
 -              QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
 +              netif_alert(qdev, probe, qdev->ndev, "Bad Semaphore mask!\n");
                return -EINVAL;
        }
  
@@@ -168,17 -156,17 +168,17 @@@ int ql_wait_reg_rdy(struct ql_adapter *
  
                /* check for errors */
                if (temp & err_bit) {
 -                      QPRINTK(qdev, PROBE, ALERT,
 -                              "register 0x%.08x access error, value = 0x%.08x!.\n",
 -                              reg, temp);
 +                      netif_alert(qdev, probe, qdev->ndev,
 +                                  "register 0x%.08x access error, value = 0x%.08x!\n",
 +                                  reg, temp);
                        return -EIO;
                } else if (temp & bit)
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
 -      QPRINTK(qdev, PROBE, ALERT,
 -              "Timed out waiting for reg %x to come ready.\n", reg);
 +      netif_alert(qdev, probe, qdev->ndev,
 +                  "Timed out waiting for reg %x to come ready.\n", reg);
        return -ETIMEDOUT;
  }
  
@@@ -221,7 -209,7 +221,7 @@@ int ql_write_cfg(struct ql_adapter *qde
  
        map = pci_map_single(qdev->pdev, ptr, size, direction);
        if (pci_dma_mapping_error(qdev->pdev, map)) {
 -              QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
                return -ENOMEM;
        }
  
  
        status = ql_wait_cfg(qdev, bit);
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Timed out waiting for CFG to come ready.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Timed out waiting for CFG to come ready.\n");
                goto exit;
        }
  
@@@ -313,8 -301,8 +313,8 @@@ int ql_get_mac_addr_reg(struct ql_adapt
        case MAC_ADDR_TYPE_VLAN:
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
 -              QPRINTK(qdev, IFUP, CRIT,
 -                      "Address type %d not yet supported.\n", type);
 +              netif_crit(qdev, ifup, qdev->ndev,
 +                         "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
  exit:
@@@ -371,11 -359,12 +371,11 @@@ static int ql_set_mac_addr_reg(struct q
                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
                            (addr[5]);
  
 -                      QPRINTK(qdev, IFUP, DEBUG,
 -                              "Adding %s address %pM"
 -                              " at index %d in the CAM.\n",
 -                              ((type ==
 -                                MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
 -                               "UNICAST"), addr, index);
 +                      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                                   "Adding %s address %pM at index %d in the CAM.\n",
 +                                   type == MAC_ADDR_TYPE_MULTI_MAC ?
 +                                   "MULTICAST" : "UNICAST",
 +                                   addr, index);
  
                        status =
                            ql_wait_reg_rdy(qdev,
                         * addressing. It's either MAC_ADDR_E on or off.
                         * That's bit-27 we're talking about.
                         */
 -                      QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
 -                              (enable_bit ? "Adding" : "Removing"),
 -                              index, (enable_bit ? "to" : "from"));
 +                      netif_info(qdev, ifup, qdev->ndev,
 +                                 "%s VLAN ID %d %s the CAM.\n",
 +                                 enable_bit ? "Adding" : "Removing",
 +                                 index,
 +                                 enable_bit ? "to" : "from");
  
                        status =
                            ql_wait_reg_rdy(qdev,
                }
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
 -              QPRINTK(qdev, IFUP, CRIT,
 -                      "Address type %d not yet supported.\n", type);
 +              netif_crit(qdev, ifup, qdev->ndev,
 +                         "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
  exit:
@@@ -463,14 -450,17 +463,14 @@@ static int ql_set_mac_addr(struct ql_ad
        char *addr;
  
        if (set) {
 -              addr = &qdev->ndev->dev_addr[0];
 -              QPRINTK(qdev, IFUP, DEBUG,
 -                      "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
 -                      addr[0], addr[1], addr[2], addr[3],
 -                      addr[4], addr[5]);
 +              addr = &qdev->current_mac_addr[0];
 +              netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                           "Set Mac addr %pM\n", addr);
        } else {
                memset(zero_mac_addr, 0, ETH_ALEN);
                addr = &zero_mac_addr[0];
 -              QPRINTK(qdev, IFUP, DEBUG,
 -                              "Clearing MAC address on %s\n",
 -                              qdev->ndev->name);
 +              netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                           "Clearing MAC address\n");
        }
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
 -              QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
 -                      "address.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Failed to init mac address.\n");
        return status;
  }
  
  void ql_link_on(struct ql_adapter *qdev)
  {
 -      QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
 -                               qdev->ndev->name);
 +      netif_err(qdev, link, qdev->ndev, "Link is up.\n");
        netif_carrier_on(qdev->ndev);
        ql_set_mac_addr(qdev, 1);
  }
  
  void ql_link_off(struct ql_adapter *qdev)
  {
 -      QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
 -                               qdev->ndev->name);
 +      netif_err(qdev, link, qdev->ndev, "Link is down.\n");
        netif_carrier_off(qdev->ndev);
        ql_set_mac_addr(qdev, 0);
  }
@@@ -530,27 -522,27 +530,27 @@@ static int ql_set_routing_reg(struct ql
        int status = -EINVAL; /* Return error if no mask match. */
        u32 value = 0;
  
 -      QPRINTK(qdev, IFUP, DEBUG,
 -              "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
 -              (enable ? "Adding" : "Removing"),
 -              ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
 -              ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
 -              ((index ==
 -                RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
 -              ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
 -              ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
 -              ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
 -              ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
 -              ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
 -              ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
 -              ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
 -              ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
 -              ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
 -              ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
 -              ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
 -              ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
 -              ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
 -              (enable ? "to" : "from"));
 +      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                   "%s %s mask %s the routing reg.\n",
 +                   enable ? "Adding" : "Removing",
 +                   index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
 +                   index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
 +                   index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
 +                   index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
 +                   index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
 +                   index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
 +                   index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
 +                   index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
 +                   index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
 +                   index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
 +                   index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
 +                   index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
 +                   index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
 +                   index == RT_IDX_UNUSED013 ? "UNUSED13" :
 +                   index == RT_IDX_UNUSED014 ? "UNUSED14" :
 +                   index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
 +                   "(Bad index != RT_IDX)",
 +                   enable ? "to" : "from");
  
        switch (mask) {
        case RT_IDX_CAM_HIT:
                        break;
                }
        default:
 -              QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
 -                      mask);
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Mask type %d not yet supported.\n", mask);
                status = -EPERM;
                goto exit;
        }
@@@ -717,7 -709,7 +717,7 @@@ static int ql_validate_flash(struct ql_
  
        status = strncmp((char *)&qdev->flash, str, 4);
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
                return  status;
        }
  
                csum += le16_to_cpu(*flash++);
  
        if (csum)
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Invalid flash checksum, csum = 0x%.04x.\n", csum);
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Invalid flash checksum, csum = 0x%.04x.\n", csum);
  
        return csum;
  }
@@@ -778,8 -770,7 +778,8 @@@ static int ql_get_8000_flash_params(str
        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
 -                      QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Error reading flash.\n");
                        goto exit;
                }
        }
                        sizeof(struct flash_params_8000) / sizeof(u16),
                        "8000");
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
                status = -EINVAL;
                goto exit;
        }
                        qdev->ndev->addr_len);
  
        if (!is_valid_ether_addr(mac_addr)) {
 -              QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
                status = -EINVAL;
                goto exit;
        }
@@@ -840,8 -831,7 +840,8 @@@ static int ql_get_8012_flash_params(str
        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
 -                      QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Error reading flash.\n");
                        goto exit;
                }
  
                        sizeof(struct flash_params_8012) / sizeof(u16),
                        "8012");
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
                status = -EINVAL;
                goto exit;
        }
@@@ -969,17 -959,17 +969,17 @@@ static int ql_8012_port_initialize(stru
                /* Another function has the semaphore, so
                 * wait for the port init bit to come ready.
                 */
 -              QPRINTK(qdev, LINK, INFO,
 -                      "Another function has the semaphore, so wait for the port init bit to come ready.\n");
 +              netif_info(qdev, link, qdev->ndev,
 +                         "Another function has the semaphore, so wait for the port init bit to come ready.\n");
                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
                if (status) {
 -                      QPRINTK(qdev, LINK, CRIT,
 -                              "Port initialize timed out.\n");
 +                      netif_crit(qdev, link, qdev->ndev,
 +                                 "Port initialize timed out.\n");
                }
                return status;
        }
  
 -      QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
 +      netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
        /* Set the core reset. */
        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
        if (status)
@@@ -1109,8 -1099,8 +1109,8 @@@ static int ql_get_next_chunk(struct ql_
                                                GFP_ATOMIC,
                                                qdev->lbq_buf_order);
                if (unlikely(!rx_ring->pg_chunk.page)) {
 -                      QPRINTK(qdev, DRV, ERR,
 -                              "page allocation failed.\n");
 +                      netif_err(qdev, drv, qdev->ndev,
 +                                "page allocation failed.\n");
                        return -ENOMEM;
                }
                rx_ring->pg_chunk.offset = 0;
                if (pci_dma_mapping_error(qdev->pdev, map)) {
                        __free_pages(rx_ring->pg_chunk.page,
                                        qdev->lbq_buf_order);
 -                      QPRINTK(qdev, DRV, ERR,
 -                              "PCI mapping failed.\n");
 +                      netif_err(qdev, drv, qdev->ndev,
 +                                "PCI mapping failed.\n");
                        return -ENOMEM;
                }
                rx_ring->pg_chunk.map = map;
@@@ -1158,15 -1148,15 +1158,15 @@@ static void ql_update_lbq(struct ql_ada
  
        while (rx_ring->lbq_free_cnt > 32) {
                for (i = 0; i < 16; i++) {
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "lbq: try cleaning clean_idx = %d.\n",
 -                              clean_idx);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "lbq: try cleaning clean_idx = %d.\n",
 +                                   clean_idx);
                        lbq_desc = &rx_ring->lbq[clean_idx];
                        if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
 -                              QPRINTK(qdev, IFUP, ERR,
 -                                      "Could not get a page chunk.\n");
 -                                      return;
 -                              }
 +                              netif_err(qdev, ifup, qdev->ndev,
 +                                        "Could not get a page chunk.\n");
 +                              return;
 +                      }
  
                        map = lbq_desc->p.pg_chunk.map +
                                lbq_desc->p.pg_chunk.offset;
        }
  
        if (start_idx != clean_idx) {
 -              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                      "lbq: updating prod idx = %d.\n",
 -                      rx_ring->lbq_prod_idx);
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "lbq: updating prod idx = %d.\n",
 +                           rx_ring->lbq_prod_idx);
                ql_write_db_reg(rx_ring->lbq_prod_idx,
                                rx_ring->lbq_prod_idx_db_reg);
        }
@@@ -1211,20 -1201,19 +1211,20 @@@ static void ql_update_sbq(struct ql_ada
        while (rx_ring->sbq_free_cnt > 16) {
                for (i = 0; i < 16; i++) {
                        sbq_desc = &rx_ring->sbq[clean_idx];
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "sbq: try cleaning clean_idx = %d.\n",
 -                              clean_idx);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "sbq: try cleaning clean_idx = %d.\n",
 +                                   clean_idx);
                        if (sbq_desc->p.skb == NULL) {
 -                              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                                      "sbq: getting new skb for index %d.\n",
 -                                      sbq_desc->index);
 +                              netif_printk(qdev, rx_status, KERN_DEBUG,
 +                                           qdev->ndev,
 +                                           "sbq: getting new skb for index %d.\n",
 +                                           sbq_desc->index);
                                sbq_desc->p.skb =
                                    netdev_alloc_skb(qdev->ndev,
                                                     SMALL_BUFFER_SIZE);
                                if (sbq_desc->p.skb == NULL) {
 -                                      QPRINTK(qdev, PROBE, ERR,
 -                                              "Couldn't get an skb.\n");
 +                                      netif_err(qdev, probe, qdev->ndev,
 +                                                "Couldn't get an skb.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        return;
                                }
                                                     rx_ring->sbq_buf_size,
                                                     PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
 -                                      QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
 +                                      netif_err(qdev, ifup, qdev->ndev,
 +                                                "PCI mapping failed.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        dev_kfree_skb_any(sbq_desc->p.skb);
                                        sbq_desc->p.skb = NULL;
        }
  
        if (start_idx != clean_idx) {
 -              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                      "sbq: updating prod idx = %d.\n",
 -                      rx_ring->sbq_prod_idx);
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "sbq: updating prod idx = %d.\n",
 +                           rx_ring->sbq_prod_idx);
                ql_write_db_reg(rx_ring->sbq_prod_idx,
                                rx_ring->sbq_prod_idx_db_reg);
        }
@@@ -1293,9 -1281,8 +1293,9 @@@ static void ql_unmap_send(struct ql_ada
                         * then its an OAL.
                         */
                        if (i == 7) {
 -                              QPRINTK(qdev, TX_DONE, DEBUG,
 -                                      "unmapping OAL area.\n");
 +                              netif_printk(qdev, tx_done, KERN_DEBUG,
 +                                           qdev->ndev,
 +                                           "unmapping OAL area.\n");
                        }
                        pci_unmap_single(qdev->pdev,
                                         pci_unmap_addr(&tx_ring_desc->map[i],
                                                       maplen),
                                         PCI_DMA_TODEVICE);
                } else {
 -                      QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
 -                              i);
 +                      netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
 +                                   "unmapping frag %d.\n", i);
                        pci_unmap_page(qdev->pdev,
                                       pci_unmap_addr(&tx_ring_desc->map[i],
                                                      mapaddr),
@@@ -1330,8 -1317,7 +1330,8 @@@ static int ql_map_send(struct ql_adapte
        int frag_cnt = skb_shinfo(skb)->nr_frags;
  
        if (frag_cnt) {
 -              QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
 +              netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
 +                           "frag_cnt = %d.\n", frag_cnt);
        }
        /*
         * Map the skb buffer first.
  
        err = pci_dma_mapping_error(qdev->pdev, map);
        if (err) {
 -              QPRINTK(qdev, TX_QUEUED, ERR,
 -                      "PCI mapping failed with error: %d\n", err);
 +              netif_err(qdev, tx_queued, qdev->ndev,
 +                        "PCI mapping failed with error: %d\n", err);
  
                return NETDEV_TX_BUSY;
        }
                                             PCI_DMA_TODEVICE);
                        err = pci_dma_mapping_error(qdev->pdev, map);
                        if (err) {
 -                              QPRINTK(qdev, TX_QUEUED, ERR,
 -                                      "PCI mapping outbound address list with error: %d\n",
 -                                      err);
 +                              netif_err(qdev, tx_queued, qdev->ndev,
 +                                        "PCI mapping outbound address list with error: %d\n",
 +                                        err);
                                goto map_error;
                        }
  
  
                err = pci_dma_mapping_error(qdev->pdev, map);
                if (err) {
 -                      QPRINTK(qdev, TX_QUEUED, ERR,
 -                              "PCI mapping frags failed with error: %d.\n",
 -                              err);
 +                      netif_err(qdev, tx_queued, qdev->ndev,
 +                                "PCI mapping frags failed with error: %d.\n",
 +                                err);
                        goto map_error;
                }
  
@@@ -1447,260 -1433,6 +1447,260 @@@ map_error
        return NETDEV_TX_BUSY;
  }
  
 +/* Process an inbound completion from an rx ring. */
 +static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
 +                                      struct rx_ring *rx_ring,
 +                                      struct ib_mac_iocb_rsp *ib_mac_rsp,
 +                                      u32 length,
 +                                      u16 vlan_id)
 +{
 +      struct sk_buff *skb;
 +      struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 +      struct skb_frag_struct *rx_frag;
 +      int nr_frags;
 +      struct napi_struct *napi = &rx_ring->napi;
 +
 +      napi->dev = qdev->ndev;
 +
 +      skb = napi_get_frags(napi);
 +      if (!skb) {
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "Couldn't get an skb, exiting.\n");
 +              rx_ring->rx_dropped++;
 +              put_page(lbq_desc->p.pg_chunk.page);
 +              return;
 +      }
 +      prefetch(lbq_desc->p.pg_chunk.va);
 +      rx_frag = skb_shinfo(skb)->frags;
 +      nr_frags = skb_shinfo(skb)->nr_frags;
 +      rx_frag += nr_frags;
 +      rx_frag->page = lbq_desc->p.pg_chunk.page;
 +      rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
 +      rx_frag->size = length;
 +
 +      skb->len += length;
 +      skb->data_len += length;
 +      skb->truesize += length;
 +      skb_shinfo(skb)->nr_frags++;
 +
 +      rx_ring->rx_packets++;
 +      rx_ring->rx_bytes += length;
 +      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +      skb_record_rx_queue(skb, rx_ring->cq_id);
 +      if (qdev->vlgrp && (vlan_id != 0xffff))
 +              vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
 +      else
 +              napi_gro_frags(napi);
 +}
 +
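
The function above is an instance of the standard napi_gro_frags()
receive pattern: borrow the per-napi skb, attach the page chunk as a
fragment, account the length, then hand the skb back to GRO. Skeleton of
the pattern (a sketch; page/offset/length stand in for the received
chunk, error handling and the VLAN path omitted):

        struct sk_buff *skb = napi_get_frags(napi);    /* reusable GRO skb */
        if (!skb)
                return;                                /* drop on allocation failure */
        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                           page, offset, length);
        skb->len += length;
        skb->data_len += length;
        skb->truesize += length;
        napi_gro_frags(napi);                          /* consumes the fragments */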
 +/* Process an inbound completion from an rx ring. */
 +static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 +                                      struct rx_ring *rx_ring,
 +                                      struct ib_mac_iocb_rsp *ib_mac_rsp,
 +                                      u32 length,
 +                                      u16 vlan_id)
 +{
 +      struct net_device *ndev = qdev->ndev;
 +      struct sk_buff *skb = NULL;
 +      void *addr;
 +      struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 +      struct napi_struct *napi = &rx_ring->napi;
 +
 +      skb = netdev_alloc_skb(ndev, length);
 +      if (!skb) {
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "Couldn't get an skb, need to unwind!\n");
 +              rx_ring->rx_dropped++;
 +              put_page(lbq_desc->p.pg_chunk.page);
 +              return;
 +      }
 +
 +      addr = lbq_desc->p.pg_chunk.va;
 +      prefetch(addr);
 +
 +      /* Frame error, so drop the packet. */
 +      if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
 +              rx_ring->rx_errors++;
 +              goto err_out;
 +      }
 +
 +      /* The max framesize filter on this chip is set higher than
 +       * MTU since FCoE uses 2k frames.
 +       */
 +      if (length > ndev->mtu + ETH_HLEN) {
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "Segment too large, dropping.\n");
 +              rx_ring->rx_dropped++;
 +              goto err_out;
 +      }
 +      memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
 +      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                   "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
 +                   length);
 +      skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
 +                              lbq_desc->p.pg_chunk.offset+ETH_HLEN,
 +                              length-ETH_HLEN);
 +      skb->len += length-ETH_HLEN;
 +      skb->data_len += length-ETH_HLEN;
 +      skb->truesize += length-ETH_HLEN;
 +
 +      rx_ring->rx_packets++;
 +      rx_ring->rx_bytes += skb->len;
 +      skb->protocol = eth_type_trans(skb, ndev);
 +      skb->ip_summed = CHECKSUM_NONE;
 +
 +      if (qdev->rx_csum &&
 +              !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 +              /* TCP frame. */
 +              if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "TCP checksum done!\n");
 +                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +              } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
 +                              (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
 +                      /* Unfragmented ipv4 UDP frame. */
 +                      struct iphdr *iph = (struct iphdr *) skb->data;
 +                      if (!(iph->frag_off &
 +                              cpu_to_be16(IP_MF|IP_OFFSET))) {
 +                              skb->ip_summed = CHECKSUM_UNNECESSARY;
 +                              netif_printk(qdev, rx_status, KERN_DEBUG,
 +                                           qdev->ndev,
 +                                           "TCP checksum done!\n");
 +                      }
 +              }
 +      }
 +
 +      skb_record_rx_queue(skb, rx_ring->cq_id);
 +      if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 +              if (qdev->vlgrp && (vlan_id != 0xffff))
 +                      vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
 +              else
 +                      napi_gro_receive(napi, skb);
 +      } else {
 +              if (qdev->vlgrp && (vlan_id != 0xffff))
 +                      vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
 +              else
 +                      netif_receive_skb(skb);
 +      }
 +      return;
 +err_out:
 +      dev_kfree_skb_any(skb);
 +      put_page(lbq_desc->p.pg_chunk.page);
 +}
 +
 +/* Process an inbound completion from an rx ring. */
 +static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 +                                      struct rx_ring *rx_ring,
 +                                      struct ib_mac_iocb_rsp *ib_mac_rsp,
 +                                      u32 length,
 +                                      u16 vlan_id)
 +{
 +      struct net_device *ndev = qdev->ndev;
 +      struct sk_buff *skb = NULL;
 +      struct sk_buff *new_skb = NULL;
 +      struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
 +
 +      skb = sbq_desc->p.skb;
 +      /* Allocate new_skb and copy */
 +      new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
 +      if (new_skb == NULL) {
 +              netif_err(qdev, probe, qdev->ndev,
 +                        "No skb available, drop the packet.\n");
 +              rx_ring->rx_dropped++;
 +              return;
 +      }
 +      skb_reserve(new_skb, NET_IP_ALIGN);
 +      memcpy(skb_put(new_skb, length), skb->data, length);
 +      skb = new_skb;
 +
 +      /* Frame error, so drop the packet. */
 +      if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
 +              dev_kfree_skb_any(skb);
 +              rx_ring->rx_errors++;
 +              return;
 +      }
 +
 +      /* loopback self test for ethtool */
 +      if (test_bit(QL_SELFTEST, &qdev->flags)) {
 +              ql_check_lb_frame(qdev, skb);
 +              dev_kfree_skb_any(skb);
 +              return;
 +      }
 +
 +      /* The max framesize filter on this chip is set higher than
 +       * MTU since FCoE uses 2k frames.
 +       */
 +      if (skb->len > ndev->mtu + ETH_HLEN) {
 +              dev_kfree_skb_any(skb);
 +              rx_ring->rx_dropped++;
 +              return;
 +      }
 +
 +      prefetch(skb->data);
 +      skb->dev = ndev;
 +      if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "%s Multicast.\n",
 +                           (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 +                           IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
 +                           (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 +                           IB_MAC_IOCB_RSP_M_REG ? "Registered" :
 +                           (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 +                           IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
 +      }
 +      if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "Promiscuous Packet.\n");
 +
 +      rx_ring->rx_packets++;
 +      rx_ring->rx_bytes += skb->len;
 +      skb->protocol = eth_type_trans(skb, ndev);
 +      skb->ip_summed = CHECKSUM_NONE;
 +
 +      /* If rx checksum is on, and there are no
 +       * csum or frame errors.
 +       */
 +      if (qdev->rx_csum &&
 +              !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
 +              /* TCP frame. */
 +              if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "TCP checksum done!\n");
 +                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +              } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
 +                              (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
 +                      /* Unfragmented ipv4 UDP frame. */
 +                      struct iphdr *iph = (struct iphdr *) skb->data;
 +                      if (!(iph->frag_off &
 +                              cpu_to_be16(IP_MF|IP_OFFSET))) {
 +                              skb->ip_summed = CHECKSUM_UNNECESSARY;
 +                              netif_printk(qdev, rx_status, KERN_DEBUG,
 +                                           qdev->ndev,
 +                                           "TCP checksum done!\n");
 +                      }
 +              }
 +      }
 +
 +      skb_record_rx_queue(skb, rx_ring->cq_id);
 +      if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 +              if (qdev->vlgrp && (vlan_id != 0xffff))
 +                      vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
 +                                              vlan_id, skb);
 +              else
 +                      napi_gro_receive(&rx_ring->napi, skb);
 +      } else {
 +              if (qdev->vlgrp && (vlan_id != 0xffff))
 +                      vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
 +              else
 +                      netif_receive_skb(skb);
 +      }
 +}
 +
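
The UDP branch repeated in these receive paths leans on frag_off carrying
both the more-fragments flag and the 13-bit fragment offset in a single
big-endian field, so the test is nonzero for any fragment. The predicate,
as a sketch with a hypothetical helper name (cpu_to_be16() and htons()
are interchangeable here):

        static inline bool ip_is_unfragmented(const struct iphdr *iph)
        {
                /* IP_MF: more-fragments flag; IP_OFFSET: fragment offset mask */
                return (iph->frag_off & htons(IP_MF | IP_OFFSET)) == 0;
        }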
  static void ql_realign_skb(struct sk_buff *skb, int len)
  {
        void *temp_addr = skb->data;
@@@ -1735,8 -1467,7 +1735,8 @@@ static struct sk_buff *ql_build_rx_skb(
         */
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
            ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
 -              QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "Header of %d bytes in small buffer.\n", hdr_len);
                /*
                 * Headers fit nicely into a small buffer.
                 */
         * Handle the data buffer(s).
         */
        if (unlikely(!length)) {        /* Is there data too? */
 -              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                      "No Data buffer in this packet.\n");
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "No Data buffer in this packet.\n");
                return skb;
        }
  
        if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "Headers in small, data of %d bytes in small, combine them.\n", length);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "Headers in small, data of %d bytes in small, combine them.\n",
 +                                   length);
                        /*
                         * Data is less than small buffer size so it's
                         * stuffed in a small buffer.
                                                        maplen),
                                                       PCI_DMA_FROMDEVICE);
                } else {
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "%d bytes in a single small buffer.\n", length);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "%d bytes in a single small buffer.\n",
 +                                   length);
                        sbq_desc = ql_get_curr_sbuf(rx_ring);
                        skb = sbq_desc->p.skb;
                        ql_realign_skb(skb, length);
                }
        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
                if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "Header in small, %d bytes in large. Chain large to small!\n", length);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "Header in small, %d bytes in large. Chain large to small!\n",
 +                                   length);
                        /*
                         * The data is in a single large buffer.  We
                         * chain it to the header buffer's skb and let
                         * it rip.
                         */
                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "Chaining page at offset = %d,"
 -                              "for %d bytes  to skb.\n",
 -                              lbq_desc->p.pg_chunk.offset, length);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "Chaining page at offset = %d, for %d bytes to skb.\n",
 +                                   lbq_desc->p.pg_chunk.offset, length);
                        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
                                                lbq_desc->p.pg_chunk.offset,
                                                length);
                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
                        skb = netdev_alloc_skb(qdev->ndev, length);
                        if (skb == NULL) {
 -                              QPRINTK(qdev, PROBE, DEBUG,
 -                                      "No skb available, drop the packet.\n");
 +                              netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
 +                                           "No skb available, drop the packet.\n");
                                return NULL;
                        }
                        pci_unmap_page(qdev->pdev,
                                       pci_unmap_len(lbq_desc, maplen),
                                       PCI_DMA_FROMDEVICE);
                        skb_reserve(skb, NET_IP_ALIGN);
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
 +                                   length);
                        skb_fill_page_desc(skb, 0,
                                                lbq_desc->p.pg_chunk.page,
                                                lbq_desc->p.pg_chunk.offset,
                         * a local buffer and use it to find the
                         * pages to chain.
                         */
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "%d bytes of headers & data in chain of large.\n", length);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "%d bytes of headers & data in chain of large.\n",
 +                                   length);
                        skb = sbq_desc->p.skb;
                        sbq_desc->p.skb = NULL;
                        skb_reserve(skb, NET_IP_ALIGN);
                        size = (length < rx_ring->lbq_buf_size) ? length :
                                rx_ring->lbq_buf_size;
  
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "Adding page %d to skb for %d bytes.\n",
 -                              i, size);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "Adding page %d to skb for %d bytes.\n",
 +                                   i, size);
                        skb_fill_page_desc(skb, i,
                                                lbq_desc->p.pg_chunk.page,
                                                lbq_desc->p.pg_chunk.offset,
  }
  
  /* Process an inbound completion from an rx ring. */
 -static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 +static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
                                   struct rx_ring *rx_ring,
 -                                 struct ib_mac_iocb_rsp *ib_mac_rsp)
 +                                 struct ib_mac_iocb_rsp *ib_mac_rsp,
 +                                 u16 vlan_id)
  {
        struct net_device *ndev = qdev->ndev;
        struct sk_buff *skb = NULL;
 -      u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
 -                      IB_MAC_IOCB_RSP_VLAN_MASK)
  
        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
  
        skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
        if (unlikely(!skb)) {
 -              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                      "No skb available, drop packet.\n");
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "No skb available, drop packet.\n");
                rx_ring->rx_dropped++;
                return;
        }
  
        /* Frame error, so drop the packet. */
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
 -              QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
 -                                      ib_mac_rsp->flags2);
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
                dev_kfree_skb_any(skb);
                rx_ring->rx_errors++;
                return;
        prefetch(skb->data);
        skb->dev = ndev;
        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
 -              QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
 -                      (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 -                      IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
 -                      (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 -                      IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
 -                      (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 -                      IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
 +                           (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 +                           IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
 +                           (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 +                           IB_MAC_IOCB_RSP_M_REG ? "Registered" :
 +                           (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 +                           IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
                rx_ring->rx_multicast++;
        }
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
 -              QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "Promiscuous Packet.\n");
        }
  
        skb->protocol = eth_type_trans(skb, ndev);
                !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
                /* TCP frame. */
                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                                      "TCP checksum done!\n");
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "TCP checksum done!\n");
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
                        if (!(iph->frag_off &
                                cpu_to_be16(IP_MF|IP_OFFSET))) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
 -                              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                                              "TCP checksum done!\n");
 +                              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                           "TCP checksum done!\n");
                        }
                }
        }
        }
  }
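
Note: most of the changes in these hunks are a mechanical conversion from the driver-private QPRINTK() macro to the generic netif_err()/netif_warn()/netif_info()/netif_printk() helpers, which gate their output on the adapter's msg_enable bitmap. A minimal user-space sketch of that gating idea; the MSG_* bits and adapter_err() below are illustrative, not the real kernel macros:

    /* Sketch only: per-device message gating in the style of netif_*(). */
    #include <stdio.h>

    #define MSG_IFUP      (1u << 0)
    #define MSG_RX_STATUS (1u << 1)

    struct adapter { unsigned int msg_enable; };

    #define adapter_err(a, bit, fmt, ...) \
            do { \
                    if ((a)->msg_enable & (bit)) \
                            fprintf(stderr, fmt, ##__VA_ARGS__); \
            } while (0)

    int main(void)
    {
            struct adapter q = { .msg_enable = MSG_IFUP };

            adapter_err(&q, MSG_IFUP, "printed: ifup messages enabled\n");
            adapter_err(&q, MSG_RX_STATUS, "suppressed: bit not set\n");
            return 0;
    }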
  
 +/* Process an inbound completion from an rx ring. */
 +static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
 +                                      struct rx_ring *rx_ring,
 +                                      struct ib_mac_iocb_rsp *ib_mac_rsp)
 +{
 +      u32 length = le32_to_cpu(ib_mac_rsp->data_len);
 +      u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
 +                      ((le16_to_cpu(ib_mac_rsp->vlan_id) &
 +                      IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
 +
 +      QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
 +
 +      if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
 +              /* The data and headers are split into
 +               * separate buffers.
 +               */
 +              ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
 +                                              vlan_id);
 +      } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
 +              /* The data fit in a single small buffer.
 +               * Allocate a new skb, copy the data and
 +               * return the buffer to the free pool.
 +               */
 +              ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
 +                                              length, vlan_id);
 +      } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
 +              !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
 +              (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
 +              /* TCP packet in a page chunk that's been checksummed.
 +               * Tack it on to our GRO skb and let it go.
 +               */
 +              ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
 +                                              length, vlan_id);
 +      } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
 +              /* Non-TCP packet in a page chunk. Allocate an
 +               * skb, tack the page on as a frag, and send it up.
 +               */
 +              ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
 +                                              length, vlan_id);
 +      } else {
 +              /* Non-TCP/UDP large frames that span multiple buffers
 +               * can be processed correctly by the split frame logic.
 +               */
 +              ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
 +                                              vlan_id);
 +      }
 +
 +      return (unsigned long)length;
 +}
 +
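
Note: the new ql_process_mac_rx_intr() above is a pure dispatcher: it extracts the VLAN tag from the IOCB once, using 0xffff as the "no tag" sentinel when IB_MAC_IOCB_RSP_V is clear, then routes the completion by the flags bits (header/data split, single small buffer, checksummed TCP page for GRO, plain page, or multi-buffer split). A hedged user-space sketch of the tag extraction; the mask values below are placeholders, not the real IB_MAC_IOCB_RSP_* definitions, and the driver additionally byte-swaps with le16_to_cpu() first:

    /* Sketch of the vlan_id extraction (placeholder constants). */
    #include <stdint.h>
    #include <stdio.h>

    #define RSP_V     0x20u    /* stands in for IB_MAC_IOCB_RSP_V */
    #define VLAN_MASK 0x0fffu  /* stands in for IB_MAC_IOCB_RSP_VLAN_MASK */

    static uint16_t extract_vlan(uint8_t flags2, uint16_t vlan_field)
    {
            /* 0xffff means "no VLAN tag present", as in the driver */
            return (flags2 & RSP_V) ? (vlan_field & VLAN_MASK) : 0xffff;
    }

    int main(void)
    {
            printf("%#x\n", extract_vlan(0x20, 0x8064)); /* -> 0x64 */
            printf("%#x\n", extract_vlan(0x00, 0x8064)); /* -> 0xffff */
            return 0;
    }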
  /* Process an outbound completion from an rx ring. */
  static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
                                   struct ob_mac_iocb_rsp *mac_rsp)
                                        OB_MAC_IOCB_RSP_L |
                                        OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
 -                      QPRINTK(qdev, TX_DONE, WARNING,
 -                              "Total descriptor length did not match transfer length.\n");
 +                      netif_warn(qdev, tx_done, qdev->ndev,
 +                                 "Total descriptor length did not match transfer length.\n");
                }
                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
 -                      QPRINTK(qdev, TX_DONE, WARNING,
 -                              "Frame too short to be legal, not sent.\n");
 +                      netif_warn(qdev, tx_done, qdev->ndev,
 +                                 "Frame too short to be valid, not sent.\n");
                }
                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
 -                      QPRINTK(qdev, TX_DONE, WARNING,
 -                              "Frame too long, but sent anyway.\n");
 +                      netif_warn(qdev, tx_done, qdev->ndev,
 +                                 "Frame too long, but sent anyway.\n");
                }
                if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
 -                      QPRINTK(qdev, TX_DONE, WARNING,
 -                              "PCI backplane error. Frame not sent.\n");
 +                      netif_warn(qdev, tx_done, qdev->ndev,
 +                                 "PCI backplane error. Frame not sent.\n");
                }
        }
        atomic_inc(&tx_ring->tx_count);
@@@ -2140,35 -1817,33 +2140,35 @@@ static void ql_process_chip_ae_intr(str
  {
        switch (ib_ae_rsp->event) {
        case MGMT_ERR_EVENT:
 -              QPRINTK(qdev, RX_ERR, ERR,
 -                      "Management Processor Fatal Error.\n");
 +              netif_err(qdev, rx_err, qdev->ndev,
 +                        "Management Processor Fatal Error.\n");
                ql_queue_fw_error(qdev);
                return;
  
        case CAM_LOOKUP_ERR_EVENT:
 -              QPRINTK(qdev, LINK, ERR,
 -                      "Multiple CAM hits lookup occurred.\n");
 -              QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
 +              netif_err(qdev, link, qdev->ndev,
 +                        "Multiple CAM hits lookup occurred.\n");
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "This event shouldn't occur.\n");
                ql_queue_asic_error(qdev);
                return;
  
        case SOFT_ECC_ERROR_EVENT:
 -              QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
 +              netif_err(qdev, rx_err, qdev->ndev,
 +                        "Soft ECC error detected.\n");
                ql_queue_asic_error(qdev);
                break;
  
        case PCI_ERR_ANON_BUF_RD:
 -              QPRINTK(qdev, RX_ERR, ERR,
 -                      "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
 -                      ib_ae_rsp->q_id);
 +              netif_err(qdev, rx_err, qdev->ndev,
 +                        "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
 +                        ib_ae_rsp->q_id);
                ql_queue_asic_error(qdev);
                break;
  
        default:
 -              QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
 -                      ib_ae_rsp->event);
 +              netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
 +                        ib_ae_rsp->event);
                ql_queue_asic_error(qdev);
                break;
        }
@@@ -2185,9 -1860,9 +2185,9 @@@ static int ql_clean_outbound_rx_ring(st
        /* While there are entries in the completion queue. */
        while (prod != rx_ring->cnsmr_idx) {
  
 -              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                      "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
 -                      prod, rx_ring->cnsmr_idx);
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "cq_id = %d, prod = %d, cnsmr = %d.\n",
 +                           rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
  
                net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
                rmb();
                        ql_process_mac_tx_intr(qdev, net_rsp);
                        break;
                default:
 -                      QPRINTK(qdev, RX_STATUS, DEBUG,
 -                              "Hit default case, not handled! dropping the packet, opcode = %x.\n",
 -                              net_rsp->opcode);
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "Hit default case, not handled! dropping the packet, opcode = %x.\n",
 +                                   net_rsp->opcode);
                }
                count++;
                ql_update_cq(rx_ring);
@@@ -2232,9 -1907,9 +2232,9 @@@ static int ql_clean_inbound_rx_ring(str
        /* While there are entries in the completion queue. */
        while (prod != rx_ring->cnsmr_idx) {
  
 -              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                      "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
 -                      prod, rx_ring->cnsmr_idx);
 +              netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                           "cq_id = %d, prod = %d, cnsmr = %d.\n",
 +                           rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
  
                net_rsp = rx_ring->curr_entry;
                rmb();
                                                net_rsp);
                        break;
                default:
 -                      {
 -                              QPRINTK(qdev, RX_STATUS, DEBUG,
 -                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
 -                                      net_rsp->opcode);
 -                      }
 +                      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                                   "Hit default case, not handled! dropping the packet, opcode = %x.\n",
 +                                   net_rsp->opcode);
 +                      break;
                }
                count++;
                ql_update_cq(rx_ring);
@@@ -2274,8 -1950,8 +2274,8 @@@ static int ql_napi_poll_msix(struct nap
        int i, work_done = 0;
        struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
  
 -      QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
 -              rx_ring->cq_id);
 +      netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 +                   "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
  
        /* Service the TX rings first.  They start
         * right after the RSS rings. */
                if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
                        (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
                                        trx_ring->cnsmr_idx)) {
 -                      QPRINTK(qdev, INTR, DEBUG,
 -                              "%s: Servicing TX completion ring %d.\n",
 -                              __func__, trx_ring->cq_id);
 +                      netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
 +                                   "%s: Servicing TX completion ring %d.\n",
 +                                   __func__, trx_ring->cq_id);
                        ql_clean_outbound_rx_ring(trx_ring);
                }
        }
         */
        if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
                                        rx_ring->cnsmr_idx) {
 -              QPRINTK(qdev, INTR, DEBUG,
 -                      "%s: Servicing RX completion ring %d.\n",
 -                      __func__, rx_ring->cq_id);
 +              netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
 +                           "%s: Servicing RX completion ring %d.\n",
 +                           __func__, rx_ring->cq_id);
                work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
        }
  
@@@ -2318,13 -1994,12 +2318,13 @@@ static void qlge_vlan_rx_register(struc
  
        qdev->vlgrp = grp;
        if (grp) {
 -              QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
 +              netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                           "Turning on VLAN in NIC_RCV_CFG.\n");
                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
                           NIC_RCV_CFG_VLAN_MATCH_AND_NON);
        } else {
 -              QPRINTK(qdev, IFUP, DEBUG,
 -                      "Turning off VLAN in NIC_RCV_CFG.\n");
 +              netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                           "Turning off VLAN in NIC_RCV_CFG.\n");
                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
        }
  }
@@@ -2340,8 -2015,7 +2340,8 @@@ static void qlge_vlan_rx_add_vid(struc
                return;
        if (ql_set_mac_addr_reg
            (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
 -              QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Failed to init vlan address.\n");
        }
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  }
@@@ -2358,8 -2032,7 +2358,8 @@@ static void qlge_vlan_rx_kill_vid(struc
  
        if (ql_set_mac_addr_reg
            (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
 -              QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Failed to clear vlan address.\n");
        }
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  
@@@ -2388,8 -2061,7 +2388,8 @@@ static irqreturn_t qlge_isr(int irq, vo
  
        spin_lock(&qdev->hw_lock);
        if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
 -              QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
 +              netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
 +                           "Shared Interrupt, Not ours!\n");
                spin_unlock(&qdev->hw_lock);
                return IRQ_NONE;
        }
         */
        if (var & STS_FE) {
                ql_queue_asic_error(qdev);
 -              QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
 +              netif_err(qdev, intr, qdev->ndev,
 +                        "Got fatal error, STS = %x.\n", var);
                var = ql_read32(qdev, ERR_STS);
 -              QPRINTK(qdev, INTR, ERR,
 -                      "Resetting chip. Error Status Register = 0x%x\n", var);
 +              netif_err(qdev, intr, qdev->ndev,
 +                        "Resetting chip. Error Status Register = 0x%x\n", var);
                return IRQ_HANDLED;
        }
  
                 * We've got an async event or mailbox completion.
                 * Handle it and clear the source of the interrupt.
                 */
 -              QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
 +              netif_err(qdev, intr, qdev->ndev,
 +                        "Got MPI processor interrupt.\n");
                ql_disable_completion_interrupt(qdev, intr_context->intr);
                ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
                queue_delayed_work_on(smp_processor_id(),
         */
        var = ql_read32(qdev, ISR1);
        if (var & intr_context->irq_mask) {
 -              QPRINTK(qdev, INTR, INFO,
 -                      "Waking handler for rx_ring[0].\n");
 +              netif_info(qdev, intr, qdev->ndev,
 +                         "Waking handler for rx_ring[0].\n");
                ql_disable_completion_interrupt(qdev, intr_context->intr);
                napi_schedule(&rx_ring->napi);
                work_done++;
@@@ -2533,9 -2203,9 +2533,9 @@@ static netdev_tx_t qlge_send(struct sk_
                return NETDEV_TX_OK;
  
        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
 -              QPRINTK(qdev, TX_QUEUED, INFO,
 -                      "%s: shutting down tx queue %d du to lack of resources.\n",
 -                      __func__, tx_ring_idx);
 +              netif_info(qdev, tx_queued, qdev->ndev,
 +                         "%s: shutting down tx queue %d due to lack of resources.\n",
 +                         __func__, tx_ring_idx);
                netif_stop_subqueue(ndev, tx_ring->wq_id);
                atomic_inc(&tx_ring->queue_stopped);
                tx_ring->tx_errors++;
        mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
  
        if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
 -              QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
 -                      vlan_tx_tag_get(skb));
 +              netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
 +                           "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
                mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
                mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
        }
        }
        if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
                        NETDEV_TX_OK) {
 -              QPRINTK(qdev, TX_QUEUED, ERR,
 -                              "Could not map the segments.\n");
 +              netif_err(qdev, tx_queued, qdev->ndev,
 +                        "Could not map the segments.\n");
                tx_ring->tx_errors++;
                return NETDEV_TX_BUSY;
        }
        wmb();
  
        ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
 -      QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
 -              tx_ring->prod_idx, skb->len);
 +      netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
 +                   "tx queued, slot %d, len %d\n",
 +                   tx_ring->prod_idx, skb->len);
  
        atomic_dec(&tx_ring->tx_count);
        return NETDEV_TX_OK;
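
Note: the wmb() a few lines up is the ordering half of the doorbell protocol: every store to the TX IOCB must be visible before the producer index is published through the doorbell register, otherwise the chip could fetch a half-written descriptor. A sketch of the contract; build_tx_iocb() is a hypothetical stand-in for the descriptor setup done inline above:

    /* Sketch: descriptor stores must complete before the doorbell write. */
    build_tx_iocb(mac_iocb_ptr, skb);   /* hypothetical helper */
    wmb();                              /* order IOCB stores before MMIO */
    ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);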
@@@ -2616,8 -2285,8 +2616,8 @@@ static int ql_alloc_shadow_space(struc
            pci_alloc_consistent(qdev->pdev,
                                 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
        if (qdev->rx_ring_shadow_reg_area == NULL) {
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Allocation of RX shadow space failed.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Allocation of RX shadow space failed.\n");
                return -ENOMEM;
        }
        memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
            pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
                                 &qdev->tx_ring_shadow_reg_dma);
        if (qdev->tx_ring_shadow_reg_area == NULL) {
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Allocation of TX shadow space failed.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Allocation of TX shadow space failed.\n");
                goto err_wqp_sh_area;
        }
        memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@@ -2680,7 -2349,7 +2680,7 @@@ static int ql_alloc_tx_resources(struc
  
        if ((tx_ring->wq_base == NULL) ||
            tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
 -              QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
                return -ENOMEM;
        }
        tx_ring->q =
@@@ -2731,8 -2400,7 +2731,8 @@@ static void ql_free_sbq_buffers(struct 
        for (i = 0; i < rx_ring->sbq_len; i++) {
                sbq_desc = &rx_ring->sbq[i];
                if (sbq_desc == NULL) {
 -                      QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "sbq_desc %d is NULL.\n", i);
                        return;
                }
                if (sbq_desc->p.skb) {
@@@ -2859,7 -2527,7 +2859,7 @@@ static int ql_alloc_rx_resources(struc
                                 &rx_ring->cq_base_dma);
  
        if (rx_ring->cq_base == NULL) {
 -              QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
                return -ENOMEM;
        }
  
                                         &rx_ring->sbq_base_dma);
  
                if (rx_ring->sbq_base == NULL) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Small buffer queue allocation failed.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Small buffer queue allocation failed.\n");
                        goto err_mem;
                }
  
                    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
                            GFP_KERNEL);
                if (rx_ring->sbq == NULL) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Small buffer queue control block allocation failed.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Small buffer queue control block allocation failed.\n");
                        goto err_mem;
                }
  
                                         &rx_ring->lbq_base_dma);
  
                if (rx_ring->lbq_base == NULL) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Large buffer queue allocation failed.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Large buffer queue allocation failed.\n");
                        goto err_mem;
                }
                /*
                    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
                            GFP_KERNEL);
                if (rx_ring->lbq == NULL) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Large buffer queue control block allocation failed.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Large buffer queue control block allocation failed.\n");
                        goto err_mem;
                }
  
@@@ -2942,10 -2610,10 +2942,10 @@@ static void ql_tx_ring_clean(struct ql_
                for (i = 0; i < tx_ring->wq_len; i++) {
                        tx_ring_desc = &tx_ring->q[i];
                        if (tx_ring_desc && tx_ring_desc->skb) {
 -                              QPRINTK(qdev, IFDOWN, ERR,
 -                              "Freeing lost SKB %p, from queue %d, index %d.\n",
 -                                      tx_ring_desc->skb, j,
 -                                      tx_ring_desc->index);
 +                              netif_err(qdev, ifdown, qdev->ndev,
 +                                        "Freeing lost SKB %p, from queue %d, index %d.\n",
 +                                        tx_ring_desc->skb, j,
 +                                        tx_ring_desc->index);
                                ql_unmap_send(qdev, tx_ring_desc,
                                              tx_ring_desc->map_cnt);
                                dev_kfree_skb(tx_ring_desc->skb);
@@@ -2976,16 -2644,16 +2976,16 @@@ static int ql_alloc_mem_resources(struc
  
        for (i = 0; i < qdev->rx_ring_count; i++) {
                if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "RX resource allocation failed.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "RX resource allocation failed.\n");
                        goto err_mem;
                }
        }
        /* Allocate tx queue resources */
        for (i = 0; i < qdev->tx_ring_count; i++) {
                if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "TX resource allocation failed.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "TX resource allocation failed.\n");
                        goto err_mem;
                }
        }
@@@ -3120,15 -2788,14 +3120,15 @@@ static int ql_start_rx_ring(struct ql_a
                cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
                break;
        default:
 -              QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
 -                      rx_ring->type);
 +              netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                           "Invalid rx_ring->type = %d.\n", rx_ring->type);
        }
 -      QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
 +      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                   "Initializing rx work queue.\n");
        err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
                           CFG_LCQ, rx_ring->cq_id);
        if (err) {
 -              QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
                return err;
        }
        return err;
@@@ -3174,11 -2841,10 +3174,11 @@@ static int ql_start_tx_ring(struct ql_a
        err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
                           (u16) tx_ring->wq_id);
        if (err) {
 -              QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
                return err;
        }
 -      QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
 +      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                   "Successfully loaded WQICB.\n");
        return err;
  }
  
@@@ -3232,15 -2898,15 +3232,15 @@@ static void ql_enable_msix(struct ql_ad
                if (err < 0) {
                        kfree(qdev->msi_x_entry);
                        qdev->msi_x_entry = NULL;
 -                      QPRINTK(qdev, IFUP, WARNING,
 -                              "MSI-X Enable failed, trying MSI.\n");
 +                      netif_warn(qdev, ifup, qdev->ndev,
 +                                 "MSI-X Enable failed, trying MSI.\n");
                        qdev->intr_count = 1;
                        qlge_irq_type = MSI_IRQ;
                } else if (err == 0) {
                        set_bit(QL_MSIX_ENABLED, &qdev->flags);
 -                      QPRINTK(qdev, IFUP, INFO,
 -                              "MSI-X Enabled, got %d vectors.\n",
 -                              qdev->intr_count);
 +                      netif_info(qdev, ifup, qdev->ndev,
 +                                 "MSI-X Enabled, got %d vectors.\n",
 +                                 qdev->intr_count);
                        return;
                }
        }
        if (qlge_irq_type == MSI_IRQ) {
                if (!pci_enable_msi(qdev->pdev)) {
                        set_bit(QL_MSI_ENABLED, &qdev->flags);
 -                      QPRINTK(qdev, IFUP, INFO,
 -                              "Running with MSI interrupts.\n");
 +                      netif_info(qdev, ifup, qdev->ndev,
 +                                 "Running with MSI interrupts.\n");
                        return;
                }
        }
        qlge_irq_type = LEG_IRQ;
 -      QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
 +      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                   "Running with legacy interrupts.\n");
  }
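
Note: ql_enable_msix() implements the conventional three-step interrupt fallback: request MSI-X, degrade to single-vector MSI, and finally settle for the shared legacy INTx line. A condensed sketch against the PCI API of this era, where pci_enable_msix() returns 0 on success, a positive count when fewer vectors are available, and a negative errno on failure:

    /* Sketch (not the driver function): the MSI-X -> MSI -> INTx ladder.
     * Assumes <linux/pci.h>. */
    static void enable_irqs(struct pci_dev *pdev, struct msix_entry *tbl, int n)
    {
            if (pci_enable_msix(pdev, tbl, n) == 0)
                    return;         /* got all n MSI-X vectors */
            if (pci_enable_msi(pdev) == 0)
                    return;         /* fell back to one MSI vector */
            /* neither granted: stay on the shared legacy INTx line */
    }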
  
   /* Each vector services 1 RSS ring and 1 or more
@@@ -3428,12 -3093,12 +3428,12 @@@ static void ql_free_irq(struct ql_adapt
                        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
                                free_irq(qdev->msi_x_entry[i].vector,
                                         &qdev->rx_ring[i]);
 -                              QPRINTK(qdev, IFDOWN, DEBUG,
 -                                      "freeing msix interrupt %d.\n", i);
 +                              netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
 +                                           "freeing msix interrupt %d.\n", i);
                        } else {
                                free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
 -                              QPRINTK(qdev, IFDOWN, DEBUG,
 -                                      "freeing msi interrupt %d.\n", i);
 +                              netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
 +                                           "freeing msi interrupt %d.\n", i);
                        }
                }
        }
@@@ -3458,33 -3123,32 +3458,33 @@@ static int ql_request_irq(struct ql_ada
                                             intr_context->name,
                                             &qdev->rx_ring[i]);
                        if (status) {
 -                              QPRINTK(qdev, IFUP, ERR,
 -                                      "Failed request for MSIX interrupt %d.\n",
 -                                      i);
 +                              netif_err(qdev, ifup, qdev->ndev,
 +                                        "Failed request for MSIX interrupt %d.\n",
 +                                        i);
                                goto err_irq;
                        } else {
 -                              QPRINTK(qdev, IFUP, DEBUG,
 -                                      "Hooked intr %d, queue type %s%s%s, with name %s.\n",
 -                                      i,
 -                                      qdev->rx_ring[i].type ==
 -                                      DEFAULT_Q ? "DEFAULT_Q" : "",
 -                                      qdev->rx_ring[i].type ==
 -                                      TX_Q ? "TX_Q" : "",
 -                                      qdev->rx_ring[i].type ==
 -                                      RX_Q ? "RX_Q" : "", intr_context->name);
 +                              netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                                           "Hooked intr %d, queue type %s, with name %s.\n",
 +                                           i,
 +                                           qdev->rx_ring[i].type == DEFAULT_Q ?
 +                                           "DEFAULT_Q" :
 +                                           qdev->rx_ring[i].type == TX_Q ?
 +                                           "TX_Q" :
 +                                           qdev->rx_ring[i].type == RX_Q ?
 +                                           "RX_Q" : "",
 +                                           intr_context->name);
                        }
                } else {
 -                      QPRINTK(qdev, IFUP, DEBUG,
 -                              "trying msi or legacy interrupts.\n");
 -                      QPRINTK(qdev, IFUP, DEBUG,
 -                              "%s: irq = %d.\n", __func__, pdev->irq);
 -                      QPRINTK(qdev, IFUP, DEBUG,
 -                              "%s: context->name = %s.\n", __func__,
 -                             intr_context->name);
 -                      QPRINTK(qdev, IFUP, DEBUG,
 -                              "%s: dev_id = 0x%p.\n", __func__,
 -                             &qdev->rx_ring[0]);
 +                      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                                   "trying msi or legacy interrupts.\n");
 +                      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                                   "%s: irq = %d.\n", __func__, pdev->irq);
 +                      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                                   "%s: context->name = %s.\n", __func__,
 +                                   intr_context->name);
 +                      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                                   "%s: dev_id = 0x%p.\n", __func__,
 +                                   &qdev->rx_ring[0]);
                        status =
                            request_irq(pdev->irq, qlge_isr,
                                        test_bit(QL_MSI_ENABLED,
                        if (status)
                                goto err_irq;
  
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Hooked intr %d, queue type %s%s%s, with name %s.\n",
 -                              i,
 -                              qdev->rx_ring[0].type ==
 -                              DEFAULT_Q ? "DEFAULT_Q" : "",
 -                              qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
 -                              qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
 -                              intr_context->name);
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Hooked intr %d, queue type %s, with name %s.\n",
 +                                i,
 +                                qdev->rx_ring[0].type == DEFAULT_Q ?
 +                                "DEFAULT_Q" :
 +                                qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
 +                                qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
 +                                intr_context->name);
                }
                intr_context->hooked = 1;
        }
        return status;
  err_irq:
 -      QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
 +      netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
        ql_free_irq(qdev);
        return status;
  }
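
Note: both request paths above replace a "%s%s%s" format, whose three arguments were mostly empty strings, with a single "%s" fed by a chained conditional; since rx_ring[i].type matches exactly one of DEFAULT_Q/TX_Q/RX_Q, exactly one label is ever selected. A sketch of the same selection written as a helper, with illustrative enum values:

    /* Sketch: one label per mutually exclusive queue type. */
    #include <stdio.h>

    enum q_type { DEFAULT_Q, TX_Q, RX_Q };

    static const char *q_label(enum q_type t)
    {
            return t == DEFAULT_Q ? "DEFAULT_Q" :
                   t == TX_Q      ? "TX_Q" :
                   t == RX_Q      ? "RX_Q" : "";
    }

    int main(void)
    {
            printf("Hooked intr 0, queue type %s\n", q_label(TX_Q));
            return 0;
    }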
@@@ -3541,15 -3205,14 +3541,15 @@@ static int ql_start_rss(struct ql_adapt
        memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
        memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
  
 -      QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
 +      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
  
        status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
                return status;
        }
 -      QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
 +      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                   "Successfully loaded RICB.\n");
        return status;
  }
  
@@@ -3564,8 -3227,9 +3564,8 @@@ static int ql_clear_routing_entries(str
        for (i = 0; i < 16; i++) {
                status = ql_set_routing_reg(qdev, i, 0, 0);
                if (status) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Failed to init routing register for CAM "
 -                              "packets.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Failed to init routing register for CAM packets.\n");
                        break;
                }
        }
@@@ -3589,14 -3253,14 +3589,14 @@@ static int ql_route_initialize(struct q
  
        status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Failed to init routing register for error packets.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Failed to init routing register for error packets.\n");
                goto exit;
        }
        status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Failed to init routing register for broadcast packets.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Failed to init routing register for broadcast packets.\n");
                goto exit;
        }
        /* If we have more than one inbound queue, then turn on RSS in the
                status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
                                        RT_IDX_RSS_MATCH, 1);
                if (status) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Failed to init routing register for MATCH RSS packets.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Failed to init routing register for MATCH RSS packets.\n");
                        goto exit;
                }
        }
        status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
                                    RT_IDX_CAM_HIT, 1);
        if (status)
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Failed to init routing register for CAM packets.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Failed to init routing register for CAM packets.\n");
  exit:
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
        return status;
@@@ -3634,13 -3298,13 +3634,13 @@@ int ql_cam_route_initialize(struct ql_a
        set &= qdev->port_link_up;
        status = ql_set_mac_addr(qdev, set);
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
                return status;
        }
  
        status = ql_route_initialize(qdev);
        if (status)
 -              QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
  
        return status;
  }
@@@ -3668,15 -3332,15 +3668,15 @@@ static int ql_adapter_initialize(struc
  
        /* Enable the function, set pagesize, enable error checking. */
        value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
 -          FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
 +          FSC_EC | FSC_VM_PAGE_4K;
 +      value |= SPLT_SETTING;
  
        /* Set/clear header splitting. */
        mask = FSC_VM_PAGESIZE_MASK |
            FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
        ql_write32(qdev, FSC, mask | value);
  
 -      ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
 -              min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
 +      ql_write32(qdev, SPLT_HDR, SPLT_LEN);
  
        /* Set RX packet routing to use port/pci function on which the
         * packet arrived on in addition to usual frame routing.
        for (i = 0; i < qdev->rx_ring_count; i++) {
                status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
                if (status) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Failed to start rx ring[%d].\n", i);
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Failed to start rx ring[%d].\n", i);
                        return status;
                }
        }
        if (qdev->rss_ring_count > 1) {
                status = ql_start_rss(qdev);
                if (status) {
 -                      QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
 +                      netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
                        return status;
                }
        }
        for (i = 0; i < qdev->tx_ring_count; i++) {
                status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
                if (status) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Failed to start tx ring[%d].\n", i);
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Failed to start tx ring[%d].\n", i);
                        return status;
                }
        }
        /* Initialize the port and set the max framesize. */
        status = qdev->nic_ops->port_initialize(qdev);
        if (status)
 -              QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
  
        /* Set up the MAC address and frame routing filter. */
        status = ql_cam_route_initialize(qdev);
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR,
 -                              "Failed to init CAM/Routing tables.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Failed to init CAM/Routing tables.\n");
                return status;
        }
  
        /* Start NAPI for the RSS queues. */
        for (i = 0; i < qdev->rss_ring_count; i++) {
 -              QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
 -                      i);
 +              netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                           "Enabling NAPI for rx_ring[%d].\n", i);
                napi_enable(&qdev->rx_ring[i].napi);
        }
  
@@@ -3765,7 -3429,7 +3765,7 @@@ static int ql_adapter_reset(struct ql_a
        /* Clear all the entries in the routing table. */
        status = ql_clear_routing_entries(qdev);
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
                return status;
        }
  
        } while (time_before(jiffies, end_jiffies));
  
        if (value & RST_FO_FR) {
 -              QPRINTK(qdev, IFDOWN, ERR,
 -                      "ETIMEDOUT!!! errored out of resetting the chip!\n");
 +              netif_err(qdev, ifdown, qdev->ndev,
 +                        "ETIMEDOUT!!! errored out of resetting the chip!\n");
                status = -ETIMEDOUT;
        }
  
@@@ -3802,17 -3466,16 +3802,17 @@@ static void ql_display_dev_info(struct 
  {
        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
  
 -      QPRINTK(qdev, PROBE, INFO,
 -              "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
 -              "XG Roll = %d, XG Rev = %d.\n",
 -              qdev->func,
 -              qdev->port,
 -              qdev->chip_rev_id & 0x0000000f,
 -              qdev->chip_rev_id >> 4 & 0x0000000f,
 -              qdev->chip_rev_id >> 8 & 0x0000000f,
 -              qdev->chip_rev_id >> 12 & 0x0000000f);
 -      QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
 +      netif_info(qdev, probe, qdev->ndev,
 +                 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
 +                 "XG Roll = %d, XG Rev = %d.\n",
 +                 qdev->func,
 +                 qdev->port,
 +                 qdev->chip_rev_id & 0x0000000f,
 +                 qdev->chip_rev_id >> 4 & 0x0000000f,
 +                 qdev->chip_rev_id >> 8 & 0x0000000f,
 +                 qdev->chip_rev_id >> 12 & 0x0000000f);
 +      netif_info(qdev, probe, qdev->ndev,
 +                 "MAC address %pM\n", ndev->dev_addr);
  }
  
  int ql_wol(struct ql_adapter *qdev)
  
        if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
                        WAKE_MCAST | WAKE_BCAST)) {
 -              QPRINTK(qdev, IFDOWN, ERR,
 -                      "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
 -                      qdev->wol);
 +              netif_err(qdev, ifdown, qdev->ndev,
 +                        "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
 +                        qdev->wol);
                return -EINVAL;
        }
  
        if (qdev->wol & WAKE_MAGIC) {
                status = ql_mb_wol_set_magic(qdev, 1);
                if (status) {
 -                      QPRINTK(qdev, IFDOWN, ERR,
 -                              "Failed to set magic packet on %s.\n",
 -                              qdev->ndev->name);
 +                      netif_err(qdev, ifdown, qdev->ndev,
 +                                "Failed to set magic packet on %s.\n",
 +                                qdev->ndev->name);
                        return status;
                } else
 -                      QPRINTK(qdev, DRV, INFO,
 -                              "Enabled magic packet successfully on %s.\n",
 -                              qdev->ndev->name);
 +                      netif_info(qdev, drv, qdev->ndev,
 +                                 "Enabled magic packet successfully on %s.\n",
 +                                 qdev->ndev->name);
  
                wol |= MB_WOL_MAGIC_PKT;
        }
        if (qdev->wol) {
                wol |= MB_WOL_MODE_ON;
                status = ql_mb_wol_mode(qdev, wol);
 -              QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
 -                      (status == 0) ? "Successfully set" : "Failed", wol,
 -                      qdev->ndev->name);
 +              netif_err(qdev, drv, qdev->ndev,
 +                        "WOL %s (wol code 0x%x) on %s\n",
-                         (status == 0) ? "Sucessfully set" : "Failed",
++                        (status == 0) ? "Successfully set" : "Failed",
 +                        wol, qdev->ndev->name);
        }
  
        return status;
@@@ -3876,7 -3538,6 +3876,7 @@@ static int ql_adapter_down(struct ql_ad
        cancel_delayed_work_sync(&qdev->mpi_reset_work);
        cancel_delayed_work_sync(&qdev->mpi_work);
        cancel_delayed_work_sync(&qdev->mpi_idc_work);
 +      cancel_delayed_work_sync(&qdev->mpi_core_to_log);
        cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
  
        for (i = 0; i < qdev->rss_ring_count; i++)
  
        status = ql_adapter_reset(qdev);
        if (status)
 -              QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
 -                      qdev->func);
 +              netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
 +                        qdev->func);
        return status;
  }
  
@@@ -3908,7 -3569,7 +3908,7 @@@ static int ql_adapter_up(struct ql_adap
  
        err = ql_adapter_initialize(qdev);
        if (err) {
 -              QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
 +              netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
                goto err_init;
        }
        set_bit(QL_ADAPTER_UP, &qdev->flags);
@@@ -3940,7 -3601,7 +3940,7 @@@ static int ql_get_adapter_resources(str
        int status = 0;
  
        if (ql_alloc_mem_resources(qdev)) {
 -              QPRINTK(qdev, IFUP, ERR, "Unable to  allocate memory.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
                return -ENOMEM;
        }
        status = ql_request_irq(qdev);
@@@ -3951,16 -3612,6 +3951,16 @@@ static int qlge_close(struct net_devic
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
  
 +      /* If we hit the pci_channel_io_perm_failure
 +       * condition, then we have already
 +       * brought the adapter down.
 +       */
 +      if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
 +              netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
 +              clear_bit(QL_EEH_FATAL, &qdev->flags);
 +              return 0;
 +      }
 +
        /*
         * Wait for device to recover from a reset.
         * (Rarely happens, but possible.)
@@@ -4030,10 -3681,9 +4030,10 @@@ static int ql_configure_rings(struct ql
                        rx_ring->lbq_size =
                            rx_ring->lbq_len * sizeof(__le64);
                        rx_ring->lbq_buf_size = (u16)lbq_buf_len;
 -                      QPRINTK(qdev, IFUP, DEBUG,
 -                              "lbq_buf_size %d, order = %d\n",
 -                              rx_ring->lbq_buf_size, qdev->lbq_buf_order);
 +                      netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 +                                   "lbq_buf_size %d, order = %d\n",
 +                                   rx_ring->lbq_buf_size,
 +                                   qdev->lbq_buf_order);
                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
                        rx_ring->sbq_size =
                            rx_ring->sbq_len * sizeof(__le64);
@@@ -4097,14 -3747,14 +4097,14 @@@ static int ql_change_rx_buffers(struct 
        if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
                int i = 3;
                while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                               "Waiting for adapter UP...\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Waiting for adapter UP...\n");
                        ssleep(1);
                }
  
                if (!i) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                       "Timed out waiting for adapter UP\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Timed out waiting for adapter UP\n");
                        return -ETIMEDOUT;
                }
        }
  
        return status;
  error:
 -      QPRINTK(qdev, IFUP, ALERT,
 -              "Driver up/down cycle failed, closing device.\n");
 +      netif_alert(qdev, ifup, qdev->ndev,
 +                  "Driver up/down cycle failed, closing device.\n");
        set_bit(QL_ADAPTER_UP, &qdev->flags);
        dev_close(qdev->ndev);
        return status;
@@@ -4143,25 -3793,28 +4143,25 @@@ static int qlge_change_mtu(struct net_d
        int status;
  
        if (ndev->mtu == 1500 && new_mtu == 9000) {
 -              QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
        } else if (ndev->mtu == 9000 && new_mtu == 1500) {
 -              QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
 -      } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
 -                 (ndev->mtu == 9000 && new_mtu == 9000)) {
 -              return 0;
 +              netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
        } else
                return -EINVAL;
  
        queue_delayed_work(qdev->workqueue,
                        &qdev->mpi_port_cfg_work, 3*HZ);
  
 +      ndev->mtu = new_mtu;
 +
        if (!netif_running(qdev->ndev)) {
 -              ndev->mtu = new_mtu;
                return 0;
        }
  
 -      ndev->mtu = new_mtu;
        status = ql_change_rx_buffers(qdev);
        if (status) {
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Changing MTU failed.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Changing MTU failed.\n");
        }
  
        return status;
@@@ -4221,8 -3874,8 +4221,8 @@@ static void qlge_set_multicast_list(str
                if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
                        if (ql_set_routing_reg
                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
 -                              QPRINTK(qdev, HW, ERR,
 -                                      "Failed to set promiscous mode.\n");
 +                              netif_err(qdev, hw, qdev->ndev,
 +                                        "Failed to set promiscuous mode.\n");
                        } else {
                                set_bit(QL_PROMISCUOUS, &qdev->flags);
                        }
                if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
                        if (ql_set_routing_reg
                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
 -                              QPRINTK(qdev, HW, ERR,
 -                                      "Failed to clear promiscous mode.\n");
 +                              netif_err(qdev, hw, qdev->ndev,
 +                                        "Failed to clear promiscuous mode.\n");
                        } else {
                                clear_bit(QL_PROMISCUOUS, &qdev->flags);
                        }
         * transition is taking place.
         */
        if ((ndev->flags & IFF_ALLMULTI) ||
 -          (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
 +          (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
                if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
                        if (ql_set_routing_reg
                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
 -                              QPRINTK(qdev, HW, ERR,
 -                                      "Failed to set all-multi mode.\n");
 +                              netif_err(qdev, hw, qdev->ndev,
 +                                        "Failed to set all-multi mode.\n");
                        } else {
                                set_bit(QL_ALLMULTI, &qdev->flags);
                        }
                if (test_bit(QL_ALLMULTI, &qdev->flags)) {
                        if (ql_set_routing_reg
                            (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
 -                              QPRINTK(qdev, HW, ERR,
 -                                      "Failed to clear all-multi mode.\n");
 +                              netif_err(qdev, hw, qdev->ndev,
 +                                        "Failed to clear all-multi mode.\n");
                        } else {
                                clear_bit(QL_ALLMULTI, &qdev->flags);
                        }
                }
        }
  
 -      if (ndev->mc_count) {
 +      if (!netdev_mc_empty(ndev)) {
                status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
                if (status)
                        goto exit;
 -              for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
 -                   i++, mc_ptr = mc_ptr->next)
 +              i = 0;
 +              netdev_for_each_mc_addr(mc_ptr, ndev) {
                        if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
                                                MAC_ADDR_TYPE_MULTI_MAC, i)) {
 -                              QPRINTK(qdev, HW, ERR,
 -                                      "Failed to loadmulticast address.\n");
 +                              netif_err(qdev, hw, qdev->ndev,
 +                                        "Failed to load multicast address.\n");
                                ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
                                goto exit;
                        }
 +                      i++;
 +              }
                ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
                if (ql_set_routing_reg
                    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
 -                      QPRINTK(qdev, HW, ERR,
 -                              "Failed to set multicast match mode.\n");
 +                      netif_err(qdev, hw, qdev->ndev,
 +                                "Failed to set multicast match mode.\n");
                } else {
                        set_bit(QL_ALLMULTI, &qdev->flags);
                }
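
Note: this hunk also tracks the net core's multicast-list API change: the open-coded walk of ndev->mc_list with ndev->mc_count becomes netdev_mc_empty()/netdev_mc_count() plus the netdev_for_each_mc_addr() iterator. A sketch of the new-style walk as used here; dmi_addr belongs to struct dev_mc_list in this kernel generation, and program_mc_filter() is a hypothetical stand-in for ql_set_mac_addr_reg():

    /* Sketch of the iterator-based multicast walk (assumes <linux/netdevice.h>). */
    struct dev_mc_list *mc_ptr;
    int i = 0;

    if (!netdev_mc_empty(ndev)) {
            netdev_for_each_mc_addr(mc_ptr, ndev) {
                    program_mc_filter(mc_ptr->dmi_addr, i);  /* hypothetical */
                    i++;
            }
    }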
@@@ -4303,8 -3954,6 +4303,8 @@@ static int qlge_set_mac_address(struct 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 +      /* Update local copy of current mac address. */
 +      memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
  
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
        status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
        if (status)
 -              QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
 +              netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        return status;
  }
@@@ -4345,8 -3994,8 +4345,8 @@@ static void ql_asic_reset_work(struct w
        rtnl_unlock();
        return;
  error:
 -      QPRINTK(qdev, IFUP, ALERT,
 -              "Driver up/down cycle failed, closing device\n");
 +      netif_alert(qdev, ifup, qdev->ndev,
 +                  "Driver up/down cycle failed, closing device\n");
  
        set_bit(QL_ADAPTER_UP, &qdev->flags);
        dev_close(qdev->ndev);
@@@ -4445,7 -4094,6 +4445,7 @@@ static void ql_release_all(struct pci_d
                iounmap(qdev->reg_base);
        if (qdev->doorbell_area)
                iounmap(qdev->doorbell_area);
 +      vfree(qdev->mpi_coredump);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
  }
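
Note: the vfree() added here pairs with the vmalloc() of the MPI coredump buffer a few hunks below in ql_init_device(); vmalloc() suits a buffer this large since it does not need physically contiguous pages, and vfree(NULL) is a no-op, so the unconditional free is safe even when qlge_mpi_coredump was never set. A sketch of the pairing, assuming <linux/vmalloc.h>:

    /* Sketch: allocation in probe ... */
    qdev->mpi_coredump = vmalloc(sizeof(struct ql_mpi_coredump));
    if (qdev->mpi_coredump == NULL)
            return -ENOMEM;

    /* ... and teardown in release; vfree(NULL) is harmless. */
    vfree(qdev->mpi_coredump);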
@@@ -4527,17 -4175,6 +4527,17 @@@ static int __devinit ql_init_device(str
        spin_lock_init(&qdev->hw_lock);
        spin_lock_init(&qdev->stats_lock);
  
 +      if (qlge_mpi_coredump) {
 +              qdev->mpi_coredump =
 +                      vmalloc(sizeof(struct ql_mpi_coredump));
 +              if (qdev->mpi_coredump == NULL) {
 +                      dev_err(&pdev->dev, "Coredump alloc failed.\n");
 +                      err = -ENOMEM;
 +                      goto err_out2;
 +              }
 +              if (qlge_force_coredump)
 +                      set_bit(QL_FRC_COREDUMP, &qdev->flags);
 +      }
        /* make sure the EEPROM is good */
        err = qdev->nic_ops->get_flash(qdev);
        if (err) {
        }
  
        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
 +      /* Keep local copy of current mac address. */
 +      memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
  
        /* Set up the default ring sizes. */
        qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
        INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
        INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
 +      INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
        init_completion(&qdev->ide_completion);
  
        if (!cards_found) {
@@@ -4600,21 -4234,6 +4600,21 @@@ static const struct net_device_ops qlge
        .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
  };
  
 +static void ql_timer(unsigned long data)
 +{
 +      struct ql_adapter *qdev = (struct ql_adapter *)data;
 +      u32 var = 0;
 +
 +      var = ql_read32(qdev, STS);
 +      if (pci_channel_offline(qdev->pdev)) {
 +              netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
 +              return;
 +      }
 +
 +      qdev->timer.expires = jiffies + (5*HZ);
 +      add_timer(&qdev->timer);
 +}
 +
  static int __devinit qlge_probe(struct pci_dev *pdev,
                                const struct pci_device_id *pci_entry)
  {
                pci_disable_device(pdev);
                return err;
        }
 +      /* Start up the timer to trigger EEH if
 +       * the bus goes dead
 +       */
 +      init_timer_deferrable(&qdev->timer);
 +      qdev->timer.data = (unsigned long)qdev;
 +      qdev->timer.function = ql_timer;
 +      qdev->timer.expires = jiffies + (5*HZ);
 +      add_timer(&qdev->timer);
        ql_link_off(qdev);
        ql_display_dev_info(ndev);
        atomic_set(&qdev->lb_count, 0);
@@@ -4694,8 -4305,6 +4694,8 @@@ int ql_clean_lb_rx_ring(struct rx_ring 
  static void __devexit qlge_remove(struct pci_dev *pdev)
  {
        struct net_device *ndev = pci_get_drvdata(pdev);
 +      struct ql_adapter *qdev = netdev_priv(ndev);
 +      del_timer_sync(&qdev->timer);
        unregister_netdev(ndev);
        ql_release_all(pdev);
        pci_disable_device(pdev);
@@@ -4718,7 -4327,6 +4718,7 @@@ static void ql_eeh_close(struct net_dev
        cancel_delayed_work_sync(&qdev->mpi_reset_work);
        cancel_delayed_work_sync(&qdev->mpi_work);
        cancel_delayed_work_sync(&qdev->mpi_idc_work);
 +      cancel_delayed_work_sync(&qdev->mpi_core_to_log);
        cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
  
        for (i = 0; i < qdev->rss_ring_count; i++)
@@@ -4738,7 -4346,6 +4738,7 @@@ static pci_ers_result_t qlge_io_error_d
                                               enum pci_channel_state state)
  {
        struct net_device *ndev = pci_get_drvdata(pdev);
 +      struct ql_adapter *qdev = netdev_priv(ndev);
  
        switch (state) {
        case pci_channel_io_normal:
        case pci_channel_io_perm_failure:
                dev_err(&pdev->dev,
                        "%s: pci_channel_io_perm_failure.\n", __func__);
 +              ql_eeh_close(ndev);
 +              set_bit(QL_EEH_FATAL, &qdev->flags);
                return PCI_ERS_RESULT_DISCONNECT;
        }
  
@@@ -4776,18 -4381,11 +4776,18 @@@ static pci_ers_result_t qlge_io_slot_re
  
        pci_restore_state(pdev);
        if (pci_enable_device(pdev)) {
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Cannot re-enable PCI device after reset.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
 +
 +      if (ql_adapter_reset(qdev)) {
 +              netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
 +              set_bit(QL_EEH_FATAL, &qdev->flags);
 +              return PCI_ERS_RESULT_DISCONNECT;
 +      }
 +
        return PCI_ERS_RESULT_RECOVERED;
  }
  
@@@ -4797,19 -4395,19 +4797,19 @@@ static void qlge_io_resume(struct pci_d
        struct ql_adapter *qdev = netdev_priv(ndev);
        int err = 0;
  
 -      if (ql_adapter_reset(qdev))
 -              QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
        if (netif_running(ndev)) {
                err = qlge_open(ndev);
                if (err) {
 -                      QPRINTK(qdev, IFUP, ERR,
 -                              "Device initialization failed after reset.\n");
 +                      netif_err(qdev, ifup, qdev->ndev,
 +                                "Device initialization failed after reset.\n");
                        return;
                }
        } else {
 -              QPRINTK(qdev, IFUP, ERR,
 -                      "Device was not running prior to EEH.\n");
 +              netif_err(qdev, ifup, qdev->ndev,
 +                        "Device was not running prior to EEH.\n");
        }
 +      qdev->timer.expires = jiffies + (5*HZ);
 +      add_timer(&qdev->timer);
        netif_device_attach(ndev);
  }
  
@@@ -4826,7 -4424,6 +4826,7 @@@ static int qlge_suspend(struct pci_dev 
        int err;
  
        netif_device_detach(ndev);
 +      del_timer_sync(&qdev->timer);
  
        if (netif_running(ndev)) {
                err = ql_adapter_down(qdev);
@@@ -4857,7 -4454,7 +4857,7 @@@ static int qlge_resume(struct pci_dev *
        pci_restore_state(pdev);
        err = pci_enable_device(pdev);
        if (err) {
 -              QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
 +              netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);
                        return err;
        }
  
 +      qdev->timer.expires = jiffies + (5*HZ);
 +      add_timer(&qdev->timer);
        netif_device_attach(ndev);
  
        return 0;
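
The qlge changes above add a self-rearming deferrable timer: every five seconds ql_timer() reads a status register, and that access is what lets EEH notice a dead PCI bus; once pci_channel_offline() reports the channel down, the timer simply stops re-arming. A minimal sketch of the same pattern against the timer API in this tree (the names poll_timer/poll_fn are illustrative, not from the driver):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;

static void poll_fn(unsigned long data)
{
	/* ... read a device register so a dead bus is noticed ... */

	/* re-arm; ql_timer() skips this once the channel is offline */
	poll_timer.expires = jiffies + (5 * HZ);
	add_timer(&poll_timer);
}

static void poll_start(unsigned long ctx)
{
	/* deferrable: firing may be delayed and batched with other
	 * wakeups, which is fine for a watchdog and saves power */
	init_timer_deferrable(&poll_timer);
	poll_timer.data = ctx;
	poll_timer.function = poll_fn;
	poll_timer.expires = jiffies + (5 * HZ);
	add_timer(&poll_timer);
}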
diff --combined drivers/net/smsc9420.c
index 30110a11d73784299d5c3318c19166e209620ff2,2ae1972bcb46dd32e7bf943e7fd3f16af82c0491..34fa10d8ad40077e085d571633b59329a28672af
@@@ -80,7 -80,7 +80,7 @@@ struct smsc9420_pdata 
        int last_carrier;
  };
  
 -static const struct pci_device_id smsc9420_id_table[] = {
 +static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
        { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
        { 0, }
  };
@@@ -1062,12 -1062,12 +1062,12 @@@ static void smsc9420_set_multicast_list
                mac_cr &= (~MAC_CR_PRMS_);
                mac_cr |= MAC_CR_MCPAS_;
                mac_cr &= (~MAC_CR_HPFILT_);
 -      } else if (dev->mc_count > 0) {
 -              struct dev_mc_list *mc_list = dev->mc_list;
 +      } else if (!netdev_mc_empty(dev)) {
 +              struct dev_mc_list *mc_list;
                u32 hash_lo = 0, hash_hi = 0;
  
                smsc_dbg(HW, "Multicast filter enabled");
 -              while (mc_list) {
 +              netdev_for_each_mc_addr(mc_list, dev) {
                        u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
                        u32 mask = 1 << (bit_num & 0x1F);
  
                        else
                                hash_lo |= mask;
  
 -                      mc_list = mc_list->next;
                }
                smsc9420_reg_write(pd, HASHH, hash_hi);
                smsc9420_reg_write(pd, HASHL, hash_lo);
@@@ -1347,7 -1348,7 +1347,7 @@@ static int smsc9420_open(struct net_dev
  
        netif_carrier_off(dev);
  
-       /* disable, mask and acknowlege all interrupts */
+       /* disable, mask and acknowledge all interrupts */
        spin_lock_irqsave(&pd->int_lock, flags);
        int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
        smsc9420_reg_write(pd, INT_CFG, int_cfg);
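
This hunk, like several below, converts open-coded walks of dev->mc_list into the new multicast accessors. At this point in the tree they are thin wrappers over the old fields, so the conversion is behavior-preserving while freeing the list representation to change later; their definitions in include/linux/netdevice.h are approximately:

#define netdev_mc_count(dev)	((dev)->mc_count)
#define netdev_mc_empty(dev)	(netdev_mc_count(dev) == 0)
#define netdev_for_each_mc_addr(mclist, dev) \
	for (mclist = (dev)->mc_list; mclist; mclist = mclist->next)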
diff --combined drivers/net/spider_net.c
index 2f8a8c32021e1b2b0b1a28f70296e1367b3692a0,839b1f065d3c23edffb9f963ee4662073a7bc8a0..5ba9d989f8fc6819ca02e0dd21e8be5d042057f5
@@@ -72,7 -72,7 +72,7 @@@ MODULE_PARM_DESC(tx_descriptors, "numbe
  
  char spider_net_driver_name[] = "spidernet";
  
 -static struct pci_device_id spider_net_pci_tbl[] = {
 +static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
        { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
@@@ -474,7 -474,7 +474,7 @@@ spider_net_prepare_rx_descr(struct spid
   * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
   * @card: card structure
   *
-  * spider_net_enable_rxchtails sets the RX DMAC chain tail adresses in the
+  * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
   * chip by writing to the appropriate register. DMA is enabled in
   * spider_net_enable_rxdmac.
   */
@@@ -646,7 -646,7 +646,7 @@@ spider_net_set_multi(struct net_device 
        hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
        set_bit(0xfd, bitmask);
  
 -      for (mc = netdev->mc_list; mc; mc = mc->next) {
 +      netdev_for_each_mc_addr(mc, netdev) {
                hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
                set_bit(hash, bitmask);
        }
@@@ -1820,7 -1820,7 +1820,7 @@@ spider_net_enable_card(struct spider_ne
  
        spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
  
-       /* set chain tail adress for RX chains and
+       /* set chain tail address for RX chains and
         * enable DMA */
        spider_net_enable_rxchtails(card);
        spider_net_enable_rxdmac(card);
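
DEFINE_PCI_DEVICE_TABLE, adopted here and in the other drivers in this merge, is a pci.h convenience macro that makes the ID table const and annotates it __devinitconst; its expansion is roughly:

#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst

/* usage is unchanged from an open-coded table: */
static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);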
diff --combined drivers/net/sungem.c
index 4344017bfaef8effbb1b78d2da024952dfcce91b,a88fcb39ba1579dafd1f21b102ff04bbd68f1136..70196bc5fe61973de756b5d41793546e3e93b73a
@@@ -107,7 -107,7 +107,7 @@@ MODULE_LICENSE("GPL")
  #define GEM_MODULE_NAME       "gem"
  #define PFX GEM_MODULE_NAME ": "
  
 -static struct pci_device_id gem_pci_tbl[] = {
 +static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
  
@@@ -782,7 -782,7 +782,7 @@@ static int gem_rx(struct gem *gp, int w
                        break;
  
                /* When writing back RX descriptor, GEM writes status
-                * then buffer address, possibly in seperate transactions.
+                * then buffer address, possibly in separate transactions.
                 * If we don't wait for the chip to write both, we could
                 * post a new buffer to this descriptor then have GEM spam
                 * on the buffer address.  We sync on the RX completion
@@@ -1837,7 -1837,7 +1837,7 @@@ static u32 gem_setup_multicast(struct g
        int i;
  
        if ((gp->dev->flags & IFF_ALLMULTI) ||
 -          (gp->dev->mc_count > 256)) {
 +          (netdev_mc_count(gp->dev) > 256)) {
                for (i=0; i<16; i++)
                        writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
                rxcfg |= MAC_RXCFG_HFE;
        } else {
                u16 hash_table[16];
                u32 crc;
 -              struct dev_mc_list *dmi = gp->dev->mc_list;
 +              struct dev_mc_list *dmi;
                int i;
  
 -              for (i = 0; i < 16; i++)
 -                      hash_table[i] = 0;
 -
 -              for (i = 0; i < gp->dev->mc_count; i++) {
 +              memset(hash_table, 0, sizeof(hash_table));
 +              netdev_for_each_mc_addr(dmi, gp->dev) {
                        char *addrs = dmi->dmi_addr;
  
 -                      dmi = dmi->next;
 -
                        if (!(*addrs & 1))
                                continue;
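
The rest of this loop body (truncated by the hunk) hashes each address with ether_crc_le() and sets one bit in a 256-bit table that is then written to the MAC_HASH registers. A hedged sketch of the pattern; the bit placement follows the sungem convention, but gem_setup_multicast() is authoritative:

#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/string.h>

static void build_hash_filter(struct net_device *dev, u16 hash_table[16])
{
	struct dev_mc_list *dmi;
	u32 crc;

	memset(hash_table, 0, 16 * sizeof(u16));
	netdev_for_each_mc_addr(dmi, dev) {
		char *addrs = dmi->dmi_addr;

		if (!(*addrs & 1))	/* not a multicast address */
			continue;

		crc = ether_crc_le(6, addrs);
		crc >>= 24;		/* keep the top 8 bits */
		hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
	}
}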
  
diff --combined drivers/net/tehuti.c
index 0c9780217c870e63be00ffa520e0ba24d84d8807,ed4e9c42935c0da85014a5af687d5ff9531b66b2..f5493092521acd159f750fe18072aba812dd2816
   *
   */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include "tehuti.h"
  
 -static struct pci_device_id __devinitdata bdx_pci_tbl[] = {
 +static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
        {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@@ -107,24 -105,26 +107,24 @@@ static void print_hw_id(struct pci_dev 
        pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
        pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);
  
 -      printk(KERN_INFO "tehuti: %s%s\n", BDX_NIC_NAME,
 -             nic->port_num == 1 ? "" : ", 2-Port");
 -      printk(KERN_INFO
 -             "tehuti: srom 0x%x fpga %d build %u lane# %d"
 -             " max_pl 0x%x mrrs 0x%x\n",
 -             readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
 -             readl(nic->regs + FPGA_SEED),
 -             GET_LINK_STATUS_LANES(pci_link_status),
 -             GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
 +      pr_info("%s%s\n", BDX_NIC_NAME,
 +              nic->port_num == 1 ? "" : ", 2-Port");
 +      pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
 +              readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
 +              readl(nic->regs + FPGA_SEED),
 +              GET_LINK_STATUS_LANES(pci_link_status),
 +              GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
  }
  
  static void print_fw_id(struct pci_nic *nic)
  {
 -      printk(KERN_INFO "tehuti: fw 0x%x\n", readl(nic->regs + FW_VER));
 +      pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
  }
  
  static void print_eth_id(struct net_device *ndev)
  {
 -      printk(KERN_INFO "%s: %s, Port %c\n", ndev->name, BDX_NIC_NAME,
 -             (ndev->if_port == 0) ? 'A' : 'B');
 +      netdev_info(ndev, "%s, Port %c\n",
 +                  BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
  
  }
  
@@@ -160,7 -160,7 +160,7 @@@ bdx_fifo_init(struct bdx_priv *priv, st
        f->va = pci_alloc_consistent(priv->pdev,
                                     memsz + FIFO_EXTRA_SPACE, &f->da);
        if (!f->va) {
 -              ERR("pci_alloc_consistent failed\n");
 +              pr_err("pci_alloc_consistent failed\n");
                RET(-ENOMEM);
        }
        f->reg_CFG0 = reg_CFG0;
@@@ -204,13 -204,13 +204,13 @@@ static void bdx_link_changed(struct bdx
                if (netif_carrier_ok(priv->ndev)) {
                        netif_stop_queue(priv->ndev);
                        netif_carrier_off(priv->ndev);
 -                      ERR("%s: Link Down\n", priv->ndev->name);
 +                      netdev_err(priv->ndev, "Link Down\n");
                }
        } else {
                if (!netif_carrier_ok(priv->ndev)) {
                        netif_wake_queue(priv->ndev);
                        netif_carrier_on(priv->ndev);
 -                      ERR("%s: Link Up\n", priv->ndev->name);
 +                      netdev_err(priv->ndev, "Link Up\n");
                }
        }
  }
@@@ -226,10 -226,10 +226,10 @@@ static void bdx_isr_extra(struct bdx_pr
                bdx_link_changed(priv);
  
        if (isr & IR_PCIE_LINK)
 -              ERR("%s: PCI-E Link Fault\n", priv->ndev->name);
 +              netdev_err(priv->ndev, "PCI-E Link Fault\n");
  
        if (isr & IR_PCIE_TOUT)
 -              ERR("%s: PCI-E Time Out\n", priv->ndev->name);
 +              netdev_err(priv->ndev, "PCI-E Time Out\n");
  
  }
  
@@@ -345,7 -345,7 +345,7 @@@ out
                release_firmware(fw);
  
        if (rc) {
 -              ERR("%s: firmware loading failed\n", priv->ndev->name);
 +              netdev_err(priv->ndev, "firmware loading failed\n");
                if (rc == -EIO)
                        DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
                            READ_REG(priv, regVPC),
@@@ -419,11 -419,9 +419,11 @@@ static int bdx_hw_start(struct bdx_pri
        WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
                  GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
  
 -#define BDX_IRQ_TYPE  ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED)
 -      if ((rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
 -                       ndev->name, ndev)))
 +#define BDX_IRQ_TYPE  ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
 +
 +      rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
 +                       ndev->name, ndev);
 +      if (rc)
                goto err_irq;
        bdx_enable_interrupts(priv);
  
@@@ -464,7 -462,7 +464,7 @@@ static int bdx_hw_reset_direct(void __i
                        readl(regs + regRXD_CFG0_0);
                        return 0;
                }
 -      ERR("tehuti: HW reset failed\n");
 +      pr_err("HW reset failed\n");
        return 1;               /* failure */
  }
  
@@@ -488,7 -486,7 +488,7 @@@ static int bdx_hw_reset(struct bdx_pri
                        READ_REG(priv, regRXD_CFG0_0);
                        return 0;
                }
 -      ERR("tehuti: HW reset failed\n");
 +      pr_err("HW reset failed\n");
        return 1;               /* failure */
  }
  
@@@ -512,7 -510,8 +512,7 @@@ static int bdx_sw_reset(struct bdx_pri
                mdelay(10);
        }
        if (i == 50)
 -              ERR("%s: SW reset timeout. continuing anyway\n",
 -                  priv->ndev->name);
 +              netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
  
        /* 6. disable intrs */
        WRITE_REG(priv, regRDINTCM0, 0);
@@@ -605,15 -604,18 +605,15 @@@ static int bdx_open(struct net_device *
        if (netif_running(ndev))
                netif_stop_queue(priv->ndev);
  
 -      if ((rc = bdx_tx_init(priv)))
 -              goto err;
 -
 -      if ((rc = bdx_rx_init(priv)))
 -              goto err;
 -
 -      if ((rc = bdx_fw_load(priv)))
 +      if ((rc = bdx_tx_init(priv)) ||
 +          (rc = bdx_rx_init(priv)) ||
 +          (rc = bdx_fw_load(priv)))
                goto err;
  
        bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
  
 -      if ((rc = bdx_hw_start(priv)))
 +      rc = bdx_hw_start(priv);
 +      if (rc)
                goto err;
  
        napi_enable(&priv->napi);
@@@ -645,7 -647,7 +645,7 @@@ static int bdx_ioctl_priv(struct net_de
        if (cmd != SIOCDEVPRIVATE) {
                error = copy_from_user(data, ifr->ifr_data, sizeof(data));
                if (error) {
 -                      ERR("cant copy from user\n");
 +                      pr_err("can't copy from user\n");
                        RET(error);
                }
                DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
@@@ -706,7 -708,7 +706,7 @@@ static void __bdx_vlan_rx_vid(struct ne
        ENTER;
        DBG2("vid=%d value=%d\n", (int)vid, enable);
        if (unlikely(vid >= 4096)) {
 -              ERR("tehuti: invalid VID: %u (> 4096)\n", vid);
 +              pr_err("invalid VID: %u (>= 4096)\n", vid);
                RET();
        }
        reg = regVLAN_0 + (vid / 32) * 4;
@@@ -774,8 -776,8 +774,8 @@@ static int bdx_change_mtu(struct net_de
  
        /* enforce minimum frame size */
        if (new_mtu < ETH_ZLEN) {
 -              ERR("%s: %s mtu %d is less then minimal %d\n",
 -                  BDX_DRV_NAME, ndev->name, new_mtu, ETH_ZLEN);
 +              netdev_err(ndev, "mtu %d is less than minimal %d\n",
 +                         new_mtu, ETH_ZLEN);
                RET(-EINVAL);
        }
  
@@@ -806,7 -808,7 +806,7 @@@ static void bdx_setmulti(struct net_dev
                /* set IMF to accept all multicast frames */
                for (i = 0; i < MAC_MCST_HASH_NUM; i++)
                        WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
 -      } else if (ndev->mc_count) {
 +      } else if (!netdev_mc_empty(ndev)) {
                u8 hash;
                struct dev_mc_list *mclist;
                u32 reg, val;
                /* TBD: sort addresses and write them in ascending order
                 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
                 * multicast frames through IMF */
 -              mclist = ndev->mc_list;
 -
                /* accept the rest of addresses throu IMF */
 -              for (; mclist; mclist = mclist->next) {
 +              netdev_for_each_mc_addr(mclist, ndev) {
                        hash = 0;
                        for (i = 0; i < ETH_ALEN; i++)
                                hash ^= mclist->dmi_addr[i];
                }
  
        } else {
 -              DBG("only own mac %d\n", ndev->mc_count);
 +              DBG("only own mac %d\n", netdev_mc_count(ndev));
                rxf_val |= GMAC_RX_FILTER_AB;
        }
        WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
@@@ -1024,16 -1028,17 +1024,16 @@@ static int bdx_rx_init(struct bdx_priv 
                          regRXF_CFG0_0, regRXF_CFG1_0,
                          regRXF_RPTR_0, regRXF_WPTR_0))
                goto err_mem;
 -      if (!
 -          (priv->rxdb =
 -           bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
 -                           sizeof(struct rxf_desc))))
 +      priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
 +                                   sizeof(struct rxf_desc));
 +      if (!priv->rxdb)
                goto err_mem;
  
        priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
        return 0;
  
  err_mem:
 -      ERR("%s: %s: Rx init failed\n", BDX_DRV_NAME, priv->ndev->name);
 +      netdev_err(priv->ndev, "Rx init failed\n");
        return -ENOMEM;
  }
  
@@@ -1110,9 -1115,8 +1110,9 @@@ static void bdx_rx_alloc_skbs(struct bd
        ENTER;
        dno = bdx_rxdb_available(db) - 1;
        while (dno > 0) {
 -              if (!(skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN))) {
 -                      ERR("NO MEM: dev_alloc_skb failed\n");
 +              skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
 +              if (!skb) {
 +                      pr_err("NO MEM: dev_alloc_skb failed\n");
                        break;
                }
                skb->dev = priv->ndev;
@@@ -1333,7 -1337,9 +1333,7 @@@ static int bdx_rx_receive(struct bdx_pr
  static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
                       u16 rxd_vlan)
  {
 -      DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d "
 -          "pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d "
 -          "va_lo %d va_hi %d\n",
 +      DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
            GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
            GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
            GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
@@@ -1585,7 -1591,7 +1585,7 @@@ static int bdx_tx_init(struct bdx_priv 
        return 0;
  
  err_mem:
 -      ERR("tehuti: %s: Tx init failed\n", priv->ndev->name);
 +      netdev_err(priv->ndev, "Tx init failed\n");
        return -ENOMEM;
  }
  
@@@ -1603,7 -1609,7 +1603,7 @@@ static inline int bdx_tx_space(struct b
        fsize = f->m.rptr - f->m.wptr;
        if (fsize <= 0)
                fsize = f->m.memsz + fsize;
 -      return (fsize);
 +      return fsize;
  }
  
  /* bdx_tx_transmit - send packet to NIC
@@@ -1851,7 -1857,7 +1851,7 @@@ static void bdx_tx_push_desc(struct bdx
   * @data - desc's data
   * @size - desc's size
   *
-  * NOTE: this func does check for available space and, if neccessary, waits for
+  * NOTE: this func does check for available space and, if necessary, waits for
   *   NIC to read existing data before writing new one.
   */
  static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
@@@ -1931,9 -1937,8 +1931,9 @@@ bdx_probe(struct pci_dev *pdev, const s
                RET(-ENOMEM);
  
      /************** pci *****************/
 -      if ((err = pci_enable_device(pdev)))    /* it trigers interrupt, dunno why. */
 -              goto err_pci;                   /* it's not a problem though */
 +      err = pci_enable_device(pdev);
 +      if (err)                        /* it triggers interrupt, dunno why. */
 +              goto err_pci;           /* it's not a problem though */
  
        if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
            !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
        } else {
                if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
                    (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
 -                      printk(KERN_ERR "tehuti: No usable DMA configuration"
 -                                      ", aborting\n");
 +                      pr_err("No usable DMA configuration, aborting\n");
                        goto err_dma;
                }
                pci_using_dac = 0;
        }
  
 -      if ((err = pci_request_regions(pdev, BDX_DRV_NAME)))
 +      err = pci_request_regions(pdev, BDX_DRV_NAME);
 +      if (err)
                goto err_dma;
  
        pci_set_master(pdev);
        pciaddr = pci_resource_start(pdev, 0);
        if (!pciaddr) {
                err = -EIO;
 -              ERR("tehuti: no MMIO resource\n");
 +              pr_err("no MMIO resource\n");
                goto err_out_res;
        }
 -      if ((regionSize = pci_resource_len(pdev, 0)) < BDX_REGS_SIZE) {
 +      regionSize = pci_resource_len(pdev, 0);
 +      if (regionSize < BDX_REGS_SIZE) {
                err = -EIO;
 -              ERR("tehuti: MMIO resource (%x) too small\n", regionSize);
 +              pr_err("MMIO resource (%x) too small\n", regionSize);
                goto err_out_res;
        }
  
        nic->regs = ioremap(pciaddr, regionSize);
        if (!nic->regs) {
                err = -EIO;
 -              ERR("tehuti: ioremap failed\n");
 +              pr_err("ioremap failed\n");
                goto err_out_res;
        }
  
        if (pdev->irq < 2) {
                err = -EIO;
 -              ERR("tehuti: invalid irq (%d)\n", pdev->irq);
 +              pr_err("invalid irq (%d)\n", pdev->irq);
                goto err_out_iomap;
        }
        pci_set_drvdata(pdev, nic);
        nic->irq_type = IRQ_INTX;
  #ifdef BDX_MSI
        if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
 -              if ((err = pci_enable_msi(pdev)))
 -                      ERR("Tehuti: Can't eneble msi. error is %d\n", err);
 +              err = pci_enable_msi(pdev);
 +              if (err)
 +                      pr_err("Can't enable msi, error is %d\n", err);
                else
                        nic->irq_type = IRQ_MSI;
        } else
  
      /************** netdev **************/
        for (port = 0; port < nic->port_num; port++) {
 -              if (!(ndev = alloc_etherdev(sizeof(struct bdx_priv)))) {
 +              ndev = alloc_etherdev(sizeof(struct bdx_priv));
 +              if (!ndev) {
                        err = -ENOMEM;
 -                      printk(KERN_ERR "tehuti: alloc_etherdev failed\n");
 +                      pr_err("alloc_etherdev failed\n");
                        goto err_out_iomap;
                }
  
  
                /*bdx_hw_reset(priv); */
                if (bdx_read_mac(priv)) {
 -                      printk(KERN_ERR "tehuti: load MAC address failed\n");
 +                      pr_err("load MAC address failed\n");
                        goto err_out_iomap;
                }
                SET_NETDEV_DEV(ndev, &pdev->dev);
 -              if ((err = register_netdev(ndev))) {
 -                      printk(KERN_ERR "tehuti: register_netdev failed\n");
 +              err = register_netdev(ndev);
 +              if (err) {
 +                      pr_err("register_netdev failed\n");
                        goto err_out_free;
                }
                netif_carrier_off(ndev);
@@@ -2293,13 -2294,13 +2293,13 @@@ bdx_set_coalesce(struct net_device *net
  /* Convert RX fifo size to number of pending packets */
  static inline int bdx_rx_fifo_size_to_packets(int rx_size)
  {
 -      return ((FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc));
 +      return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
  }
  
  /* Convert TX fifo size to number of pending packets */
  static inline int bdx_tx_fifo_size_to_packets(int tx_size)
  {
 -      return ((FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ);
 +      return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
  }
  
  /*
@@@ -2391,10 -2392,10 +2391,10 @@@ static int bdx_get_sset_count(struct ne
        case ETH_SS_STATS:
                BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
                           != sizeof(struct bdx_stats) / sizeof(u64));
 -              return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
 -      default:
 -              return -EINVAL;
 +              return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
        }
 +
 +      return -EINVAL;
  }
  
  /*
@@@ -2492,8 -2493,10 +2492,8 @@@ static struct pci_driver bdx_pci_drive
   */
  static void __init print_driver_id(void)
  {
 -      printk(KERN_INFO "%s: %s, %s\n", BDX_DRV_NAME, BDX_DRV_DESC,
 -             BDX_DRV_VERSION);
 -      printk(KERN_INFO "%s: Options: hw_csum %s\n", BDX_DRV_NAME,
 -             BDX_MSI_STRING);
 +      pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
 +      pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
  }
  
  static int __init bdx_module_init(void)
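
The pr_fmt definition added at the top of tehuti.c is what lets every printk(KERN_ERR "tehuti: ...") in the file collapse to a bare pr_err() without losing the module-name prefix: the pr_*() macros paste pr_fmt(fmt) into their format string. Sketched:

/* must appear before any header that defines the pr_*() macros */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example(void)
{
	/* with KBUILD_MODNAME "tehuti", this logs
	 * "tehuti: HW reset failed" at KERN_ERR level */
	pr_err("HW reset failed\n");
}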
diff --combined drivers/net/tokenring/tms380tr.c
index 21a01753312aa168055d5163af3d238c66d5b287,4e4c402319c9c1e155c90bf0bf4748ff54f9bf94..ee71bcfb3753021ce13a454b0ff78f12ddaa860b
@@@ -693,7 -693,7 +693,7 @@@ static netdev_tx_t tms380tr_hardware_se
   * NOTE: This function should be used whenever the status of any TPL must be
   * modified by the driver, because the compiler may otherwise change the
   * order of instructions such that writing the TPL status may be executed at
-  * an undesireable time. When this function is used, the status is always
+  * an undesirable time. When this function is used, the status is always
   * written when the function is called.
   */
  static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status)
@@@ -1212,9 -1212,10 +1212,9 @@@ static void tms380tr_set_multicast_list
                }
                else
                {
 -                      int i;
 -                      struct dev_mc_list *mclist = dev->mc_list;
 -                      for (i=0; i< dev->mc_count; i++)
 -                      {
 +                      struct dev_mc_list *mclist;
 +
 +                      netdev_for_each_mc_addr(mclist, dev) {
                                ((char *)(&tp->ocpl.FunctAddr))[0] |=
                                        mclist->dmi_addr[2];
                                ((char *)(&tp->ocpl.FunctAddr))[1] |=
                                        mclist->dmi_addr[4];
                                ((char *)(&tp->ocpl.FunctAddr))[3] |=
                                        mclist->dmi_addr[5];
 -                              mclist = mclist->next;
                        }
                }
                tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
@@@ -2264,7 -2266,7 +2264,7 @@@ static void tms380tr_rcv_status_irq(str
   * This function should be used whenever the status of any RPL must be
   * modified by the driver, because the compiler may otherwise change the
   * order of instructions such that writing the RPL status may be executed
-  * at an undesireable time. When this function is used, the status is
+  * at an undesirable time. When this function is used, the status is
   * always written when the function is called.
   */
  static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
diff --combined drivers/net/tun.c
index ce1efa4c0b0dff284d94e05427f3743853aa1ac7,e572ecc09a443037919cb7bb8f43b684c525a55e..96c39bddc78c049e85411f6f298485b531356c78
@@@ -61,7 -61,6 +61,7 @@@
  #include <linux/crc32.h>
  #include <linux/nsproxy.h>
  #include <linux/virtio_net.h>
 +#include <linux/rcupdate.h>
  #include <net/net_namespace.h>
  #include <net/netns/generic.h>
  #include <net/rtnetlink.h>
@@@ -145,7 -144,6 +145,7 @@@ static int tun_attach(struct tun_struc
        err = 0;
        tfile->tun = tun;
        tun->tfile = tfile;
 +      tun->socket.file = file;
        dev_hold(tun->dev);
        sock_hold(tun->socket.sk);
        atomic_inc(&tfile->count);
@@@ -160,7 -158,6 +160,7 @@@ static void __tun_detach(struct tun_str
        /* Detach from net device */
        netif_tx_lock_bh(tun->dev);
        tun->tfile = NULL;
 +      tun->socket.file = NULL;
        netif_tx_unlock_bh(tun->dev);
  
        /* Drop read queue */
@@@ -367,10 -364,6 +367,10 @@@ static netdev_tx_t tun_net_xmit(struct 
        if (!check_filter(&tun->txflt, skb))
                goto drop;
  
 +      if (tun->socket.sk->sk_filter &&
 +          sk_filter(tun->socket.sk, skb))
 +              goto drop;
 +
        if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
                if (!(tun->flags & TUN_ONE_QUEUE)) {
                        /* Normal queueing mode. */
        /* Notify and wake up reader process */
        if (tun->flags & TUN_FASYNC)
                kill_fasync(&tun->fasync, SIGIO, POLL_IN);
 -      wake_up_interruptible(&tun->socket.wait);
 +      wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
 +                                 POLLRDNORM | POLLRDBAND);
        return NETDEV_TX_OK;
  
  drop:
@@@ -751,7 -743,7 +751,7 @@@ static __inline__ ssize_t tun_put_user(
        len = min_t(int, skb->len, len);
  
        skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
 -      total += len;
 +      total += skb->len;
  
        tun->dev->stats.tx_packets++;
        tun->dev->stats.tx_bytes += len;
        return total;
  }
  
 -static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
 -                          unsigned long count, loff_t pos)
 +static ssize_t tun_do_read(struct tun_struct *tun,
 +                         struct kiocb *iocb, const struct iovec *iv,
 +                         ssize_t len, int noblock)
  {
 -      struct file *file = iocb->ki_filp;
 -      struct tun_file *tfile = file->private_data;
 -      struct tun_struct *tun = __tun_get(tfile);
        DECLARE_WAITQUEUE(wait, current);
        struct sk_buff *skb;
 -      ssize_t len, ret = 0;
 -
 -      if (!tun)
 -              return -EBADFD;
 +      ssize_t ret = 0;
  
        DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
  
 -      len = iov_length(iv, count);
 -      if (len < 0) {
 -              ret = -EINVAL;
 -              goto out;
 -      }
 -
        add_wait_queue(&tun->socket.wait, &wait);
        while (len) {
                current->state = TASK_INTERRUPTIBLE;
  
                /* Read frames from the queue */
                if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
 -                      if (file->f_flags & O_NONBLOCK) {
 +                      if (noblock) {
                                ret = -EAGAIN;
                                break;
                        }
        current->state = TASK_RUNNING;
        remove_wait_queue(&tun->socket.wait, &wait);
  
 +      return ret;
 +}
 +
 +static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
 +                          unsigned long count, loff_t pos)
 +{
 +      struct file *file = iocb->ki_filp;
 +      struct tun_file *tfile = file->private_data;
 +      struct tun_struct *tun = __tun_get(tfile);
 +      ssize_t len, ret;
 +
 +      if (!tun)
 +              return -EBADFD;
 +      len = iov_length(iv, count);
 +      if (len < 0) {
 +              ret = -EINVAL;
 +              goto out;
 +      }
 +
 +      ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
 +      ret = min_t(ssize_t, ret, len);
  out:
        tun_put(tun);
        return ret;
@@@ -865,8 -847,7 +865,8 @@@ static void tun_sock_write_space(struc
                return;
  
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 -              wake_up_interruptible_sync(sk->sk_sleep);
 +              wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
 +                                              POLLWRNORM | POLLWRBAND);
  
        tun = tun_sk(sk)->tun;
        kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
@@@ -877,37 -858,6 +877,37 @@@ static void tun_sock_destruct(struct so
        free_netdev(tun_sk(sk)->tun->dev);
  }
  
 +static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
 +                     struct msghdr *m, size_t total_len)
 +{
 +      struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
 +      return tun_get_user(tun, m->msg_iov, total_len,
 +                          m->msg_flags & MSG_DONTWAIT);
 +}
 +
 +static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
 +                     struct msghdr *m, size_t total_len,
 +                     int flags)
 +{
 +      struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
 +      int ret;
 +      if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
 +              return -EINVAL;
 +      ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
 +                        flags & MSG_DONTWAIT);
 +      if (ret > total_len) {
 +              m->msg_flags |= MSG_TRUNC;
 +              ret = flags & MSG_TRUNC ? ret : total_len;
 +      }
 +      return ret;
 +}
 +
 +/* Ops structure to mimic raw sockets with tun */
 +static const struct proto_ops tun_socket_ops = {
 +      .sendmsg = tun_sendmsg,
 +      .recvmsg = tun_recvmsg,
 +};
 +
  static struct proto tun_proto = {
        .name           = "tun",
        .owner          = THIS_MODULE,
@@@ -1036,7 -986,6 +1036,7 @@@ static int tun_set_iff(struct net *net
                        goto err_free_dev;
  
                init_waitqueue_head(&tun->socket.wait);
 +              tun->socket.ops = &tun_socket_ops;
                sock_init_data(&tun->socket, sk);
                sk->sk_write_space = tun_sock_write_space;
                sk->sk_sndbuf = INT_MAX;
@@@ -1167,7 -1116,6 +1167,7 @@@ static long __tun_chr_ioctl(struct fil
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun;
        void __user* argp = (void __user*)arg;
 +      struct sock_fprog fprog;
        struct ifreq ifr;
        int sndbuf;
        int ret;
                tun->socket.sk->sk_sndbuf = sndbuf;
                break;
  
 +      case TUNATTACHFILTER:
 +              /* Can be set only for TAPs */
 +              ret = -EINVAL;
 +              if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
 +                      break;
 +              ret = -EFAULT;
 +              if (copy_from_user(&fprog, argp, sizeof(fprog)))
 +                      break;
 +
 +              ret = sk_attach_filter(&fprog, tun->socket.sk);
 +              break;
 +
 +      case TUNDETACHFILTER:
 +              /* Can be set only for TAPs */
 +              ret = -EINVAL;
 +              if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
 +                      break;
 +              ret = sk_detach_filter(tun->socket.sk);
 +              break;
 +
        default:
                ret = -EINVAL;
                break;
@@@ -1437,7 -1365,7 +1437,7 @@@ static int tun_chr_close(struct inode *
  
                __tun_detach(tun);
  
-               /* If desireable, unregister the netdevice. */
+               /* If desirable, unregister the netdevice. */
                if (!(tun->flags & TUN_PERSIST)) {
                        rtnl_lock();
                        if (dev->reg_state == NETREG_REGISTERED)
@@@ -1597,23 -1525,6 +1597,23 @@@ static void tun_cleanup(void
        rtnl_link_unregister(&tun_link_ops);
  }
  
 +/* Get an underlying socket object from tun file.  Returns error unless file is
 + * attached to a device.  The returned object works like a packet socket, it
 + * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 + * holding a reference to the file for as long as the socket is in use. */
 +struct socket *tun_get_socket(struct file *file)
 +{
 +      struct tun_struct *tun;
 +      if (file->f_op != &tun_fops)
 +              return ERR_PTR(-EINVAL);
 +      tun = tun_get(file);
 +      if (!tun)
 +              return ERR_PTR(-EBADFD);
 +      tun_put(tun);
 +      return &tun->socket;
 +}
 +EXPORT_SYMBOL_GPL(tun_get_socket);
 +
  module_init(tun_init);
  module_exit(tun_cleanup);
  MODULE_DESCRIPTION(DRV_DESCRIPTION);
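
The new TUNATTACHFILTER/TUNDETACHFILTER ioctls accept a classic BPF program (struct sock_fprog) and attach it to the tap's socket, so the sk_filter() check added in tun_net_xmit() can drop packets before they are queued. A hedged userspace sketch, assuming the ioctls are exported through linux/if_tun.h; the accept-all filter and the "tap0" name are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <linux/filter.h>

int main(void)
{
	/* one-instruction program: return ~0, i.e. accept the whole packet */
	struct sock_filter accept_all = BPF_STMT(BPF_RET | BPF_K, 0xffffffff);
	struct sock_fprog fprog = { .len = 1, .filter = &accept_all };
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;	/* filters are TAP-only */
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		return 1;

	if (ioctl(fd, TUNATTACHFILTER, &fprog) < 0)
		perror("TUNATTACHFILTER");	/* e.g. EINVAL on a TUN device */

	return 0;
}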
diff --combined drivers/net/typhoon.c
index e3ddcb8f29df39a5e45784b62f0ffe1b2cb15e7e,d1a80685e3a70c0678133eba72efbfdbdb26a9bd..2fbf15235c05ac8ee95a31669bc0315722a0b6b2
@@@ -98,10 -98,14 +98,10 @@@ static const int multicast_filter_limi
  #define TX_TIMEOUT  (2*HZ)
  
  #define PKT_BUF_SZ            1536
 -
 -#define DRV_MODULE_NAME               "typhoon"
 -#define DRV_MODULE_VERSION    "1.5.9"
 -#define DRV_MODULE_RELDATE    "Mar 2, 2009"
 -#define PFX                   DRV_MODULE_NAME ": "
 -#define ERR_PFX                       KERN_ERR PFX
  #define FIRMWARE_NAME         "3com/typhoon.bin"
  
 +#define pr_fmt(fmt)           KBUILD_MODNAME " " fmt
 +
  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/sched.h>
  #include <linux/in6.h>
  #include <linux/dma-mapping.h>
  #include <linux/firmware.h>
 +#include <generated/utsrelease.h>
  
  #include "typhoon.h"
  
 -static char version[] __devinitdata =
 -    "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 -
  MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
 -MODULE_VERSION(DRV_MODULE_VERSION);
 +MODULE_VERSION(UTS_RELEASE);
  MODULE_LICENSE("GPL");
  MODULE_FIRMWARE(FIRMWARE_NAME);
  MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
@@@ -155,8 -161,8 +155,8 @@@ module_param(use_mmio, int, 0)
  #endif
  
  struct typhoon_card_info {
 -      char *name;
 -      int capabilities;
 +      const char *name;
 +      const int capabilities;
  };
  
  #define TYPHOON_CRYPTO_NONE           0x00
@@@ -209,7 -215,7 +209,7 @@@ static struct typhoon_card_info typhoon
   * bit 8 indicates if this is a (0) copper or (1) fiber card
   * bits 12-16 indicate card type: (0) client and (1) server
   */
 -static struct pci_device_id typhoon_pci_tbl[] = {
 +static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
@@@ -293,6 -299,7 +293,6 @@@ struct typhoon 
        struct basic_ring       respRing;
        struct net_device_stats stats;
        struct net_device_stats stats_saved;
 -      const char *            name;
        struct typhoon_shared * shared;
        dma_addr_t              shared_dma;
        __le16                  xcvr_select;
@@@ -527,13 -534,13 +527,13 @@@ typhoon_process_response(struct typhoo
                } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
                        typhoon_hello(tp);
                } else {
 -                      printk(KERN_ERR "%s: dumping unexpected response "
 -                             "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
 -                             tp->name, le16_to_cpu(resp->cmd),
 -                             resp->numDesc, resp->flags,
 -                             le16_to_cpu(resp->parm1),
 -                             le32_to_cpu(resp->parm2),
 -                             le32_to_cpu(resp->parm3));
 +                      netdev_err(tp->dev,
 +                                 "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
 +                                 le16_to_cpu(resp->cmd),
 +                                 resp->numDesc, resp->flags,
 +                                 le16_to_cpu(resp->parm1),
 +                                 le32_to_cpu(resp->parm2),
 +                                 le32_to_cpu(resp->parm3));
                }
  
  cleanup:
@@@ -599,8 -606,9 +599,8 @@@ typhoon_issue_command(struct typhoon *t
        freeResp = typhoon_num_free_resp(tp);
  
        if(freeCmd < num_cmd || freeResp < num_resp) {
 -              printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
 -                      "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
 -                      freeResp, num_resp);
 +              netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
 +                         freeCmd, num_cmd, freeResp, num_resp);
                err = -ENOMEM;
                goto out;
        }
@@@ -725,7 -733,7 +725,7 @@@ typhoon_vlan_rx_register(struct net_dev
                spin_unlock_bh(&tp->state_lock);
                err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
                if(err < 0)
 -                      printk("%s: vlan offload error %d\n", tp->name, -err);
 +                      netdev_err(tp->dev, "vlan offload error %d\n", -err);
                spin_lock_bh(&tp->state_lock);
        }
  
@@@ -916,15 -924,17 +916,15 @@@ typhoon_set_rx_mode(struct net_device *
        filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
        if(dev->flags & IFF_PROMISC) {
                filter |= TYPHOON_RX_FILTER_PROMISCOUS;
 -      } else if((dev->mc_count > multicast_filter_limit) ||
 +      } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                  (dev->flags & IFF_ALLMULTI)) {
                /* Too many to match, or accept all multicasts. */
                filter |= TYPHOON_RX_FILTER_ALL_MCAST;
 -      } else if(dev->mc_count) {
 +      } else if (!netdev_mc_empty(dev)) {
                struct dev_mc_list *mclist;
 -              int i;
  
                memset(mc_filter, 0, sizeof(mc_filter));
 -              for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
 -                  i++, mclist = mclist->next) {
 +              netdev_for_each_mc_addr(mclist, dev) {
                        int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
                        mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
                }
@@@ -1010,7 -1020,7 +1010,7 @@@ typhoon_get_stats(struct net_device *de
                return saved;
  
        if(typhoon_do_get_stats(tp) < 0) {
 -              printk(KERN_ERR "%s: error getting stats\n", dev->name);
 +              netdev_err(dev, "error getting stats\n");
                return saved;
        }
  
@@@ -1052,8 -1062,8 +1052,8 @@@ typhoon_get_drvinfo(struct net_device *
                }
        }
  
 -      strcpy(info->driver, DRV_MODULE_NAME);
 -      strcpy(info->version, DRV_MODULE_VERSION);
 +      strcpy(info->driver, KBUILD_MODNAME);
 +      strcpy(info->version, UTS_RELEASE);
        strcpy(info->bus_info, pci_name(pci_dev));
  }
  
@@@ -1355,8 -1365,8 +1355,8 @@@ typhoon_request_firmware(struct typhoo
  
        err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
        if (err) {
 -              printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
 -                              tp->name, FIRMWARE_NAME);
 +              netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
 +                         FIRMWARE_NAME);
                return err;
        }
  
        return 0;
  
  invalid_fw:
 -      printk(KERN_ERR "%s: Invalid firmware image\n", tp->name);
 +      netdev_err(tp->dev, "Invalid firmware image\n");
        release_firmware(typhoon_fw);
        typhoon_fw = NULL;
        return -EINVAL;
@@@ -1428,7 -1438,7 +1428,7 @@@ typhoon_download_firmware(struct typhoo
        err = -ENOMEM;
        dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
        if(!dpage) {
 -              printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
 +              netdev_err(tp->dev, "no DMA mem for firmware\n");
                goto err_out;
        }
  
  
        err = -ETIMEDOUT;
        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 -              printk(KERN_ERR "%s: card ready timeout\n", tp->name);
 +              netdev_err(tp->dev, "card ready timeout\n");
                goto err_out_irq;
        }
  
                        if(typhoon_wait_interrupt(ioaddr) < 0 ||
                           ioread32(ioaddr + TYPHOON_REG_STATUS) !=
                           TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
 -                              printk(KERN_ERR "%s: segment ready timeout\n",
 -                                     tp->name);
 +                              netdev_err(tp->dev, "segment ready timeout\n");
                                goto err_out_irq;
                        }
  
                         * the checksum, we can do this once, at the end.
                         */
                        csum = csum_fold(csum_partial_copy_nocheck(image_data,
 -                                                                dpage, len,
 -                                                                0));
 +                                                                 dpage, len,
 +                                                                 0));
  
                        iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
                        iowrite32(le16_to_cpu((__force __le16)csum),
                        iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
                        typhoon_post_pci_writes(ioaddr);
                        iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
 -                             ioaddr + TYPHOON_REG_COMMAND);
 +                                      ioaddr + TYPHOON_REG_COMMAND);
  
                        image_data += len;
                        load_addr += len;
        if(typhoon_wait_interrupt(ioaddr) < 0 ||
           ioread32(ioaddr + TYPHOON_REG_STATUS) !=
           TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
 -              printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
 +              netdev_err(tp->dev, "final segment ready timeout\n");
                goto err_out_irq;
        }
  
        iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
  
        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
 -              printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
 -                     tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
 +              netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
 +                         ioread32(ioaddr + TYPHOON_REG_STATUS));
                goto err_out_irq;
        }
  
@@@ -1544,7 -1555,7 +1544,7 @@@ typhoon_boot_3XP(struct typhoon *tp, u3
        void __iomem *ioaddr = tp->ioaddr;
  
        if(typhoon_wait_status(ioaddr, initial_status) < 0) {
 -              printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
 +              netdev_err(tp->dev, "boot ready timeout\n");
                goto out_timeout;
        }
  
                                ioaddr + TYPHOON_REG_COMMAND);
  
        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
 -              printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
 -                     tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
 +              netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
 +                         ioread32(ioaddr + TYPHOON_REG_STATUS));
                goto out_timeout;
        }
  
@@@ -1855,7 -1866,8 +1855,7 @@@ typhoon_interrupt(int irq, void *dev_in
                typhoon_post_pci_writes(ioaddr);
                __napi_schedule(&tp->napi);
        } else {
 -              printk(KERN_ERR "%s: Error, poll already scheduled\n",
 -                       dev->name);
 +              netdev_err(dev, "Error, poll already scheduled\n");
        }
        return IRQ_HANDLED;
  }
@@@ -1888,15 -1900,16 +1888,15 @@@ typhoon_sleep(struct typhoon *tp, pci_p
        xp_cmd.parm1 = events;
        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
        if(err < 0) {
 -              printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
 -                              tp->name, err);
 +              netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
 +                         err);
                return err;
        }
  
        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
        if(err < 0) {
 -              printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
 -                              tp->name, err);
 +              netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
                return err;
        }
  
@@@ -1947,12 -1960,12 +1947,12 @@@ typhoon_start_runtime(struct typhoon *t
  
        err = typhoon_download_firmware(tp);
        if(err < 0) {
 -              printk("%s: cannot load runtime on 3XP\n", tp->name);
 +              netdev_err(tp->dev, "cannot load runtime on 3XP\n");
                goto error_out;
        }
  
        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
 -              printk("%s: cannot boot 3XP\n", tp->name);
 +              netdev_err(tp->dev, "cannot boot 3XP\n");
                err = -EIO;
                goto error_out;
        }
@@@ -2056,7 -2069,9 +2056,7 @@@ typhoon_stop_runtime(struct typhoon *tp
        }
  
        if(i == TYPHOON_WAIT_TIMEOUT)
 -              printk(KERN_ERR
 -                     "%s: halt timed out waiting for Tx to complete\n",
 -                     tp->name);
 +              netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
  
        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
  
        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
 -              printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
 -                     tp->name);
 +              netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
  
        if(typhoon_reset(ioaddr, wait_type) < 0) {
 -              printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
 +              netdev_err(tp->dev, "unable to reset 3XP\n");
                return -ETIMEDOUT;
        }
  
@@@ -2095,8 -2111,9 +2095,8 @@@ typhoon_tx_timeout(struct net_device *d
        struct typhoon *tp = netdev_priv(dev);
  
        if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
 -              printk(KERN_WARNING "%s: could not reset in tx timeout\n",
 -                                      dev->name);
 +              netdev_warn(dev, "could not reset in tx timeout\n");
-               goto truely_dead;
+               goto truly_dead;
        }
  
        /* If we ever start using the Hi ring, it will need cleaning too */
        typhoon_free_rx_rings(tp);
  
        if(typhoon_start_runtime(tp) < 0) {
 -              printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
 -                                      dev->name);
 +              netdev_err(dev, "could not start runtime in tx timeout\n");
-               goto truely_dead;
+               goto truly_dead;
          }
  
        netif_wake_queue(dev);
        return;
  
- truely_dead:
+ truly_dead:
        /* Reset the hardware, and turn off carrier to avoid more timeouts */
        typhoon_reset(tp->ioaddr, NoWait);
        netif_carrier_off(dev);
@@@ -2129,7 -2147,7 +2129,7 @@@ typhoon_open(struct net_device *dev
  
        err = typhoon_wakeup(tp, WaitSleep);
        if(err < 0) {
 -              printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
 +              netdev_err(dev, "unable to wakeup device\n");
                goto out_sleep;
        }
  
@@@ -2154,13 -2172,14 +2154,13 @@@ out_irq
  
  out_sleep:
        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 -              printk(KERN_ERR "%s: unable to reboot into sleep img\n",
 -                              dev->name);
 +              netdev_err(dev, "unable to reboot into sleep img\n");
                typhoon_reset(tp->ioaddr, NoWait);
                goto out;
        }
  
        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
 -              printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
 +              netdev_err(dev, "unable to go back to sleep\n");
  
  out:
        return err;
@@@ -2175,7 -2194,7 +2175,7 @@@ typhoon_close(struct net_device *dev
        napi_disable(&tp->napi);
  
        if(typhoon_stop_runtime(tp, WaitSleep) < 0)
 -              printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
 +              netdev_err(dev, "unable to stop runtime\n");
  
        /* Make sure there is no irq handler running on a different CPU. */
        free_irq(dev->irq, dev);
        typhoon_init_rings(tp);
  
        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
 -              printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
 +              netdev_err(dev, "unable to boot sleep image\n");
  
        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
 -              printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
 +              netdev_err(dev, "unable to put card to sleep\n");
  
        return 0;
  }
@@@ -2205,12 -2224,14 +2205,12 @@@ typhoon_resume(struct pci_dev *pdev
                return 0;
  
        if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
 -              printk(KERN_ERR "%s: critical: could not wake up in resume\n",
 -                              dev->name);
 +              netdev_err(dev, "critical: could not wake up in resume\n");
                goto reset;
        }
  
        if(typhoon_start_runtime(tp) < 0) {
 -              printk(KERN_ERR "%s: critical: could not start runtime in "
 -                              "resume\n", dev->name);
 +              netdev_err(dev, "critical: could not start runtime in resume\n");
                goto reset;
        }
  
@@@ -2237,7 -2258,8 +2237,7 @@@ typhoon_suspend(struct pci_dev *pdev, p
        spin_lock_bh(&tp->state_lock);
        if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
                spin_unlock_bh(&tp->state_lock);
 -              printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
 -                              dev->name);
 +              netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
                return -EBUSY;
        }
        spin_unlock_bh(&tp->state_lock);
        netif_device_detach(dev);
  
        if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
 -              printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
 +              netdev_err(dev, "unable to stop runtime\n");
                goto need_resume;
        }
  
        typhoon_init_rings(tp);
  
        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 -              printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
 +              netdev_err(dev, "unable to boot sleep image\n");
                goto need_resume;
        }
  
        xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
        xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
        if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
 -              printk(KERN_ERR "%s: unable to set mac address in suspend\n",
 -                              dev->name);
 +              netdev_err(dev, "unable to set mac address in suspend\n");
                goto need_resume;
        }
  
        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
        xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
        if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
 -              printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
 -                              dev->name);
 +              netdev_err(dev, "unable to set rx filter in suspend\n");
                goto need_resume;
        }
  
        if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
 -              printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
 +              netdev_err(dev, "unable to put card to sleep\n");
                goto need_resume;
        }
  
@@@ -2327,7 -2351,7 +2327,7 @@@ out_unmap
  
  out:
        if(!mode)
 -              printk(KERN_INFO PFX "falling back to port IO\n");
 +              pr_info("%s: falling back to port IO\n", pci_name(pdev));
        return mode;
  }
  
@@@ -2347,6 -2371,7 +2347,6 @@@ static const struct net_device_ops typh
  static int __devinit
  typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
 -      static int did_version = 0;
        struct net_device *dev;
        struct typhoon *tp;
        int card_id = (int) ent->driver_data;
        struct cmd_desc xp_cmd;
        struct resp_desc xp_resp[3];
        int err = 0;
 -
 -      if(!did_version++)
 -              printk(KERN_INFO "%s", version);
 +      const char *err_msg;
  
        dev = alloc_etherdev(sizeof(*tp));
        if(dev == NULL) {
 -              printk(ERR_PFX "%s: unable to alloc new net device\n",
 -                     pci_name(pdev));
 +              err_msg = "unable to alloc new net device";
                err = -ENOMEM;
                goto error_out;
        }
  
        err = pci_enable_device(pdev);
        if(err < 0) {
 -              printk(ERR_PFX "%s: unable to enable device\n",
 -                     pci_name(pdev));
 +              err_msg = "unable to enable device";
                goto error_out_dev;
        }
  
        err = pci_set_mwi(pdev);
        if(err < 0) {
 -              printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
 +              err_msg = "unable to set MWI";
                goto error_out_disable;
        }
  
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if(err < 0) {
 -              printk(ERR_PFX "%s: No usable DMA configuration\n",
 -                     pci_name(pdev));
 +              err_msg = "No usable DMA configuration";
                goto error_out_mwi;
        }
  
        /* sanity checks on IO and MMIO BARs
         */
        if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
 -              printk(ERR_PFX
 -                     "%s: region #1 not a PCI IO resource, aborting\n",
 -                     pci_name(pdev));
 +              err_msg = "region #1 not a PCI IO resource, aborting";
                err = -ENODEV;
                goto error_out_mwi;
        }
        if(pci_resource_len(pdev, 0) < 128) {
 -              printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
 -                     pci_name(pdev));
 +              err_msg = "Invalid PCI IO region size, aborting";
                err = -ENODEV;
                goto error_out_mwi;
        }
        if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
 -              printk(ERR_PFX
 -                     "%s: region #1 not a PCI MMIO resource, aborting\n",
 -                     pci_name(pdev));
 +              err_msg = "region #1 not a PCI MMIO resource, aborting";
                err = -ENODEV;
                goto error_out_mwi;
        }
        if(pci_resource_len(pdev, 1) < 128) {
 -              printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
 -                     pci_name(pdev));
 +              err_msg = "Invalid PCI MMIO region size, aborting";
                err = -ENODEV;
                goto error_out_mwi;
        }
  
 -      err = pci_request_regions(pdev, "typhoon");
 +      err = pci_request_regions(pdev, KBUILD_MODNAME);
        if(err < 0) {
 -              printk(ERR_PFX "%s: could not request regions\n",
 -                     pci_name(pdev));
 +              err_msg = "could not request regions";
                goto error_out_mwi;
        }
  
  
        ioaddr = pci_iomap(pdev, use_mmio, 128);
        if (!ioaddr) {
 -              printk(ERR_PFX "%s: cannot remap registers, aborting\n",
 -                     pci_name(pdev));
 +              err_msg = "cannot remap registers, aborting";
                err = -EIO;
                goto error_out_regions;
        }
        shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
                                      &shared_dma);
        if(!shared) {
 -              printk(ERR_PFX "%s: could not allocate DMA memory\n",
 -                     pci_name(pdev));
 +              err_msg = "could not allocate DMA memory";
                err = -ENOMEM;
                goto error_out_remap;
        }
         * 5) Put the card to sleep.
         */
        if (typhoon_reset(ioaddr, WaitSleep) < 0) {
 -              printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
 +              err_msg = "could not reset 3XP";
                err = -EIO;
                goto error_out_dma;
        }
        pci_set_master(pdev);
        pci_save_state(pdev);
  
 -      /* dev->name is not valid until we register, but we need to
 -       * use some common routines to initialize the card. So that those
 -       * routines print the right name, we keep our oun pointer to the name
 -       */
 -      tp->name = pci_name(pdev);
 -
        typhoon_init_interface(tp);
        typhoon_init_rings(tp);
  
        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
 -              printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
 -                     pci_name(pdev));
 +              err_msg = "cannot boot 3XP sleep image";
                err = -EIO;
                goto error_out_reset;
        }
  
        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
        if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
 -              printk(ERR_PFX "%s: cannot read MAC address\n",
 -                     pci_name(pdev));
 +              err_msg = "cannot read MAC address";
                err = -EIO;
                goto error_out_reset;
        }
        *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
  
        if(!is_valid_ether_addr(dev->dev_addr)) {
 -              printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
 -                     "aborting\n", pci_name(pdev));
 +              err_msg = "Could not obtain valid ethernet address, aborting";
                goto error_out_reset;
        }
  
         */
        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
        if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
 -              printk(ERR_PFX "%s: Could not get Sleep Image version\n",
 -                      pci_name(pdev));
 +              err_msg = "Could not get Sleep Image version";
                goto error_out_reset;
        }
  
                tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
  
        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
 -              printk(ERR_PFX "%s: cannot put adapter to sleep\n",
 -                     pci_name(pdev));
 +              err_msg = "cannot put adapter to sleep";
                err = -EIO;
                goto error_out_reset;
        }
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->features |= NETIF_F_TSO;
  
 -      if(register_netdev(dev) < 0)
 +      if(register_netdev(dev) < 0) {
 +              err_msg = "unable to register netdev";
                goto error_out_reset;
 -
 -      /* fixup our local name */
 -      tp->name = dev->name;
 +      }
  
        pci_set_drvdata(pdev, dev);
  
 -      printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
 -             dev->name, typhoon_card_info[card_id].name,
 -             use_mmio ? "MMIO" : "IO",
 -             (unsigned long long)pci_resource_start(pdev, use_mmio),
 -             dev->dev_addr);
 +      netdev_info(dev, "%s at %s 0x%llx, %pM\n",
 +                  typhoon_card_info[card_id].name,
 +                  use_mmio ? "MMIO" : "IO",
 +                  (unsigned long long)pci_resource_start(pdev, use_mmio),
 +                  dev->dev_addr);
  
        /* xp_resp still contains the response to the READ_VERSIONS command.
         * For debugging, let the user know what version he has.
                 * of version is Month/Day of build.
                 */
                u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
 -              printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
 -                      "%02u/%02u/2000\n", dev->name, monthday >> 8,
 -                      monthday & 0xff);
 +              netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
 +                          monthday >> 8, monthday & 0xff);
        } else if(xp_resp[0].numDesc == 2) {
                /* This is the Typhoon 1.1+ type Sleep Image
                 */
                u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
                u8 *ver_string = (u8 *) &xp_resp[1];
                ver_string[25] = 0;
 -              printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
 -                      "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
 -                      (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
 -                      ver_string);
 +              netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
 +                          sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
 +                          sleep_ver & 0xfff, ver_string);
        } else {
 -              printk(KERN_WARNING "%s: Unknown Sleep Image version "
 -                      "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
 -                      le32_to_cpu(xp_resp[0].parm2));
 +              netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
 +                          xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
        }
  
        return 0;
@@@ -2586,7 -2640,6 +2586,7 @@@ error_out_disable
  error_out_dev:
        free_netdev(dev);
  error_out:
 +      pr_err("%s: %s\n", pci_name(pdev), err_msg);
        return err;
  }
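
typhoon_init_one() now records a message in err_msg at each failure point
and prints once at the shared error_out label. A condensed sketch of that
single-exit pattern; the probe function and its messages are hypothetical:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	const char *err_msg;
	int err;

	err = pci_enable_device(pdev);
	if (err < 0) {
		err_msg = "unable to enable device";
		goto error_out;
	}

	return 0;

error_out:
	/* one print site keeps every failure message formatted alike */
	pr_err("%s: %s\n", pci_name(pdev), err_msg);
	return err;
}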
  
@@@ -2611,7 -2664,7 +2611,7 @@@ typhoon_remove_one(struct pci_dev *pdev
  }
  
  static struct pci_driver typhoon_driver = {
 -      .name           = DRV_MODULE_NAME,
 +      .name           = KBUILD_MODNAME,
        .id_table       = typhoon_pci_tbl,
        .probe          = typhoon_init_one,
        .remove         = __devexit_p(typhoon_remove_one),
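
Both the pci_request_regions() label and .name above now use
KBUILD_MODNAME, which the build system defines to the module's name, so
neither string can drift from the module itself. Sketch, with members
other than .name elided:

static struct pci_driver example_driver = {
	.name	= KBUILD_MODNAME,	/* previously the literal "typhoon" */
};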
diff --combined drivers/net/ucc_geth.c
index 23a97518bc1f9ef9b38d3803127c3a6f4b4e4399,8eec97799e056de864e404c5cdd191da647e488a..1b0aef37e495a656a266eca8fa2a03a500a64a04
@@@ -37,7 -37,6 +37,7 @@@
  #include <asm/qe.h>
  #include <asm/ucc.h>
  #include <asm/ucc_fast.h>
 +#include <asm/machdep.h>
  
  #include "ucc_geth.h"
  #include "fsl_pq_mdio.h"
@@@ -430,7 -429,7 +430,7 @@@ static void hw_add_addr_in_hash(struct 
            ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
  
        /* Ethernet frames are defined in Little Endian mode,
-       therefor to insert */
+       therefore to insert */
        /* the address to the hash (Big Endian mode), we reverse the bytes.*/
  
        set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
@@@ -1335,7 -1334,7 +1335,7 @@@ static int adjust_enet_interface(struc
        struct ucc_geth __iomem *ug_regs;
        struct ucc_fast __iomem *uf_regs;
        int ret_val;
 -      u32 upsmr, maccfg2, tbiBaseAddress;
 +      u32 upsmr, maccfg2;
        u16 value;
  
        ugeth_vdbg("%s: IN", __func__);
        /* Note that this depends on proper setting in utbipar register. */
        if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
            (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
 -              tbiBaseAddress = in_be32(&ug_regs->utbipar);
 -              tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
 -              tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
 -              value = ugeth->phydev->bus->read(ugeth->phydev->bus,
 -                              (u8) tbiBaseAddress, ENET_TBI_MII_CR);
 +              struct ucc_geth_info *ug_info = ugeth->ug_info;
 +              struct phy_device *tbiphy;
 +
 +              if (!ug_info->tbi_node)
 +                      ugeth_warn("TBI mode requires that the device "
 +                              "tree specify a tbi-handle\n");
 +
 +              tbiphy = of_phy_find_device(ug_info->tbi_node);
 +              if (!tbiphy)
 +                      ugeth_warn("Could not get TBI device\n");
 +
 +              value = phy_read(tbiphy, ENET_TBI_MII_CR);
                value &= ~0x1000;       /* Turn off autonegotiation */
 -              ugeth->phydev->bus->write(ugeth->phydev->bus,
 -                              (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
 +              phy_write(tbiphy, ENET_TBI_MII_CR, value);
        }
  
        init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
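
The TBI hunk above switches from raw MII bus accesses to phylib's
of_phy_find_device()/phy_read()/phy_write(). A sketch of the same
sequence; unlike the hunk, this variant returns early when the tbi-handle
lookup fails, and that early return is a defensive assumption rather than
what the patch does. ENET_TBI_MII_CR comes from the driver's ucc_geth.h:

#include <linux/of_mdio.h>
#include <linux/phy.h>

static void example_disable_tbi_aneg(struct device_node *tbi_node)
{
	struct phy_device *tbiphy;
	u16 value;

	tbiphy = of_phy_find_device(tbi_node);
	if (!tbiphy)
		return;

	value = phy_read(tbiphy, ENET_TBI_MII_CR);
	value &= ~0x1000;	/* turn off autonegotiation */
	phy_write(tbiphy, ENET_TBI_MII_CR, value);
}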
@@@ -2002,6 -1995,7 +2002,6 @@@ static void ucc_geth_set_multi(struct n
        struct dev_mc_list *dmi;
        struct ucc_fast __iomem *uf_regs;
        struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
 -      int i;
  
        ugeth = netdev_priv(dev);
  
                        out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
                        out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
  
 -                      dmi = dev->mc_list;
 -
 -                      for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
 -
 +                      netdev_for_each_mc_addr(dmi, dev) {
                                /* Only support group multicast for now.
                                 */
                                if (!(dmi->dmi_addr[0] & 1))
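
The loop above replaces the open-coded dev->mc_list/dev->mc_count walk
with the netdev_for_each_mc_addr() iterator. Minimal sketch;
example_hash_add() is a made-up stand-in for the driver's hash insertion:

static void example_hash_add(const u8 *addr);	/* hypothetical */

static void example_set_multi(struct net_device *dev)
{
	struct dev_mc_list *dmi;

	netdev_for_each_mc_addr(dmi, dev) {
		/* group (multicast) addresses only, as in the hunk */
		if (!(dmi->dmi_addr[0] & 1))
			continue;
		example_hash_add(dmi->dmi_addr);
	}
}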
index e803a7dc6502e057f6acfc8610eb4ffcbbd66eac,6c459f5cb5df3a068bbcd5639b69d8e5e709a280..25c24f0368d8d4c921cacb60582e21708ef66cc9
@@@ -612,7 -612,7 +612,7 @@@ ssize_t i2400m_bm_cmd(struct i2400m *i2
                goto error_wait_for_ack;
        }
        rx_bytes = result;
-       /* verify the ack and read more if neccessary [result is the
+       /* verify the ack and read more if necessary [result is the
         * final amount of bytes we get in the ack]  */
        result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags);
        if (result < 0)
@@@ -1041,14 -1041,21 +1041,14 @@@ int i2400m_read_mac_addr(struct i2400m 
                dev_err(dev, "BM: read mac addr failed: %d\n", result);
                goto error_read_mac;
        }
 -      d_printf(2, dev,
 -               "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
 -               ack_buf.ack_pl[0], ack_buf.ack_pl[1],
 -               ack_buf.ack_pl[2], ack_buf.ack_pl[3],
 -               ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
 +      d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
        if (i2400m->bus_bm_mac_addr_impaired == 1) {
                ack_buf.ack_pl[0] = 0x00;
                ack_buf.ack_pl[1] = 0x16;
                ack_buf.ack_pl[2] = 0xd3;
                get_random_bytes(&ack_buf.ack_pl[3], 3);
                dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
 -                      "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
 -                      ack_buf.ack_pl[0], ack_buf.ack_pl[1],
 -                      ack_buf.ack_pl[2], ack_buf.ack_pl[3],
 -                      ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
 +                      "mac addr is %pM\n", ack_buf.ack_pl);
                result = 0;
        }
        net_dev->addr_len = ETH_ALEN;
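
Both i2400m hunks lean on the kernel's %pM printf extension, which formats
a six-byte buffer as a colon-separated MAC address. Sketch:

static void example_print_mac(struct device *dev, const u8 *mac)
{
	/* before: six byte arguments, easy to misorder */
	dev_dbg(dev, "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

	/* after: a single pointer argument */
	dev_dbg(dev, "mac addr is %pM\n", mac);
}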
index a6452af9c6c5949bb7ab744e91fa6893c7afb455,46a1e19c67877870dfe1870145a22a14f778d5c4..08dc42da0f63f567568fbd50a5f323ef0f653cdf
@@@ -194,15 -194,12 +194,15 @@@ static inline u16 ar9170_get_seq(struc
        return ar9170_get_seq_h((void *) txc->frame_data);
  }
  
 +static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
 +{
 +      return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
 +}
 +
  static inline u16 ar9170_get_tid(struct sk_buff *skb)
  {
        struct ar9170_tx_control *txc = (void *) skb->data;
 -      struct ieee80211_hdr *hdr = (void *) txc->frame_data;
 -
 -      return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
 +      return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
  }
  
  #define GET_NEXT_SEQ(seq)     ((seq + 1) & 0x0fff)
@@@ -216,10 -213,10 +216,10 @@@ static void ar9170_print_txheader(struc
        struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
        struct ieee80211_hdr *hdr = (void *) txc->frame_data;
  
 -      printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d "
 +      printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
                          "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
               wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
 -             ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr),
 +             ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
               le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
               jiffies_to_msecs(arinfo->timeout - jiffies));
  }
@@@ -394,7 -391,7 +394,7 @@@ static void ar9170_tx_fake_ampdu_status
                ieee80211_tx_status_irqsafe(ar->hw, skb);
        }
  
 -      for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) {
 +      for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
  #ifdef AR9170_QUEUE_STOP_DEBUG
                printk(KERN_DEBUG "%s: wake queue %d\n",
                       wiphy_name(ar->hw->wiphy), i);
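
The rename above, for_each_bit() to for_each_set_bit(), is purely
cosmetic; the iterator still visits each set bit in the bitmap. Sketch
mirroring the call site:

static void example_wake_queues(struct ar9170 *ar, unsigned long bitmap)
{
	unsigned int i;

	for_each_set_bit(i, &bitmap, BITS_PER_BYTE)
		ieee80211_wake_queue(ar->hw, i);
}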
@@@ -433,7 -430,7 +433,7 @@@ void ar9170_tx_callback(struct ar9170 *
        spin_lock_irqsave(&ar->tx_stats_lock, flags);
        ar->tx_stats[queue].len--;
  
 -      if (skb_queue_empty(&ar->tx_pending[queue])) {
 +      if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
  #ifdef AR9170_QUEUE_STOP_DEBUG
                printk(KERN_DEBUG "%s: wake queue %d\n",
                       wiphy_name(ar->hw->wiphy), queue);
        }
        spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
  
 -      if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
 -              ar9170_tx_ampdu_callback(ar, skb);
 -      } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
 -              arinfo->timeout = jiffies +
 -                                msecs_to_jiffies(AR9170_TX_TIMEOUT);
 -
 -              skb_queue_tail(&ar->tx_status[queue], skb);
 -      } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
 +      if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
                ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
        } else {
 -#ifdef AR9170_QUEUE_DEBUG
 -              printk(KERN_DEBUG "%s: unsupported frame flags!\n",
 -                     wiphy_name(ar->hw->wiphy));
 -              ar9170_print_txheader(ar, skb);
 -#endif /* AR9170_QUEUE_DEBUG */
 -              dev_kfree_skb_any(skb);
 +              if (info->flags & IEEE80211_TX_CTL_AMPDU) {
 +                      ar9170_tx_ampdu_callback(ar, skb);
 +              } else {
 +                      arinfo->timeout = jiffies +
 +                                msecs_to_jiffies(AR9170_TX_TIMEOUT);
 +
 +                      skb_queue_tail(&ar->tx_status[queue], skb);
 +              }
        }
  
        if (!ar->tx_stats[queue].len &&
@@@ -1405,6 -1407,17 +1405,6 @@@ static int ar9170_tx_prepare(struct ar9
  
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
             (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
 -              if (info->flags & IEEE80211_TX_CTL_AMPDU) {
 -                      if (unlikely(!info->control.sta))
 -                              goto err_out;
 -
 -                      txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
 -                      arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
 -
 -                      goto out;
 -              }
 -
 -              txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
                /*
                 * WARNING:
                 * Putting the QoS queue bits into an unexplored territory is
  
                txc->phy_control |=
                        cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
 -              arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
 -      } else {
 -              arinfo->flags = AR9170_TX_FLAG_NO_ACK;
 +
 +              if (info->flags & IEEE80211_TX_CTL_AMPDU) {
 +                      if (unlikely(!info->control.sta))
 +                              goto err_out;
 +
 +                      txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
 +              } else {
 +                      txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
 +              }
        }
  
 -out:
        return 0;
  
  err_out:
@@@ -1663,7 -1671,8 +1663,7 @@@ static bool ar9170_tx_ampdu(struct ar91
                 * tell the FW/HW that this is the last frame,
                 * that way it will wait for the immediate block ack.
                 */
 -              if (likely(skb_peek_tail(&agg)))
 -                      ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
 +              ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
  
  #ifdef AR9170_TXAGG_DEBUG
                printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
@@@ -1707,21 -1716,6 +1707,21 @@@ static void ar9170_tx(struct ar9170 *ar
  
        for (i = 0; i < __AR9170_NUM_TXQ; i++) {
                spin_lock_irqsave(&ar->tx_stats_lock, flags);
 +              frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
 +                           skb_queue_len(&ar->tx_pending[i]));
 +
 +              if (remaining_space < frames) {
 +#ifdef AR9170_QUEUE_DEBUG
 +                      printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
 +                             "remaining slots:%d, needed:%d\n",
 +                             wiphy_name(ar->hw->wiphy), i, remaining_space,
 +                             frames);
 +#endif /* AR9170_QUEUE_DEBUG */
 +                      frames = remaining_space;
 +              }
 +
 +              ar->tx_stats[i].len += frames;
 +              ar->tx_stats[i].count += frames;
                if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
  #ifdef AR9170_QUEUE_DEBUG
                        printk(KERN_DEBUG "%s: queue %d full\n",
                        __ar9170_dump_txstats(ar);
  #endif /* AR9170_QUEUE_STOP_DEBUG */
                        ieee80211_stop_queue(ar->hw, i);
 -                      spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
 -                      continue;
 -              }
 -
 -              frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
 -                           skb_queue_len(&ar->tx_pending[i]));
 -
 -              if (remaining_space < frames) {
 -#ifdef AR9170_QUEUE_DEBUG
 -                      printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
 -                             "remaining slots:%d, needed:%d\n",
 -                             wiphy_name(ar->hw->wiphy), i, remaining_space,
 -                             frames);
 -#endif /* AR9170_QUEUE_DEBUG */
 -                      frames = remaining_space;
                }
  
 -              ar->tx_stats[i].len += frames;
 -              ar->tx_stats[i].count += frames;
                spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
  
                if (!frames)
                        arinfo->timeout = jiffies +
                                          msecs_to_jiffies(AR9170_TX_TIMEOUT);
  
 -                      if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
 +                      if (info->flags & IEEE80211_TX_CTL_AMPDU)
                                atomic_inc(&ar->tx_ampdu_pending);
  
  #ifdef AR9170_QUEUE_DEBUG
  
                        err = ar->tx(ar, skb);
                        if (unlikely(err)) {
 -                              if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
 +                              if (info->flags & IEEE80211_TX_CTL_AMPDU)
                                        atomic_dec(&ar->tx_ampdu_pending);
  
                                frames_failed++;
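
The ar9170_tx() reshuffle above reserves queue slots before the queue-full
test, so ieee80211_stop_queue() sees the post-reservation length and the
early unlock-and-continue path disappears. Condensed sketch of the locking
order; the helper name is hypothetical:

static unsigned int example_reserve_tx(struct ar9170 *ar, unsigned int i,
				       unsigned int pending)
{
	unsigned long flags;
	unsigned int frames;

	spin_lock_irqsave(&ar->tx_stats_lock, flags);
	frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len, pending);
	ar->tx_stats[i].len += frames;		/* reserve first ... */
	if (ar->tx_stats[i].len >= ar->tx_stats[i].limit)
		ieee80211_stop_queue(ar->hw, i);	/* ... then stop */
	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);

	return frames;
}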
@@@ -1939,7 -1950,7 +1939,7 @@@ err_free
  }
  
  static int ar9170_op_add_interface(struct ieee80211_hw *hw,
 -                                 struct ieee80211_if_init_conf *conf)
 +                                 struct ieee80211_vif *vif)
  {
        struct ar9170 *ar = hw->priv;
        struct ath_common *common = &ar->common;
                goto unlock;
        }
  
 -      ar->vif = conf->vif;
 -      memcpy(common->macaddr, conf->mac_addr, ETH_ALEN);
 +      ar->vif = vif;
 +      memcpy(common->macaddr, vif->addr, ETH_ALEN);
  
        if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
                ar->rx_software_decryption = true;
@@@ -1973,7 -1984,7 +1973,7 @@@ unlock
  }
  
  static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
 -                                     struct ieee80211_if_init_conf *conf)
 +                                     struct ieee80211_vif *vif)
  {
        struct ar9170 *ar = hw->priv;
  
        return err;
  }
  
 -static void ar9170_sta_notify(struct ieee80211_hw *hw,
 -                            struct ieee80211_vif *vif,
 -                            enum sta_notify_cmd cmd,
 -                            struct ieee80211_sta *sta)
 +static int ar9170_sta_add(struct ieee80211_hw *hw,
 +                        struct ieee80211_vif *vif,
 +                        struct ieee80211_sta *sta)
  {
        struct ar9170 *ar = hw->priv;
        struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
        unsigned int i;
  
 -      switch (cmd) {
 -      case STA_NOTIFY_ADD:
 -              memset(sta_info, 0, sizeof(*sta_info));
 +      memset(sta_info, 0, sizeof(*sta_info));
  
 -              if (!sta->ht_cap.ht_supported)
 -                      break;
 +      if (!sta->ht_cap.ht_supported)
 +              return 0;
  
 -              if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
 -                      ar->global_ampdu_density = sta->ht_cap.ampdu_density;
 +      if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
 +              ar->global_ampdu_density = sta->ht_cap.ampdu_density;
  
 -              if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
 -                      ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
 +      if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
 +              ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
  
 -              for (i = 0; i < AR9170_NUM_TID; i++) {
 -                      sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
 -                      sta_info->agg[i].active = false;
 -                      sta_info->agg[i].ssn = 0;
 -                      sta_info->agg[i].retry = 0;
 -                      sta_info->agg[i].tid = i;
 -                      INIT_LIST_HEAD(&sta_info->agg[i].list);
 -                      skb_queue_head_init(&sta_info->agg[i].queue);
 -              }
 +      for (i = 0; i < AR9170_NUM_TID; i++) {
 +              sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
 +              sta_info->agg[i].active = false;
 +              sta_info->agg[i].ssn = 0;
 +              sta_info->agg[i].tid = i;
 +              INIT_LIST_HEAD(&sta_info->agg[i].list);
 +              skb_queue_head_init(&sta_info->agg[i].queue);
 +      }
  
 -              sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
 -              break;
 +      sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
  
 -      case STA_NOTIFY_REMOVE:
 -              if (!sta->ht_cap.ht_supported)
 -                      break;
 +      return 0;
 +}
  
 -              for (i = 0; i < AR9170_NUM_TID; i++) {
 -                      sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
 -                      skb_queue_purge(&sta_info->agg[i].queue);
 -              }
 +static int ar9170_sta_remove(struct ieee80211_hw *hw,
 +                           struct ieee80211_vif *vif,
 +                           struct ieee80211_sta *sta)
 +{
 +      struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
 +      unsigned int i;
  
 -              break;
 +      if (!sta->ht_cap.ht_supported)
 +              return 0;
  
 -      default:
 -              break;
 +      for (i = 0; i < AR9170_NUM_TID; i++) {
 +              sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
 +              skb_queue_purge(&sta_info->agg[i].queue);
        }
 +
 +      return 0;
  }
  
  static int ar9170_get_stats(struct ieee80211_hw *hw,
        return 0;
  }
  
 -static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
 -                             struct ieee80211_tx_queue_stats *tx_stats)
 -{
 -      struct ar9170 *ar = hw->priv;
 -
 -      spin_lock_bh(&ar->tx_stats_lock);
 -      memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
 -      spin_unlock_bh(&ar->tx_stats_lock);
 -
 -      return 0;
 -}
 -
  static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
                          const struct ieee80211_tx_queue_params *param)
  {
@@@ -2496,9 -2519,9 +2496,9 @@@ static const struct ieee80211_ops ar917
        .bss_info_changed       = ar9170_op_bss_info_changed,
        .get_tsf                = ar9170_op_get_tsf,
        .set_key                = ar9170_set_key,
 -      .sta_notify             = ar9170_sta_notify,
 +      .sta_add                = ar9170_sta_add,
 +      .sta_remove             = ar9170_sta_remove,
        .get_stats              = ar9170_get_stats,
 -      .get_tx_stats           = ar9170_get_tx_stats,
        .ampdu_action           = ar9170_ampdu_action,
  };
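
mac80211 replaced the STA_NOTIFY_ADD/STA_NOTIFY_REMOVE commands with
dedicated sta_add()/sta_remove() callbacks that may fail, which is why the
sta_notify switch above unfolds into the two functions wired into the ops
table. Skeleton only, with the driver bodies elided:

static int example_sta_add(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta)
{
	/* per-station setup formerly under case STA_NOTIFY_ADD */
	return 0;
}

static int example_sta_remove(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta)
{
	/* teardown formerly under case STA_NOTIFY_REMOVE */
	return 0;
}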
  
@@@ -2512,7 -2535,7 +2512,7 @@@ void *ar9170_alloc(size_t priv_size
        /*
         * this buffer is used for rx stream reconstruction.
         * Under heavy load this device (or the transport layer?)
-        * tends to split the streams into seperate rx descriptors.
+        * tends to split the streams into separate rx descriptors.
         */
  
        skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
index ee34c137e7cdf45cdaafdb33d63511ee9bb428b0,486c93559c29c09c98a8af92e36eee46c18a7b00..9b04964deced458e85a8c814580db05052838fb3
@@@ -368,7 -368,7 +368,7 @@@ static int rt2500usb_config_key(struct 
  
                /*
                 * The encryption key doesn't fit within the CSR cache,
-                * this means we should allocate it seperately and use
+                * this means we should allocate it separately and use
                 * rt2x00usb_vendor_request() to send the key to the hardware.
                 */
                reg = KEY_ENTRY(key->hw_key_idx);
                /*
                 * The driver does not support the IV/EIV generation
                 * in hardware. However it demands the data to be provided
-                * both seperately as well as inside the frame.
+                * both separately as well as inside the frame.
                 * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib
                 * to ensure rt2x00lib will not strip the data from the
                 * frame after the copy, now we must tell mac80211
@@@ -565,7 -565,8 +565,7 @@@ static void rt2500usb_config_ant(struc
        /*
         * RT2525E and RT5222 need to flip TX I/Q
         */
 -      if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
 -          rt2x00_rf(&rt2x00dev->chip, RF5222)) {
 +      if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
                rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
                rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1);
                rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1);
                /*
                 * RT2525E does not need RX I/Q Flip.
                 */
 -              if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
 +              if (rt2x00_rf(rt2x00dev, RF2525E))
                        rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
        } else {
                rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0);
@@@ -597,7 -598,7 +597,7 @@@ static void rt2500usb_config_channel(st
        /*
         * For RT2525E we should first set the channel to half band higher.
         */
 -      if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
 +      if (rt2x00_rf(rt2x00dev, RF2525E)) {
                static const u32 vals[] = {
                        0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
                        0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
@@@ -792,7 -793,7 +792,7 @@@ static int rt2500usb_init_registers(str
        rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
        rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
  
 -      if (rt2x00_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) {
 +      if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
                rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
                rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
        } else {
@@@ -1408,18 -1409,21 +1408,18 @@@ static int rt2500usb_init_eeprom(struc
        value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
        rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
        rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
 -      rt2x00_print_chip(rt2x00dev);
 -
 -      if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) ||
 -          rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
  
 +      if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) {
                ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
                return -ENODEV;
        }
  
 -      if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF2524) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF2525) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF2525E) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF5222)) {
 +      if (!rt2x00_rf(rt2x00dev, RF2522) &&
 +          !rt2x00_rf(rt2x00dev, RF2523) &&
 +          !rt2x00_rf(rt2x00dev, RF2524) &&
 +          !rt2x00_rf(rt2x00dev, RF2525) &&
 +          !rt2x00_rf(rt2x00dev, RF2525E) &&
 +          !rt2x00_rf(rt2x00dev, RF5222)) {
                ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
                return -ENODEV;
        }
@@@ -1663,22 -1667,22 +1663,22 @@@ static int rt2500usb_probe_hw_mode(stru
        spec->supported_bands = SUPPORT_BAND_2GHZ;
        spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
  
 -      if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
 +      if (rt2x00_rf(rt2x00dev, RF2522)) {
                spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
                spec->channels = rf_vals_bg_2522;
 -      } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) {
 +      } else if (rt2x00_rf(rt2x00dev, RF2523)) {
                spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
                spec->channels = rf_vals_bg_2523;
 -      } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) {
 +      } else if (rt2x00_rf(rt2x00dev, RF2524)) {
                spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
                spec->channels = rf_vals_bg_2524;
 -      } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
 +      } else if (rt2x00_rf(rt2x00dev, RF2525)) {
                spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
                spec->channels = rf_vals_bg_2525;
 -      } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
 +      } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
                spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
                spec->channels = rf_vals_bg_2525e;
 -      } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
 +      } else if (rt2x00_rf(rt2x00dev, RF5222)) {
                spec->supported_bands |= SUPPORT_BAND_5GHZ;
                spec->num_channels = ARRAY_SIZE(rf_vals_5222);
                spec->channels = rf_vals_5222;
@@@ -1759,6 -1763,7 +1759,6 @@@ static const struct ieee80211_ops rt250
        .get_stats              = rt2x00mac_get_stats,
        .bss_info_changed       = rt2x00mac_bss_info_changed,
        .conf_tx                = rt2x00mac_conf_tx,
 -      .get_tx_stats           = rt2x00mac_get_tx_stats,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
  };
  
index 5e4ee2023fcfc5874facf1518a2ff49293e435aa,2e5c8a13758bb2bfdcabfea5a8f78bef8a9961db..d27d7d5d850cc6ea7c63b85f29da3c010c9d6c37
@@@ -92,6 -92,7 +92,6 @@@ static bool rt2800usb_check_crc(const u
  static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
                                    const u8 *data, const size_t len)
  {
 -      u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
        size_t offset = 0;
  
        /*
         * There are 2 variations of the rt2870 firmware.
         * a) size: 4kb
         * b) size: 8kb
-        * Note that (b) contains 2 seperate firmware blobs of 4k
+        * Note that (b) contains 2 separate firmware blobs of 4k
         * within the file. The first blob is the same firmware as (a),
         * but the second blob is for the additional chipsets.
         */
         * Check if we need the upper 4kb firmware data or not.
         */
        if ((len == 4096) &&
 -          (chipset != 0x2860) &&
 -          (chipset != 0x2872) &&
 -          (chipset != 0x3070))
 +          !rt2x00_rt(rt2x00dev, RT2860) &&
 +          !rt2x00_rt(rt2x00dev, RT2872) &&
 +          !rt2x00_rt(rt2x00dev, RT3070))
                return FW_BAD_VERSION;
  
        /*
         * 8kb firmware files must be checked as if it were
-        * 2 seperate firmware files.
+        * 2 separate firmware files.
         */
        while (offset < len) {
                if (!rt2800usb_check_crc(data + offset, 4096))
@@@ -137,13 -138,14 +137,13 @@@ static int rt2800usb_load_firmware(stru
        u32 reg;
        u32 offset;
        u32 length;
 -      u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
  
        /*
         * Check which section of the firmware we need.
         */
 -      if ((chipset == 0x2860) ||
 -          (chipset == 0x2872) ||
 -          (chipset == 0x3070)) {
 +      if (rt2x00_rt(rt2x00dev, RT2860) ||
 +          rt2x00_rt(rt2x00dev, RT2872) ||
 +          rt2x00_rt(rt2x00dev, RT3070)) {
                offset = 0;
                length = 4096;
        } else {
         */
        rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
  
 -      if ((chipset == 0x3070) ||
 -          (chipset == 0x3071) ||
 -          (chipset == 0x3572)) {
 +      if (rt2x00_rt(rt2x00dev, RT3070) ||
 +          rt2x00_rt(rt2x00dev, RT3071) ||
 +          rt2x00_rt(rt2x00dev, RT3572)) {
                udelay(200);
                rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
                udelay(10);
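
Same idea as the rt2x00_rf() cleanup: rather than extracting a chipset id
by hand from the revision word, the firmware paths now ask rt2x00_rt().
Sketch; the predicate name is hypothetical:

static bool example_wants_low_4kb_blob(struct rt2x00_dev *rt2x00dev)
{
	/* was: chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
	 *      followed by comparisons against 0x2860/0x2872/0x3070   */
	return rt2x00_rt(rt2x00dev, RT2860) ||
	       rt2x00_rt(rt2x00dev, RT2872) ||
	       rt2x00_rt(rt2x00dev, RT3070);
}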
@@@ -246,6 -248,24 +246,6 @@@ static void rt2800usb_toggle_rx(struct 
        rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
  }
  
 -static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
 -{
 -      unsigned int i;
 -      u32 reg;
 -
 -      for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
 -              rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
 -              if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
 -                  !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
 -                      return 0;
 -
 -              msleep(1);
 -      }
 -
 -      ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
 -      return -EACCES;
 -}
 -
  static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
  {
        u32 reg;
        /*
         * Initialize all registers.
         */
 -      if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
 +      if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
                     rt2800_init_registers(rt2x00dev) ||
                     rt2800_init_bbp(rt2x00dev) ||
                     rt2800_init_rfcsr(rt2x00dev)))
  
        rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
        rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
 -      /* Don't use bulk in aggregation when working with USB 1.1 */
 -      rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
 -                         (rt2x00dev->rx->usb_maxpacket == 512));
 +      rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
        rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
        /*
         * Total room for RX frames in kilobytes, PBF might still exceed
@@@ -324,7 -346,7 +324,7 @@@ static void rt2800usb_disable_radio(str
        rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
  
        /* Wait for DMA, ignore error */
 -      rt2800usb_wait_wpdma_ready(rt2x00dev);
 +      rt2800_wait_wpdma_ready(rt2x00dev);
  
        rt2x00usb_disable_radio(rt2x00dev);
  }
@@@ -551,57 -573,41 +551,57 @@@ static void rt2800usb_fill_rxdone(struc
  {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 -      __le32 *rxd = (__le32 *)entry->skb->data;
 +      __le32 *rxi = (__le32 *)entry->skb->data;
        __le32 *rxwi;
 -      u32 rxd0;
 +      __le32 *rxd;
 +      u32 rxi0;
        u32 rxwi0;
        u32 rxwi1;
        u32 rxwi2;
        u32 rxwi3;
 +      u32 rxd0;
 +      int rx_pkt_len;
 +
 +      /*
 +       * RX frame format is :
 +       * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
 +       *          |<------------ rx_pkt_len -------------->|
 +       */
 +      rt2x00_desc_read(rxi, 0, &rxi0);
 +      rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
 +
 +      rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
 +
 +      /*
 +       * FIXME : we need to check for rx_pkt_len validity
 +       */
 +      rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
  
        /*
         * Copy descriptor to the skbdesc->desc buffer, making it safe from
         * moving of frame data in rt2x00usb.
         */
 -      memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
 -      rxd = (__le32 *)skbdesc->desc;
 -      rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
 +      memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
  
        /*
         * It is now safe to read the descriptor on all architectures.
         */
 -      rt2x00_desc_read(rxd, 0, &rxd0);
        rt2x00_desc_read(rxwi, 0, &rxwi0);
        rt2x00_desc_read(rxwi, 1, &rxwi1);
        rt2x00_desc_read(rxwi, 2, &rxwi2);
        rt2x00_desc_read(rxwi, 3, &rxwi3);
 +      rt2x00_desc_read(rxd, 0, &rxd0);
  
 -      if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR))
 +      if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
                rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
  
        if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
                rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
                rxdesc->cipher_status =
 -                  rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR);
 +                  rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
        }
  
 -      if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) {
 +      if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
                /*
                 * Hardware has stripped IV/EIV data from 802.11 frame during
                 * decryption. Unfortunately the descriptor doesn't contain
                        rxdesc->flags |= RX_FLAG_MMIC_ERROR;
        }
  
 -      if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS))
 +      if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
                rxdesc->dev_flags |= RXDONE_MY_BSS;
  
 -      if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) {
 +      if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
                rxdesc->dev_flags |= RXDONE_L2PAD;
 -              skbdesc->flags |= SKBDESC_L2_PADDED;
 -      }
  
        if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
                rxdesc->flags |= RX_FLAG_SHORT_GI;
         * Remove RXWI descriptor from start of buffer.
         */
        skb_pull(entry->skb, skbdesc->desc_len);
 -      skb_trim(entry->skb, rxdesc->size);
  }
  
  /*
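
rt2800usb_fill_rxdone() now derives all three descriptor pointers from the
frame layout documented in the hunk (RXINFO | RXWI | header | L2 pad |
payload | pad | RXD | USB pad). A sketch of just that pointer arithmetic,
under the same FIXME that rx_pkt_len is not yet validated:

static void example_locate_descs(struct sk_buff *skb)
{
	__le32 *rxi = (__le32 *)skb->data;
	__le32 *rxwi, *rxd;
	u32 rxi0;
	int rx_pkt_len;

	rt2x00_desc_read(rxi, 0, &rxi0);
	rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);

	rxwi = (__le32 *)(skb->data + RXINFO_DESC_SIZE);	/* after RXINFO */
	rxd = (__le32 *)(skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
}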
@@@ -805,27 -814,51 +805,27 @@@ static struct usb_device_id rt2800usb_d
        /* Abocom */
        { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* AirTies */
 -      { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Amigo */
 -      { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Amit */
        { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Askey */
        { USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* ASUS */
        { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* AzureWave */
        { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Belkin */
        { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Buffalo */
        { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Cisco */
 -      { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Conceptronic */
        { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* D-Link */
        { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Edimax */
 +      { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* EnGenius */
 +      { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Gigabyte */
 +      { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Hawking */
 +      { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Linksys */
 +      { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Logitec */
 +      { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Motorola */
 +      { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* MSI */
 +      { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Philips */
 +      { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Planex */
 +      { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Ralink */
 +      { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Samsung */
 +      { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Siemens */
 +      { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Sitecom */
 +      { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* SMC */
 +      { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Sparklan */
 +      { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Sweex */
 +      { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* U-Media*/
 +      { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* ZCOM */
 +      { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Zinwell */
 +      { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Zyxel */
 +      { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
 +#ifdef CONFIG_RT2800USB_RT30XX
 +      /* Abocom */
 +      { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* AirTies */
 +      { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* AzureWave */
 +      { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Conceptronic */
 +      { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Corega */
 +      { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* D-Link */
        { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Edimax */
        { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Encore */
        { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* EnGenius */
 -      { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Gigabyte */
 +      { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* I-O DATA */
 +      { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* MSI */
 +      { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Pegatron */
 +      { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Planex */
 +      { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Quanta */
 +      { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Ralink */
 +      { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Sitecom */
 +      { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* SMC */
 +      { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Zinwell */
 +      { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
 +#endif
 +#ifdef CONFIG_RT2800USB_RT35XX
 +      /* Askey */
 +      { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Cisco */
 +      { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* EnGenius */
 +      { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* I-O DATA */
 +      { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Ralink */
 +      { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Sitecom */
 +      { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Zinwell */
 +      { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
 +#endif
 +#ifdef CONFIG_RT2800USB_UNKNOWN
 +      /*
 +       * Unclear what kind of devices these are (they aren't supported by the
 +       * vendor driver).
 +       */
 +      /* Allwin */
 +      { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Amigo */
 +      { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Askey */
 +      { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* ASUS */
 +      { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* AzureWave */
 +      { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Belkin */
 +      { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Buffalo */
 +      { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0411, 0x0148), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0411, 0x0150), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0411, 0x015d), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Conceptronic */
 +      { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Corega */
 +      { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* D-Link */
 +      { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* Encore */
 +      { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      /* EnGenius */
        { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Gemtek */
        { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Gigabyte */
 -      { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Hawking */
 -      { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* I-O DATA */
 -      { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* LevelOne */
        { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Linksys */
 -      { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x1737, 0x0078), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Logitec */
 -      { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Motorola */
 -      { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* MSI */
 -      { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Ovislink */
        { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Para */
        { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Pegatron */
 +      { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Philips */
 -      { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Planex */
 -      { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Qcom */
        { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Quanta */
 -      { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Ralink */
 -      { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Samsung */
 -      { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Siemens */
 -      { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Sitecom */
 -      { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* SMC */
 -      { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Sparklan */
 -      { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
 +      { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Sweex */
        { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* U-Media*/
 -      { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* ZCOM */
 -      { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      /* Zinwell */
 -      { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
 -      { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Zyxel */
 -      { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) },
 +#endif
        { 0, }
  };
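
The id tables above pair each USB id with the driver ops via USB_DEVICE_DATA(), which stashes a pointer in the id's driver_info field for probe() to read back. Below is a minimal userspace sketch of that pattern; the struct layout and the rt2800usb_ops stand-in are illustrative assumptions, not the kernel definitions:

#include <stdio.h>

struct usb_device_id {
        unsigned short idVendor, idProduct;
        unsigned long driver_info;      /* filled by USB_DEVICE_DATA() */
};

static const int rt2800usb_ops;         /* stand-in for the real ops struct */

static const struct usb_device_id table[] = {
        { 0x148f, 0x3070, (unsigned long)&rt2800usb_ops },     /* Ralink */
        { 0, 0, 0 },                    /* terminator, as in the table above */
};

int main(void)
{
        /* probe() recovers the ops from whichever entry matched */
        const struct usb_device_id *id = &table[0];
        printf("%04x:%04x -> ops at %p\n", id->idVendor, id->idProduct,
               (const void *)id->driver_info);
        return 0;
}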
  
index 70c04c282efc0bf7f34cc60c7fcbbb32793d03d9,afee806affc2ff18f889523912a2ec3c6ff8d61b..28a1c46ec4eb28194e315a3a3c22baacc059b762
@@@ -109,7 -109,7 +109,7 @@@ struct rt2x00debug_intf 
  
        /*
         * HW crypto statistics.
-        * All statistics are stored seperately per cipher type.
+        * All statistics are stored separately per cipher type.
         */
        struct rt2x00debug_crypto crypto_stats[CIPHER_MAX];
  
@@@ -184,7 -184,7 +184,7 @@@ void rt2x00debug_dump_frame(struct rt2x
        dump_hdr->data_length = cpu_to_le32(skb->len);
        dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
        dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
 -      dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev);
 +      dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
        dump_hdr->type = cpu_to_le16(type);
        dump_hdr->queue_index = desc->entry->queue->qid;
        dump_hdr->entry_index = desc->entry->entry_idx;
@@@ -573,7 -573,7 +573,7 @@@ static struct dentry *rt2x00debug_creat
        blob->data = data;
        data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt);
        data += sprintf(data, "rf chip:\t%04x\n", intf->rt2x00dev->chip.rf);
 -      data += sprintf(data, "revision:\t%08x\n", intf->rt2x00dev->chip.rev);
 +      data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev);
        data += sprintf(data, "\n");
        data += sprintf(data, "register\tbase\twords\twordsize\n");
        data += sprintf(data, "csr\t%d\t%d\t%d\n",
index b93731b79903308d06031326061fde8cde71ac8d,5e1d5167fff4151e58728a887f6890cb1db498e4..dd5ab8fe232180dfc0a44c19330d649780a171ae
@@@ -385,6 -385,9 +385,6 @@@ void rt2x00lib_rxdone(struct rt2x00_de
        memset(&rxdesc, 0, sizeof(rxdesc));
        rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
  
 -      /* Trim buffer to correct size */
 -      skb_trim(entry->skb, rxdesc.size);
 -
        /*
         * The data behind the ieee80211 header must be
         * aligned on a 4 byte boundary.
        /*
         * Hardware might have stripped the IV/EIV/ICV data,
         * in that case it is possible that the data was
-        * provided seperately (through hardware descriptor)
+        * provided separately (through hardware descriptor)
         * in which case we should reinsert the data into the frame.
         */
        if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) &&
            (rxdesc.flags & RX_FLAG_IV_STRIPPED))
                rt2x00crypto_rx_insert_iv(entry->skb, header_length,
                                          &rxdesc);
 -      else if (rxdesc.dev_flags & RXDONE_L2PAD)
 +      else if (header_length &&
 +               (rxdesc.size > header_length) &&
 +               (rxdesc.dev_flags & RXDONE_L2PAD))
                rt2x00queue_remove_l2pad(entry->skb, header_length);
        else
                rt2x00queue_align_payload(entry->skb, header_length);
  
 +      /* Trim buffer to correct size */
 +      skb_trim(entry->skb, rxdesc.size);
 +
        /*
         * Check if the frame was received using HT. In that case,
         * the rate is the MCS index and should be passed to mac80211
index 0b4801a14601fa85b9878a6b4d8d32983411ae43,38ffca9b0fe749cc45ee5b5101786df97054353f..5b6b789cad3d6a4f0bd88ad0747e53aabcaf4774
@@@ -177,45 -177,55 +177,45 @@@ void rt2x00queue_align_payload(struct s
  
  void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
  {
 -      struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
 -      unsigned int frame_length = skb->len;
 +      unsigned int payload_length = skb->len - header_length;
        unsigned int header_align = ALIGN_SIZE(skb, 0);
        unsigned int payload_align = ALIGN_SIZE(skb, header_length);
 -      unsigned int l2pad = 4 - (payload_align - header_align);
 +      unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
  
 -      if (header_align == payload_align) {
 -              /*
 -               * Both header and payload must be moved the same
 -               * amount of bytes to align them properly. This means
 -               * we don't use the L2 padding but just move the entire
 -               * frame.
 -               */
 -              rt2x00queue_align_frame(skb);
 -      } else if (!payload_align) {
 -              /*
 -               * Simple L2 padding, only the header needs to be moved,
 -               * the payload is already properly aligned.
 -               */
 -              skb_push(skb, header_align);
 -              memmove(skb->data, skb->data + header_align, frame_length);
 -              skbdesc->flags |= SKBDESC_L2_PADDED;
 -      } else {
 -              /*
 -               *
 -               * Complicated L2 padding, both header and payload need
 -               * to be moved. By default we only move to the start
 -               * of the buffer, so our header alignment needs to be
 -               * increased if there is not enough room for the header
 -               * to be moved.
 -               */
 -              if (payload_align > header_align)
 -                      header_align += 4;
 +      /*
 +       * Adjust the header alignment if the payload needs to be moved more
 +       * than the header.
 +       */
 +      if (payload_align > header_align)
 +              header_align += 4;
 +
 +      /* There is nothing to do if no alignment is needed */
 +      if (!header_align)
 +              return;
 +
 +      /* Reserve the amount of space needed in front of the frame */
 +      skb_push(skb, header_align);
 +
 +      /*
 +       * Move the header.
 +       */
 +      memmove(skb->data, skb->data + header_align, header_length);
  
 -              skb_push(skb, header_align);
 -              memmove(skb->data, skb->data + header_align, header_length);
 +      /* Move the payload, if present and if required */
 +      if (payload_length && payload_align)
                memmove(skb->data + header_length + l2pad,
                        skb->data + header_length + l2pad + payload_align,
 -                      frame_length - header_length);
 -              skbdesc->flags |= SKBDESC_L2_PADDED;
 -      }
 +                      payload_length);
 +
 +      /* Trim the skb to the correct size */
 +      skb_trim(skb, header_length + l2pad + payload_length);
  }
  
  void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
  {
 -      struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
 -      unsigned int l2pad = 4 - (header_length & 3);
 +      unsigned int l2pad = L2PAD_SIZE(header_length);
  
 -      if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED))
 +      if (!l2pad)
                return;
  
        memmove(skb->data + l2pad, skb->data, header_length);
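
Both helpers now take the pad width from L2PAD_SIZE() instead of the open-coded "4 - (x & 3)", which yields 4 rather than 0 for an already-aligned header. A runnable check of the assumed macro; the definition below is inferred from its use here, not quoted from rt2x00.h:

#include <stdio.h>

/* bytes needed to round an 802.11 header up to a 4-byte boundary */
#define L2PAD_SIZE(hdrlen)      (-(hdrlen) & 3)

int main(void)
{
        int hdr;
        for (hdr = 24; hdr <= 32; hdr += 2)     /* common header sizes */
                printf("header %2d -> l2pad %d\n", hdr, L2PAD_SIZE(hdr));
        return 0;
}

For a 24-byte header this prints 0 where the old expression gave 4, which is why the SKBDESC_L2_PADDED flag and its guard could be dropped.
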
@@@ -336,9 -346,7 +336,9 @@@ static void rt2x00queue_create_tx_descr
         * Header and alignment information.
         */
        txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
 -      txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);
 +      if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
 +          (entry->skb->len > txdesc->header_length))
 +              txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
  
        /*
         * Check whether this frame is to be acked.
  
        /*
         * Beacons and probe responses require the tsf timestamp
 -       * to be inserted into the frame.
 +       * to be inserted into the frame, except for a frame that has been
 +       * injected through a monitor interface; the latter exception is
 +       * needed for testing monitor interfaces.
         */
 -      if (ieee80211_is_beacon(hdr->frame_control) ||
 -          ieee80211_is_probe_resp(hdr->frame_control))
 +      if ((ieee80211_is_beacon(hdr->frame_control) ||
 +          ieee80211_is_probe_resp(hdr->frame_control)) &&
 +          (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
  
        /*
@@@ -497,7 -502,7 +497,7 @@@ int rt2x00queue_write_tx_frame(struct d
        /*
         * When hardware encryption is supported, and this frame
         * is to be encrypted, we should strip the IV/EIV data from
-        * the frame so we can provide it to the driver seperately.
+        * the frame so we can provide it to the driver separately.
         */
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
            !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
index e2da928dd9f067b3008ae510be2b9806b1919517,99459db61efd051794d124bc0cf8f89248159d2c..17747274217243792f462cf9e6c0009cd674f44f
@@@ -476,7 -476,7 +476,7 @@@ static int rt61pci_config_pairwise_key(
                 * The driver does not support the IV/EIV generation
                 * in hardware. However it doesn't support the IV/EIV
                 * inside the ieee80211 frame either, but requires it
-                * to be provided seperately for the descriptor.
+                * to be provided separately for the descriptor.
                 * rt2x00lib will cut the IV/EIV data out of all frames
                 * given to us by mac80211, but we must tell mac80211
                 * to generate the IV/EIV data.
@@@ -637,7 -637,8 +637,7 @@@ static void rt61pci_config_antenna_5x(s
        rt61pci_bbp_read(rt2x00dev, 4, &r4);
        rt61pci_bbp_read(rt2x00dev, 77, &r77);
  
 -      rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
 -                        rt2x00_rf(&rt2x00dev->chip, RF5325));
 +      rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325));
  
        /*
         * Configure the RX antenna.
@@@ -683,7 -684,8 +683,7 @@@ static void rt61pci_config_antenna_2x(s
        rt61pci_bbp_read(rt2x00dev, 4, &r4);
        rt61pci_bbp_read(rt2x00dev, 77, &r77);
  
 -      rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
 -                        rt2x00_rf(&rt2x00dev->chip, RF2529));
 +      rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
        rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
                          !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags));
  
@@@ -831,11 -833,12 +831,11 @@@ static void rt61pci_config_ant(struct r
  
        rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg);
  
 -      if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
 -          rt2x00_rf(&rt2x00dev->chip, RF5325))
 +      if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325))
                rt61pci_config_antenna_5x(rt2x00dev, ant);
 -      else if (rt2x00_rf(&rt2x00dev->chip, RF2527))
 +      else if (rt2x00_rf(rt2x00dev, RF2527))
                rt61pci_config_antenna_2x(rt2x00dev, ant);
 -      else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) {
 +      else if (rt2x00_rf(rt2x00dev, RF2529)) {
                if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags))
                        rt61pci_config_antenna_2x(rt2x00dev, ant);
                else
@@@ -876,7 -879,8 +876,7 @@@ static void rt61pci_config_channel(stru
        rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
        rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
  
 -      smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
 -                rt2x00_rf(&rt2x00dev->chip, RF2527));
 +      smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
  
        rt61pci_bbp_read(rt2x00dev, 3, &r3);
        rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
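
Every rt2x00_rf() call in this file drops the explicit &rt2x00dev->chip argument. A userspace sketch of the assumed new helper shape; the struct layout and the comparison are inferred from the call sites, not copied from the kernel:

#include <stdio.h>
#include <stdint.h>

struct rt2x00_chip { uint16_t rt, rf, rev; };
struct rt2x00_dev { struct rt2x00_chip chip; };

/* the helper now takes the device and hides the chip struct */
static int rt2x00_rf(const struct rt2x00_dev *rt2x00dev, uint16_t rf)
{
        return rt2x00dev->chip.rf == rf;
}

int main(void)
{
        struct rt2x00_dev dev = { .chip = { .rf = 0x5225 } };
        printf("RF5225 fitted: %d\n", rt2x00_rf(&dev, 0x5225));
        return 0;
}
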
@@@ -1131,18 -1135,16 +1131,18 @@@ dynamic_cca_tune
   */
  static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
  {
 +      u16 chip;
        char *fw_name;
  
 -      switch (rt2x00dev->chip.rt) {
 -      case RT2561:
 +      pci_read_config_word(to_pci_dev(rt2x00dev->dev), PCI_DEVICE_ID, &chip);
 +      switch (chip) {
 +      case RT2561_PCI_ID:
                fw_name = FIRMWARE_RT2561;
                break;
 -      case RT2561s:
 +      case RT2561s_PCI_ID:
                fw_name = FIRMWARE_RT2561s;
                break;
 -      case RT2661:
 +      case RT2661_PCI_ID:
                fw_name = FIRMWARE_RT2661;
                break;
        default:
@@@ -2297,13 -2299,13 +2297,13 @@@ static int rt61pci_init_eeprom(struct r
         */
        value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
        rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
 -      rt2x00_set_chip_rf(rt2x00dev, value, reg);
 -      rt2x00_print_chip(rt2x00dev);
 +      rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
 +                      value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
  
 -      if (!rt2x00_rf(&rt2x00dev->chip, RF5225) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF5325) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF2527) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF2529)) {
 +      if (!rt2x00_rf(rt2x00dev, RF5225) &&
 +          !rt2x00_rf(rt2x00dev, RF5325) &&
 +          !rt2x00_rf(rt2x00dev, RF2527) &&
 +          !rt2x00_rf(rt2x00dev, RF2529)) {
                ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
                return -ENODEV;
        }
         * the antenna settings should be gathered from the NIC
         * eeprom word.
         */
 -      if (rt2x00_rf(&rt2x00dev->chip, RF2529) &&
 +      if (rt2x00_rf(rt2x00dev, RF2529) &&
            !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) {
                rt2x00dev->default_ant.rx =
                    ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
@@@ -2569,7 -2571,8 +2569,7 @@@ static int rt61pci_probe_hw_mode(struc
                spec->channels = rf_vals_seq;
        }
  
 -      if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
 -          rt2x00_rf(&rt2x00dev->chip, RF5325)) {
 +      if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) {
                spec->supported_bands |= SUPPORT_BAND_5GHZ;
                spec->num_channels = ARRAY_SIZE(rf_vals_seq);
        }
@@@ -2732,6 -2735,7 +2732,6 @@@ static const struct ieee80211_ops rt61p
        .get_stats              = rt2x00mac_get_stats,
        .bss_info_changed       = rt2x00mac_bss_info_changed,
        .conf_tx                = rt61pci_conf_tx,
 -      .get_tx_stats           = rt2x00mac_get_tx_stats,
        .get_tsf                = rt61pci_get_tsf,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
  };
@@@ -2808,7 -2812,7 +2808,7 @@@ static const struct rt2x00_ops rt61pci_
  /*
   * RT61pci module information.
   */
 -static struct pci_device_id rt61pci_device_table[] = {
 +static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
        /* RT2561s */
        { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
        /* RT2561 v2 */
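
The hunk above switches the id table to DEFINE_PCI_DEVICE_TABLE(). A compilable sketch of what the macro is assumed to add over a bare array, namely constness plus a discard-after-init section annotation (modeled as a no-op attribute here):

#include <stdio.h>

struct pci_device_id {
        unsigned vendor, device;
        unsigned long driver_data;
};

#define __devinitconst  /* kernel section attribute; no-op in this model */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
        const struct pci_device_id _table[] __devinitconst

static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
        { 0x1814, 0x0301, 0 },  /* RT2561s */
        { 0, 0, 0 },
};

int main(void)
{
        printf("first id: %04x:%04x\n", rt61pci_device_table[0].vendor,
               rt61pci_device_table[0].device);
        return 0;
}
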
index f39a8ed17841815125760b5e67ee467978a0d6e0,527368a45fd5b3d5df40cb54f6978b99e95a79ae..e77aec8d0a840ea37db33d3bc3cae4c904658265
@@@ -136,8 -136,8 +136,8 @@@ static void rt73usb_rf_write(struct rt2
                 * all others contain 20 bits.
                 */
                rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
 -                                 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
 -                                       rt2x00_rf(&rt2x00dev->chip, RF2527)));
 +                                 20 + (rt2x00_rf(rt2x00dev, RF5225) ||
 +                                       rt2x00_rf(rt2x00dev, RF2527)));
                rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
                rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
  
@@@ -339,7 -339,7 +339,7 @@@ static int rt73usb_config_shared_key(st
                 * The driver does not support the IV/EIV generation
                 * in hardware. However it doesn't support the IV/EIV
                 * inside the ieee80211 frame either, but requires it
-                * to be provided seperately for the descriptor.
+                * to be provided separately for the descriptor.
                 * rt2x00lib will cut the IV/EIV data out of all frames
                 * given to us by mac80211, but we must tell mac80211
                 * to generate the IV/EIV data.
@@@ -439,7 -439,7 +439,7 @@@ static int rt73usb_config_pairwise_key(
                 * The driver does not support the IV/EIV generation
                 * in hardware. However it doesn't support the IV/EIV
                 * inside the ieee80211 frame either, but requires it
-                * to be provided seperately for the descriptor.
+                * to be provided separately for the descriptor.
                 * rt2x00lib will cut the IV/EIV data out of all frames
                 * given to us by mac80211, but we must tell mac80211
                 * to generate the IV/EIV data.
@@@ -741,9 -741,11 +741,9 @@@ static void rt73usb_config_ant(struct r
  
        rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
  
 -      if (rt2x00_rf(&rt2x00dev->chip, RF5226) ||
 -          rt2x00_rf(&rt2x00dev->chip, RF5225))
 +      if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225))
                rt73usb_config_antenna_5x(rt2x00dev, ant);
 -      else if (rt2x00_rf(&rt2x00dev->chip, RF2528) ||
 -               rt2x00_rf(&rt2x00dev->chip, RF2527))
 +      else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527))
                rt73usb_config_antenna_2x(rt2x00dev, ant);
  }
  
@@@ -777,7 -779,8 +777,7 @@@ static void rt73usb_config_channel(stru
        rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
        rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
  
 -      smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
 -                rt2x00_rf(&rt2x00dev->chip, RF2527));
 +      smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
  
        rt73usb_bbp_read(rt2x00dev, 3, &r3);
        rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@@ -1207,7 -1210,8 +1207,7 @@@ static int rt73usb_init_registers(struc
        rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
  
        reg = 0x000023b0;
 -      if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
 -          rt2x00_rf(&rt2x00dev->chip, RF2527))
 +      if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
                rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
        rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
  
@@@ -1661,7 -1665,7 +1661,7 @@@ static void rt73usb_fill_rxdone(struct 
  
                /*
                 * Hardware has stripped IV/EIV data from 802.11 frame during
-                * decryption. It has provided the data seperately but rt2x00lib
+                * decryption. It has provided the data separately but rt2x00lib
                 * should decide if it should be reinserted.
                 */
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;
@@@ -1820,18 -1824,19 +1820,18 @@@ static int rt73usb_init_eeprom(struct r
         */
        value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
        rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
 -      rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
 -      rt2x00_print_chip(rt2x00dev);
 +      rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
 +                      value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
  
 -      if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) ||
 -          rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
 +      if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) {
                ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
                return -ENODEV;
        }
  
 -      if (!rt2x00_rf(&rt2x00dev->chip, RF5226) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF2528) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF5225) &&
 -          !rt2x00_rf(&rt2x00dev->chip, RF2527)) {
 +      if (!rt2x00_rf(rt2x00dev, RF5226) &&
 +          !rt2x00_rf(rt2x00dev, RF2528) &&
 +          !rt2x00_rf(rt2x00dev, RF5225) &&
 +          !rt2x00_rf(rt2x00dev, RF2527)) {
                ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
                return -ENODEV;
        }
@@@ -2076,17 -2081,17 +2076,17 @@@ static int rt73usb_probe_hw_mode(struc
        spec->supported_bands = SUPPORT_BAND_2GHZ;
        spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
  
 -      if (rt2x00_rf(&rt2x00dev->chip, RF2528)) {
 +      if (rt2x00_rf(rt2x00dev, RF2528)) {
                spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
                spec->channels = rf_vals_bg_2528;
 -      } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) {
 +      } else if (rt2x00_rf(rt2x00dev, RF5226)) {
                spec->supported_bands |= SUPPORT_BAND_5GHZ;
                spec->num_channels = ARRAY_SIZE(rf_vals_5226);
                spec->channels = rf_vals_5226;
 -      } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) {
 +      } else if (rt2x00_rf(rt2x00dev, RF2527)) {
                spec->num_channels = 14;
                spec->channels = rf_vals_5225_2527;
 -      } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) {
 +      } else if (rt2x00_rf(rt2x00dev, RF5225)) {
                spec->supported_bands |= SUPPORT_BAND_5GHZ;
                spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
                spec->channels = rf_vals_5225_2527;
@@@ -2244,6 -2249,7 +2244,6 @@@ static const struct ieee80211_ops rt73u
        .get_stats              = rt2x00mac_get_stats,
        .bss_info_changed       = rt2x00mac_bss_info_changed,
        .conf_tx                = rt73usb_conf_tx,
 -      .get_tx_stats           = rt2x00mac_get_tx_stats,
        .get_tsf                = rt73usb_get_tsf,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
  };
@@@ -2348,7 -2354,6 +2348,7 @@@ static struct usb_device_id rt73usb_dev
        { USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
        /* Buffalo */
        { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
 +      { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) },
        { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
        { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
        { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
index 2d555cc3050868954f8cc86789f9fba4a0fbb1f1,7a24802df2e1e4c049a83b04302bdcb2fcd157de..a22a192031208547627ce594b256a315ebc5d390
@@@ -374,7 -374,7 +374,7 @@@ static void zd_mac_tx_status(struct iee
   * zd_mac_tx_failed - callback for failed frames
   * @dev: the mac80211 wireless device
   *
-  * This function is called if a frame couldn't be successfully be
+  * This function is called if a frame couldn't be successfully
   * transferred. The first frame from the tx queue, will be selected and
   * reported as error to the upper layers.
   */
@@@ -869,7 -869,7 +869,7 @@@ int zd_mac_rx(struct ieee80211_hw *hw, 
  }
  
  static int zd_op_add_interface(struct ieee80211_hw *hw,
 -                              struct ieee80211_if_init_conf *conf)
 +                              struct ieee80211_vif *vif)
  {
        struct zd_mac *mac = zd_hw_mac(hw);
  
        if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
                return -EOPNOTSUPP;
  
 -      switch (conf->type) {
 +      switch (vif->type) {
        case NL80211_IFTYPE_MONITOR:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_ADHOC:
 -              mac->type = conf->type;
 +              mac->type = vif->type;
                break;
        default:
                return -EOPNOTSUPP;
        }
  
 -      return zd_write_mac_addr(&mac->chip, conf->mac_addr);
 +      return zd_write_mac_addr(&mac->chip, vif->addr);
  }
  
  static void zd_op_remove_interface(struct ieee80211_hw *hw,
 -                                  struct ieee80211_if_init_conf *conf)
 +                                  struct ieee80211_vif *vif)
  {
        struct zd_mac *mac = zd_hw_mac(hw);
        mac->type = NL80211_IFTYPE_UNSPECIFIED;
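
The interface callbacks migrate from ieee80211_if_init_conf to the vif itself, which carries the type and MAC address directly (conf->type becomes vif->type, conf->mac_addr becomes vif->addr). A userspace model of the new shape; the enum value and struct layout are illustrative assumptions:

#include <stdio.h>

enum nl80211_iftype { NL80211_IFTYPE_STATION = 2 };

struct ieee80211_vif {
        enum nl80211_iftype type;
        unsigned char addr[6];  /* replaces conf->mac_addr */
};

static int add_interface_model(const struct ieee80211_vif *vif)
{
        printf("iftype %d, addr %02x:%02x:...\n",
               vif->type, vif->addr[0], vif->addr[1]);
        return 0;
}

int main(void)
{
        struct ieee80211_vif vif = { NL80211_IFTYPE_STATION, { 0x02, 0xab } };
        return add_interface_model(&vif);
}
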
index e7b0c3bcef895b41f41298617e73c963024f1d2e,215621c31c5fadfcfa08d3f8416c8d35ae3c84e7..c64e3528889bd9fdd91cddd6b0e7fd2e4130c4ea
@@@ -286,7 -286,6 +286,7 @@@ struct ibm_init_struct 
        char param[32];
  
        int (*init) (struct ibm_init_struct *);
 +      mode_t base_procfs_mode;
        struct ibm_struct *data;
  };
  
@@@ -1668,7 -1667,7 +1668,7 @@@ static void tpacpi_remove_driver_attrib
   * Table of recommended minimum BIOS versions
   *
   * Reasons for listing:
-  *    1. Stable BIOS, listed because the unknown ammount of
+  *    1. Stable BIOS, listed because of the unknown amount of
   *       bugs and bad ACPI behaviour on older versions
   *
   *    2. BIOS or EC fw with known bugs that trigger on Linux
@@@ -2083,7 -2082,6 +2083,7 @@@ static struct attribute_set *hotkey_dev
  
  static void tpacpi_driver_event(const unsigned int hkey_event);
  static void hotkey_driver_event(const unsigned int scancode);
 +static void hotkey_poll_setup(const bool may_warn);
  
  /* HKEY.MHKG() return bits */
  #define TP_HOTKEY_TABLET_MASK (1 << 3)
@@@ -2266,8 -2264,6 +2266,8 @@@ static int tpacpi_hotkey_driver_mask_se
  
        rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
                                                        ~hotkey_source_mask);
 +      hotkey_poll_setup(true);
 +
        mutex_unlock(&hotkey_mutex);
  
        return rc;
@@@ -2552,7 -2548,7 +2552,7 @@@ static void hotkey_poll_stop_sync(void
  }
  
  /* call with hotkey_mutex held */
 -static void hotkey_poll_setup(bool may_warn)
 +static void hotkey_poll_setup(const bool may_warn)
  {
        const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
        const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
        }
  }
  
 -static void hotkey_poll_setup_safe(bool may_warn)
 +static void hotkey_poll_setup_safe(const bool may_warn)
  {
        mutex_lock(&hotkey_mutex);
        hotkey_poll_setup(may_warn);
@@@ -2601,11 -2597,7 +2601,11 @@@ static void hotkey_poll_set_freq(unsign
  
  #else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
  
 -static void hotkey_poll_setup_safe(bool __unused)
 +static void hotkey_poll_setup(const bool __unused)
 +{
 +}
 +
 +static void hotkey_poll_setup_safe(const bool __unused)
  {
  }
  
@@@ -2615,11 -2607,16 +2615,11 @@@ static int hotkey_inputdev_open(struct 
  {
        switch (tpacpi_lifecycle) {
        case TPACPI_LIFE_INIT:
 -              /*
 -               * hotkey_init will call hotkey_poll_setup_safe
 -               * at the appropriate moment
 -               */
 -              return 0;
 -      case TPACPI_LIFE_EXITING:
 -              return -EBUSY;
        case TPACPI_LIFE_RUNNING:
                hotkey_poll_setup_safe(false);
                return 0;
 +      case TPACPI_LIFE_EXITING:
 +              return -EBUSY;
        }
  
        /* Should only happen if tpacpi_lifecycle is corrupt */
  static void hotkey_inputdev_close(struct input_dev *dev)
  {
        /* disable hotkey polling when possible */
 -      if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
 +      if (tpacpi_lifecycle != TPACPI_LIFE_EXITING &&
            !(hotkey_source_mask & hotkey_driver_mask))
                hotkey_poll_setup_safe(false);
  }
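
Together with the init-order hunk near the end of this file (tpacpi_lifecycle is set to RUNNING before input_register_device()), the open() callback can now fire during device registration and still start polling. A toy model of the reworked state check; the state names mirror the hunk above, the rest is an assumption:

#include <stdio.h>

enum life { LIFE_INIT, LIFE_RUNNING, LIFE_EXITING };
static enum life tpacpi_lifecycle = LIFE_INIT;

static int inputdev_open(void)
{
        switch (tpacpi_lifecycle) {
        case LIFE_INIT:         /* now falls through: polling starts */
        case LIFE_RUNNING:
                puts("hotkey poll setup");
                return 0;
        case LIFE_EXITING:
                return -1;      /* -EBUSY in the driver */
        }
        return -1;              /* corrupt lifecycle value */
}

int main(void)
{
        tpacpi_lifecycle = LIFE_RUNNING;        /* set before registering */
        return inputdev_open();
}
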
@@@ -3658,19 -3655,13 +3658,19 @@@ static void hotkey_notify(struct ibm_st
                        break;
                case 3:
                        /* 0x3000-0x3FFF: bay-related wakeups */
 -                      if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
 +                      switch (hkey) {
 +                      case TP_HKEY_EV_BAYEJ_ACK:
                                hotkey_autosleep_ack = 1;
                                printk(TPACPI_INFO
                                       "bay ejected\n");
                                hotkey_wakeup_hotunplug_complete_notify_change();
                                known_ev = true;
 -                      } else {
 +                              break;
 +                      case TP_HKEY_EV_OPTDRV_EJ:
 +                              /* FIXME: kick libata if SATA link offline */
 +                              known_ev = true;
 +                              break;
 +                      default:
                                known_ev = false;
                        }
                        break;
@@@ -3879,7 -3870,7 +3879,7 @@@ enum 
        TP_ACPI_BLUETOOTH_HWPRESENT     = 0x01, /* Bluetooth hw available */
        TP_ACPI_BLUETOOTH_RADIOSSW      = 0x02, /* Bluetooth radio enabled */
        TP_ACPI_BLUETOOTH_RESUMECTRL    = 0x04, /* Bluetooth state at resume:
 -                                                 off / last state */
 +                                                 0 = disable, 1 = enable */
  };
  
  enum {
@@@ -3925,11 -3916,10 +3925,11 @@@ static int bluetooth_set_status(enum tp
        }
  #endif
  
 -      /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
 -      status = TP_ACPI_BLUETOOTH_RESUMECTRL;
        if (state == TPACPI_RFK_RADIO_ON)
 -              status |= TP_ACPI_BLUETOOTH_RADIOSSW;
 +              status = TP_ACPI_BLUETOOTH_RADIOSSW
 +                        | TP_ACPI_BLUETOOTH_RESUMECTRL;
 +      else
 +              status = 0;
  
        if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
                return -EIO;
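
The rework ties TP_ACPI_BLUETOOTH_RESUMECTRL to the requested radio state instead of forcing it on. A worked example of the SBDC argument for both states, using the bit values from the enum above:

#include <stdio.h>

#define TP_ACPI_BLUETOOTH_RADIOSSW      0x02
#define TP_ACPI_BLUETOOTH_RESUMECTRL    0x04

int main(void)
{
        int on;
        for (on = 0; on <= 1; on++) {
                int status = on ? (TP_ACPI_BLUETOOTH_RADIOSSW |
                                   TP_ACPI_BLUETOOTH_RESUMECTRL)
                                : 0;
                printf("radio %s -> SBDC arg 0x%02x\n",
                       on ? "on " : "off", status);
        }
        return 0;
}

The WWAN hunk below applies the same pattern through the SWAN method.
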
@@@ -4080,7 -4070,7 +4080,7 @@@ enum 
        TP_ACPI_WANCARD_HWPRESENT       = 0x01, /* Wan hw available */
        TP_ACPI_WANCARD_RADIOSSW        = 0x02, /* Wan radio enabled */
        TP_ACPI_WANCARD_RESUMECTRL      = 0x04, /* Wan state at resume:
 -                                                 off / last state */
 +                                                 0 = disable, 1 = enable */
  };
  
  #define TPACPI_RFK_WWAN_SW_NAME               "tpacpi_wwan_sw"
@@@ -4117,11 -4107,10 +4117,11 @@@ static int wan_set_status(enum tpacpi_r
        }
  #endif
  
 -      /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
 -      status = TP_ACPI_WANCARD_RESUMECTRL;
        if (state == TPACPI_RFK_RADIO_ON)
 -              status |= TP_ACPI_WANCARD_RADIOSSW;
 +              status = TP_ACPI_WANCARD_RADIOSSW
 +                       | TP_ACPI_WANCARD_RESUMECTRL;
 +      else
 +              status = 0;
  
        if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
                return -EIO;
@@@ -4630,10 -4619,6 +4630,10 @@@ static int video_read(struct seq_file *
                return 0;
        }
  
 +      /* Even reads can crash X.org, so... */
 +      if (!capable(CAP_SYS_ADMIN))
 +              return -EPERM;
 +
        status = video_outputsw_get();
        if (status < 0)
                return status;
@@@ -4667,10 -4652,6 +4667,10 @@@ static int video_write(char *buf
        if (video_supported == TPACPI_VIDEO_NONE)
                return -ENODEV;
  
 +      /* Even reads can crash X.org, let alone writes... */
 +      if (!capable(CAP_SYS_ADMIN))
 +              return -EPERM;
 +
        enable = 0;
        disable = 0;
  
@@@ -5790,7 -5771,7 +5790,7 @@@ static void thermal_exit(void
        case TPACPI_THERMAL_ACPI_TMP07:
        case TPACPI_THERMAL_ACPI_UPDT:
                sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
 -                                 &thermal_temp_input16_group);
 +                                 &thermal_temp_input8_group);
                break;
        case TPACPI_THERMAL_NONE:
        default:
@@@ -6152,13 -6133,13 +6152,13 @@@ static const struct tpacpi_quirk bright
        TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC),      /* T43/p ATI */
  
        /* Models with ATI GPUs that can use ECNVRAM */
 -      TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC),
 +      TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC),      /* R50,51 T40-42 */
        TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
 -      TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
 +      TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC),      /* R52 */
        TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
  
        /* Models with Intel Extreme Graphics 2 */
 -      TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
 +      TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),    /* X40 */
        TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
        TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
  
@@@ -6541,8 -6522,7 +6541,8 @@@ static int volume_set_status(const u8 s
        return volume_set_status_ec(status);
  }
  
 -static int volume_set_mute_ec(const bool mute)
 +/* returns < 0 on error, 0 on no change, 1 on change */
 +static int __volume_set_mute_ec(const bool mute)
  {
        int rc;
        u8 s, n;
        n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK :
                     s & ~TP_EC_AUDIO_MUTESW_MSK;
  
 -      if (n != s)
 +      if (n != s) {
                rc = volume_set_status_ec(n);
 +              if (!rc)
 +                      rc = 1;
 +      }
  
  unlock:
        mutex_unlock(&volume_mutex);
        return rc;
  }
  
 +static int volume_alsa_set_mute(const bool mute)
 +{
 +      dbg_printk(TPACPI_DBG_MIXER, "ALSA: trying to %smute\n",
 +                 (mute) ? "" : "un");
 +      return __volume_set_mute_ec(mute);
 +}
 +
  static int volume_set_mute(const bool mute)
  {
 +      int rc;
 +
        dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
                   (mute) ? "" : "un");
 -      return volume_set_mute_ec(mute);
 +
 +      rc = __volume_set_mute_ec(mute);
 +      return (rc < 0) ? rc : 0;
  }
  
 -static int volume_set_volume_ec(const u8 vol)
 +/* returns < 0 on error, 0 on no change, 1 on change */
 +static int __volume_set_volume_ec(const u8 vol)
  {
        int rc;
        u8 s, n;
  
        n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol;
  
 -      if (n != s)
 +      if (n != s) {
                rc = volume_set_status_ec(n);
 +              if (!rc)
 +                      rc = 1;
 +      }
  
  unlock:
        mutex_unlock(&volume_mutex);
        return rc;
  }
  
 -static int volume_set_volume(const u8 vol)
 +static int volume_alsa_set_volume(const u8 vol)
  {
        dbg_printk(TPACPI_DBG_MIXER,
 -                 "trying to set volume level to %hu\n", vol);
 -      return volume_set_volume_ec(vol);
 +                 "ALSA: trying to set volume level to %hu\n", vol);
 +      return __volume_set_volume_ec(vol);
  }
  
  static void volume_alsa_notify_change(void)
@@@ -6666,7 -6628,7 +6666,7 @@@ static int volume_alsa_vol_get(struct s
  static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
                                struct snd_ctl_elem_value *ucontrol)
  {
 -      return volume_set_volume(ucontrol->value.integer.value[0]);
 +      return volume_alsa_set_volume(ucontrol->value.integer.value[0]);
  }
  
  #define volume_alsa_mute_info snd_ctl_boolean_mono_info
@@@ -6689,7 -6651,7 +6689,7 @@@ static int volume_alsa_mute_get(struct 
  static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
                                struct snd_ctl_elem_value *ucontrol)
  {
 -      return volume_set_mute(!ucontrol->value.integer.value[0]);
 +      return volume_alsa_set_mute(!ucontrol->value.integer.value[0]);
  }
  
  static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
@@@ -7108,7 -7070,7 +7108,7 @@@ static struct ibm_struct volume_driver_
   *
   *    Fan speed changes of any sort (including those caused by the
   *    disengaged mode) are usually done slowly by the firmware as the
-  *    maximum ammount of fan duty cycle change per second seems to be
+  *    maximum amount of fan duty cycle change per second seems to be
   *    limited.
   *
   *    Reading is not available if GFAN exists.
@@@ -8515,10 -8477,9 +8515,10 @@@ static int __init ibm_init(struct ibm_i
                "%s installed\n", ibm->name);
  
        if (ibm->read) {
 -              mode_t mode;
 +              mode_t mode = iibm->base_procfs_mode;
  
 -              mode = S_IRUGO;
 +              if (!mode)
 +                      mode = S_IRUGO;
                if (ibm->write)
                        mode |= S_IWUSR;
                entry = proc_create_data(ibm->name, mode, proc_dir,
@@@ -8709,7 -8670,6 +8709,7 @@@ static struct ibm_init_struct ibms_init
  #ifdef CONFIG_THINKPAD_ACPI_VIDEO
        {
                .init = video_init,
 +              .base_procfs_mode = S_IRUSR,
                .data = &video_driver_data,
        },
  #endif
@@@ -9072,9 -9032,6 +9072,9 @@@ static int __init thinkpad_acpi_module_
                        return ret;
                }
        }
 +
 +      tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
 +
        ret = input_register_device(tpacpi_inputdev);
        if (ret < 0) {
                printk(TPACPI_ERR "unable to register input device\n");
                tp_features.input_device_registered = 1;
        }
  
 -      tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
        return 0;
  }
  
index 6fde2fabfd9bd920864ab18335093c03588ed8d7,96446a85e0085453b26696a38c9a624e0bdf4fa5..774e7ac837a593ba69d2bf6be41f7a660a129a60
@@@ -48,7 -48,7 +48,7 @@@ struct kmem_cache *scsi_pkt_cachep
  #define FC_SRB_CMD_SENT               (1 << 0)        /* cmd has been sent */
  #define FC_SRB_RCV_STATUS     (1 << 1)        /* response has arrived */
  #define FC_SRB_ABORT_PENDING  (1 << 2)        /* cmd abort sent to device */
- #define FC_SRB_ABORTED                (1 << 3)        /* abort acknowleged */
+ #define FC_SRB_ABORTED                (1 << 3)        /* abort acknowledged */
  #define FC_SRB_DISCONTIG      (1 << 4)        /* non-sequential data recvd */
  #define FC_SRB_COMPL          (1 << 5)        /* fc_io_compl has been run */
  #define FC_SRB_FCP_PROCESSING_TMO (1 << 6)    /* timer function processing */
@@@ -298,6 -298,9 +298,6 @@@ void fc_fcp_ddp_setup(struct fc_fcp_pk
  {
        struct fc_lport *lport;
  
 -      if (!fsp)
 -              return;
 -
        lport = fsp->lp;
        if ((fsp->req_flags & FC_SRB_READ) &&
            (lport->lro_enabled) && (lport->tt.ddp_setup)) {
@@@ -519,7 -522,7 +519,7 @@@ crc_err
   *
   * Called after receiving a Transfer Ready data descriptor.
   * If the LLD is capable of sequence offload then send down the
-  * seq_blen ammount of data in single frame, otherwise send
+  * seq_blen amount of data in single frame, otherwise send
   * multiple frames of the maximum frame payload supported by
   * the target port.
   */
index 08b6634cb994a14ca93bfde822a19d9b61332ee8,c898f47f30baec82587710c7c9c0121b3c91e810..2a40a6eabf4d3a55708d5ceb93494f07a9daaef7
@@@ -50,6 -50,9 +50,6 @@@ static int lpfc_issue_els_fdisc(struct 
                                struct lpfc_nodelist *ndlp, uint8_t retry);
  static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *iocb);
 -static void lpfc_register_new_vport(struct lpfc_hba *phba,
 -                                  struct lpfc_vport *vport,
 -                                  struct lpfc_nodelist *ndlp);
  
  static int lpfc_max_els_tries = 3;
  
@@@ -589,15 -592,6 +589,15 @@@ lpfc_cmpl_els_flogi_fabric(struct lpfc_
                        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                        spin_unlock_irq(shost->host_lock);
                }
 +              /*
 +               * If the VPI is unregistered, the driver needs to do
 +               * INIT_VPI before re-registering.
 +               */
 +              if (phba->sli_rev == LPFC_SLI_REV4) {
 +                      spin_lock_irq(shost->host_lock);
 +                      vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
 +                      spin_unlock_irq(shost->host_lock);
 +              }
        }
  
        if (phba->sli_rev < LPFC_SLI_REV4) {
        } else {
                ndlp->nlp_type |= NLP_FABRIC;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 -              if (vport->vpi_state & LPFC_VPI_REGISTERED) {
 +              if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
 +                      (vport->vpi_state & LPFC_VPI_REGISTERED)) {
                        lpfc_start_fdiscs(phba);
                        lpfc_do_scr_ns_plogi(phba, vport);
 -              } else
 +              } else if (vport->fc_flag & FC_VFI_REGISTERED)
 +                      lpfc_issue_init_vpi(vport);
 +              else
                        lpfc_issue_reg_vfi(vport);
        }
        return 0;
@@@ -813,9 -804,6 +813,9 @@@ lpfc_cmpl_els_flogi(struct lpfc_hba *ph
                                 irsp->ulpTimeout);
                goto flogifail;
        }
 +      spin_lock_irq(shost->host_lock);
 +      vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
 +      spin_unlock_irq(shost->host_lock);
  
        /*
         * The FLogI succeeded.  Sync the data for the CPU before
@@@ -981,7 -969,7 +981,7 @@@ lpfc_issue_els_flogi(struct lpfc_vport 
   * function returns, it does not guarantee all the IOCBs are actually aborted.
   *
   * Return code
-  *   0 - Sucessfully issued abort iocb on all outstanding flogis (Always 0)
+  *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
   **/
  int
  lpfc_els_abort_flogi(struct lpfc_hba *phba)
@@@ -2732,7 -2720,7 +2732,7 @@@ lpfc_els_retry(struct lpfc_hba *phba, s
        if (did == FDMI_DID)
                retry = 1;
  
 -      if ((cmd == ELS_CMD_FLOGI) &&
 +      if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
            (phba->fc_topology != TOPOLOGY_LOOP) &&
            !lpfc_error_lost_link(irsp)) {
                /* FLOGI retry policy */
@@@ -3129,7 -3117,7 +3129,7 @@@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba
        if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
            (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
                /* A LS_RJT associated with Default RPI cleanup has its own
-                * seperate code path.
+                * separate code path.
                 */
                if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
                        ls_rjt = 1;
@@@ -4397,7 -4385,7 +4397,7 @@@ lpfc_els_rcv_flogi(struct lpfc_vport *v
  
        did = Fabric_DID;
  
 -      if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
 +      if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
                /* For a FLOGI we accept, then if our portname is greater
                 * then the remote portname we initiate Nport login.
                 */
@@@ -5927,7 -5915,6 +5927,7 @@@ lpfc_cmpl_reg_new_vport(struct lpfc_hb
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
        MAILBOX_t *mb = &pmb->u.mb;
 +      int rc;
  
        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
                        spin_unlock_irq(shost->host_lock);
                        lpfc_can_disctmo(vport);
                        break;
 +              /* If reg_vpi fails with an invalid VPI status, re-init the VPI */
 +              case 0x20:
 +                      spin_lock_irq(shost->host_lock);
 +                      vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 +                      spin_unlock_irq(shost->host_lock);
 +                      lpfc_init_vpi(phba, pmb, vport->vpi);
 +                      pmb->vport = vport;
 +                      pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
 +                      rc = lpfc_sli_issue_mbox(phba, pmb,
 +                              MBX_NOWAIT);
 +                      if (rc == MBX_NOT_FINISHED) {
 +                              lpfc_printf_vlog(vport,
 +                                      KERN_ERR, LOG_MBOX,
 +                                      "2732 Failed to issue INIT_VPI"
 +                                      " mailbox command\n");
 +                      } else {
 +                              lpfc_nlp_put(ndlp);
 +                              return;
 +                      }
 +
                default:
                        /* Try to recover from this error */
                        lpfc_mbx_unreg_vpi(vport);
                        break;
                }
        } else {
 +              spin_lock_irq(shost->host_lock);
                vport->vpi_state |= LPFC_VPI_REGISTERED;
 -              if (vport == phba->pport)
 +              spin_unlock_irq(shost->host_lock);
 +              if (vport == phba->pport) {
                        if (phba->sli_rev < LPFC_SLI_REV4)
                                lpfc_issue_fabric_reglogin(vport);
 -                      else
 -                              lpfc_issue_reg_vfi(vport);
 -              else
 +                      else {
 +                              lpfc_start_fdiscs(phba);
 +                              lpfc_do_scr_ns_plogi(phba, vport);
 +                      }
 +              } else
                        lpfc_do_scr_ns_plogi(phba, vport);
        }
  
   * This routine registers the @vport as a new virtual port with a HBA.
   * It is done through a registering vpi mailbox command.
   **/
 -static void
 +void
  lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
                        struct lpfc_nodelist *ndlp)
  {
@@@ -6054,78 -6017,6 +6054,78 @@@ mbox_err_exit
        return;
  }
  
 +/**
 + * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
 + * @phba: pointer to lpfc hba data structure.
 + *
 + * This routine aborts all pending discovery commands and
 + * starts a timer to retry FLOGI for the physical port
 + * discovery.
 + **/
 +void
 +lpfc_retry_pport_discovery(struct lpfc_hba *phba)
 +{
 +      struct lpfc_vport **vports;
 +      struct lpfc_nodelist *ndlp;
 +      struct Scsi_Host  *shost;
 +      int i;
 +      uint32_t link_state;
 +
 +      /* Treat this failure as linkdown for all vports */
 +      link_state = phba->link_state;
 +      lpfc_linkdown(phba);
 +      phba->link_state = link_state;
 +
 +      vports = lpfc_create_vport_work_array(phba);
 +
 +      if (vports) {
 +              for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 +                      ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
 +                      if (ndlp)
 +                              lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
 +                      lpfc_els_flush_cmd(vports[i]);
 +              }
 +              lpfc_destroy_vport_work_array(phba, vports);
 +      }
 +
 +      /* If fabric requires FLOGI, then re-instantiate physical login */
 +      ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
 +      if (!ndlp)
 +              return;
 +
 +
 +      shost = lpfc_shost_from_vport(phba->pport);
 +      mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
 +      spin_lock_irq(shost->host_lock);
 +      ndlp->nlp_flag |= NLP_DELAY_TMO;
 +      spin_unlock_irq(shost->host_lock);
 +      ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
 +      phba->pport->port_state = LPFC_FLOGI;
 +      return;
 +}
 +
 +/**
 + * lpfc_fabric_login_reqd - Check if FLOGI required.
 + * @phba: pointer to lpfc hba data structure.
 + * @cmdiocb: pointer to FDISC command iocb.
 + * @rspiocb: pointer to FDISC response iocb.
 + *
 + * This routine checks if a FLOGI is required for FDISC
 + * to succeed.
 + **/
 +static int
 +lpfc_fabric_login_reqd(struct lpfc_hba *phba,
 +              struct lpfc_iocbq *cmdiocb,
 +              struct lpfc_iocbq *rspiocb)
 +{
 +
 +      if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
 +              (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
 +              return 0;
 +      else
 +              return 1;
 +}
 +
  /**
   * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
   * @phba: pointer to lpfc hba data structure.
@@@ -6175,12 -6066,6 +6175,12 @@@ lpfc_cmpl_els_fdisc(struct lpfc_hba *ph
                irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
  
        if (irsp->ulpStatus) {
 +
 +              if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
 +                      lpfc_retry_pport_discovery(phba);
 +                      goto out;
 +              }
 +
                /* Check for retry */
                if (lpfc_els_retry(phba, cmdiocb, rspiocb))
                        goto out;
                goto fdisc_failed;
        }
        spin_lock_irq(shost->host_lock);
 +      vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
        vport->fc_flag |= FC_FABRIC;
        if (vport->phba->fc_topology == TOPOLOGY_LOOP)
                vport->fc_flag |=  FC_PUBLIC_LOOP;
                lpfc_mbx_unreg_vpi(vport);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 +              vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                spin_unlock_irq(shost->host_lock);
        }
  
 -      if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
 +      if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
 +              lpfc_issue_init_vpi(vport);
 +      else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
                lpfc_register_new_vport(phba, vport, ndlp);
        else
                lpfc_do_scr_ns_plogi(phba, vport);
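
The new error path above hinges on lpfc_fabric_login_reqd(): FDISC only falls
back to full physical-port discovery when the fabric rejected it with a "login
required" reason. A minimal userspace sketch of that predicate, with
hypothetical numeric values standing in for IOSTAT_FABRIC_RJT and
RJT_LOGIN_REQUIRED (the real values live in the lpfc headers):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the lpfc status definitions. */
#define IOSTAT_FABRIC_RJT       0x03
#define RJT_LOGIN_REQUIRED      0x0e

struct rsp_iocb {
        uint32_t ulp_status;    /* completion status */
        uint32_t ulp_word4;     /* reason code on reject */
};

/* 1 when the fabric rejected FDISC because FLOGI must run first. */
static int fabric_login_reqd(const struct rsp_iocb *rsp)
{
        return rsp->ulp_status == IOSTAT_FABRIC_RJT &&
               rsp->ulp_word4 == RJT_LOGIN_REQUIRED;
}

int main(void)
{
        struct rsp_iocb rsp = { IOSTAT_FABRIC_RJT, RJT_LOGIN_REQUIRED };

        if (fabric_login_reqd(&rsp))
                printf("retry physical port discovery (FLOGI)\n");
        else
                printf("normal FDISC completion handling\n");
        return 0;
}
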
index 7f21b47db791bcd64ce068668cd1878e88fcfda7,28c6bfd3e82eacd0f7debebc5fdd2e6b2802ecd8..483fb74bc5922697f4077635f04121bbaa38d293
@@@ -626,7 -626,6 +626,7 @@@ lpfc_sli4_fcp_xri_aborted(struct lpfc_h
                &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
                if (psb->cur_iocbq.sli4_xritag == xri) {
                        list_del(&psb->list);
 +                      psb->exch_busy = 0;
                        psb->status = IOSTAT_SUCCESS;
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.abts_scsi_buf_list_lock,
@@@ -689,12 -688,11 +689,12 @@@ lpfc_sli4_repost_scsi_sgl_list(struct l
                                         list);
                        if (status) {
                                /* Put this back on the abort scsi list */
 -                              psb->status = IOSTAT_LOCAL_REJECT;
 -                              psb->result = IOERR_ABORT_REQUESTED;
 +                              psb->exch_busy = 1;
                                rc++;
 -                      } else
 +                      } else {
 +                              psb->exch_busy = 0;
                                psb->status = IOSTAT_SUCCESS;
 +                      }
                        /* Put it back into the SCSI buffer list */
                        lpfc_release_scsi_buf_s4(phba, psb);
                }
@@@ -798,17 -796,19 +798,17 @@@ lpfc_new_scsi_buf_s4(struct lpfc_vport 
                 */
                sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
                sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
 -              bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
                bf_set(lpfc_sli4_sge_last, sgl, 0);
                sgl->word2 = cpu_to_le32(sgl->word2);
 -              sgl->word3 = cpu_to_le32(sgl->word3);
 +              sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
                sgl++;
  
                /* Setup the physical region for the FCP RSP */
                sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
                sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
 -              bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
                bf_set(lpfc_sli4_sge_last, sgl, 1);
                sgl->word2 = cpu_to_le32(sgl->word2);
 -              sgl->word3 = cpu_to_le32(sgl->word3);
 +              sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
  
                /*
                 * Since the IOCB for the FCP I/O is built into this
                                                psb->cur_iocbq.sli4_xritag);
                        if (status) {
                                /* Put this back on the abort scsi list */
 -                              psb->status = IOSTAT_LOCAL_REJECT;
 -                              psb->result = IOERR_ABORT_REQUESTED;
 +                              psb->exch_busy = 1;
                                rc++;
 -                      } else
 +                      } else {
 +                              psb->exch_busy = 0;
                                psb->status = IOSTAT_SUCCESS;
 +                      }
                        /* Put it back into the SCSI buffer list */
                        lpfc_release_scsi_buf_s4(phba, psb);
                        break;
                                 list);
                        if (status) {
                                /* Put this back on the abort scsi list */
 -                              psb->status = IOSTAT_LOCAL_REJECT;
 -                              psb->result = IOERR_ABORT_REQUESTED;
 +                              psb->exch_busy = 1;
                                rc++;
 -                      } else
 +                      } else {
 +                              psb->exch_busy = 0;
                                psb->status = IOSTAT_SUCCESS;
 +                      }
                        /* Put it back into the SCSI buffer list */
                        lpfc_release_scsi_buf_s4(phba, psb);
                }
@@@ -953,7 -951,8 +953,7 @@@ lpfc_release_scsi_buf_s4(struct lpfc_hb
  {
        unsigned long iflag = 0;
  
 -      if (psb->status == IOSTAT_LOCAL_REJECT
 -              && psb->result == IOERR_ABORT_REQUESTED) {
 +      if (psb->exch_busy) {
                spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
                psb->pCmd = NULL;
@@@ -1575,7 -1574,7 +1575,7 @@@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_h
                case LPFC_PG_TYPE_NO_DIF:
                        num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
                                        datasegcnt);
-                       /* we shoud have 2 or more entries in buffer list */
+                       /* we should have 2 or more entries in buffer list */
                        if (num_bde < 2)
                                goto err;
                        break;
  
                        num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
                                        datasegcnt, protsegcnt);
-                       /* we shoud have 3 or more entries in buffer list */
+                       /* we should have 3 or more entries in buffer list */
                        if (num_bde < 3)
                                goto err;
                        break;
@@@ -1870,6 -1869,7 +1870,6 @@@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_h
                scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
                        physaddr = sg_dma_address(sgel);
                        dma_len = sg_dma_len(sgel);
 -                      bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
                        sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
                        sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
                        if ((num_bde + 1) == nseg)
                                bf_set(lpfc_sli4_sge_last, sgl, 0);
                        bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
                        sgl->word2 = cpu_to_le32(sgl->word2);
 -                      sgl->word3 = cpu_to_le32(sgl->word3);
 +                      sgl->sge_len = cpu_to_le32(dma_len);
                        dma_offset += dma_len;
                        sgl++;
                }
@@@ -2221,9 -2221,6 +2221,9 @@@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hb
  
        lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
 +      /* pick up SLI4 exchange busy status from HBA */
 +      lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
 +
        if (pnode && NLP_CHK_NODE_ACT(pnode))
                atomic_dec(&pnode->cmd_pending);
  
@@@ -2640,7 -2637,6 +2640,7 @@@ lpfc_scsi_api_table_setup(struct lpfc_h
        }
        phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
        phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
 +      phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        return 0;
  }
  
@@@ -2699,13 -2695,6 +2699,13 @@@ lpfc_info(struct Scsi_Host *host
                                 " port %s",
                                 phba->Port);
                }
 +              len = strlen(lpfcinfobuf);
 +              if (phba->sli4_hba.link_state.logical_speed) {
 +                      snprintf(lpfcinfobuf + len,
 +                               384-len,
 +                               " Logical Link Speed: %d Mbps",
 +                               phba->sli4_hba.link_state.logical_speed * 10);
 +              }
        }
        return lpfcinfobuf;
  }
@@@ -3001,7 -2990,6 +3001,7 @@@ lpfc_abort_handler(struct scsi_cmnd *cm
  
        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
        abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
 +      abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
  
        if (lpfc_is_link_up(phba))
                icmd->ulpCommand = CMD_ABORT_XRI_CN;
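
The repeated hunks above replace the IOSTAT_LOCAL_REJECT/IOERR_ABORT_REQUESTED
pair with a single exch_busy flag that routes a completed buffer to either the
abort list or the free list. A self-contained sketch of that routing decision,
with singly linked lists, simplified field names, and locking omitted:

#include <stdio.h>

/* Only the fields the release path above looks at. */
struct scsi_buf {
        int exch_busy;                  /* exchange still busy on the HBA */
        struct scsi_buf *next;
};

static struct scsi_buf *abts_list;      /* awaiting abort completion */
static struct scsi_buf *free_list;      /* ready for reuse */

/* One flag picks the destination list, as in the reworked
 * lpfc_release_scsi_buf_s4(). */
static void release_scsi_buf(struct scsi_buf *psb)
{
        struct scsi_buf **head = psb->exch_busy ? &abts_list : &free_list;

        psb->next = *head;
        *head = psb;
}

int main(void)
{
        struct scsi_buf a = { 1, NULL };
        struct scsi_buf b = { 0, NULL };

        release_scsi_buf(&a);
        release_scsi_buf(&b);
        printf("abts head busy=%d, free head busy=%d\n",
               abts_list->exch_busy, free_list->exch_busy);
        return 0;
}
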
diff --combined drivers/scsi/sd.c
index 1dd4d8407694b44fac24501ffd0b8db5bf8cd7e5,5d94772d449d8beece537e726c2d0249d9fa0552..83881dfb33c055646b42cc1341f21caebee9f42f
@@@ -1196,10 -1196,19 +1196,10 @@@ static int sd_done(struct scsi_cmnd *SC
                SCpnt->result = 0;
                memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
                break;
 -      case ABORTED_COMMAND:
 -              if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
 -                      scsi_print_result(SCpnt);
 -                      scsi_print_sense("sd", SCpnt);
 +      case ABORTED_COMMAND: /* DIF: Target detected corruption */
 +      case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
 +              if (sshdr.asc == 0x10)
                        good_bytes = sd_completed_bytes(SCpnt);
 -              }
 -              break;
 -      case ILLEGAL_REQUEST:
 -              if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
 -                      scsi_print_result(SCpnt);
 -                      scsi_print_sense("sd", SCpnt);
 -                      good_bytes = sd_completed_bytes(SCpnt);
 -              }
                break;
        default:
                break;
                sd_dif_complete(SCpnt, good_bytes);
  
        if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
 -          == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd)
 +          == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
 +
 +              /* We have to print a failed command here as the
 +               * extended CDB gets freed before scsi_io_completion()
 +               * is called.
 +               */
 +              if (result)
 +                      scsi_print_command(SCpnt);
 +
                mempool_free(SCpnt->cmnd, sd_cdb_pool);
 +              SCpnt->cmnd = NULL;
 +              SCpnt->cmd_len = 0;
 +      }
  
        return good_bytes;
  }
@@@ -1948,13 -1946,13 +1948,13 @@@ static void sd_read_block_limits(struc
  {
        struct request_queue *q = sdkp->disk->queue;
        unsigned int sector_sz = sdkp->device->sector_size;
 -      char *buffer;
 +      const int vpd_len = 32;
 +      unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
  
 -      /* Block Limits VPD */
 -      buffer = scsi_get_vpd_page(sdkp->device, 0xb0);
 -
 -      if (buffer == NULL)
 -              return;
 +      if (!buffer ||
 +          /* Block Limits VPD */
 +          scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
 +              goto out;
  
        blk_queue_io_min(sdkp->disk->queue,
                         get_unaligned_be16(&buffer[6]) * sector_sz);
                                get_unaligned_be32(&buffer[32]) & ~(1 << 31);
        }
  
 + out:
        kfree(buffer);
  }
  
   */
  static void sd_read_block_characteristics(struct scsi_disk *sdkp)
  {
 -      char *buffer;
 +      unsigned char *buffer;
        u16 rot;
 +      const int vpd_len = 32;
  
 -      /* Block Device Characteristics VPD */
 -      buffer = scsi_get_vpd_page(sdkp->device, 0xb1);
 +      buffer = kmalloc(vpd_len, GFP_KERNEL);
  
 -      if (buffer == NULL)
 -              return;
 +      if (!buffer ||
 +          /* Block Device Characteristics VPD */
 +          scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
 +              goto out;
  
        rot = get_unaligned_be16(&buffer[4]);
  
        if (rot == 1)
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
  
 + out:
        kfree(buffer);
  }
  
@@@ -2111,7 -2105,7 +2111,7 @@@ static int sd_revalidate_disk(struct ge
   *    which is followed by sdaaa.
   *
   *    This is basically 26 base counting with one extra 'nil' entry
-  *    at the beggining from the second digit on and can be
+  *    at the beginning from the second digit on and can be
   *    determined using similar method as 26 base conversion with the
   *    index shifted -1 after each digit is computed.
   *
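
sd_read_block_limits() and sd_read_block_characteristics() now allocate a
fixed 32-byte buffer, hand it to scsi_get_vpd_page(), and funnel every failure
through one cleanup label. A userspace sketch of the same shape, with a
hypothetical stubbed VPD fetch and a portable helper in place of
get_unaligned_be16():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Portable stand-in for the kernel's get_unaligned_be16(). */
static uint16_t be16(const unsigned char *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

/* Hypothetical stub for scsi_get_vpd_page(): fills buf with a fake
 * Block Limits page and returns 0 on success. */
static int get_vpd_page(unsigned char *buf, int len)
{
        memset(buf, 0, len);
        buf[6] = 0x00;
        buf[7] = 0x08;          /* optimal transfer granularity: 8 blocks */
        return 0;
}

int main(void)
{
        const int vpd_len = 32;
        unsigned char *buffer = malloc(vpd_len);

        /* Same shape as sd_read_block_limits() above: allocate a fixed
         * buffer, fetch, parse, and route every failure to one label. */
        if (!buffer || get_vpd_page(buffer, vpd_len))
                goto out;

        printf("io_min granularity: %u blocks\n", be16(&buffer[6]));
out:
        free(buffer);           /* free(NULL) is a no-op, like kfree(NULL) */
        return 0;
}
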
index 292894a2c24711077df28529315c97f586e696fb,7c14d5c5a8ac37a3357b831778441377c0ffdf12..8d8062b10e2ff5749394bb9f64099fdbab2adfdb
  #define MUSB_DEVCTL_HR                0x02
  #define MUSB_DEVCTL_SESSION   0x01
  
 +/* MUSB ULPI VBUSCONTROL */
 +#define MUSB_ULPI_USE_EXTVBUS 0x01
 +#define MUSB_ULPI_USE_EXTVBUSIND 0x02
 +
  /* TESTMODE */
  #define MUSB_TEST_FORCE_HOST  0x80
  #define MUSB_TEST_FIFO_ACCESS 0x40
  
  /* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
  #define MUSB_HWVERS           0x6C    /* 8 bit */
 +#define MUSB_ULPI_BUSCONTROL  0x70    /* 8 bit */
  
  #define MUSB_EPINFO           0x78    /* 8 bit */
  #define MUSB_RAMINFO          0x79    /* 8 bit */
@@@ -326,26 -321,6 +326,26 @@@ static inline void  musb_write_rxfifoad
        musb_writew(mbase, MUSB_RXFIFOADD, c_off);
  }
  
 +static inline u8 musb_read_txfifosz(void __iomem *mbase)
 +{
 +      return musb_readb(mbase, MUSB_TXFIFOSZ);
 +}
 +
 +static inline u16 musb_read_txfifoadd(void __iomem *mbase)
 +{
 +      return musb_readw(mbase, MUSB_TXFIFOADD);
 +}
 +
 +static inline u8 musb_read_rxfifosz(void __iomem *mbase)
 +{
 +      return musb_readb(mbase, MUSB_RXFIFOSZ);
 +}
 +
 +static inline u16  musb_read_rxfifoadd(void __iomem *mbase)
 +{
 +      return musb_readw(mbase, MUSB_RXFIFOADD);
 +}
 +
  static inline u8 musb_read_configdata(void __iomem *mbase)
  {
        musb_writeb(mbase, MUSB_INDEX, 0);
@@@ -401,36 -376,6 +401,36 @@@ static inline void  musb_write_txhubpor
                        qh_h_port_reg);
  }
  
 +static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
 +{
 +      return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXFUNCADDR));
 +}
 +
 +static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
 +{
 +      return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR));
 +}
 +
 +static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
 +{
 +      return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBPORT));
 +}
 +
 +static inline u8  musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
 +{
 +      return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR));
 +}
 +
 +static inline u8  musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
 +{
 +      return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR));
 +}
 +
 +static inline u8  musb_read_txhubport(void __iomem *mbase, u8 epnum)
 +{
 +      return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT));
 +}
 +
  #else /* CONFIG_BLACKFIN */
  
  #define USB_BASE              USB_FADDR
  #define MUSB_FLAT_OFFSET(_epnum, _offset)     \
        (USB_OFFSET(USB_EP_NI0_TXMAXP) + (0x40 * (_epnum)) + (_offset))
  
- /* Not implemented - HW has seperate Tx/Rx FIFO */
+ /* Not implemented - HW has separate Tx/Rx FIFO */
  #define MUSB_TXCSR_MODE                       0x0000
  
  static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size)
@@@ -510,22 -455,6 +510,22 @@@ static inline void  musb_write_rxfifoad
  {
  }
  
 +static inline u8 musb_read_txfifosz(void __iomem *mbase)
 +{
 +      return 0;
 +}
 +
 +static inline u16 musb_read_txfifoadd(void __iomem *mbase)
 +{
 +      return 0;
 +}
 +
 +static inline u8 musb_read_rxfifosz(void __iomem *mbase)
 +{
 +      return 0;
 +}
 +
 +static inline u16  musb_read_rxfifoadd(void __iomem *mbase)
 +{
 +      return 0;
 +}
 +
  static inline u8 musb_read_configdata(void __iomem *mbase)
  {
        return 0;
  
  static inline u16 musb_read_hwvers(void __iomem *mbase)
  {
 -      return 0;
 +      /*
 +       * This register is invisible on Blackfin; the MUSB RTL
 +       * version on Blackfin is 1.9, so just hardcode its value.
 +       */
 +      return MUSB_HWVERS_1900;
  }
  
  static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
@@@ -575,30 -500,6 +575,30 @@@ static inline void  musb_write_txhubpor
  {
  }
  
 +static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
 +{
 +      return 0;
 +}
 +
 +static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
 +{
 +      return 0;
 +}
 +
 +static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
 +{
 +      return 0;
 +}
 +
 +static inline u8  musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
 +{
 +      return 0;
 +}
 +
 +static inline u8  musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
 +{
 +      return 0;
 +}
 +
 +static inline u8  musb_read_txhubport(void __iomem *mbase, u8 epnum)
 +{
 +      return 0;
 +}
 +
  #endif /* CONFIG_BLACKFIN */
  
  #endif        /* __MUSB_REGS_H__ */
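
The block of new musb_read_* helpers follows one pattern: a trivial inline per
register, so that the flat and indexed register layouts can provide different
implementations behind the same names. A compilable sketch over an in-memory
register file; the offset below is illustrative, not the real MUSB map:

#include <stdio.h>
#include <stdint.h>

#define MUSB_TXFIFOSZ   0x62    /* illustrative offset only */

static uint8_t regs[0x100];     /* fake register file */

static inline uint8_t musb_readb(uint8_t *mbase, unsigned int off)
{
        return mbase[off];
}

static inline void musb_writeb(uint8_t *mbase, unsigned int off, uint8_t v)
{
        mbase[off] = v;
}

/* One tiny inline per register, in the style of the accessors above. */
static inline uint8_t musb_read_txfifosz(uint8_t *mbase)
{
        return musb_readb(mbase, MUSB_TXFIFOSZ);
}

int main(void)
{
        musb_writeb(regs, MUSB_TXFIFOSZ, 0x5a);
        printf("txfifosz = 0x%02x\n", musb_read_txfifosz(regs));
        return 0;
}
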
index baf74b44e6ed801c3941c8984c15c554e89a44fd,52a81a312b8650e8cf147a8c9451ac2723fdec7a..e23c77925e7a45445e6c39f4bf4b75b69cad5b6c
  #include <linux/serial.h>
  #include <linux/delay.h>
  #include <linux/uaccess.h>
 +#include <asm/unaligned.h>
  
  #include "cypress_m8.h"
  
  
 -#ifdef CONFIG_USB_SERIAL_DEBUG
 -      static int debug = 1;
 -#else
 -      static int debug;
 -#endif
 +static int debug;
  static int stats;
  static int interval;
 +static int unstable_bauds;
  
  /*
   * Version Information
  #define CYPRESS_BUF_SIZE      1024
  #define CYPRESS_CLOSING_WAIT  (30*HZ)
  
 -static struct usb_device_id id_table_earthmate [] = {
 +static const struct usb_device_id id_table_earthmate[] = {
        { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
        { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
        { }                                             /* Terminating entry */
  };
  
 -static struct usb_device_id id_table_cyphidcomrs232 [] = {
 +static const struct usb_device_id id_table_cyphidcomrs232[] = {
        { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
        { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
        { }                                             /* Terminating entry */
  };
  
 -static struct usb_device_id id_table_nokiaca42v2 [] = {
 +static const struct usb_device_id id_table_nokiaca42v2[] = {
        { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
        { }                                             /* Terminating entry */
  };
  
 -static struct usb_device_id id_table_combined [] = {
 +static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
        { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
        { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
@@@ -152,7 -154,7 +152,7 @@@ struct cypress_private 
        int isthrottled;                   /* if throttled, discard reads */
        wait_queue_head_t delta_msr_wait;  /* used for TIOCMIWAIT */
        char prev_status, diff_status;     /* used for TIOCMIWAIT */
-       /* we pass a pointer to this as the arguement sent to
+       /* we pass a pointer to this as the argument sent to
           cypress_set_termios old_termios */
        struct ktermios tmp_termios;       /* stores the old termios settings */
  };
@@@ -293,9 -295,6 +293,9 @@@ static int analyze_baud_rate(struct usb
        struct cypress_private *priv;
        priv = usb_get_serial_port_data(port);
  
 +      if (unstable_bauds)
 +              return new_rate;
 +
        /*
         * The general purpose firmware for the Cypress M8 allows for
         * a maximum speed of 57600bps (I have no idea whether DeLorme
@@@ -345,8 -344,7 +345,8 @@@ static int cypress_serial_control(struc
  {
        int new_baudrate = 0, retval = 0, tries = 0;
        struct cypress_private *priv;
 -      __u8 feature_buffer[5];
 +      u8 *feature_buffer;
 +      const unsigned int feature_len = 5;
        unsigned long flags;
  
        dbg("%s", __func__);
        if (!priv->comm_is_ok)
                return -ENODEV;
  
 +      feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL);
 +      if (!feature_buffer)
 +              return -ENOMEM;
 +
        switch (cypress_request_type) {
        case CYPRESS_SET_CONFIG:
 -              new_baudrate = priv->baud_rate;
                /* 0 means 'Hang up' so doesn't change the true bit rate */
 -              if (baud_rate == 0)
 -                      new_baudrate = priv->baud_rate;
 -              /* Change of speed ? */
 -              else if (baud_rate != priv->baud_rate) {
 +              new_baudrate = priv->baud_rate;
 +              if (baud_rate && baud_rate != priv->baud_rate) {
                        dbg("%s - baud rate is changing", __func__);
                        retval = analyze_baud_rate(port, baud_rate);
 -                      if (retval >=  0) {
 +                      if (retval >= 0) {
                                new_baudrate = retval;
                                dbg("%s - New baud rate set to %d",
                                    __func__, new_baudrate);
                dbg("%s - baud rate is being sent as %d",
                                        __func__, new_baudrate);
  
 -              memset(feature_buffer, 0, sizeof(feature_buffer));
                /* fill the feature_buffer with new configuration */
 -              *((u_int32_t *)feature_buffer) = new_baudrate;
 +              put_unaligned_le32(new_baudrate, feature_buffer);
                feature_buffer[4] |= data_bits;   /* assign data bits in 2 bit space ( max 3 ) */
                /* 1 bit gap */
                feature_buffer[4] |= (stop_bits << 3);   /* assign stop bits in 1 bit space */
                                        HID_REQ_SET_REPORT,
                                        USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
                                        0x0300, 0, feature_buffer,
 -                                      sizeof(feature_buffer), 500);
 +                                      feature_len, 500);
  
                        if (tries++ >= 3)
                                break;
  
 -              } while (retval != sizeof(feature_buffer) &&
 +              } while (retval != feature_len &&
                         retval != -ENODEV);
  
 -              if (retval != sizeof(feature_buffer)) {
 +              if (retval != feature_len) {
                        dev_err(&port->dev, "%s - failed sending serial "
                                "line settings - %d\n", __func__, retval);
                        cypress_set_dead(port);
                        /* Not implemented for this device,
                           and if we try to do it we're likely
                           to crash the hardware. */
 -                      return -ENOTTY;
 +                      retval = -ENOTTY;
 +                      goto out;
                }
                dbg("%s - retreiving serial line settings", __func__);
 -              /* set initial values in feature buffer */
 -              memset(feature_buffer, 0, sizeof(feature_buffer));
 -
                do {
                        retval = usb_control_msg(port->serial->dev,
                                        usb_rcvctrlpipe(port->serial->dev, 0),
                                        HID_REQ_GET_REPORT,
                                        USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
                                        0x0300, 0, feature_buffer,
 -                                      sizeof(feature_buffer), 500);
 +                                      feature_len, 500);
  
                        if (tries++ >= 3)
                                break;
 -              } while (retval != sizeof(feature_buffer)
 +              } while (retval != feature_len
                                                && retval != -ENODEV);
  
 -              if (retval != sizeof(feature_buffer)) {
 +              if (retval != feature_len) {
                        dev_err(&port->dev, "%s - failed to retrieve serial "
                                "line settings - %d\n", __func__, retval);
                        cypress_set_dead(port);
 -                      return retval;
 +                      goto out;
                } else {
                        spin_lock_irqsave(&priv->lock, flags);
                        /* store the config in one byte, and later
                           use bit masks to check values */
                        priv->current_config = feature_buffer[4];
 -                      priv->baud_rate = *((u_int32_t *)feature_buffer);
 +                      priv->baud_rate = get_unaligned_le32(feature_buffer);
                        spin_unlock_irqrestore(&priv->lock, flags);
                }
        }
        spin_lock_irqsave(&priv->lock, flags);
        ++priv->cmd_count;
        spin_unlock_irqrestore(&priv->lock, flags);
 -
 +out:
 +      kfree(feature_buffer);
        return retval;
  } /* cypress_serial_control */
  
@@@ -691,6 -690,7 +691,6 @@@ static void cypress_dtr_rts(struct usb_
  {
        struct cypress_private *priv = usb_get_serial_port_data(port);
        /* drop dtr and rts */
 -      priv = usb_get_serial_port_data(port);
        spin_lock_irq(&priv->lock);
        if (on == 0)
                priv->line_control = 0;
@@@ -1307,9 -1307,13 +1307,9 @@@ static void cypress_read_int_callback(s
                spin_unlock_irqrestore(&priv->lock, flags);
  
        /* process read if there is data other than line status */
 -      if (tty && (bytes > i)) {
 -              bytes = tty_buffer_request_room(tty, bytes);
 -              for (; i < bytes ; ++i) {
 -                      dbg("pushing byte number %d - %d - %c", i, data[i],
 -                                      data[i]);
 -                      tty_insert_flip_char(tty, data[i], tty_flag);
 -              }
 +      if (tty && bytes > i) {
 +              tty_insert_flip_string_fixed_flag(tty, data + i,
 +                              bytes - i, tty_flag);
                tty_flip_buffer_push(tty);
        }
  
  continue_read:
        tty_kref_put(tty);
  
 -      /* Continue trying to always read... unless the port has closed. */
 +      /* Continue trying to always read */
  
 -      if (port->port.count > 0 && priv->comm_is_ok) {
 +      if (priv->comm_is_ok) {
                usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev,
                                usb_rcvintpipe(port->serial->dev,
                                        port->interrupt_in_endpointAddress),
                                cypress_read_int_callback, port,
                                priv->read_urb_interval);
                result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
 -              if (result) {
 +              if (result && result != -EPERM) {
                        dev_err(&urb->dev->dev, "%s - failed resubmitting "
                                        "read urb, error %d\n", __func__,
                                        result);
@@@ -1646,5 -1650,3 +1646,5 @@@ module_param(stats, bool, S_IRUGO | S_I
  MODULE_PARM_DESC(stats, "Enable statistics or not");
  module_param(interval, int, S_IRUGO | S_IWUSR);
  MODULE_PARM_DESC(interval, "Overrides interrupt interval");
 +module_param(unstable_bauds, bool, S_IRUGO | S_IWUSR);
 +MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
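
The feature-buffer change above swaps a *(u_int32_t *)feature_buffer cast for
put_unaligned_le32()/get_unaligned_le32(), which is correct regardless of
alignment and host endianness. A portable sketch of the same 5-byte layout
(bytes 0-3 little-endian baud rate, byte 4 packed line settings; the data-bits
encoding shown is illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Portable equivalents of put_unaligned_le32()/get_unaligned_le32(). */
static void put_le32(uint32_t v, uint8_t *p)
{
        p[0] = (uint8_t)v;
        p[1] = (uint8_t)(v >> 8);
        p[2] = (uint8_t)(v >> 16);
        p[3] = (uint8_t)(v >> 24);
}

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        uint8_t feature_buffer[5] = { 0 };

        put_le32(57600, feature_buffer);
        feature_buffer[4] |= 3; /* e.g. data bits in the low 2-bit field */

        printf("baud: %u, config byte: 0x%02x\n",
               get_le32(feature_buffer), feature_buffer[4]);
        return 0;
}
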
index f37476e22684af0663b46e41d4431692834d741b,96937f1b8b973172b629d49fec3cb9caa562134b..701452ae91979a23129d24d656a5e4a2df7b8946
@@@ -22,7 -22,7 +22,7 @@@
  
  static int debug;
  
 -static struct usb_device_id id_table[] = {
 +static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x065a, 0x0009) },
        { },
  };
@@@ -55,6 -55,7 +55,6 @@@ static void opticon_bulk_callback(struc
        int status = urb->status;
        struct tty_struct *tty;
        int result;
 -      int available_room = 0;
        int data_length;
  
        dbg("%s - port %d", __func__, port->number);
                        /* real data, send it to the tty layer */
                        tty = tty_port_tty_get(&port->port);
                        if (tty) {
 -                              available_room = tty_buffer_request_room(tty,
 -                                                              data_length);
 -                              if (available_room) {
 -                                      tty_insert_flip_string(tty, data,
 -                                                             available_room);
 -                                      tty_flip_buffer_push(tty);
 -                              }
 +                              tty_insert_flip_string(tty, data,
 +                                                             data_length);
 +                              tty_flip_buffer_push(tty);
                                tty_kref_put(tty);
                        }
                } else {
                }
        } else {
                dev_dbg(&priv->udev->dev,
-                       "Improper ammount of data received from the device, "
+                       "Improper amount of data received from the device, "
                        "%d bytes", urb->actual_length);
        }
  
@@@ -212,7 -217,7 +212,7 @@@ static int opticon_write(struct tty_str
        spin_lock_irqsave(&priv->lock, flags);
        if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
                spin_unlock_irqrestore(&priv->lock, flags);
 -              dbg("%s - write limit hit\n", __func__);
 +              dbg("%s - write limit hit", __func__);
                return 0;
        }
        priv->outstanding_urbs++;
@@@ -283,7 -288,7 +283,7 @@@ static int opticon_write_room(struct tt
        spin_lock_irqsave(&priv->lock, flags);
        if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
                spin_unlock_irqrestore(&priv->lock, flags);
 -              dbg("%s - write limit hit\n", __func__);
 +              dbg("%s - write limit hit", __func__);
                return 0;
        }
        spin_unlock_irqrestore(&priv->lock, flags);
index 72398888858ffb95ed24943033d41b5f2ebe4ca0,aebfcf699644a15ee9011f33d940059abad6b3fb..ee190cc1757ce84dd0b78114fb9ca190b1c7e9e1
@@@ -21,7 -21,7 +21,7 @@@
  
  static int debug;
  
 -static struct usb_device_id id_table[] = {
 +static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x05e0, 0x0600) },
        { },
  };
@@@ -51,6 -51,7 +51,6 @@@ static void symbol_int_callback(struct 
        int status = urb->status;
        struct tty_struct *tty;
        int result;
 -      int available_room = 0;
        int data_length;
  
        dbg("%s - port %d", __func__, port->number);
                 */
                tty = tty_port_tty_get(&port->port);
                if (tty) {
 -                      available_room = tty_buffer_request_room(tty,
 -                                                      data_length);
 -                      if (available_room) {
 -                              tty_insert_flip_string(tty, &data[1],
 -                                                     available_room);
 -                              tty_flip_buffer_push(tty);
 -                      }
 +                      tty_insert_flip_string(tty, &data[1], data_length);
 +                      tty_flip_buffer_push(tty);
                        tty_kref_put(tty);
                }
        } else {
                dev_dbg(&priv->udev->dev,
-                       "Improper ammount of data received from the device, "
+                       "Improper amount of data received from the device, "
                        "%d bytes", urb->actual_length);
        }
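
Both opticon and symbolserial drop the tty_buffer_request_room() dance because
tty_insert_flip_string() already copies as much as fits and reports the count.
A toy model of that contract over a fixed buffer, not the real tty flip layer:

#include <stdio.h>
#include <string.h>

#define FLIP_BUF_SIZE 16

static char flip_buf[FLIP_BUF_SIZE];
static int flip_len;

/* Copy what fits, return the count actually taken, never fail outright. */
static int insert_flip_string(const char *data, int len)
{
        int room = FLIP_BUF_SIZE - flip_len;
        int n = len < room ? len : room;

        memcpy(flip_buf + flip_len, data, n);
        flip_len += n;
        return n;
}

int main(void)
{
        int pushed = insert_flip_string("scanner payload bytes", 21);

        printf("accepted %d of 21 bytes\n", pushed);    /* 16 of 21 */
        return 0;
}
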
  
diff --combined fs/binfmt_elf_fdpic.c
index 6d6a16c5e9bbb6ba1546543ed42bd93f6305dd76,364fcfc0c5dfa17d3a8a616ffcd8f62a2e541b6f..2c32d00a66904883339fe2b25ebe991cdeb6a489
@@@ -34,7 -34,6 +34,7 @@@
  #include <linux/elf.h>
  #include <linux/elf-fdpic.h>
  #include <linux/elfcore.h>
 +#include <linux/coredump.h>
  
  #include <asm/uaccess.h>
  #include <asm/param.h>
@@@ -1216,6 -1215,26 +1216,6 @@@ static int elf_fdpic_map_file_by_direct
   */
  #ifdef CONFIG_ELF_CORE
  
 -/*
 - * These are the only things you should do on a core-file: use only these
 - * functions to write out all the necessary info.
 - */
 -static int dump_write(struct file *file, const void *addr, int nr)
 -{
 -      return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
 -}
 -
 -static int dump_seek(struct file *file, loff_t off)
 -{
 -      if (file->f_op->llseek) {
 -              if (file->f_op->llseek(file, off, SEEK_SET) != off)
 -                      return 0;
 -      } else {
 -              file->f_pos = off;
 -      }
 -      return 1;
 -}
 -
  /*
   * Decide whether a segment is worth dumping; default is yes to be
   * sure (missing info is worse than too much; etc).
@@@ -1294,35 -1313,35 +1294,35 @@@ static int notesize(struct memelfnote *
  
  /* #define DEBUG */
  
 -#define DUMP_WRITE(addr, nr)  \
 -      do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
 -#define DUMP_SEEK(off)        \
 -      do { if (!dump_seek(file, (off))) return 0; } while(0)
 +#define DUMP_WRITE(addr, nr, foffset) \
 +      do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)
  
 -static int writenote(struct memelfnote *men, struct file *file)
 +static int alignfile(struct file *file, loff_t *foffset)
  {
 -      struct elf_note en;
 +      static const char buf[4] = { 0, };
 +      DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
 +      return 1;
 +}
  
 +static int writenote(struct memelfnote *men, struct file *file,
 +                      loff_t *foffset)
 +{
 +      struct elf_note en;
        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;
  
 -      DUMP_WRITE(&en, sizeof(en));
 -      DUMP_WRITE(men->name, en.n_namesz);
 -      /* XXX - cast from long long to long to avoid need for libgcc.a */
 -      DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));      /* XXX */
 -      DUMP_WRITE(men->data, men->datasz);
 -      DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));      /* XXX */
 +      DUMP_WRITE(&en, sizeof(en), foffset);
 +      DUMP_WRITE(men->name, en.n_namesz, foffset);
 +      if (!alignfile(file, foffset))
 +              return 0;
 +      DUMP_WRITE(men->data, men->datasz, foffset);
 +      if (!alignfile(file, foffset))
 +              return 0;
  
        return 1;
  }
  #undef DUMP_WRITE
 -#undef DUMP_SEEK
 -
 -#define DUMP_WRITE(addr, nr)                          \
 -      if ((size += (nr)) > cprm->limit ||             \
 -          !dump_write(cprm->file, (addr), (nr)))      \
 -              goto end_coredump;
  
  static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
  {
@@@ -1374,7 -1393,7 +1374,7 @@@ static inline void fill_note(struct mem
  
  /*
   * fill up all the fields in prstatus from the given task struct, except
-  * registers which need to be filled up seperately.
+  * registers which need to be filled up separately.
   */
  static void fill_prstatus(struct elf_prstatus *prstatus,
                          struct task_struct *p, long signr)
@@@ -1505,22 -1524,6 +1505,22 @@@ static int elf_dump_thread_status(long 
        return sz;
  }
  
 +static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
 +                           elf_addr_t e_shoff, int segs)
 +{
 +      elf->e_shoff = e_shoff;
 +      elf->e_shentsize = sizeof(*shdr4extnum);
 +      elf->e_shnum = 1;
 +      elf->e_shstrndx = SHN_UNDEF;
 +
 +      memset(shdr4extnum, 0, sizeof(*shdr4extnum));
 +
 +      shdr4extnum->sh_type = SHT_NULL;
 +      shdr4extnum->sh_size = elf->e_shnum;
 +      shdr4extnum->sh_link = elf->e_shstrndx;
 +      shdr4extnum->sh_info = segs;
 +}
 +
  /*
   * dump the segments for an MMU process
   */
@@@ -1549,7 -1552,7 +1549,7 @@@ static int elf_fdpic_dump_segments(stru
                                        err = -EIO;
                                kunmap(page);
                                page_cache_release(page);
 -                      } else if (!dump_seek(file, file->f_pos + PAGE_SIZE))
 +                      } else if (!dump_seek(file, PAGE_SIZE))
                                err = -EFBIG;
                        if (err)
                                goto out;
@@@ -1585,17 -1588,6 +1585,17 @@@ static int elf_fdpic_dump_segments(stru
  }
  #endif
  
 +static size_t elf_core_vma_data_size(unsigned long mm_flags)
 +{
 +      struct vm_area_struct *vma;
 +      size_t size = 0;
 +
 +      for (vma = current->mm->mmap; vma; vma = vma->vm_next)
 +              if (maydump(vma, mm_flags))
 +                      size += vma->vm_end - vma->vm_start;
 +      return size;
 +}
 +
  /*
   * Actual dumper
   *
@@@ -1613,7 -1605,7 +1613,7 @@@ static int elf_fdpic_core_dump(struct c
        int i;
        struct vm_area_struct *vma;
        struct elfhdr *elf = NULL;
 -      loff_t offset = 0, dataoff;
 +      loff_t offset = 0, dataoff, foffset;
        int numnote;
        struct memelfnote *notes = NULL;
        struct elf_prstatus *prstatus = NULL;   /* NT_PRSTATUS */
  #endif
        int thread_status_size = 0;
        elf_addr_t *auxv;
 -      unsigned long mm_flags;
 +      struct elf_phdr *phdr4note = NULL;
 +      struct elf_shdr *shdr4extnum = NULL;
 +      Elf_Half e_phnum;
 +      elf_addr_t e_shoff;
  
        /*
         * We no longer stop all VM operations.
        elf_core_copy_regs(&prstatus->pr_reg, cprm->regs);
  
        segs = current->mm->map_count;
 -#ifdef ELF_CORE_EXTRA_PHDRS
 -      segs += ELF_CORE_EXTRA_PHDRS;
 -#endif
 +      segs += elf_core_extra_phdrs();
 +
 +      /* for notes section */
 +      segs++;
 +
 +      /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
 +       * this, the kernel supports extended numbering. Have a look at
 +       * include/linux/elf.h for further information. */
 +      e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
  
        /* Set up header */
 -      fill_elf_fdpic_header(elf, segs + 1);   /* including notes section */
 +      fill_elf_fdpic_header(elf, e_phnum);
  
        has_dumped = 1;
        current->flags |= PF_DUMPCORE;
        fs = get_fs();
        set_fs(KERNEL_DS);
  
 -      DUMP_WRITE(elf, sizeof(*elf));
        offset += sizeof(*elf);                         /* Elf header */
 -      offset += (segs+1) * sizeof(struct elf_phdr);   /* Program headers */
 +      offset += segs * sizeof(struct elf_phdr);       /* Program headers */
 +      foffset = offset;
  
        /* Write notes phdr entry */
        {
 -              struct elf_phdr phdr;
                int sz = 0;
  
                for (i = 0; i < numnote; i++)
  
                sz += thread_status_size;
  
 -              fill_elf_note_phdr(&phdr, sz, offset);
 +              phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
 +              if (!phdr4note)
 +                      goto end_coredump;
 +
 +              fill_elf_note_phdr(phdr4note, sz, offset);
                offset += sz;
 -              DUMP_WRITE(&phdr, sizeof(phdr));
        }
  
        /* Page-align dumped data */
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
  
 -      /*
 -       * We must use the same mm->flags while dumping core to avoid
 -       * inconsistency between the program headers and bodies, otherwise an
 -       * unusable core file can be generated.
 -       */
 -      mm_flags = current->mm->flags;
 +      offset += elf_core_vma_data_size(cprm->mm_flags);
 +      offset += elf_core_extra_data_size();
 +      e_shoff = offset;
 +
 +      if (e_phnum == PN_XNUM) {
 +              shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
 +              if (!shdr4extnum)
 +                      goto end_coredump;
 +              fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
 +      }
 +
 +      offset = dataoff;
 +
 +      size += sizeof(*elf);
 +      if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
 +              goto end_coredump;
 +
 +      size += sizeof(*phdr4note);
 +      if (size > cprm->limit
 +          || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
 +              goto end_coredump;
  
        /* write program headers for segments dump */
        for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
                phdr.p_offset = offset;
                phdr.p_vaddr = vma->vm_start;
                phdr.p_paddr = 0;
 -              phdr.p_filesz = maydump(vma, mm_flags) ? sz : 0;
 +              phdr.p_filesz = maydump(vma, cprm->mm_flags) ? sz : 0;
                phdr.p_memsz = sz;
                offset += phdr.p_filesz;
                phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
                        phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;
  
 -              DUMP_WRITE(&phdr, sizeof(phdr));
 +              size += sizeof(phdr);
 +              if (size > cprm->limit
 +                  || !dump_write(cprm->file, &phdr, sizeof(phdr)))
 +                      goto end_coredump;
        }
  
 -#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
 -      ELF_CORE_WRITE_EXTRA_PHDRS;
 -#endif
 +      if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
 +              goto end_coredump;
  
        /* write out the notes section */
        for (i = 0; i < numnote; i++)
 -              if (!writenote(notes + i, cprm->file))
 +              if (!writenote(notes + i, cprm->file, &foffset))
                        goto end_coredump;
  
        /* write out the thread status notes section */
                                list_entry(t, struct elf_thread_status, list);
  
                for (i = 0; i < tmp->num_notes; i++)
 -                      if (!writenote(&tmp->notes[i], cprm->file))
 +                      if (!writenote(&tmp->notes[i], cprm->file, &foffset))
                                goto end_coredump;
        }
  
 -      if (!dump_seek(cprm->file, dataoff))
 +      if (!dump_seek(cprm->file, dataoff - foffset))
                goto end_coredump;
  
        if (elf_fdpic_dump_segments(cprm->file, &size, &cprm->limit,
 -                                  mm_flags) < 0)
 +                                  cprm->mm_flags) < 0)
                goto end_coredump;
  
 -#ifdef ELF_CORE_WRITE_EXTRA_DATA
 -      ELF_CORE_WRITE_EXTRA_DATA;
 -#endif
 +      if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
 +              goto end_coredump;
 +
 +      if (e_phnum == PN_XNUM) {
 +              size += sizeof(*shdr4extnum);
 +              if (size > cprm->limit
 +                  || !dump_write(cprm->file, shdr4extnum,
 +                                 sizeof(*shdr4extnum)))
 +                      goto end_coredump;
 +      }
  
        if (cprm->file->f_pos != offset) {
                /* Sanity check */
@@@ -1869,7 -1826,7 +1869,7 @@@ cleanup
                list_del(tmp);
                kfree(list_entry(tmp, struct elf_thread_status, list));
        }
 -
 +      kfree(phdr4note);
        kfree(elf);
        kfree(prstatus);
        kfree(psinfo);
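
The PN_XNUM handling added above implements ELF extended numbering: when a
core has more than 0xffff segments, e_phnum is pinned to PN_XNUM and section
header 0 carries the real count in sh_info. A small in-memory demonstration
using the PN_XNUM/Elf64 definitions from glibc's <elf.h> (no file is written):

#include <elf.h>        /* PN_XNUM, Elf64_Ehdr, Elf64_Shdr (glibc) */
#include <stdio.h>
#include <string.h>

int main(void)
{
        int segs = 70000;       /* pretend core dump with > 0xffff VMAs */
        Elf64_Ehdr ehdr;
        Elf64_Shdr shdr0;
        int real_segs;

        memset(&ehdr, 0, sizeof(ehdr));
        memset(&shdr0, 0, sizeof(shdr0));

        /* Writer side, as in fill_extnum_info() above. */
        ehdr.e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
        if (ehdr.e_phnum == PN_XNUM) {
                ehdr.e_shnum = 1;
                ehdr.e_shstrndx = SHN_UNDEF;
                shdr0.sh_type = SHT_NULL;
                shdr0.sh_info = segs;   /* real program header count */
        }

        /* Reader side reverses the test. */
        real_segs = ehdr.e_phnum == PN_XNUM ? (int)shdr0.sh_info
                                            : ehdr.e_phnum;
        printf("e_phnum=0x%x, recovered segment count=%d\n",
               (unsigned)ehdr.e_phnum, real_segs);
        return 0;
}
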
diff --combined fs/bio.c
index dc17afd672e354810d4b7b2d54e21d466bf8cca8,17376d86f412a5096266a8051efa7616d10d0a14..e1f922184b4506466e20978902fb114905549bcd
+++ b/fs/bio.c
@@@ -264,13 -264,12 +264,12 @@@ EXPORT_SYMBOL(bio_init)
   * bio_alloc_bioset - allocate a bio for I/O
   * @gfp_mask:   the GFP_ mask given to the slab allocator
   * @nr_iovecs:        number of iovecs to pre-allocate
-  * @bs:               the bio_set to allocate from. If %NULL, just use kmalloc
+  * @bs:               the bio_set to allocate from.
   *
   * Description:
-  *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
+  *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
   *   If %__GFP_WAIT is set then we will block on the internal pool waiting
-  *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
-  *   fall back to just using @kmalloc to allocate the required memory.
+  *   for a &struct bio to become free.
   *
   *   Note that the caller must set ->bi_destructor on successful return
   *   of a bio, to do the appropriate freeing of the bio once the reference
@@@ -507,8 -506,10 +506,8 @@@ int bio_get_nr_vecs(struct block_devic
        int nr_pages;
  
        nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 -      if (nr_pages > queue_max_phys_segments(q))
 -              nr_pages = queue_max_phys_segments(q);
 -      if (nr_pages > queue_max_hw_segments(q))
 -              nr_pages = queue_max_hw_segments(q);
 +      if (nr_pages > queue_max_segments(q))
 +              nr_pages = queue_max_segments(q);
  
        return nr_pages;
  }
@@@ -573,7 -574,8 +572,7 @@@ static int __bio_add_page(struct reques
         * make this too complex.
         */
  
 -      while (bio->bi_phys_segments >= queue_max_phys_segments(q)
 -             || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
 +      while (bio->bi_phys_segments >= queue_max_segments(q)) {
  
                if (retried_segments)
                        return 0;
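
With max_phys_segments and max_hw_segments folded into one
queue_max_segments() limit, bio_get_nr_vecs() reduces to a single clamp. A
standalone sketch of the arithmetic, with PAGE_SHIFT fixed at 12 for
illustration:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

/* One clamp instead of two, as in the reworked bio_get_nr_vecs(). */
static unsigned int bio_nr_vecs(unsigned int max_sectors,
                                unsigned int max_segments)
{
        unsigned int nr_pages =
                ((max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;

        return nr_pages < max_segments ? nr_pages : max_segments;
}

int main(void)
{
        /* 2048 sectors -> 256 pages, clamped to the 128-segment limit */
        printf("nr_vecs = %u\n", bio_nr_vecs(2048, 128));
        return 0;
}
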
diff --combined fs/cifs/cifssmb.c
index 9d17df3e076860924d072605e13f4a97cbb438cf,0e22440d2f0f78ce6068b97aa08733052e2b87d6..6118358998440791183ce19bd6ead2e7cf0c1d1b
@@@ -170,19 -170,19 +170,19 @@@ cifs_reconnect_tcon(struct cifsTconInf
         * need to prevent multiple threads trying to simultaneously
         * reconnect the same SMB session
         */
 -      down(&ses->sesSem);
 +      mutex_lock(&ses->session_mutex);
        if (ses->need_reconnect)
                rc = cifs_setup_session(0, ses, nls_codepage);
  
        /* do we need to reconnect tcon? */
        if (rc || !tcon->need_reconnect) {
 -              up(&ses->sesSem);
 +              mutex_unlock(&ses->session_mutex);
                goto out;
        }
  
        mark_open_files_invalid(tcon);
        rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
 -      up(&ses->sesSem);
 +      mutex_unlock(&ses->session_mutex);
        cFYI(1, ("reconnect tcon rc = %d", rc));
  
        if (rc)
@@@ -700,13 -700,13 +700,13 @@@ CIFSSMBLogoff(const int xid, struct cif
        if (!ses || !ses->server)
                return -EIO;
  
 -      down(&ses->sesSem);
 +      mutex_lock(&ses->session_mutex);
        if (ses->need_reconnect)
                goto session_already_dead; /* no need to send SMBlogoff if uid
                                              already closed due to reconnect */
        rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
        if (rc) {
 -              up(&ses->sesSem);
 +              mutex_unlock(&ses->session_mutex);
                return rc;
        }
  
        pSMB->AndXCommand = 0xFF;
        rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
  session_already_dead:
 -      up(&ses->sesSem);
 +      mutex_unlock(&ses->session_mutex);
  
        /* if session dead then we do not need to do ulogoff,
                since server closed smb session, no sense reporting
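
The down(&ses->sesSem)/up() pairs above become mutex_lock()/mutex_unlock() on
a dedicated session_mutex; the reconnect path wants plain mutual exclusion,
which a mutex expresses directly. A userspace analogue with pthreads, with
state and names simplified:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t session_mutex = PTHREAD_MUTEX_INITIALIZER;
static int need_reconnect = 1;

/* Mirrors the shape of cifs_reconnect_tcon() above: take the session
 * mutex, reconnect only if still needed, drop the mutex. */
static void reconnect_session(void)
{
        pthread_mutex_lock(&session_mutex);
        if (need_reconnect) {
                need_reconnect = 0;     /* only one caller reconnects */
                printf("session re-established\n");
        }
        pthread_mutex_unlock(&session_mutex);
}

int main(void)
{
        reconnect_session();
        reconnect_session();    /* second caller finds it already done */
        return 0;
}
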
@@@ -3886,7 -3886,7 +3886,7 @@@ parse_DFS_referrals(TRANSACTION2_GET_DF
                goto parse_DFS_referrals_exit;
        }
  
-       /* collect neccessary data from referrals */
+       /* collect necessary data from referrals */
        for (i = 0; i < *num_of_nodes; i++) {
                char *temp;
                int max_len;
@@@ -5269,34 -5269,22 +5269,34 @@@ int CIFSSMBNotify(const int xid, struc
        cifs_buf_release(pSMB);
        return rc;
  }
 +
  #ifdef CONFIG_CIFS_XATTR
 +/*
 + * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common
 + * function used by listxattr and getxattr type calls. When ea_name is set,
 + * it looks for that attribute name and stuffs that value into the EAData
 + * buffer. When ea_name is NULL, it stuffs a list of attribute names into the
 + * buffer. In both cases, the return value is either the length of the
 + * resulting data or a negative error code. If EAData is a NULL pointer then
 + * the data isn't copied to it, but the length is returned.
 + */
  ssize_t
  CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
 -               const unsigned char *searchName,
 -               char *EAData, size_t buf_size,
 -               const struct nls_table *nls_codepage, int remap)
 +              const unsigned char *searchName, const unsigned char *ea_name,
 +              char *EAData, size_t buf_size,
 +              const struct nls_table *nls_codepage, int remap)
  {
                /* BB assumes one setup word */
        TRANSACTION2_QPI_REQ *pSMB = NULL;
        TRANSACTION2_QPI_RSP *pSMBr = NULL;
        int rc = 0;
        int bytes_returned;
 -      int name_len;
 +      int list_len;
 +      struct fealist *ea_response_data;
        struct fea *temp_fea;
        char *temp_ptr;
 -      __u16 params, byte_count;
 +      char *end_of_smb;
 +      __u16 params, byte_count, data_offset;
  
        cFYI(1, ("In Query All EAs path %s", searchName));
  QAllEAsRetry:
                return rc;
  
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 -              name_len =
 +              list_len =
                    cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
                                     PATH_MAX, nls_codepage, remap);
 -              name_len++;     /* trailing null */
 -              name_len *= 2;
 +              list_len++;     /* trailing null */
 +              list_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
 -              name_len = strnlen(searchName, PATH_MAX);
 -              name_len++;     /* trailing null */
 -              strncpy(pSMB->FileName, searchName, name_len);
 +              list_len = strnlen(searchName, PATH_MAX);
 +              list_len++;     /* trailing null */
 +              strncpy(pSMB->FileName, searchName, list_len);
        }
  
 -      params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
 +      params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
        pSMB->TotalDataCount = 0;
        pSMB->MaxParameterCount = cpu_to_le16(2);
        /* BB find exact max SMB PDU from sess structure BB */
 -      pSMB->MaxDataCount = cpu_to_le16(4000);
 +      pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
        pSMB->MaxSetupCount = 0;
        pSMB->Reserved = 0;
        pSMB->Flags = 0;
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc) {
                cFYI(1, ("Send error in QueryAllEAs = %d", rc));
 -      } else {                /* decode response */
 -              rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 +              goto QAllEAsOut;
 +      }
  
 -              /* BB also check enough total bytes returned */
 -              /* BB we need to improve the validity checking
 -              of these trans2 responses */
 -              if (rc || (pSMBr->ByteCount < 4))
 -                      rc = -EIO;      /* bad smb */
 -         /* else if (pFindData){
 -                      memcpy((char *) pFindData,
 -                             (char *) &pSMBr->hdr.Protocol +
 -                             data_offset, kl);
 -              }*/ else {
 -                      /* check that length of list is not more than bcc */
 -                      /* check that each entry does not go beyond length
 -                         of list */
 -                      /* check that each element of each entry does not
 -                         go beyond end of list */
 -                      __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
 -                      struct fealist *ea_response_data;
 -                      rc = 0;
 -                      /* validate_trans2_offsets() */
 -                      /* BB check if start of smb + data_offset > &bcc+ bcc */
 -                      ea_response_data = (struct fealist *)
 -                              (((char *) &pSMBr->hdr.Protocol) +
 -                              data_offset);
 -                      name_len = le32_to_cpu(ea_response_data->list_len);
 -                      cFYI(1, ("ea length %d", name_len));
 -                      if (name_len <= 8) {
 -                      /* returned EA size zeroed at top of function */
 -                              cFYI(1, ("empty EA list returned from server"));
 -                      } else {
 -                              /* account for ea list len */
 -                              name_len -= 4;
 -                              temp_fea = ea_response_data->list;
 -                              temp_ptr = (char *)temp_fea;
 -                              while (name_len > 0) {
 -                                      __u16 value_len;
 -                                      name_len -= 4;
 -                                      temp_ptr += 4;
 -                                      rc += temp_fea->name_len;
 -                              /* account for prefix user. and trailing null */
 -                                      rc = rc + 5 + 1;
 -                                      if (rc < (int)buf_size) {
 -                                              memcpy(EAData, "user.", 5);
 -                                              EAData += 5;
 -                                              memcpy(EAData, temp_ptr,
 -                                                     temp_fea->name_len);
 -                                              EAData += temp_fea->name_len;
 -                                              /* null terminate name */
 -                                              *EAData = 0;
 -                                              EAData = EAData + 1;
 -                                      } else if (buf_size == 0) {
 -                                              /* skip copy - calc size only */
 -                                      } else {
 -                                              /* stop before overrun buffer */
 -                                              rc = -ERANGE;
 -                                              break;
 -                                      }
 -                                      name_len -= temp_fea->name_len;
 -                                      temp_ptr += temp_fea->name_len;
 -                                      /* account for trailing null */
 -                                      name_len--;
 -                                      temp_ptr++;
 -                                      value_len =
 -                                            le16_to_cpu(temp_fea->value_len);
 -                                      name_len -= value_len;
 -                                      temp_ptr += value_len;
 -                                      /* BB check that temp_ptr is still
 -                                            within the SMB BB*/
 -
 -                                      /* no trailing null to account for
 -                                         in value len */
 -                                      /* go on to next EA */
 -                                      temp_fea = (struct fea *)temp_ptr;
 -                              }
 -                      }
 -              }
 +
 +      /* BB also check enough total bytes returned */
 +      /* BB we need to improve the validity checking
 +      of these trans2 responses */
 +
 +      rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 +      if (rc || (pSMBr->ByteCount < 4)) {
 +              rc = -EIO;      /* bad smb */
 +              goto QAllEAsOut;
        }
 -      cifs_buf_release(pSMB);
 -      if (rc == -EAGAIN)
 -              goto QAllEAsRetry;
  
 -      return (ssize_t)rc;
 -}
 +      /* check that length of list is not more than bcc */
 +      /* check that each entry does not go beyond length
 +         of list */
 +      /* check that each element of each entry does not
 +         go beyond end of list */
 +      /* validate_trans2_offsets() */
 +      /* BB check if start of smb + data_offset > &bcc+ bcc */
  
 -ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
 -              const unsigned char *searchName, const unsigned char *ea_name,
 -              unsigned char *ea_value, size_t buf_size,
 -              const struct nls_table *nls_codepage, int remap)
 -{
 -      TRANSACTION2_QPI_REQ *pSMB = NULL;
 -      TRANSACTION2_QPI_RSP *pSMBr = NULL;
 -      int rc = 0;
 -      int bytes_returned;
 -      int name_len;
 -      struct fea *temp_fea;
 -      char *temp_ptr;
 -      __u16 params, byte_count;
 +      data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
 +      ea_response_data = (struct fealist *)
 +                              (((char *) &pSMBr->hdr.Protocol) + data_offset);
  
 -      cFYI(1, ("In Query EA path %s", searchName));
 -QEARetry:
 -      rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
 -                    (void **) &pSMBr);
 -      if (rc)
 -              return rc;
 +      list_len = le32_to_cpu(ea_response_data->list_len);
 +      cFYI(1, ("ea length %d", list_len));
 +      if (list_len <= 8) {
 +              cFYI(1, ("empty EA list returned from server"));
 +              goto QAllEAsOut;
 +      }
  
 -      if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
 -              name_len =
 -                  cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
 -                                   PATH_MAX, nls_codepage, remap);
 -              name_len++;     /* trailing null */
 -              name_len *= 2;
 -      } else {        /* BB improve the check for buffer overruns BB */
 -              name_len = strnlen(searchName, PATH_MAX);
 -              name_len++;     /* trailing null */
 -              strncpy(pSMB->FileName, searchName, name_len);
 +      /* make sure list_len doesn't go past end of SMB */
 +      end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
 +      if ((char *)ea_response_data + list_len > end_of_smb) {
 +              cFYI(1, ("EA list appears to go beyond SMB"));
 +              rc = -EIO;
 +              goto QAllEAsOut;
        }
  
 -      params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
 -      pSMB->TotalDataCount = 0;
 -      pSMB->MaxParameterCount = cpu_to_le16(2);
 -      /* BB find exact max SMB PDU from sess structure BB */
 -      pSMB->MaxDataCount = cpu_to_le16(4000);
 -      pSMB->MaxSetupCount = 0;
 -      pSMB->Reserved = 0;
 -      pSMB->Flags = 0;
 -      pSMB->Timeout = 0;
 -      pSMB->Reserved2 = 0;
 -      pSMB->ParameterOffset = cpu_to_le16(offsetof(
 -              struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
 -      pSMB->DataCount = 0;
 -      pSMB->DataOffset = 0;
 -      pSMB->SetupCount = 1;
 -      pSMB->Reserved3 = 0;
 -      pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
 -      byte_count = params + 1 /* pad */ ;
 -      pSMB->TotalParameterCount = cpu_to_le16(params);
 -      pSMB->ParameterCount = pSMB->TotalParameterCount;
 -      pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
 -      pSMB->Reserved4 = 0;
 -      pSMB->hdr.smb_buf_length += byte_count;
 -      pSMB->ByteCount = cpu_to_le16(byte_count);
 +      /* account for ea list len */
 +      list_len -= 4;
 +      temp_fea = ea_response_data->list;
 +      temp_ptr = (char *)temp_fea;
 +      while (list_len > 0) {
 +              unsigned int name_len;
 +              __u16 value_len;
 +
 +              list_len -= 4;
 +              temp_ptr += 4;
 +              /* make sure we can read name_len and value_len */
 +              if (list_len < 0) {
 +                      cFYI(1, ("EA entry goes beyond length of list"));
 +                      rc = -EIO;
 +                      goto QAllEAsOut;
 +              }
  
 -      rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
 -                       (struct smb_hdr *) pSMBr, &bytes_returned, 0);
 -      if (rc) {
 -              cFYI(1, ("Send error in Query EA = %d", rc));
 -      } else {                /* decode response */
 -              rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 +              name_len = temp_fea->name_len;
 +              value_len = le16_to_cpu(temp_fea->value_len);
 +              list_len -= name_len + 1 + value_len;
 +              if (list_len < 0) {
 +                      cFYI(1, ("EA entry goes beyond length of list"));
 +                      rc = -EIO;
 +                      goto QAllEAsOut;
 +              }
  
 -              /* BB also check enough total bytes returned */
 -              /* BB we need to improve the validity checking
 -              of these trans2 responses */
 -              if (rc || (pSMBr->ByteCount < 4))
 -                      rc = -EIO;      /* bad smb */
 -         /* else if (pFindData){
 -                      memcpy((char *) pFindData,
 -                             (char *) &pSMBr->hdr.Protocol +
 -                             data_offset, kl);
 -              }*/ else {
 -                      /* check that length of list is not more than bcc */
 -                      /* check that each entry does not go beyond length
 -                         of list */
 -                      /* check that each element of each entry does not
 -                         go beyond end of list */
 -                      __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
 -                      struct fealist *ea_response_data;
 -                      rc = -ENODATA;
 -                      /* validate_trans2_offsets() */
 -                      /* BB check if start of smb + data_offset > &bcc+ bcc*/
 -                      ea_response_data = (struct fealist *)
 -                              (((char *) &pSMBr->hdr.Protocol) +
 -                              data_offset);
 -                      name_len = le32_to_cpu(ea_response_data->list_len);
 -                      cFYI(1, ("ea length %d", name_len));
 -                      if (name_len <= 8) {
 -                      /* returned EA size zeroed at top of function */
 -                              cFYI(1, ("empty EA list returned from server"));
 -                      } else {
 -                              /* account for ea list len */
 -                              name_len -= 4;
 -                              temp_fea = ea_response_data->list;
 -                              temp_ptr = (char *)temp_fea;
 -                              /* loop through checking if we have a matching
 -                              name and then return the associated value */
 -                              while (name_len > 0) {
 -                                      __u16 value_len;
 -                                      name_len -= 4;
 -                                      temp_ptr += 4;
 -                                      value_len =
 -                                            le16_to_cpu(temp_fea->value_len);
 -                              /* BB validate that value_len falls within SMB,
 -                              even though maximum for name_len is 255 */
 -                                      if (memcmp(temp_fea->name, ea_name,
 -                                                temp_fea->name_len) == 0) {
 -                                              /* found a match */
 -                                              rc = value_len;
 -                              /* account for prefix user. and trailing null */
 -                                              if (rc <= (int)buf_size) {
 -                                                      memcpy(ea_value,
 -                                                              temp_fea->name+temp_fea->name_len+1,
 -                                                              rc);
 -                                                      /* ea values, unlike ea
 -                                                         names, are not null
 -                                                         terminated */
 -                                              } else if (buf_size == 0) {
 -                                              /* skip copy - calc size only */
 -                                              } else {
 -                                              /* stop before overrun buffer */
 -                                                      rc = -ERANGE;
 -                                              }
 -                                              break;
 -                                      }
 -                                      name_len -= temp_fea->name_len;
 -                                      temp_ptr += temp_fea->name_len;
 -                                      /* account for trailing null */
 -                                      name_len--;
 -                                      temp_ptr++;
 -                                      name_len -= value_len;
 -                                      temp_ptr += value_len;
 -                                      /* No trailing null to account for in
 -                                         value_len.  Go on to next EA */
 -                                      temp_fea = (struct fea *)temp_ptr;
 +              if (ea_name) {
 +                      if (strncmp(ea_name, temp_ptr, name_len) == 0) {
 +                              temp_ptr += name_len + 1;
 +                              rc = value_len;
 +                              if (buf_size == 0)
 +                                      goto QAllEAsOut;
 +                              if ((size_t)value_len > buf_size) {
 +                                      rc = -ERANGE;
 +                                      goto QAllEAsOut;
                                }
 +                              memcpy(EAData, temp_ptr, value_len);
 +                              goto QAllEAsOut;
 +                      }
 +              } else {
 +                      /* account for prefix user. and trailing null */
 +                      rc += (5 + 1 + name_len);
 +                      if (rc < (int) buf_size) {
 +                              memcpy(EAData, "user.", 5);
 +                              EAData += 5;
 +                              memcpy(EAData, temp_ptr, name_len);
 +                              EAData += name_len;
 +                              /* null terminate name */
 +                              *EAData = 0;
 +                              ++EAData;
 +                      } else if (buf_size == 0) {
 +                              /* skip copy - calc size only */
 +                      } else {
 +                              /* stop before overrun buffer */
 +                              rc = -ERANGE;
 +                              break;
                        }
                }
 +              temp_ptr += name_len + 1 + value_len;
 +              temp_fea = (struct fea *)temp_ptr;
        }
 +
 +      /* didn't find the named attribute */
 +      if (ea_name)
 +              rc = -ENODATA;
 +
 +QAllEAsOut:
        cifs_buf_release(pSMB);
        if (rc == -EAGAIN)
 -              goto QEARetry;
 +              goto QAllEAsRetry;
  
        return (ssize_t)rc;
  }
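
A note on the EA-list walk above: the rewritten CIFSSMBQAllEAs validates the advertised list length against the end of the SMB, then checks every entry's header and payload lengths against the bytes remaining in the list before dereferencing them. The same pattern, reduced to a freestanding sketch, is shown below; the 4-byte entry header (flags, name_len, value_len) mirrors the layout walked above, but the names are stand-ins for the kernel's struct fea and the le16_to_cpu() conversion is elided, so a little-endian host is assumed.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk a length-prefixed EA list; return 0, or -1 if an entry overruns. */
static int walk_ea_list(const unsigned char *p, size_t remaining)
{
        while (remaining > 0) {
                uint8_t name_len;
                uint16_t value_len;
                size_t entry;

                /* make sure we can read name_len and value_len */
                if (remaining < 4)      /* flags + name_len + value_len */
                        return -1;
                name_len = p[1];
                memcpy(&value_len, p + 2, sizeof(value_len));
                /* name bytes, the NUL after the name, then value bytes */
                entry = 4 + name_len + 1 + (size_t)value_len;
                if (entry > remaining)  /* entry goes beyond end of list */
                        return -1;
                printf("EA name: %.*s\n", name_len, (const char *)(p + 4));
                p += entry;
                remaining -= entry;
        }
        return 0;
}

int main(void)
{
        /* one entry: flags 0, name "foo" (len 3), value "bar" (len 3) */
        unsigned char buf[] = { 0, 3, 3, 0, 'f', 'o', 'o', 0, 'b', 'a', 'r' };

        return walk_ea_list(buf, sizeof(buf)) ? 1 : 0;
}
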
diff --combined fs/ext3/super.c
index e844accbf55d85aab676feda3a8af7a945f7dfa9,7761c680f9a1234c62049937c486a393910600b6..1bee604cc6cd4996c3dd36480c054bd07ee4dfbe
@@@ -164,7 -164,7 +164,7 @@@ void ext3_msg(struct super_block *sb, c
   * write out the superblock safely.
   *
   * We'll just use the journal_abort() error code to record an error in
-  * the journal instead.  On recovery, the journal will compain about
+  * the journal instead.  On recovery, the journal will complain about
   * that error until we've noted it down and cleared it.
   */
  
@@@ -181,7 -181,7 +181,7 @@@ static void ext3_handle_error(struct su
        if (!test_opt (sb, ERRORS_CONT)) {
                journal_t *journal = EXT3_SB(sb)->s_journal;
  
 -              EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
 +              set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
                if (journal)
                        journal_abort(journal, -EIO);
        }
@@@ -296,7 -296,7 +296,7 @@@ void ext3_abort (struct super_block * s
                "error: remounting filesystem read-only");
        EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
        sb->s_flags |= MS_RDONLY;
 -      EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
 +      set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
        if (EXT3_SB(sb)->s_journal)
                journal_abort(EXT3_SB(sb)->s_journal, -EIO);
  }
@@@ -528,8 -528,6 +528,8 @@@ static void destroy_inodecache(void
  static void ext3_clear_inode(struct inode *inode)
  {
        struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info;
 +
 +      dquot_drop(inode);
        ext3_discard_reservation(inode);
        EXT3_I(inode)->i_block_alloc_info = NULL;
        if (unlikely(rsv))
@@@ -564,10 -562,10 +564,10 @@@ static inline void ext3_show_quota_opti
        if (sbi->s_qf_names[GRPQUOTA])
                seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
  
 -      if (sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA)
 +      if (test_opt(sb, USRQUOTA))
                seq_puts(seq, ",usrquota");
  
 -      if (sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)
 +      if (test_opt(sb, GRPQUOTA))
                seq_puts(seq, ",grpquota");
  #endif
  }
@@@ -658,7 -656,8 +658,7 @@@ static int ext3_show_options(struct seq
        if (test_opt(sb, NOBH))
                seq_puts(seq, ",nobh");
  
 -      seq_printf(seq, ",data=%s", data_mode_string(sbi->s_mount_opt &
 -                                                   EXT3_MOUNT_DATA_FLAGS));
 +      seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
        if (test_opt(sb, DATA_ERR_ABORT))
                seq_puts(seq, ",data_err=abort");
  
@@@ -752,6 -751,13 +752,6 @@@ static ssize_t ext3_quota_write(struct 
                                const char *data, size_t len, loff_t off);
  
  static const struct dquot_operations ext3_quota_operations = {
 -      .initialize     = dquot_initialize,
 -      .drop           = dquot_drop,
 -      .alloc_space    = dquot_alloc_space,
 -      .alloc_inode    = dquot_alloc_inode,
 -      .free_space     = dquot_free_space,
 -      .free_inode     = dquot_free_inode,
 -      .transfer       = dquot_transfer,
        .write_dquot    = ext3_write_dquot,
        .acquire_dquot  = ext3_acquire_dquot,
        .release_dquot  = ext3_release_dquot,
@@@ -890,63 -896,6 +890,63 @@@ static ext3_fsblk_t get_sb_block(void *
        return sb_block;
  }
  
 +#ifdef CONFIG_QUOTA
 +static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
 +{
 +      struct ext3_sb_info *sbi = EXT3_SB(sb);
 +      char *qname;
 +
 +      if (sb_any_quota_loaded(sb) &&
 +              !sbi->s_qf_names[qtype]) {
 +              ext3_msg(sb, KERN_ERR,
 +                      "Cannot change journaled "
 +                      "quota options when quota turned on");
 +              return 0;
 +      }
 +      qname = match_strdup(args);
 +      if (!qname) {
 +              ext3_msg(sb, KERN_ERR,
 +                      "Not enough memory for storing quotafile name");
 +              return 0;
 +      }
 +      if (sbi->s_qf_names[qtype] &&
 +              strcmp(sbi->s_qf_names[qtype], qname)) {
 +              ext3_msg(sb, KERN_ERR,
 +                      "%s quota file already specified", QTYPE2NAME(qtype));
 +              kfree(qname);
 +              return 0;
 +      }
 +      sbi->s_qf_names[qtype] = qname;
 +      if (strchr(sbi->s_qf_names[qtype], '/')) {
 +              ext3_msg(sb, KERN_ERR,
 +                      "quotafile must be on filesystem root");
 +              kfree(sbi->s_qf_names[qtype]);
 +              sbi->s_qf_names[qtype] = NULL;
 +              return 0;
 +      }
 +      set_opt(sbi->s_mount_opt, QUOTA);
 +      return 1;
 +}
 +
 +      static int clear_qf_name(struct super_block *sb, int qtype)
 +      {
 +      struct ext3_sb_info *sbi = EXT3_SB(sb);
 +
 +      if (sb_any_quota_loaded(sb) &&
 +              sbi->s_qf_names[qtype]) {
 +              ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options"
 +                      " when quota turned on");
 +              return 0;
 +      }
 +      /*
 +       * The space will be released later when all options are confirmed
 +       * to be correct
 +       */
 +      sbi->s_qf_names[qtype] = NULL;
 +      return 1;
 +}
 +#endif
 +
  static int parse_options (char *options, struct super_block *sb,
                          unsigned int *inum, unsigned long *journal_devnum,
                          ext3_fsblk_t *n_blocks_count, int is_remount)
        int data_opt = 0;
        int option;
  #ifdef CONFIG_QUOTA
 -      int qtype, qfmt;
 -      char *qname;
 +      int qfmt;
  #endif
  
        if (!options)
                        data_opt = EXT3_MOUNT_WRITEBACK_DATA;
                datacheck:
                        if (is_remount) {
 -                              if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS)
 -                                              == data_opt)
 +                              if (test_opt(sb, DATA_FLAGS) == data_opt)
                                        break;
                                ext3_msg(sb, KERN_ERR,
                                        "error: cannot change "
                                        "data mode on remount. The filesystem "
                                        "is mounted in data=%s mode and you "
                                        "try to remount it in data=%s mode.",
 -                                      data_mode_string(sbi->s_mount_opt &
 -                                                      EXT3_MOUNT_DATA_FLAGS),
 +                                      data_mode_string(test_opt(sb,
 +                                                      DATA_FLAGS)),
                                        data_mode_string(data_opt));
                                return 0;
                        } else {
 -                              sbi->s_mount_opt &= ~EXT3_MOUNT_DATA_FLAGS;
 +                              clear_opt(sbi->s_mount_opt, DATA_FLAGS);
                                sbi->s_mount_opt |= data_opt;
                        }
                        break;
                        break;
  #ifdef CONFIG_QUOTA
                case Opt_usrjquota:
 -                      qtype = USRQUOTA;
 -                      goto set_qf_name;
 -              case Opt_grpjquota:
 -                      qtype = GRPQUOTA;
 -set_qf_name:
 -                      if (sb_any_quota_loaded(sb) &&
 -                          !sbi->s_qf_names[qtype]) {
 -                              ext3_msg(sb, KERN_ERR,
 -                                      "error: cannot change journaled "
 -                                      "quota options when quota turned on.");
 -                              return 0;
 -                      }
 -                      qname = match_strdup(&args[0]);
 -                      if (!qname) {
 -                              ext3_msg(sb, KERN_ERR,
 -                                      "error: not enough memory for "
 -                                      "storing quotafile name.");
 +                      if (!set_qf_name(sb, USRQUOTA, &args[0]))
                                return 0;
 -                      }
 -                      if (sbi->s_qf_names[qtype] &&
 -                          strcmp(sbi->s_qf_names[qtype], qname)) {
 -                              ext3_msg(sb, KERN_ERR,
 -                                      "error: %s quota file already "
 -                                      "specified.", QTYPE2NAME(qtype));
 -                              kfree(qname);
 -                              return 0;
 -                      }
 -                      sbi->s_qf_names[qtype] = qname;
 -                      if (strchr(sbi->s_qf_names[qtype], '/')) {
 -                              ext3_msg(sb, KERN_ERR,
 -                                      "error: quotafile must be on "
 -                                      "filesystem root.");
 -                              kfree(sbi->s_qf_names[qtype]);
 -                              sbi->s_qf_names[qtype] = NULL;
 +                      break;
 +              case Opt_grpjquota:
 +                      if (!set_qf_name(sb, GRPQUOTA, &args[0]))
                                return 0;
 -                      }
 -                      set_opt(sbi->s_mount_opt, QUOTA);
                        break;
                case Opt_offusrjquota:
 -                      qtype = USRQUOTA;
 -                      goto clear_qf_name;
 +                      if (!clear_qf_name(sb, USRQUOTA))
 +                              return 0;
 +                      break;
                case Opt_offgrpjquota:
 -                      qtype = GRPQUOTA;
 -clear_qf_name:
 -                      if (sb_any_quota_loaded(sb) &&
 -                          sbi->s_qf_names[qtype]) {
 -                              ext3_msg(sb, KERN_ERR, "error: cannot change "
 -                                      "journaled quota options when "
 -                                      "quota turned on.");
 +                      if (!clear_qf_name(sb, GRPQUOTA))
                                return 0;
 -                      }
 -                      /*
 -                       * The space will be released later when all options
 -                       * are confirmed to be correct
 -                       */
 -                      sbi->s_qf_names[qtype] = NULL;
                        break;
                case Opt_jqfmt_vfsold:
                        qfmt = QFMT_VFS_OLD;
@@@ -1251,12 -1244,18 +1251,12 @@@ set_qf_format
        }
  #ifdef CONFIG_QUOTA
        if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
 -              if ((sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA) &&
 -                   sbi->s_qf_names[USRQUOTA])
 +              if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
                        clear_opt(sbi->s_mount_opt, USRQUOTA);
 -
 -              if ((sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA) &&
 -                   sbi->s_qf_names[GRPQUOTA])
 +              if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
                        clear_opt(sbi->s_mount_opt, GRPQUOTA);
  
 -              if ((sbi->s_qf_names[USRQUOTA] &&
 -                              (sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)) ||
 -                  (sbi->s_qf_names[GRPQUOTA] &&
 -                              (sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA))) {
 +              if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
                        ext3_msg(sb, KERN_ERR, "error: old and new quota "
                                        "format mixing.");
                        return 0;
@@@ -1479,7 -1478,7 +1479,7 @@@ static void ext3_orphan_cleanup (struc
                }
  
                list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
 -              vfs_dq_init(inode);
 +              dquot_initialize(inode);
                if (inode->i_nlink) {
                        printk(KERN_DEBUG
                                "%s: truncating inode %lu to %Ld bytes\n",
@@@ -1672,11 -1671,11 +1672,11 @@@ static int ext3_fill_super (struct supe
                set_opt(sbi->s_mount_opt, POSIX_ACL);
  #endif
        if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
 -              sbi->s_mount_opt |= EXT3_MOUNT_JOURNAL_DATA;
 +              set_opt(sbi->s_mount_opt, JOURNAL_DATA);
        else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
 -              sbi->s_mount_opt |= EXT3_MOUNT_ORDERED_DATA;
 +              set_opt(sbi->s_mount_opt, ORDERED_DATA);
        else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
 -              sbi->s_mount_opt |= EXT3_MOUNT_WRITEBACK_DATA;
 +              set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
  
        if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
                set_opt(sbi->s_mount_opt, ERRORS_PANIC);
                goto failed_mount;
  
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 -              ((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 +              (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
  
        if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
            (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
@@@ -2562,11 -2561,11 +2562,11 @@@ static int ext3_remount (struct super_b
                goto restore_opts;
        }
  
 -      if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
 +      if (test_opt(sb, ABORT))
                ext3_abort(sb, __func__, "Abort forced by user");
  
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 -              ((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 +              (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
  
        es = sbi->s_es;
  
  
        if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
                n_blocks_count > le32_to_cpu(es->s_blocks_count)) {
 -              if (sbi->s_mount_opt & EXT3_MOUNT_ABORT) {
 +              if (test_opt(sb, ABORT)) {
                        err = -EROFS;
                        goto restore_opts;
                }
@@@ -2735,7 -2734,7 +2735,7 @@@ static int ext3_statfs (struct dentry 
   * Process 1                         Process 2
   * ext3_create()                     quota_sync()
   *   journal_start()                   write_dquot()
 - *   vfs_dq_init()                       down(dqio_mutex)
 + *   dquot_initialize()                       down(dqio_mutex)
   *     down(dqio_mutex)                    journal_start()
   *
   */
@@@ -2943,7 -2942,9 +2943,7 @@@ static ssize_t ext3_quota_write(struct 
        sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
        int err = 0;
        int offset = off & (sb->s_blocksize - 1);
 -      int tocopy;
        int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL;
 -      size_t towrite = len;
        struct buffer_head *bh;
        handle_t *handle = journal_current_handle();
  
                        (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
 +
 +      /*
 +       * Since we account for only one data block in transaction credits,
 +       * it is impossible to cross a block boundary.
 +       */
 +      if (sb->s_blocksize - offset < len) {
 +              ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
 +                      " cancelled because not block aligned",
 +                      (unsigned long long)off, (unsigned long long)len);
 +              return -EIO;
 +      }
        mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
 -      while (towrite > 0) {
 -              tocopy = sb->s_blocksize - offset < towrite ?
 -                              sb->s_blocksize - offset : towrite;
 -              bh = ext3_bread(handle, inode, blk, 1, &err);
 -              if (!bh)
 +      bh = ext3_bread(handle, inode, blk, 1, &err);
 +      if (!bh)
 +              goto out;
 +      if (journal_quota) {
 +              err = ext3_journal_get_write_access(handle, bh);
 +              if (err) {
 +                      brelse(bh);
                        goto out;
 -              if (journal_quota) {
 -                      err = ext3_journal_get_write_access(handle, bh);
 -                      if (err) {
 -                              brelse(bh);
 -                              goto out;
 -                      }
 -              }
 -              lock_buffer(bh);
 -              memcpy(bh->b_data+offset, data, tocopy);
 -              flush_dcache_page(bh->b_page);
 -              unlock_buffer(bh);
 -              if (journal_quota)
 -                      err = ext3_journal_dirty_metadata(handle, bh);
 -              else {
 -                      /* Always do at least ordered writes for quotas */
 -                      err = ext3_journal_dirty_data(handle, bh);
 -                      mark_buffer_dirty(bh);
                }
 -              brelse(bh);
 -              if (err)
 -                      goto out;
 -              offset = 0;
 -              towrite -= tocopy;
 -              data += tocopy;
 -              blk++;
        }
 +      lock_buffer(bh);
 +      memcpy(bh->b_data+offset, data, len);
 +      flush_dcache_page(bh->b_page);
 +      unlock_buffer(bh);
 +      if (journal_quota)
 +              err = ext3_journal_dirty_metadata(handle, bh);
 +      else {
 +              /* Always do at least ordered writes for quotas */
 +              err = ext3_journal_dirty_data(handle, bh);
 +              mark_buffer_dirty(bh);
 +      }
 +      brelse(bh);
  out:
 -      if (len == towrite) {
 +      if (err) {
                mutex_unlock(&inode->i_mutex);
                return err;
        }
 -      if (inode->i_size < off+len-towrite) {
 -              i_size_write(inode, off+len-towrite);
 +      if (inode->i_size < off + len) {
 +              i_size_write(inode, off + len);
                EXT3_I(inode)->i_disksize = inode->i_size;
        }
        inode->i_version++;
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        ext3_mark_inode_dirty(handle, inode);
        mutex_unlock(&inode->i_mutex);
 -      return len - towrite;
 +      return len;
  }
  
  #endif
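
The rewritten ext3_quota_write() above works because the transaction only carries credits for a single data block, so a quota write must never cross a block boundary; rather than looping block by block, the function now refuses any write that would. The boundary test in isolation, as a small sketch (the function and parameter names are local to this sketch, and a power-of-two block size is assumed, as ext3 guarantees):

#include <stdio.h>

/* Nonzero if [off, off + len) stays within one filesystem block. */
static int quota_write_fits(unsigned int blocksize,
                            unsigned long long off, unsigned long long len)
{
        unsigned int offset = off & (blocksize - 1);    /* offset in block */

        return blocksize - offset >= len;
}

int main(void)
{
        printf("%d\n", quota_write_fits(4096, 4000, 200));      /* 0: crosses */
        printf("%d\n", quota_write_fits(4096, 4096, 48));       /* 1: fits */
        return 0;
}
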
diff --combined fs/ext4/mballoc.c
index 506713a2ebd8c297f1d855cb5581c750082af95f,b794dd8141a0c01c1cf81f171a204b585631fe62..54df209d2eed5a4d840e2b4b1ee21adff3322eb2
@@@ -69,7 -69,7 +69,7 @@@
   *
   * pa_lstart -> the logical start block for this prealloc space
   * pa_pstart -> the physical start block for this prealloc space
-  * pa_len    -> lenght for this prealloc space
+  * pa_len    -> length for this prealloc space
   * pa_free   ->  free space available in this prealloc space
   *
   * The inode preallocation space is used looking at the _logical_ start
@@@ -441,9 -441,10 +441,9 @@@ static void mb_free_blocks_double(struc
        for (i = 0; i < count; i++) {
                if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
                        ext4_fsblk_t blocknr;
 -                      blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
 +
 +                      blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                        blocknr += first + i;
 -                      blocknr +=
 -                          le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
                        ext4_grp_locked_error(sb, e4b->bd_group,
                                   __func__, "double-free of inode"
                                   " %lu's block %llu(bit %u in group %u)",
@@@ -1254,9 -1255,10 +1254,9 @@@ static void mb_free_blocks(struct inod
  
                if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
                        ext4_fsblk_t blocknr;
 -                      blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
 +
 +                      blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                        blocknr += block;
 -                      blocknr +=
 -                          le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
                        ext4_grp_locked_error(sb, e4b->bd_group,
                                   __func__, "double-free of inode"
                                   " %lu's block %llu(bit %u in group %u)",
@@@ -1629,6 -1631,7 +1629,6 @@@ int ext4_mb_find_by_goal(struct ext4_al
        int max;
        int err;
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 -      struct ext4_super_block *es = sbi->s_es;
        struct ext4_free_extent ex;
  
        if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
        if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
                ext4_fsblk_t start;
  
 -              start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
 -                      ex.fe_start + le32_to_cpu(es->s_first_data_block);
 +              start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
 +                      ex.fe_start;
                /* use do_div to get remainder (would be 64-bit modulo) */
                if (do_div(start, sbi->s_stripe) == 0) {
                        ac->ac_found++;
@@@ -1800,8 -1803,8 +1800,8 @@@ void ext4_mb_scan_aligned(struct ext4_a
        BUG_ON(sbi->s_stripe == 0);
  
        /* find first stripe-aligned block in group */
 -      first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
 -              + le32_to_cpu(sbi->s_es->s_first_data_block);
 +      first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
 +
        a = first_group_block + sbi->s_stripe - 1;
        do_div(a, sbi->s_stripe);
        i = (a * sbi->s_stripe) - first_group_block;
@@@ -2253,7 -2256,7 +2253,7 @@@ int ext4_mb_add_groupinfo(struct super_
  
        INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
        init_rwsem(&meta_group_info[i]->alloc_sem);
 -      meta_group_info[i]->bb_free_root.rb_node = NULL;
 +      meta_group_info[i]->bb_free_root = RB_ROOT;
  
  #ifdef DOUBLE_CHECK
        {
@@@ -2557,9 -2560,12 +2557,9 @@@ static void release_blocks_on_commit(jo
                ext4_unlock_group(sb, entry->group);
                if (test_opt(sb, DISCARD)) {
                        ext4_fsblk_t discard_block;
 -                      struct ext4_super_block *es = EXT4_SB(sb)->s_es;
  
 -                      discard_block = (ext4_fsblk_t)entry->group *
 -                                              EXT4_BLOCKS_PER_GROUP(sb)
 -                                      + entry->start_blk
 -                                      + le32_to_cpu(es->s_first_data_block);
 +                      discard_block = entry->start_blk +
 +                              ext4_group_first_block_no(sb, entry->group);
                        trace_ext4_discard_blocks(sb,
                                        (unsigned long long)discard_block,
                                        entry->count);
@@@ -2697,11 -2703,14 +2697,11 @@@ ext4_mb_mark_diskspace_used(struct ext4
        if (err)
                goto out_err;
  
 -      block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
 -              + ac->ac_b_ex.fe_start
 -              + le32_to_cpu(es->s_first_data_block);
 +      block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
  
        len = ac->ac_b_ex.fe_len;
        if (!ext4_data_block_valid(sbi, block, len)) {
 -              ext4_error(sb, __func__,
 -                         "Allocating blocks %llu-%llu which overlap "
 +              ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
                           "fs metadata\n", block, block+len);
                /* File system mounted not to panic on error
                 * Fix the bitmap and repeat the block allocation
@@@ -3152,7 -3161,9 +3152,7 @@@ ext4_mb_use_preallocated(struct ext4_al
                /* The max size of hash table is PREALLOC_TB_SIZE */
                order = PREALLOC_TB_SIZE - 1;
  
 -      goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
 -                   ac->ac_g_ex.fe_start +
 -                   le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
 +      goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
        /*
         * search for the prealloc space that is having
         * minimal distance from the goal block.
@@@ -3515,7 -3526,8 +3515,7 @@@ ext4_mb_release_inode_pa(struct ext4_bu
                if (bit >= end)
                        break;
                next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
 -              start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
 -                              le32_to_cpu(sbi->s_es->s_first_data_block);
 +              start = ext4_group_first_block_no(sb, group) + bit;
                mb_debug(1, "    free preallocated %u/%u in group %u\n",
                                (unsigned) start, (unsigned) next - bit,
                                (unsigned) group);
@@@ -3611,13 -3623,15 +3611,13 @@@ ext4_mb_discard_group_preallocations(st
  
        bitmap_bh = ext4_read_block_bitmap(sb, group);
        if (bitmap_bh == NULL) {
 -              ext4_error(sb, __func__, "Error in reading block "
 -                              "bitmap for %u", group);
 +              ext4_error(sb, "Error reading block bitmap for %u", group);
                return 0;
        }
  
        err = ext4_mb_load_buddy(sb, group, &e4b);
        if (err) {
 -              ext4_error(sb, __func__, "Error in loading buddy "
 -                              "information for %u", group);
 +              ext4_error(sb, "Error loading buddy information for %u", group);
                put_bh(bitmap_bh);
                return 0;
        }
@@@ -3790,15 -3804,15 +3790,15 @@@ repeat
  
                err = ext4_mb_load_buddy(sb, group, &e4b);
                if (err) {
 -                      ext4_error(sb, __func__, "Error in loading buddy "
 -                                      "information for %u", group);
 +                      ext4_error(sb, "Error loading buddy information for %u",
 +                                      group);
                        continue;
                }
  
                bitmap_bh = ext4_read_block_bitmap(sb, group);
                if (bitmap_bh == NULL) {
 -                      ext4_error(sb, __func__, "Error in reading block "
 -                                      "bitmap for %u", group);
 +                      ext4_error(sb, "Error reading block bitmap for %u",
 +                                      group);
                        ext4_mb_release_desc(&e4b);
                        continue;
                }
@@@ -3924,7 -3938,7 +3924,7 @@@ static void ext4_mb_group_or_file(struc
  
        /* don't use group allocation for large files */
        size = max(size, isize);
 -      if (size >= sbi->s_mb_stream_request) {
 +      if (size > sbi->s_mb_stream_request) {
                ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
                return;
        }
@@@ -4063,8 -4077,8 +4063,8 @@@ ext4_mb_discard_lg_preallocations(struc
  
                ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
                if (ext4_mb_load_buddy(sb, group, &e4b)) {
 -                      ext4_error(sb, __func__, "Error in loading buddy "
 -                                      "information for %u", group);
 +                      ext4_error(sb, "Error loading buddy information for %u",
 +                                      group);
                        continue;
                }
                ext4_lock_group(sb, group);
@@@ -4240,7 -4254,7 +4240,7 @@@ ext4_fsblk_t ext4_mb_new_blocks(handle_
                        return 0;
                }
                reserv_blks = ar->len;
 -              while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
 +              while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
                        ar->flags |= EXT4_MB_HINT_NOPREALLOC;
                        ar->len--;
                }
@@@ -4317,7 -4331,7 +4317,7 @@@ out2
        kmem_cache_free(ext4_ac_cachep, ac);
  out1:
        if (inquota && ar->len < inquota)
 -              vfs_dq_free_block(ar->inode, inquota - ar->len);
 +              dquot_free_block(ar->inode, inquota - ar->len);
  out3:
        if (!ar->len) {
                if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
@@@ -4462,10 -4476,10 +4462,10 @@@ void ext4_free_blocks(handle_t *handle
  
        sbi = EXT4_SB(sb);
        es = EXT4_SB(sb)->s_es;
 -      if (!ext4_data_block_valid(sbi, block, count)) {
 -              ext4_error(sb, __func__,
 -                          "Freeing blocks not in datazone - "
 -                          "block = %llu, count = %lu", block, count);
 +      if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
 +          !ext4_data_block_valid(sbi, block, count)) {
 +              ext4_error(sb, "Freeing blocks not in datazone - "
 +                         "block = %llu, count = %lu", block, count);
                goto error_return;
        }
  
@@@ -4533,7 -4547,8 +4533,7 @@@ do_more
            in_range(block + count - 1, ext4_inode_table(sb, gdp),
                      EXT4_SB(sb)->s_itb_per_group)) {
  
 -              ext4_error(sb, __func__,
 -                         "Freeing blocks in system zone - "
 +              ext4_error(sb, "Freeing blocks in system zone - "
                           "Block = %llu, count = %lu", block, count);
                /* err = 0. ext4_std_error should be a no op */
                goto error_return;
        sb->s_dirt = 1;
  error_return:
        if (freed)
 -              vfs_dq_free_block(inode, freed);
 +              dquot_free_block(inode, freed);
        brelse(bitmap_bh);
        ext4_std_error(sb, err);
        if (ac)
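
Many of the mballoc hunks above collapse the open-coded expression group * EXT4_BLOCKS_PER_GROUP(sb) + le32_to_cpu(es->s_first_data_block) into the ext4_group_first_block_no() helper. The arithmetic itself, sketched with the two superblock-derived values passed in explicitly (the kernel helper takes only the superblock and group number; this standalone form is illustrative):

#include <stdio.h>

typedef unsigned long long fsblk_t;

/* First block number of a block group. */
static fsblk_t group_first_block_no(unsigned int group,
                                    unsigned long blocks_per_group,
                                    unsigned int first_data_block)
{
        return (fsblk_t)group * blocks_per_group + first_data_block;
}

int main(void)
{
        /* group 3 on a 1 KiB-block filesystem (first_data_block == 1) */
        printf("%llu\n", group_first_block_no(3, 8192, 1));     /* 24577 */
        return 0;
}
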
diff --combined fs/ext4/super.c
index 2b83b96cb2eb8e6bb01adc90e1a3f23a012049cf,adcb713a5ac586b80eef8bc9eecf5e7382eb73d9..6ea6b67c70a232d6882c27423ed5b8cf12ada88a
@@@ -302,7 -302,7 +302,7 @@@ void ext4_journal_abort_handle(const ch
   * write out the superblock safely.
   *
   * We'll just use the jbd2_journal_abort() error code to record an error in
-  * the journal instead.  On recovery, the journal will compain about
+  * the journal instead.  On recovery, the journal will complain about
   * that error until we've noted it down and cleared it.
   */
  
@@@ -333,7 -333,7 +333,7 @@@ static void ext4_handle_error(struct su
                        sb->s_id);
  }
  
 -void ext4_error(struct super_block *sb, const char *function,
 +void __ext4_error(struct super_block *sb, const char *function,
                const char *fmt, ...)
  {
        va_list args;
        ext4_handle_error(sb);
  }
  
 +void ext4_error_inode(const char *function, struct inode *inode,
 +                    const char *fmt, ...)
 +{
 +      va_list args;
 +
 +      va_start(args, fmt);
 +      printk(KERN_CRIT "EXT4-fs error (device %s): %s: inode #%lu: (comm %s) ",
 +             inode->i_sb->s_id, function, inode->i_ino, current->comm);
 +      vprintk(fmt, args);
 +      printk("\n");
 +      va_end(args);
 +
 +      ext4_handle_error(inode->i_sb);
 +}
 +
 +void ext4_error_file(const char *function, struct file *file,
 +                   const char *fmt, ...)
 +{
 +      va_list args;
 +      struct inode *inode = file->f_dentry->d_inode;
 +      char pathname[80], *path;
 +
 +      va_start(args, fmt);
 +      path = d_path(&(file->f_path), pathname, sizeof(pathname));
 +      if (!path)
 +              path = "(unknown)";
 +      printk(KERN_CRIT
 +             "EXT4-fs error (device %s): %s: inode #%lu (comm %s path %s): ",
 +             inode->i_sb->s_id, function, inode->i_ino, current->comm, path);
 +      vprintk(fmt, args);
 +      printk("\n");
 +      va_end(args);
 +
 +      ext4_handle_error(inode->i_sb);
 +}
 +
  static const char *ext4_decode_error(struct super_block *sb, int errno,
                                     char nbuf[16])
  {
@@@ -486,7 -450,7 +486,7 @@@ void ext4_msg (struct super_block * sb
        va_end(args);
  }
  
 -void ext4_warning(struct super_block *sb, const char *function,
 +void __ext4_warning(struct super_block *sb, const char *function,
                  const char *fmt, ...)
  {
        va_list args;
@@@ -543,7 -507,7 +543,7 @@@ void ext4_update_dynamic_rev(struct sup
        if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
                return;
  
 -      ext4_warning(sb, __func__,
 +      ext4_warning(sb,
                     "updating to rev %d because of new feature flag, "
                     "running e2fsck is recommended",
                     EXT4_DYNAMIC_REV);
@@@ -744,8 -708,7 +744,8 @@@ static struct inode *ext4_alloc_inode(s
  #ifdef CONFIG_QUOTA
        ei->i_reserved_quota = 0;
  #endif
 -      INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
 +      INIT_LIST_HEAD(&ei->i_completed_io_list);
 +      spin_lock_init(&ei->i_completed_io_lock);
        ei->cur_aio_dio = NULL;
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
@@@ -798,7 -761,6 +798,7 @@@ static void destroy_inodecache(void
  
  static void ext4_clear_inode(struct inode *inode)
  {
 +      dquot_drop(inode);
        ext4_discard_preallocations(inode);
        if (EXT4_JOURNAL(inode))
                jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
@@@ -834,10 -796,10 +834,10 @@@ static inline void ext4_show_quota_opti
        if (sbi->s_qf_names[GRPQUOTA])
                seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
  
 -      if (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA)
 +      if (test_opt(sb, USRQUOTA))
                seq_puts(seq, ",usrquota");
  
 -      if (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA)
 +      if (test_opt(sb, GRPQUOTA))
                seq_puts(seq, ",grpquota");
  #endif
  }
@@@ -964,9 -926,6 +964,9 @@@ static int ext4_show_options(struct seq
        if (test_opt(sb, NOLOAD))
                seq_puts(seq, ",norecovery");
  
 +      if (test_opt(sb, DIOREAD_NOLOCK))
 +              seq_puts(seq, ",dioread_nolock");
 +
        ext4_show_quota_options(seq, sb);
  
        return 0;
@@@ -1053,9 -1012,19 +1053,9 @@@ static ssize_t ext4_quota_write(struct 
                                const char *data, size_t len, loff_t off);
  
  static const struct dquot_operations ext4_quota_operations = {
 -      .initialize     = dquot_initialize,
 -      .drop           = dquot_drop,
 -      .alloc_space    = dquot_alloc_space,
 -      .reserve_space  = dquot_reserve_space,
 -      .claim_space    = dquot_claim_space,
 -      .release_rsv    = dquot_release_reserved_space,
  #ifdef CONFIG_QUOTA
        .get_reserved_space = ext4_get_reserved_space,
  #endif
 -      .alloc_inode    = dquot_alloc_inode,
 -      .free_space     = dquot_free_space,
 -      .free_inode     = dquot_free_inode,
 -      .transfer       = dquot_transfer,
        .write_dquot    = ext4_write_dquot,
        .acquire_dquot  = ext4_acquire_dquot,
        .release_dquot  = ext4_release_dquot,
@@@ -1140,7 -1109,6 +1140,7 @@@ enum 
        Opt_stripe, Opt_delalloc, Opt_nodelalloc,
        Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
 +      Opt_dioread_nolock, Opt_dioread_lock,
        Opt_discard, Opt_nodiscard,
  };
  
@@@ -1208,8 -1176,6 +1208,8 @@@ static const match_table_t tokens = 
        {Opt_auto_da_alloc, "auto_da_alloc=%u"},
        {Opt_auto_da_alloc, "auto_da_alloc"},
        {Opt_noauto_da_alloc, "noauto_da_alloc"},
 +      {Opt_dioread_nolock, "dioread_nolock"},
 +      {Opt_dioread_lock, "dioread_lock"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_err, NULL},
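
The tokens[] entries added above follow the match_token() convention in which one option may be listed twice, once with an "=%u" argument pattern and once bare, so that both "auto_da_alloc" and "auto_da_alloc=0" resolve to Opt_auto_da_alloc. A simplified stand-in for that table-driven lookup (this is not the kernel's match_token() API, only the idea behind it):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { OPT_AUTO_DA_ALLOC, OPT_DIOREAD_NOLOCK, OPT_ERR };

struct pattern {
        int token;
        const char *prefix;
        int takes_arg;          /* pattern ends in "=%u" */
};

static const struct pattern table[] = {
        { OPT_AUTO_DA_ALLOC,  "auto_da_alloc=", 1 },
        { OPT_AUTO_DA_ALLOC,  "auto_da_alloc",  0 },
        { OPT_DIOREAD_NOLOCK, "dioread_nolock", 0 },
        { OPT_ERR,            NULL,             0 },
};

/* Returns the token; *arg gets the numeric argument, or -1 if none. */
static int match_opt(const char *opt, long *arg)
{
        const struct pattern *p;

        *arg = -1;
        for (p = table; p->prefix; p++) {
                size_t n = strlen(p->prefix);

                if (p->takes_arg && strncmp(opt, p->prefix, n) == 0) {
                        *arg = strtol(opt + n, NULL, 10);
                        return p->token;
                }
                if (!p->takes_arg && strcmp(opt, p->prefix) == 0)
                        return p->token;
        }
        return OPT_ERR;
}

int main(void)
{
        long arg;

        printf("%d %ld\n", match_opt("auto_da_alloc=0", &arg), arg); /* 0 0 */
        printf("%d %ld\n", match_opt("auto_da_alloc", &arg), arg);   /* 0 -1 */
        return 0;
}
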
@@@ -1239,66 -1205,6 +1239,66 @@@ static ext4_fsblk_t get_sb_block(void *
  }
  
  #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
 +static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
 +      "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
 +
 +#ifdef CONFIG_QUOTA
 +static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
 +{
 +      struct ext4_sb_info *sbi = EXT4_SB(sb);
 +      char *qname;
 +
 +      if (sb_any_quota_loaded(sb) &&
 +              !sbi->s_qf_names[qtype]) {
 +              ext4_msg(sb, KERN_ERR,
 +                      "Cannot change journaled "
 +                      "quota options when quota turned on");
 +              return 0;
 +      }
 +      qname = match_strdup(args);
 +      if (!qname) {
 +              ext4_msg(sb, KERN_ERR,
 +                      "Not enough memory for storing quotafile name");
 +              return 0;
 +      }
 +      if (sbi->s_qf_names[qtype] &&
 +              strcmp(sbi->s_qf_names[qtype], qname)) {
 +              ext4_msg(sb, KERN_ERR,
 +                      "%s quota file already specified", QTYPE2NAME(qtype));
 +              kfree(qname);
 +              return 0;
 +      }
 +      sbi->s_qf_names[qtype] = qname;
 +      if (strchr(sbi->s_qf_names[qtype], '/')) {
 +              ext4_msg(sb, KERN_ERR,
 +                      "quotafile must be on filesystem root");
 +              kfree(sbi->s_qf_names[qtype]);
 +              sbi->s_qf_names[qtype] = NULL;
 +              return 0;
 +      }
 +      set_opt(sbi->s_mount_opt, QUOTA);
 +      return 1;
 +}
 +
 +static int clear_qf_name(struct super_block *sb, int qtype)
 +{
 +      struct ext4_sb_info *sbi = EXT4_SB(sb);
 +
 +      if (sb_any_quota_loaded(sb) &&
 +              sbi->s_qf_names[qtype]) {
 +              ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
 +                      " when quota turned on");
 +              return 0;
 +      }
 +      /*
 +       * The space will be released later when all options are confirmed
 +       * to be correct
 +       */
 +      sbi->s_qf_names[qtype] = NULL;
 +      return 1;
 +}
 +#endif
  
  static int parse_options(char *options, struct super_block *sb,
                         unsigned long *journal_devnum,
        int data_opt = 0;
        int option;
  #ifdef CONFIG_QUOTA
 -      int qtype, qfmt;
 -      char *qname;
 +      int qfmt;
  #endif
  
        if (!options)
                if (!*p)
                        continue;
  
 +              /*
 +               * Initialize args struct so we know whether arg was
 +               * found; some options take optional arguments.
 +               */
 +              args[0].to = args[0].from = 0;
                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_bsd_df:
 +                      ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
                        clear_opt(sbi->s_mount_opt, MINIX_DF);
                        break;
                case Opt_minix_df:
 +                      ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
                        set_opt(sbi->s_mount_opt, MINIX_DF);
                        break;
                case Opt_grpid:
 +                      ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
                        set_opt(sbi->s_mount_opt, GRPID);
                        break;
                case Opt_nogrpid:
 +                      ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
                        clear_opt(sbi->s_mount_opt, GRPID);
                        break;
                case Opt_resuid:
                        if (match_int(&args[0], &option))
                        data_opt = EXT4_MOUNT_WRITEBACK_DATA;
                datacheck:
                        if (is_remount) {
 -                              if ((sbi->s_mount_opt & EXT4_MOUNT_DATA_FLAGS)
 -                                              != data_opt) {
 +                              if (test_opt(sb, DATA_FLAGS) != data_opt) {
                                        ext4_msg(sb, KERN_ERR,
                                                "Cannot change data mode on remount");
                                        return 0;
                                }
                        } else {
 -                              sbi->s_mount_opt &= ~EXT4_MOUNT_DATA_FLAGS;
 +                              clear_opt(sbi->s_mount_opt, DATA_FLAGS);
                                sbi->s_mount_opt |= data_opt;
                        }
                        break;
                        break;
  #ifdef CONFIG_QUOTA
                case Opt_usrjquota:
 -                      qtype = USRQUOTA;
 -                      goto set_qf_name;
 -              case Opt_grpjquota:
 -                      qtype = GRPQUOTA;
 -set_qf_name:
 -                      if (sb_any_quota_loaded(sb) &&
 -                          !sbi->s_qf_names[qtype]) {
 -                              ext4_msg(sb, KERN_ERR,
 -                                     "Cannot change journaled "
 -                                     "quota options when quota turned on");
 +                      if (!set_qf_name(sb, USRQUOTA, &args[0]))
                                return 0;
 -                      }
 -                      qname = match_strdup(&args[0]);
 -                      if (!qname) {
 -                              ext4_msg(sb, KERN_ERR,
 -                                      "Not enough memory for "
 -                                      "storing quotafile name");
 -                              return 0;
 -                      }
 -                      if (sbi->s_qf_names[qtype] &&
 -                          strcmp(sbi->s_qf_names[qtype], qname)) {
 -                              ext4_msg(sb, KERN_ERR,
 -                                      "%s quota file already "
 -                                      "specified", QTYPE2NAME(qtype));
 -                              kfree(qname);
 -                              return 0;
 -                      }
 -                      sbi->s_qf_names[qtype] = qname;
 -                      if (strchr(sbi->s_qf_names[qtype], '/')) {
 -                              ext4_msg(sb, KERN_ERR,
 -                                      "quotafile must be on "
 -                                      "filesystem root");
 -                              kfree(sbi->s_qf_names[qtype]);
 -                              sbi->s_qf_names[qtype] = NULL;
 +                      break;
 +              case Opt_grpjquota:
 +                      if (!set_qf_name(sb, GRPQUOTA, &args[0]))
                                return 0;
 -                      }
 -                      set_opt(sbi->s_mount_opt, QUOTA);
                        break;
                case Opt_offusrjquota:
 -                      qtype = USRQUOTA;
 -                      goto clear_qf_name;
 +                      if (!clear_qf_name(sb, USRQUOTA))
 +                              return 0;
 +                      break;
                case Opt_offgrpjquota:
 -                      qtype = GRPQUOTA;
 -clear_qf_name:
 -                      if (sb_any_quota_loaded(sb) &&
 -                          sbi->s_qf_names[qtype]) {
 -                              ext4_msg(sb, KERN_ERR, "Cannot change "
 -                                      "journaled quota options when "
 -                                      "quota turned on");
 +                      if (!clear_qf_name(sb, GRPQUOTA))
                                return 0;
 -                      }
 -                      /*
 -                       * The space will be released later when all options
 -                       * are confirmed to be correct
 -                       */
 -                      sbi->s_qf_names[qtype] = NULL;
                        break;
 +
                case Opt_jqfmt_vfsold:
                        qfmt = QFMT_VFS_OLD;
                        goto set_qf_format;
@@@ -1581,11 -1518,10 +1581,11 @@@ set_qf_format
                        clear_opt(sbi->s_mount_opt, BARRIER);
                        break;
                case Opt_barrier:
 -                      if (match_int(&args[0], &option)) {
 -                              set_opt(sbi->s_mount_opt, BARRIER);
 -                              break;
 -                      }
 +                      if (args[0].from) {
 +                              if (match_int(&args[0], &option))
 +                                      return 0;
 +                      } else
 +                              option = 1;     /* No argument, default to 1 */
                        if (option)
                                set_opt(sbi->s_mount_opt, BARRIER);
                        else
                        set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
                        break;
                case Opt_auto_da_alloc:
 -                      if (match_int(&args[0], &option)) {
 -                              clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
 -                              break;
 -                      }
 +                      if (args[0].from) {
 +                              if (match_int(&args[0], &option))
 +                                      return 0;
 +                      } else
 +                              option = 1;     /* No argument, default to 1 */
                        if (option)
                                clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
                        else
                case Opt_nodiscard:
                        clear_opt(sbi->s_mount_opt, DISCARD);
                        break;
 +              case Opt_dioread_nolock:
 +                      set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
 +                      break;
 +              case Opt_dioread_lock:
 +                      clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
 +                      break;
                default:
                        ext4_msg(sb, KERN_ERR,
                               "Unrecognized mount option \"%s\" "
        }
  #ifdef CONFIG_QUOTA
        if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
 -              if ((sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA) &&
 -                   sbi->s_qf_names[USRQUOTA])
 +              if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
                        clear_opt(sbi->s_mount_opt, USRQUOTA);
  
 -              if ((sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA) &&
 -                   sbi->s_qf_names[GRPQUOTA])
 +              if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
                        clear_opt(sbi->s_mount_opt, GRPQUOTA);
  
 -              if ((sbi->s_qf_names[USRQUOTA] &&
 -                              (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA)) ||
 -                  (sbi->s_qf_names[GRPQUOTA] &&
 -                              (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA))) {
 +              if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
                        ext4_msg(sb, KERN_ERR, "old and new quota "
                                        "format mixing");
                        return 0;
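
Opt_barrier and Opt_auto_da_alloc now take an optional integer: when no argument was supplied (args[0].from is NULL) the option defaults to 1, and a malformed integer fails the mount instead of being silently coerced. The repeated pattern could be captured by a hypothetical helper like this (name and placement are illustrative, not from the commit):

/* Parse an optional boolean-ish mount-option argument, defaulting to 1.
 * Returns 0 on a malformed integer so the caller can fail the mount. */
static int parse_opt_arg(substring_t *args, int *option)
{
	if (args[0].from) {
		if (match_int(&args[0], option))
			return 0;	/* bad integer: fail the mount */
	} else {
		*option = 1;		/* no argument, default to 1 */
	}
	return 1;
}
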
@@@ -2005,7 -1939,7 +2005,7 @@@ static void ext4_orphan_cleanup(struct 
                }
  
                list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
 -              vfs_dq_init(inode);
 +              dquot_initialize(inode);
                if (inode->i_nlink) {
                        ext4_msg(sb, KERN_DEBUG,
                                "%s: truncating inode %lu to %lld bytes",
@@@ -2498,11 -2432,8 +2498,11 @@@ static int ext4_fill_super(struct super
        def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
        if (def_mount_opts & EXT4_DEFM_DEBUG)
                set_opt(sbi->s_mount_opt, DEBUG);
 -      if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
 +      if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
 +              ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
 +                      "2.6.38");
                set_opt(sbi->s_mount_opt, GRPID);
 +      }
        if (def_mount_opts & EXT4_DEFM_UID16)
                set_opt(sbi->s_mount_opt, NO_UID32);
  #ifdef CONFIG_EXT4_FS_XATTR
                set_opt(sbi->s_mount_opt, POSIX_ACL);
  #endif
        if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
 -              sbi->s_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
 +              set_opt(sbi->s_mount_opt, JOURNAL_DATA);
        else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
 -              sbi->s_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
 +              set_opt(sbi->s_mount_opt, ORDERED_DATA);
        else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
 -              sbi->s_mount_opt |= EXT4_MOUNT_WRITEBACK_DATA;
 +              set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
  
        if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
                set_opt(sbi->s_mount_opt, ERRORS_PANIC);
                goto failed_mount;
  
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 -              ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 +              (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
  
        if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
            (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
              EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
                ext4_msg(sb, KERN_ERR, "required journal recovery "
                       "suppressed and not mounted read-only");
 -              goto failed_mount4;
 +              goto failed_mount_wq;
        } else {
                clear_opt(sbi->s_mount_opt, DATA_FLAGS);
                set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
            !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
                                       JBD2_FEATURE_INCOMPAT_64BIT)) {
                ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
 -              goto failed_mount4;
 +              goto failed_mount_wq;
        }
  
        if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
                    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
                        ext4_msg(sb, KERN_ERR, "Journal does not support "
                               "requested data journaling mode");
 -                      goto failed_mount4;
 +                      goto failed_mount_wq;
                }
        default:
                break;
        set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
  
  no_journal:
 -
        if (test_opt(sb, NOBH)) {
                if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
                        ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
                                "its supported only with writeback mode");
                        clear_opt(sbi->s_mount_opt, NOBH);
                }
 +              if (test_opt(sb, DIOREAD_NOLOCK)) {
 +                      ext4_msg(sb, KERN_WARNING, "dioread_nolock option is "
 +                              "not supported with nobh mode");
 +                      goto failed_mount_wq;
 +              }
        }
        EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
        if (!EXT4_SB(sb)->dio_unwritten_wq) {
                         "requested data journaling mode");
                clear_opt(sbi->s_mount_opt, DELALLOC);
        }
 +      if (test_opt(sb, DIOREAD_NOLOCK)) {
 +              if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
 +                      ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
 +                              "option - requested data journaling mode");
 +                      clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
 +              }
 +              if (sb->s_blocksize < PAGE_SIZE) {
 +                      ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
 +                              "option - block size is too small");
 +                      clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
 +              }
 +      }
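
Taken together with the nobh check earlier in the function, the new mount-time rules for dioread_nolock are: nobh + dioread_nolock fails the mount, while data=journal or a block size below the page size merely clears the option with a warning. A hypothetical predicate condensing those constraints, purely for illustration:

/* Illustrative only: can dioread_nolock actually be honored? */
static int dioread_nolock_usable(struct super_block *sb)
{
	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
		return 0;	/* conflicts with data=journal */
	if (sb->s_blocksize < PAGE_SIZE)
		return 0;	/* block size is too small */
	if (test_opt(sb, NOBH))
		return 0;	/* nobh conflict fails the mount outright */
	return 1;
}
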
  
        err = ext4_setup_system_zone(sb);
        if (err) {
@@@ -3445,9 -3360,10 +3445,9 @@@ static void ext4_clear_journal_err(stru
                char nbuf[16];
  
                errstr = ext4_decode_error(sb, j_errno, nbuf);
 -              ext4_warning(sb, __func__, "Filesystem error recorded "
 +              ext4_warning(sb, "Filesystem error recorded "
                             "from previous mount: %s", errstr);
 -              ext4_warning(sb, __func__, "Marking fs in need of "
 -                           "filesystem check.");
 +              ext4_warning(sb, "Marking fs in need of filesystem check.");
  
                EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
                es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
@@@ -3598,7 -3514,7 +3598,7 @@@ static int ext4_remount(struct super_bl
                ext4_abort(sb, __func__, "Abort forced by user");
  
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 -              ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 +              (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
  
        es = sbi->s_es;
  
@@@ -3792,7 -3708,7 +3792,7 @@@ static int ext4_statfs(struct dentry *d
   * Process 1                         Process 2
   * ext4_create()                     quota_sync()
   *   jbd2_journal_start()                  write_dquot()
 - *   vfs_dq_init()                         down(dqio_mutex)
 + *   dquot_initialize()                         down(dqio_mutex)
   *     down(dqio_mutex)                    jbd2_journal_start()
   *
   */
@@@ -4001,7 -3917,9 +4001,7 @@@ static ssize_t ext4_quota_write(struct 
        ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
        int err = 0;
        int offset = off & (sb->s_blocksize - 1);
 -      int tocopy;
        int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL;
 -      size_t towrite = len;
        struct buffer_head *bh;
        handle_t *handle = journal_current_handle();
  
                        (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
 +      /*
 +       * Since we account for only one data block in the transaction
 +       * credits, it is impossible to cross a block boundary.
 +       */
 +      if (sb->s_blocksize - offset < len) {
 +              ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
 +                      " cancelled because not block aligned",
 +                      (unsigned long long)off, (unsigned long long)len);
 +              return -EIO;
 +      }
 +
        mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
 -      while (towrite > 0) {
 -              tocopy = sb->s_blocksize - offset < towrite ?
 -                              sb->s_blocksize - offset : towrite;
 -              bh = ext4_bread(handle, inode, blk, 1, &err);
 -              if (!bh)
 +      bh = ext4_bread(handle, inode, blk, 1, &err);
 +      if (!bh)
 +              goto out;
 +      if (journal_quota) {
 +              err = ext4_journal_get_write_access(handle, bh);
 +              if (err) {
 +                      brelse(bh);
                        goto out;
 -              if (journal_quota) {
 -                      err = ext4_journal_get_write_access(handle, bh);
 -                      if (err) {
 -                              brelse(bh);
 -                              goto out;
 -                      }
                }
 -              lock_buffer(bh);
 -              memcpy(bh->b_data+offset, data, tocopy);
 -              flush_dcache_page(bh->b_page);
 -              unlock_buffer(bh);
 -              if (journal_quota)
 -                      err = ext4_handle_dirty_metadata(handle, NULL, bh);
 -              else {
 -                      /* Always do at least ordered writes for quotas */
 -                      err = ext4_jbd2_file_inode(handle, inode);
 -                      mark_buffer_dirty(bh);
 -              }
 -              brelse(bh);
 -              if (err)
 -                      goto out;
 -              offset = 0;
 -              towrite -= tocopy;
 -              data += tocopy;
 -              blk++;
        }
 +      lock_buffer(bh);
 +      memcpy(bh->b_data+offset, data, len);
 +      flush_dcache_page(bh->b_page);
 +      unlock_buffer(bh);
 +      if (journal_quota)
 +              err = ext4_handle_dirty_metadata(handle, NULL, bh);
 +      else {
 +              /* Always do at least ordered writes for quotas */
 +              err = ext4_jbd2_file_inode(handle, inode);
 +              mark_buffer_dirty(bh);
 +      }
 +      brelse(bh);
  out:
 -      if (len == towrite) {
 +      if (err) {
                mutex_unlock(&inode->i_mutex);
                return err;
        }
 -      if (inode->i_size < off+len-towrite) {
 -              i_size_write(inode, off+len-towrite);
 +      if (inode->i_size < off + len) {
 +              i_size_write(inode, off + len);
                EXT4_I(inode)->i_disksize = inode->i_size;
        }
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        ext4_mark_inode_dirty(handle, inode);
        mutex_unlock(&inode->i_mutex);
 -      return len - towrite;
 +      return len;
  }
  
  #endif
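
Because the transaction now reserves credits for exactly one data block, ext4_quota_write() no longer loops over blocks; any write that would straddle a block boundary is rejected up front with -EIO. A tiny standalone check of that arithmetic (plain user-space C, not kernel code):

#include <assert.h>

/* Returns 1 if a write of `len` bytes at byte offset `off` stays within
 * a single block of size `blocksize` (a power of two), mirroring the
 * "sb->s_blocksize - offset < len" rejection above. */
static int fits_in_one_block(unsigned long long off, unsigned long long len,
			     unsigned int blocksize)
{
	unsigned int offset = off & (blocksize - 1);
	return blocksize - offset >= len;
}

int main(void)
{
	assert(fits_in_one_block(0, 4096, 4096));	/* exactly one block */
	assert(fits_in_one_block(4096, 48, 4096));	/* start of block 1  */
	assert(!fits_in_one_block(4090, 16, 4096));	/* straddles 4096    */
	return 0;
}
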
diff --combined fs/gfs2/ops_fstype.c
index a054b526dc085278e9d237653b03948654d60fa9,0556f7fededd20559a034f06fbb824b6011bbab6..c1309ed1c4969e355cade447a6422f46ba9f0aa1
@@@ -65,6 -65,7 +65,6 @@@ static void gfs2_tune_init(struct gfs2_
        gt->gt_quota_scale_den = 1;
        gt->gt_new_files_jdata = 0;
        gt->gt_max_readahead = 1 << 18;
 -      gt->gt_stall_secs = 600;
        gt->gt_complain_secs = 10;
  }
  
@@@ -81,8 -82,6 +81,8 @@@ static struct gfs2_sbd *init_sbd(struc
  
        gfs2_tune_init(&sdp->sd_tune);
  
 +      init_waitqueue_head(&sdp->sd_glock_wait);
 +      atomic_set(&sdp->sd_glock_disposal, 0);
        spin_lock_init(&sdp->sd_statfs_spin);
  
        spin_lock_init(&sdp->sd_rindex_spin);
@@@ -724,7 -723,7 +724,7 @@@ static int init_journal(struct gfs2_sb
                goto fail;
        }
  
 -      error = -EINVAL;
 +      error = -EUSERS;
        if (!gfs2_jindex_size(sdp)) {
                fs_err(sdp, "no journals!\n");
                goto fail_jindex;
@@@ -984,24 -983,16 +984,24 @@@ static const match_table_t nolock_token
        { Opt_err, NULL },
  };
  
 +static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
 +{
 +      struct gfs2_sbd *sdp = gl->gl_sbd;
 +      kmem_cache_free(cachep, gl);
 +      if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 +              wake_up(&sdp->sd_glock_wait);
 +}
 +
  static const struct lm_lockops nolock_ops = {
        .lm_proto_name = "lock_nolock",
 -      .lm_put_lock = kmem_cache_free,
 +      .lm_put_lock = nolock_put_lock,
        .lm_tokens = &nolock_tokens,
  };
  
  /**
   * gfs2_lm_mount - mount a locking protocol
   * @sdp: the filesystem
-  * @args: mount arguements
+  * @args: mount arguments
   * @silent: if 1, don't complain if the FS isn't a GFS2 fs
   *
   * Returns: errno
@@@ -1240,9 -1231,10 +1240,9 @@@ fail_sb
  fail_locking:
        init_locking(sdp, &mount_gh, UNDO);
  fail_lm:
 +      invalidate_inodes(sb);
        gfs2_gl_hash_clear(sdp);
        gfs2_lm_unmount(sdp);
 -      while (invalidate_inodes(sb))
 -              yield();
  fail_sys:
        gfs2_sys_fs_del(sdp);
  fail:
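
nolock_put_lock() pairs a per-superblock disposal counter with a wait queue: every freed glock decrements sd_glock_disposal, and the last one wakes whoever is waiting for all glocks to drain (presumably the teardown path, which is not shown in this hunk). A sketch of the waiting side, assuming the standard wait_event() idiom:

/* Sketch only: how a teardown path would wait for nolock_put_lock()
 * to dispose of every outstanding glock. */
static void wait_for_glock_disposal(struct gfs2_sbd *sdp)
{
	wait_event(sdp->sd_glock_wait,
		   atomic_read(&sdp->sd_glock_disposal) == 0);
}
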
diff --combined fs/jbd/transaction.c
index 99e9fea11077effbc1be2b8a7d244c89d4b823c4,57ae203c8abf193375a41f0a1631210cd5cc2188..5ae71e75a4910aeae6fe37eb1edefe70a7b210cc
@@@ -1398,7 -1398,7 +1398,7 @@@ int journal_stop(handle_t *handle
         * the case where our storage is so fast that it is more optimal to go
         * ahead and force a flush and wait for the transaction to be committed
         * than it is to wait for an arbitrary amount of time for new writers to
-        * join the transaction.  We acheive this by measuring how long it takes
+        * join the transaction.  We achieve this by measuring how long it takes
         * to commit a transaction, and compare it with how long this
         * transaction has been running, and if run time < commit time then we
         * sleep for the delta and commit.  This greatly helps super fast disks
@@@ -1864,21 -1864,6 +1864,21 @@@ static int journal_unmap_buffer(journal
        if (!jh)
                goto zap_buffer_no_jh;
  
 +      /*
 +       * We cannot remove the buffer from checkpoint lists until the
 +       * transaction adding the inode to the orphan list (let's call
 +       * it T) is committed.  Otherwise, if the transaction changing
 +       * the buffer were cleaned from the journal before T is
 +       * committed, a crash would cause the correct contents of the
 +       * buffer to be lost.  On the other hand, we have to clear the
 +       * buffer's dirty bit at the latest when the transaction that
 +       * marks the buffer as freed in the filesystem structures is
 +       * committed, because from that moment on the buffer can be
 +       * reallocated and used by a different page.  Since the block
 +       * hasn't been freed yet but the inode has already been added
 +       * to the orphan list, it is safe for us to add the buffer to
 +       * the BJ_Forget list of the newest transaction.
 +       */
        transaction = jh->b_transaction;
        if (transaction == NULL) {
                /* First case: not on any transaction.  If it
                        goto zap_buffer;
                }
                /*
 -               * If it is committing, we simply cannot touch it.  We
 -               * can remove it's next_transaction pointer from the
 -               * running transaction if that is set, but nothing
 -               * else. */
 +               * The buffer is committing, so we simply cannot touch
 +               * it. We just set b_next_transaction to the running
 +               * transaction (if there is one) and mark the buffer as
 +               * freed so that the commit code knows it should clear
 +               * the dirty bits when it is done with the buffer.
 +               */
                set_buffer_freed(bh);
 -              if (jh->b_next_transaction) {
 -                      J_ASSERT(jh->b_next_transaction ==
 -                                      journal->j_running_transaction);
 -                      jh->b_next_transaction = NULL;
 -              }
 +              if (journal->j_running_transaction && buffer_jbddirty(bh))
 +                      jh->b_next_transaction = journal->j_running_transaction;
                journal_put_journal_head(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
@@@ -2134,7 -2120,7 +2134,7 @@@ void journal_file_buffer(struct journal
   */
  void __journal_refile_buffer(struct journal_head *jh)
  {
 -      int was_dirty;
 +      int was_dirty, jlist;
        struct buffer_head *bh = jh2bh(jh);
  
        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        __journal_temp_unlink_buffer(jh);
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
 -      __journal_file_buffer(jh, jh->b_transaction,
 -                              jh->b_modified ? BJ_Metadata : BJ_Reserved);
 +      if (buffer_freed(bh))
 +              jlist = BJ_Forget;
 +      else if (jh->b_modified)
 +              jlist = BJ_Metadata;
 +      else
 +              jlist = BJ_Reserved;
 +      __journal_file_buffer(jh, jh->b_transaction, jlist);
        J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
  
        if (was_dirty)
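
__journal_refile_buffer() now picks the destination list from two bits of state instead of one: a freed buffer always goes to BJ_Forget so commit can clear its dirty bits, otherwise a modified buffer goes to BJ_Metadata and an untouched reservation back to BJ_Reserved. The decision in isolation, as a small standalone C function mirroring the new code:

enum jbd_list { BJ_Forget, BJ_Metadata, BJ_Reserved };

/* Mirrors the jlist selection added to __journal_refile_buffer(). */
static enum jbd_list pick_refile_list(int buffer_freed, int b_modified)
{
	if (buffer_freed)
		return BJ_Forget;	/* freed: commit must clear dirty bits */
	if (b_modified)
		return BJ_Metadata;	/* modified under this handle */
	return BJ_Reserved;		/* reserved but never dirtied */
}
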
diff --combined fs/locks.c
index ae9ded026b7cb18a783af01bffac31dd87d6a26e,cde572db112f22ec3ecd0894dc5251ad9f42549b..ab24d49fc04844e93bbedb1cd7c61083cb74e2b8
@@@ -1182,9 -1182,8 +1182,9 @@@ int __break_lease(struct inode *inode, 
        struct file_lock *fl;
        unsigned long break_time;
        int i_have_this_lease = 0;
 +      int want_write = (mode & O_ACCMODE) != O_RDONLY;
  
 -      new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
 +      new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
  
        lock_kernel();
  
                if (fl->fl_owner == current->files)
                        i_have_this_lease = 1;
  
 -      if (mode & FMODE_WRITE) {
 +      if (want_write) {
                /* If we want write access, we have to revoke any lease. */
                future = F_UNLCK | F_INPROGRESS;
        } else if (flock->fl_type & F_INPROGRESS) {
@@@ -1455,7 -1454,7 +1455,7 @@@ EXPORT_SYMBOL(generic_setlease)
   *    leases held by processes on this node.
   *
   *    There is also no break_lease method; filesystems that
-  *    handle their own leases shoud break leases themselves from the
+  *    handle their own leases should break leases themselves from the
   *    filesystem's open, create, and (on truncate) setattr methods.
   *
   *    Warning: the only current setlease methods exist only to disable
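
The __break_lease() fix above matters because the O_RDONLY/O_WRONLY/O_RDWR access mode is a two-bit field, not a bitmask, so testing it against an FMODE_* flag gives wrong answers (O_RDONLY is 0 on Linux and can never test true bitwise). A standalone demonstration of the correct test:

#include <fcntl.h>
#include <stdio.h>

/* Extract the access mode with O_ACCMODE and compare; never test it
 * bitwise, since O_RDONLY == 0. */
static int wants_write(int mode)
{
	return (mode & O_ACCMODE) != O_RDONLY;
}

int main(void)
{
	printf("O_RDONLY: %d\n", wants_write(O_RDONLY));	/* 0 */
	printf("O_WRONLY: %d\n", wants_write(O_WRONLY));	/* 1 */
	printf("O_RDWR:   %d\n", wants_write(O_RDWR));		/* 1 */
	printf("O_RDONLY|O_NONBLOCK: %d\n",
	       wants_write(O_RDONLY | O_NONBLOCK));		/* 0 */
	return 0;
}
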
diff --combined fs/namei.c
index 48e60a187325e00abb01b2c1278d0cb5441bc6d5,e05c243105a0e8e64ff7d3c5bc0bb1fe0c9e1ff3..1c0fca6e899eef7aa768ed704e2216c3d024c7df
@@@ -19,6 -19,7 +19,6 @@@
  #include <linux/slab.h>
  #include <linux/fs.h>
  #include <linux/namei.h>
 -#include <linux/quotaops.h>
  #include <linux/pagemap.h>
  #include <linux/fsnotify.h>
  #include <linux/personality.h>
@@@ -497,6 -498,8 +497,6 @@@ static int link_path_walk(const char *
  
  static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
  {
 -      int res = 0;
 -      char *name;
        if (IS_ERR(link))
                goto fail;
  
                path_get(&nd->root);
        }
  
 -      res = link_path_walk(link, nd);
 -      if (nd->depth || res || nd->last_type!=LAST_NORM)
 -              return res;
 -      /*
 -       * If it is an iterative symlinks resolution in open_namei() we
 -       * have to copy the last component. And all that crap because of
 -       * bloody create() on broken symlinks. Furrfu...
 -       */
 -      name = __getname();
 -      if (unlikely(!name)) {
 -              path_put(&nd->path);
 -              return -ENOMEM;
 -      }
 -      strcpy(name, nd->last.name);
 -      nd->last.name = name;
 -      return 0;
 +      return link_path_walk(link, nd);
  fail:
        path_put(&nd->path);
        return PTR_ERR(link);
@@@ -529,10 -547,10 +529,10 @@@ static inline void path_to_nameidata(st
        nd->path.dentry = path->dentry;
  }
  
 -static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd)
 +static __always_inline int
 +__do_follow_link(struct path *path, struct nameidata *nd, void **p)
  {
        int error;
 -      void *cookie;
        struct dentry *dentry = path->dentry;
  
        touch_atime(path->mnt, dentry);
        }
        mntget(path->mnt);
        nd->last_type = LAST_BIND;
 -      cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
 -      error = PTR_ERR(cookie);
 -      if (!IS_ERR(cookie)) {
 +      *p = dentry->d_inode->i_op->follow_link(dentry, nd);
 +      error = PTR_ERR(*p);
 +      if (!IS_ERR(*p)) {
                char *s = nd_get_link(nd);
                error = 0;
                if (s)
                        if (error)
                                path_put(&nd->path);
                }
 -              if (dentry->d_inode->i_op->put_link)
 -                      dentry->d_inode->i_op->put_link(dentry, nd, cookie);
        }
        return error;
  }
   */
  static inline int do_follow_link(struct path *path, struct nameidata *nd)
  {
 +      void *cookie;
        int err = -ELOOP;
        if (current->link_count >= MAX_NESTED_LINKS)
                goto loop;
        current->link_count++;
        current->total_link_count++;
        nd->depth++;
 -      err = __do_follow_link(path, nd);
 +      err = __do_follow_link(path, nd, &cookie);
 +      if (!IS_ERR(cookie) && path->dentry->d_inode->i_op->put_link)
 +              path->dentry->d_inode->i_op->put_link(path->dentry, nd, cookie);
        path_put(path);
        current->link_count--;
        nd->depth--;
@@@ -672,20 -689,33 +672,20 @@@ static __always_inline void follow_dotd
        set_root(nd);
  
        while(1) {
 -              struct vfsmount *parent;
                struct dentry *old = nd->path.dentry;
  
                if (nd->path.dentry == nd->root.dentry &&
                    nd->path.mnt == nd->root.mnt) {
                        break;
                }
 -              spin_lock(&dcache_lock);
                if (nd->path.dentry != nd->path.mnt->mnt_root) {
 -                      nd->path.dentry = dget(nd->path.dentry->d_parent);
 -                      spin_unlock(&dcache_lock);
 +                      /* rare case of legitimate dget_parent()... */
 +                      nd->path.dentry = dget_parent(nd->path.dentry);
                        dput(old);
                        break;
                }
 -              spin_unlock(&dcache_lock);
 -              spin_lock(&vfsmount_lock);
 -              parent = nd->path.mnt->mnt_parent;
 -              if (parent == nd->path.mnt) {
 -                      spin_unlock(&vfsmount_lock);
 +              if (!follow_up(&nd->path))
                        break;
 -              }
 -              mntget(parent);
 -              nd->path.dentry = dget(nd->path.mnt->mnt_mountpoint);
 -              spin_unlock(&vfsmount_lock);
 -              dput(old);
 -              mntput(nd->path.mnt);
 -              nd->path.mnt = parent;
        }
        follow_mount(&nd->path);
  }
@@@ -792,17 -822,6 +792,17 @@@ fail
        return PTR_ERR(dentry);
  }
  
 +/*
 + * This is a temporary kludge to deal with "automount" symlinks; proper
 + * solution is to trigger them on follow_mount(), so that do_lookup()
 + * would DTRT.  To be killed before 2.6.34-final.
 + */
 +static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
 +{
 +      return inode && unlikely(inode->i_op->follow_link) &&
 +              ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
 +}
 +
  /*
   * Name resolution.
   * This is the basic name resolution function, turning a pathname into
@@@ -923,7 -942,8 +923,7 @@@ last_component
                if (err)
                        break;
                inode = next.dentry->d_inode;
 -              if ((lookup_flags & LOOKUP_FOLLOW)
 -                  && inode && inode->i_op->follow_link) {
 +              if (follow_on_final(inode, lookup_flags)) {
                        err = do_follow_link(&next, nd);
                        if (err)
                                goto return_err;
@@@ -1317,7 -1337,7 +1317,7 @@@ static int may_delete(struct inode *dir
                return -ENOENT;
  
        BUG_ON(victim->d_parent->d_inode != dir);
 -      audit_inode_child(victim->d_name.name, victim, dir);
 +      audit_inode_child(victim, dir);
  
        error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
        if (error)
@@@ -1358,6 -1378,22 +1358,6 @@@ static inline int may_create(struct ino
        return inode_permission(dir, MAY_WRITE | MAY_EXEC);
  }
  
 -/* 
 - * O_DIRECTORY translates into forcing a directory lookup.
 - */
 -static inline int lookup_flags(unsigned int f)
 -{
 -      unsigned long retval = LOOKUP_FOLLOW;
 -
 -      if (f & O_NOFOLLOW)
 -              retval &= ~LOOKUP_FOLLOW;
 -      
 -      if (f & O_DIRECTORY)
 -              retval |= LOOKUP_DIRECTORY;
 -
 -      return retval;
 -}
 -
  /*
   * p1 and p2 should be directories on the same fs.
   */
@@@ -1415,6 -1451,7 +1415,6 @@@ int vfs_create(struct inode *dir, struc
        error = security_inode_create(dir, dentry, mode);
        if (error)
                return error;
 -      vfs_dq_init(dir);
        error = dir->i_op->create(dir, dentry, mode, nd);
        if (!error)
                fsnotify_create(dir, dentry);
@@@ -1456,7 -1493,7 +1456,7 @@@ int may_open(struct path *path, int acc
         * An append-only file must be opened in append mode for writing.
         */
        if (IS_APPEND(inode)) {
 -              if  ((flag & FMODE_WRITE) && !(flag & O_APPEND))
 +              if  ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
                        return -EPERM;
                if (flag & O_TRUNC)
                        return -EPERM;
@@@ -1500,7 -1537,7 +1500,7 @@@ static int handle_truncate(struct path 
   * what get passed to sys_open().
   */
  static int __open_namei_create(struct nameidata *nd, struct path *path,
 -                              int flag, int mode)
 +                              int open_flag, int mode)
  {
        int error;
        struct dentry *dir = nd->path.dentry;
@@@ -1518,7 -1555,7 +1518,7 @@@ out_unlock
        if (error)
                return error;
        /* Don't check for write permission, don't truncate */
 -      return may_open(&nd->path, 0, flag & ~O_TRUNC);
 +      return may_open(&nd->path, 0, open_flag & ~O_TRUNC);
  }
  
  /*
@@@ -1556,132 -1593,129 +1556,132 @@@ static int open_will_truncate(int flag
        return (flag & O_TRUNC);
  }
  
 -/*
 - * Note that the low bits of the passed in "open_flag"
 - * are not the same as in the local variable "flag". See
 - * open_to_namei_flags() for more details.
 - */
 -struct file *do_filp_open(int dfd, const char *pathname,
 -              int open_flag, int mode, int acc_mode)
 +static struct file *finish_open(struct nameidata *nd,
 +                              int open_flag, int acc_mode)
  {
        struct file *filp;
 -      struct nameidata nd;
 -      int error;
 -      struct path path;
 -      struct dentry *dir;
 -      int count = 0;
        int will_truncate;
 -      int flag = open_to_namei_flags(open_flag);
 -      int force_reval = 0;
 +      int error;
  
 +      will_truncate = open_will_truncate(open_flag, nd->path.dentry->d_inode);
 +      if (will_truncate) {
 +              error = mnt_want_write(nd->path.mnt);
 +              if (error)
 +                      goto exit;
 +      }
 +      error = may_open(&nd->path, acc_mode, open_flag);
 +      if (error) {
 +              if (will_truncate)
 +                      mnt_drop_write(nd->path.mnt);
 +              goto exit;
 +      }
 +      filp = nameidata_to_filp(nd);
 +      if (!IS_ERR(filp)) {
 +              error = ima_file_check(filp, acc_mode);
 +              if (error) {
 +                      fput(filp);
 +                      filp = ERR_PTR(error);
 +              }
 +      }
 +      if (!IS_ERR(filp)) {
 +              if (will_truncate) {
 +                      error = handle_truncate(&nd->path);
 +                      if (error) {
 +                              fput(filp);
 +                              filp = ERR_PTR(error);
 +                      }
 +              }
 +      }
        /*
 -       * O_SYNC is implemented as __O_SYNC|O_DSYNC.  As many places only
 -       * check for O_DSYNC if the need any syncing at all we enforce it's
 -       * always set instead of having to deal with possibly weird behaviour
 -       * for malicious applications setting only __O_SYNC.
 +       * It is now safe to drop the mnt write
 +       * because the filp has had a write taken
 +       * on its behalf.
         */
 -      if (open_flag & __O_SYNC)
 -              open_flag |= O_DSYNC;
 -
 -      if (!acc_mode)
 -              acc_mode = MAY_OPEN | ACC_MODE(open_flag);
 +      if (will_truncate)
 +              mnt_drop_write(nd->path.mnt);
 +      return filp;
  
 -      /* O_TRUNC implies we need access checks for write permissions */
 -      if (flag & O_TRUNC)
 -              acc_mode |= MAY_WRITE;
 +exit:
 +      if (!IS_ERR(nd->intent.open.file))
 +              release_open_intent(nd);
 +      path_put(&nd->path);
 +      return ERR_PTR(error);
 +}
  
 -      /* Allow the LSM permission hook to distinguish append 
 -         access from general write access. */
 -      if (flag & O_APPEND)
 -              acc_mode |= MAY_APPEND;
 +static struct file *do_last(struct nameidata *nd, struct path *path,
 +                          int open_flag, int acc_mode,
 +                          int mode, const char *pathname,
 +                          int *want_dir)
 +{
 +      struct dentry *dir = nd->path.dentry;
 +      struct file *filp;
 +      int error = -EISDIR;
  
 -      /*
 -       * The simplest case - just a plain lookup.
 -       */
 -      if (!(flag & O_CREAT)) {
 -              filp = get_empty_filp();
 -
 -              if (filp == NULL)
 -                      return ERR_PTR(-ENFILE);
 -              nd.intent.open.file = filp;
 -              filp->f_flags = open_flag;
 -              nd.intent.open.flags = flag;
 -              nd.intent.open.create_mode = 0;
 -              error = do_path_lookup(dfd, pathname,
 -                                      lookup_flags(flag)|LOOKUP_OPEN, &nd);
 -              if (IS_ERR(nd.intent.open.file)) {
 -                      if (error == 0) {
 -                              error = PTR_ERR(nd.intent.open.file);
 -                              path_put(&nd.path);
 +      switch (nd->last_type) {
 +      case LAST_DOTDOT:
 +              follow_dotdot(nd);
 +              dir = nd->path.dentry;
 +              if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) {
 +                      if (!dir->d_op->d_revalidate(dir, nd)) {
 +                              error = -ESTALE;
 +                              goto exit;
                        }
 -              } else if (error)
 -                      release_open_intent(&nd);
 -              if (error)
 -                      return ERR_PTR(error);
 +              }
 +              /* fallthrough */
 +      case LAST_DOT:
 +      case LAST_ROOT:
 +              if (open_flag & O_CREAT)
 +                      goto exit;
 +              /* fallthrough */
 +      case LAST_BIND:
 +              audit_inode(pathname, dir);
                goto ok;
        }
  
 -      /*
 -       * Create - we need to know the parent.
 -       */
 -reval:
 -      error = path_init(dfd, pathname, LOOKUP_PARENT, &nd);
 -      if (error)
 -              return ERR_PTR(error);
 -      if (force_reval)
 -              nd.flags |= LOOKUP_REVAL;
 -      error = path_walk(pathname, &nd);
 -      if (error) {
 -              if (nd.root.mnt)
 -                      path_put(&nd.root);
 -              return ERR_PTR(error);
 +      /* trailing slashes? */
 +      if (nd->last.name[nd->last.len]) {
 +              if (open_flag & O_CREAT)
 +                      goto exit;
 +              *want_dir = 1;
        }
 -      if (unlikely(!audit_dummy_context()))
 -              audit_inode(pathname, nd.path.dentry);
  
 -      /*
 -       * We have the parent and last component. First of all, check
 -       * that we are not asked to creat(2) an obvious directory - that
 -       * will not do.
 -       */
 -      error = -EISDIR;
 -      if (nd.last_type != LAST_NORM || nd.last.name[nd.last.len])
 -              goto exit_parent;
 +      /* just plain open? */
 +      if (!(open_flag & O_CREAT)) {
 +              error = do_lookup(nd, &nd->last, path);
 +              if (error)
 +                      goto exit;
 +              error = -ENOENT;
 +              if (!path->dentry->d_inode)
 +                      goto exit_dput;
 +              if (path->dentry->d_inode->i_op->follow_link)
 +                      return NULL;
 +              error = -ENOTDIR;
 +              if (*want_dir && !path->dentry->d_inode->i_op->lookup)
 +                      goto exit_dput;
 +              path_to_nameidata(path, nd);
 +              audit_inode(pathname, nd->path.dentry);
 +              goto ok;
 +      }
  
 -      error = -ENFILE;
 -      filp = get_empty_filp();
 -      if (filp == NULL)
 -              goto exit_parent;
 -      nd.intent.open.file = filp;
 -      filp->f_flags = open_flag;
 -      nd.intent.open.flags = flag;
 -      nd.intent.open.create_mode = mode;
 -      dir = nd.path.dentry;
 -      nd.flags &= ~LOOKUP_PARENT;
 -      nd.flags |= LOOKUP_CREATE | LOOKUP_OPEN;
 -      if (flag & O_EXCL)
 -              nd.flags |= LOOKUP_EXCL;
 +      /* OK, it's O_CREAT */
        mutex_lock(&dir->d_inode->i_mutex);
 -      path.dentry = lookup_hash(&nd);
 -      path.mnt = nd.path.mnt;
  
 -do_last:
 -      error = PTR_ERR(path.dentry);
 -      if (IS_ERR(path.dentry)) {
 +      path->dentry = lookup_hash(nd);
 +      path->mnt = nd->path.mnt;
 +
 +      error = PTR_ERR(path->dentry);
 +      if (IS_ERR(path->dentry)) {
                mutex_unlock(&dir->d_inode->i_mutex);
                goto exit;
        }
  
 -      if (IS_ERR(nd.intent.open.file)) {
 -              error = PTR_ERR(nd.intent.open.file);
 +      if (IS_ERR(nd->intent.open.file)) {
 +              error = PTR_ERR(nd->intent.open.file);
                goto exit_mutex_unlock;
        }
  
        /* Negative dentry, just create the file */
 -      if (!path.dentry->d_inode) {
 +      if (!path->dentry->d_inode) {
                /*
                 * This write is needed to ensure that a
                 * ro->rw transition does not occur between
                 * a permanent write count is taken through
                 * the 'struct file' in nameidata_to_filp().
                 */
 -              error = mnt_want_write(nd.path.mnt);
 +              error = mnt_want_write(nd->path.mnt);
                if (error)
                        goto exit_mutex_unlock;
 -              error = __open_namei_create(&nd, &path, flag, mode);
 +              error = __open_namei_create(nd, path, open_flag, mode);
                if (error) {
 -                      mnt_drop_write(nd.path.mnt);
 +                      mnt_drop_write(nd->path.mnt);
                        goto exit;
                }
 -              filp = nameidata_to_filp(&nd);
 -              mnt_drop_write(nd.path.mnt);
 -              if (nd.root.mnt)
 -                      path_put(&nd.root);
 +              filp = nameidata_to_filp(nd);
 +              mnt_drop_write(nd->path.mnt);
                if (!IS_ERR(filp)) {
 -                      error = ima_path_check(&filp->f_path, filp->f_mode &
 -                                     (MAY_READ | MAY_WRITE | MAY_EXEC));
 +                      error = ima_file_check(filp, acc_mode);
                        if (error) {
                                fput(filp);
                                filp = ERR_PTR(error);
         * It already exists.
         */
        mutex_unlock(&dir->d_inode->i_mutex);
 -      audit_inode(pathname, path.dentry);
 +      audit_inode(pathname, path->dentry);
  
        error = -EEXIST;
 -      if (flag & O_EXCL)
 +      if (open_flag & O_EXCL)
                goto exit_dput;
  
 -      if (__follow_mount(&path)) {
 +      if (__follow_mount(path)) {
                error = -ELOOP;
 -              if (flag & O_NOFOLLOW)
 +              if (open_flag & O_NOFOLLOW)
                        goto exit_dput;
        }
  
        error = -ENOENT;
 -      if (!path.dentry->d_inode)
 +      if (!path->dentry->d_inode)
                goto exit_dput;
 -      if (path.dentry->d_inode->i_op->follow_link)
 -              goto do_link;
  
 -      path_to_nameidata(&path, &nd);
 +      if (path->dentry->d_inode->i_op->follow_link)
 +              return NULL;
 +
 +      path_to_nameidata(path, nd);
        error = -EISDIR;
 -      if (S_ISDIR(path.dentry->d_inode->i_mode))
 +      if (S_ISDIR(path->dentry->d_inode->i_mode))
                goto exit;
  ok:
 +      filp = finish_open(nd, open_flag, acc_mode);
 +      return filp;
 +
 +exit_mutex_unlock:
 +      mutex_unlock(&dir->d_inode->i_mutex);
 +exit_dput:
 +      path_put_conditional(path, nd);
 +exit:
 +      if (!IS_ERR(nd->intent.open.file))
 +              release_open_intent(nd);
 +      path_put(&nd->path);
 +      return ERR_PTR(error);
 +}
 +
 +/*
 + * Note that the low bits of the passed in "open_flag"
 + * are not the same as in the local variable "flag". See
 + * open_to_namei_flags() for more details.
 + */
 +struct file *do_filp_open(int dfd, const char *pathname,
 +              int open_flag, int mode, int acc_mode)
 +{
 +      struct file *filp;
 +      struct nameidata nd;
 +      int error;
 +      struct path path;
 +      int count = 0;
 +      int flag = open_to_namei_flags(open_flag);
 +      int force_reval = 0;
 +      int want_dir = open_flag & O_DIRECTORY;
 +
 +      if (!(open_flag & O_CREAT))
 +              mode = 0;
 +
        /*
 -       * Consider:
 -       * 1. may_open() truncates a file
 -       * 2. a rw->ro mount transition occurs
 -       * 3. nameidata_to_filp() fails due to
 -       *    the ro mount.
 -       * That would be inconsistent, and should
 -       * be avoided. Taking this mnt write here
 -       * ensures that (2) can not occur.
 +       * O_SYNC is implemented as __O_SYNC|O_DSYNC.  Since many places
 +       * only check for O_DSYNC when they need any syncing at all, we
 +       * enforce that it is always set instead of having to deal with
 +       * possibly weird behaviour for malicious applications setting
 +       * only __O_SYNC.
         */
 -      will_truncate = open_will_truncate(flag, nd.path.dentry->d_inode);
 -      if (will_truncate) {
 -              error = mnt_want_write(nd.path.mnt);
 -              if (error)
 -                      goto exit;
 -      }
 -      error = may_open(&nd.path, acc_mode, flag);
 +      if (open_flag & __O_SYNC)
 +              open_flag |= O_DSYNC;
 +
 +      if (!acc_mode)
 +              acc_mode = MAY_OPEN | ACC_MODE(open_flag);
 +
 +      /* O_TRUNC implies we need access checks for write permissions */
 +      if (open_flag & O_TRUNC)
 +              acc_mode |= MAY_WRITE;
 +
 +      /* Allow the LSM permission hook to distinguish append 
 +         access from general write access. */
 +      if (open_flag & O_APPEND)
 +              acc_mode |= MAY_APPEND;
 +
 +      /* find the parent */
 +reval:
 +      error = path_init(dfd, pathname, LOOKUP_PARENT, &nd);
 +      if (error)
 +              return ERR_PTR(error);
 +      if (force_reval)
 +              nd.flags |= LOOKUP_REVAL;
 +
 +      current->total_link_count = 0;
 +      error = link_path_walk(pathname, &nd);
        if (error) {
 -              if (will_truncate)
 -                      mnt_drop_write(nd.path.mnt);
 -              goto exit;
 -      }
 -      filp = nameidata_to_filp(&nd);
 -      if (!IS_ERR(filp)) {
 -              error = ima_path_check(&filp->f_path, filp->f_mode &
 -                             (MAY_READ | MAY_WRITE | MAY_EXEC));
 -              if (error) {
 -                      fput(filp);
 -                      filp = ERR_PTR(error);
 -              }
 +              filp = ERR_PTR(error);
 +              goto out;
        }
 -      if (!IS_ERR(filp)) {
 -              if (acc_mode & MAY_WRITE)
 -                      vfs_dq_init(nd.path.dentry->d_inode);
 +      if (unlikely(!audit_dummy_context()) && (open_flag & O_CREAT))
 +              audit_inode(pathname, nd.path.dentry);
  
 -              if (will_truncate) {
 -                      error = handle_truncate(&nd.path);
 -                      if (error) {
 -                              fput(filp);
 -                              filp = ERR_PTR(error);
 -                      }
 -              }
 -      }
        /*
 -       * It is now safe to drop the mnt write
 -       * because the filp has had a write taken
 -       * on its behalf.
 +       * We have the parent and last component.
         */
 -      if (will_truncate)
 -              mnt_drop_write(nd.path.mnt);
 +
 +      error = -ENFILE;
 +      filp = get_empty_filp();
 +      if (filp == NULL)
 +              goto exit_parent;
 +      nd.intent.open.file = filp;
 +      filp->f_flags = open_flag;
 +      nd.intent.open.flags = flag;
 +      nd.intent.open.create_mode = mode;
 +      nd.flags &= ~LOOKUP_PARENT;
 +      nd.flags |= LOOKUP_OPEN;
 +      if (open_flag & O_CREAT) {
 +              nd.flags |= LOOKUP_CREATE;
 +              if (open_flag & O_EXCL)
 +                      nd.flags |= LOOKUP_EXCL;
 +      }
 +      filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir);
 +      while (unlikely(!filp)) { /* trailing symlink */
 +              struct path holder;
 +              struct inode *inode = path.dentry->d_inode;
 +              void *cookie;
 +              error = -ELOOP;
 +              /* S_ISDIR part is a temporary automount kludge */
 +              if ((open_flag & O_NOFOLLOW) && !S_ISDIR(inode->i_mode))
 +                      goto exit_dput;
 +              if (count++ == 32)
 +                      goto exit_dput;
 +              /*
 +               * This is subtle. Instead of calling do_follow_link() we do
 +               * the thing by hand. The reason is that this way we have zero
 +               * link_count and path_walk() (called from ->follow_link)
 +               * honoring LOOKUP_PARENT.  After that we have the parent and
 +               * last component, i.e. we are in the same situation as after
 +               * the first path_walk().  Well, almost - if the last component
 +               * is normal we get its copy stored in nd->last.name and we will
 +               * have to putname() it when we are done. Procfs-like symlinks
 +               * just set LAST_BIND.
 +               */
 +              nd.flags |= LOOKUP_PARENT;
 +              error = security_inode_follow_link(path.dentry, &nd);
 +              if (error)
 +                      goto exit_dput;
 +              error = __do_follow_link(&path, &nd, &cookie);
 +              if (unlikely(error)) {
 +                      /* nd.path had been dropped */
 +                      if (!IS_ERR(cookie) && inode->i_op->put_link)
 +                              inode->i_op->put_link(path.dentry, &nd, cookie);
 +                      path_put(&path);
 +                      release_open_intent(&nd);
 +                      filp = ERR_PTR(error);
 +                      goto out;
 +              }
 +              holder = path;
 +              nd.flags &= ~LOOKUP_PARENT;
 +              filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir);
 +              if (inode->i_op->put_link)
 +                      inode->i_op->put_link(holder.dentry, &nd, cookie);
 +              path_put(&holder);
 +      }
 +out:
        if (nd.root.mnt)
                path_put(&nd.root);
 +      if (filp == ERR_PTR(-ESTALE) && !force_reval) {
 +              force_reval = 1;
 +              goto reval;
 +      }
        return filp;
  
 -exit_mutex_unlock:
 -      mutex_unlock(&dir->d_inode->i_mutex);
  exit_dput:
        path_put_conditional(&path, &nd);
 -exit:
        if (!IS_ERR(nd.intent.open.file))
                release_open_intent(&nd);
  exit_parent:
 -      if (nd.root.mnt)
 -              path_put(&nd.root);
        path_put(&nd.path);
 -      return ERR_PTR(error);
 -
 -do_link:
 -      error = -ELOOP;
 -      if (flag & O_NOFOLLOW)
 -              goto exit_dput;
 -      /*
 -       * This is subtle. Instead of calling do_follow_link() we do the
 -       * thing by hands. The reason is that this way we have zero link_count
 -       * and path_walk() (called from ->follow_link) honoring LOOKUP_PARENT.
 -       * After that we have the parent and last component, i.e.
 -       * we are in the same situation as after the first path_walk().
 -       * Well, almost - if the last component is normal we get its copy
 -       * stored in nd->last.name and we will have to putname() it when we
 -       * are done. Procfs-like symlinks just set LAST_BIND.
 -       */
 -      nd.flags |= LOOKUP_PARENT;
 -      error = security_inode_follow_link(path.dentry, &nd);
 -      if (error)
 -              goto exit_dput;
 -      error = __do_follow_link(&path, &nd);
 -      path_put(&path);
 -      if (error) {
 -              /* Does someone understand code flow here? Or it is only
 -               * me so stupid? Anathema to whoever designed this non-sense
 -               * with "intent.open".
 -               */
 -              release_open_intent(&nd);
 -              if (nd.root.mnt)
 -                      path_put(&nd.root);
 -              if (error == -ESTALE && !force_reval) {
 -                      force_reval = 1;
 -                      goto reval;
 -              }
 -              return ERR_PTR(error);
 -      }
 -      nd.flags &= ~LOOKUP_PARENT;
 -      if (nd.last_type == LAST_BIND)
 -              goto ok;
 -      error = -EISDIR;
 -      if (nd.last_type != LAST_NORM)
 -              goto exit;
 -      if (nd.last.name[nd.last.len]) {
 -              __putname(nd.last.name);
 -              goto exit;
 -      }
 -      error = -ELOOP;
 -      if (count++==32) {
 -              __putname(nd.last.name);
 -              goto exit;
 -      }
 -      dir = nd.path.dentry;
 -      mutex_lock(&dir->d_inode->i_mutex);
 -      path.dentry = lookup_hash(&nd);
 -      path.mnt = nd.path.mnt;
 -      __putname(nd.last.name);
 -      goto do_last;
 +      filp = ERR_PTR(error);
 +      goto out;
  }
  
  /**
@@@ -1981,6 -1988,7 +1981,6 @@@ int vfs_mknod(struct inode *dir, struc
        if (error)
                return error;
  
 -      vfs_dq_init(dir);
        error = dir->i_op->mknod(dir, dentry, mode, dev);
        if (!error)
                fsnotify_create(dir, dentry);
@@@ -2079,6 -2087,7 +2079,6 @@@ int vfs_mkdir(struct inode *dir, struc
        if (error)
                return error;
  
 -      vfs_dq_init(dir);
        error = dir->i_op->mkdir(dir, dentry, mode);
        if (!error)
                fsnotify_mkdir(dir, dentry);
@@@ -2164,6 -2173,8 +2164,6 @@@ int vfs_rmdir(struct inode *dir, struc
        if (!dir->i_op->rmdir)
                return -EPERM;
  
 -      vfs_dq_init(dir);
 -
        mutex_lock(&dentry->d_inode->i_mutex);
        dentry_unhash(dentry);
        if (d_mountpoint(dentry))
@@@ -2249,16 -2260,15 +2249,16 @@@ int vfs_unlink(struct inode *dir, struc
        if (!dir->i_op->unlink)
                return -EPERM;
  
 -      vfs_dq_init(dir);
 -
        mutex_lock(&dentry->d_inode->i_mutex);
        if (d_mountpoint(dentry))
                error = -EBUSY;
        else {
                error = security_inode_unlink(dir, dentry);
 -              if (!error)
 +              if (!error) {
                        error = dir->i_op->unlink(dir, dentry);
 +                      if (!error)
 +                              dentry->d_inode->i_flags |= S_DEAD;
 +              }
        }
        mutex_unlock(&dentry->d_inode->i_mutex);
  
@@@ -2361,6 -2371,7 +2361,6 @@@ int vfs_symlink(struct inode *dir, stru
        if (error)
                return error;
  
 -      vfs_dq_init(dir);
        error = dir->i_op->symlink(dir, dentry, oldname);
        if (!error)
                fsnotify_create(dir, dentry);
@@@ -2444,6 -2455,7 +2444,6 @@@ int vfs_link(struct dentry *old_dentry
                return error;
  
        mutex_lock(&inode->i_mutex);
 -      vfs_dq_init(dir);
        error = dir->i_op->link(old_dentry, dir, new_dentry);
        mutex_unlock(&inode->i_mutex);
        if (!error)
@@@ -2544,7 -2556,7 +2544,7 @@@ SYSCALL_DEFINE2(link, const char __use
   *    e) conversion from fhandle to dentry may come in the wrong moment - when
   *       we are removing the target. Solution: we will have to grab ->i_mutex
   *       in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
-  *       ->i_mutex on parents, which works but leads to some truely excessive
+  *       ->i_mutex on parents, which works but leads to some truly excessive
   *       locking].
   */
  static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
@@@ -2609,8 -2621,6 +2609,8 @@@ static int vfs_rename_other(struct inod
        else
                error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
        if (!error) {
 +              if (target)
 +                      target->i_flags |= S_DEAD;
                if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
                        d_move(old_dentry, new_dentry);
        }
@@@ -2644,15 -2654,20 +2644,15 @@@ int vfs_rename(struct inode *old_dir, s
        if (!old_dir->i_op->rename)
                return -EPERM;
  
 -      vfs_dq_init(old_dir);
 -      vfs_dq_init(new_dir);
 -
        old_name = fsnotify_oldname_init(old_dentry->d_name.name);
  
        if (is_dir)
                error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
        else
                error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
 -      if (!error) {
 -              const char *new_name = old_dentry->d_name.name;
 -              fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir,
 +      if (!error)
 +              fsnotify_move(old_dir, new_dir, old_name, is_dir,
                              new_dentry->d_inode, old_dentry);
 -      }
        fsnotify_oldname_free(old_name);
  
        return error;
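
The do_filp_open() rewrite above splits last-component handling into do_last(), which returns NULL when it meets a trailing symlink; the caller then follows the link by hand and retries, capping the chain at 32 links. The control flow, reduced to a skeleton condensed from the new code (error handling and the put_link bookkeeping elided; not compilable as-is):

/* Skeleton of the new open flow. */
filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir);
while (!filp) {				/* NULL means: trailing symlink */
	if ((open_flag & O_NOFOLLOW) || count++ == 32)
		return ERR_PTR(-ELOOP);	/* refuse to follow, or too deep */
	__do_follow_link(&path, &nd, &cookie);	/* walk the link body */
	filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname,
		       &want_dir);	/* retry on the link target */
}
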
diff --combined fs/nfsd/nfs4xdr.c
index 78c7e24e5129b595670dfc13d362af692ae5359f,143d43a93b7250b7a30ebbb8438076c9a257a911..c47b4d7bafa72d400b6249d0b0c7cd6908f0fd85
@@@ -1434,7 -1434,7 +1434,7 @@@ nfsd4_decode_compound(struct nfsd4_comp
                }
                op->opnum = ntohl(*argp->p++);
  
 -              if (op->opnum >= OP_ACCESS && op->opnum < ops->nops)
 +              if (op->opnum >= FIRST_NFS4_OP && op->opnum <= LAST_NFS4_OP)
                        op->status = ops->decoders[op->opnum](argp, &op->u);
                else {
                        op->opnum = OP_ILLEGAL;
@@@ -1528,7 -1528,7 +1528,7 @@@ static void write_cinfo(__be32 **p, str
        } } while (0);
  
  /* Encode as an array of strings the string given with components
-  * seperated @sep.
+  * separated @sep.
   */
  static __be32 nfsd4_encode_components(char sep, char *components,
                                   __be32 **pp, int *buflen)
@@@ -2121,15 -2121,9 +2121,15 @@@ out_acl
                 * and this is the root of a cross-mounted filesystem.
                 */
                if (ignore_crossmnt == 0 &&
 -                  exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
 -                      err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
 -                              exp->ex_path.mnt->mnt_mountpoint, &stat);
 +                  dentry == exp->ex_path.mnt->mnt_root) {
 +                      struct path path = exp->ex_path;
 +                      path_get(&path);
 +                      while (follow_up(&path)) {
 +                              if (path.dentry != path.mnt->mnt_root)
 +                                      break;
 +                      }
 +                      err = vfs_getattr(path.mnt, path.dentry, &stat);
 +                      path_put(&path);
                        if (err)
                                goto out_nfserr;
                }
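
The nfsd4_decode_compound() change earlier in this file replaces the old "opnum < ops->nops" test with explicit FIRST_NFS4_OP/LAST_NFS4_OP bounds before indexing the decoder table, so an out-of-range opnum degrades to OP_ILLEGAL instead of reading past the array. The same guard pattern in standalone form (names and values illustrative, not the NFSv4 constants):

#include <stdio.h>

enum { FIRST_OP = 3, LAST_OP = 10, OP_ILLEGAL = 10044 };

typedef int (*decoder_t)(void);
static int decode_noop(void) { return 0; }

/* One decoder slot per opcode; indexing is safe only after the
 * inclusive range check below. */
static decoder_t decoders[LAST_OP + 1] = {
	[FIRST_OP ... LAST_OP] = decode_noop,	/* GCC range initializer */
};

static int dispatch(unsigned int opnum)
{
	if (opnum >= FIRST_OP && opnum <= LAST_OP)
		return decoders[opnum]();
	fprintf(stderr, "op %u -> OP_ILLEGAL\n", opnum);
	return OP_ILLEGAL;
}

int main(void)
{
	dispatch(5);	/* valid: dispatched through the table */
	dispatch(99);	/* rejected before the table is touched */
	return 0;
}
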
diff --combined fs/ocfs2/dlmglue.c
index 8298608d4165bf5b49e06e48a5302ea1015038d9,ccb9c44f478d6d1edabfbae114b3d956d3ca11b7..50c4ee805da46910da11dc5fa88b3eb4b8486d52
@@@ -297,11 -297,6 +297,11 @@@ static inline int ocfs2_is_inode_lock(s
                lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
  }
  
 +static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
 +{
 +      return container_of(lksb, struct ocfs2_lock_res, l_lksb);
 +}
 +
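
ocfs2_lksb_to_lock_res() lets the DLM callbacks take the lksb itself instead of an opaque pointer: because l_lksb is embedded in struct ocfs2_lock_res, container_of() recovers the enclosing lockres by subtracting the member's offset. The mechanism in miniature, as standalone C (the kernel's container_of also type-checks via typeof; this simplified macro skips that):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct lksb { int status; };
struct lock_res { const char *name; struct lksb l_lksb; };

/* Given only the embedded lksb, recover the lock_res around it. */
static struct lock_res *lksb_to_lock_res(struct lksb *lksb)
{
	return container_of(lksb, struct lock_res, l_lksb);
}

int main(void)
{
	struct lock_res res = { .name = "demo" };
	printf("%s\n", lksb_to_lock_res(&res.l_lksb)->name);	/* "demo" */
	return 0;
}
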
  static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
  {
        BUG_ON(!ocfs2_is_inode_lock(lockres));
@@@ -880,14 -875,6 +880,14 @@@ static inline void ocfs2_generic_handle
                lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
  
        lockres->l_level = lockres->l_requested;
 +
 +      /*
 +       * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
 +       * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
 +       * downconverting the lock before the upconvert has fully completed.
 +       */
 +      lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
 +
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
  
        mlog_exit_void();
@@@ -920,6 -907,8 +920,6 @@@ static int ocfs2_generic_handle_bast(st
  
        assert_spin_locked(&lockres->l_lock);
  
 -      lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
 -
        if (level > lockres->l_blocking) {
                /* only schedule a downconvert if we haven't already scheduled
                 * one that goes low enough to satisfy the level we're
                lockres->l_blocking = level;
        }
  
 +      mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
 +           lockres->l_name, level, lockres->l_level, lockres->l_blocking,
 +           needs_downconvert);
 +
 +      if (needs_downconvert)
 +              lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
 +
        mlog_exit(needs_downconvert);
        return needs_downconvert;
  }
@@@ -1049,17 -1031,18 +1049,17 @@@ static unsigned int lockres_set_pending
        return lockres->l_pending_gen;
  }
  
 -
 -static void ocfs2_blocking_ast(void *opaque, int level)
 +static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
  {
 -      struct ocfs2_lock_res *lockres = opaque;
 +      struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
        struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
        int needs_downconvert;
        unsigned long flags;
  
        BUG_ON(level <= DLM_LOCK_NL);
  
 -      mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
 -           lockres->l_name, level, lockres->l_level,
 +      mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
 +           "type %s\n", lockres->l_name, level, lockres->l_level,
             ocfs2_lock_type_string(lockres->l_type));
  
        /*
        ocfs2_wake_downconvert_thread(osb);
  }
  
 -static void ocfs2_locking_ast(void *opaque)
 +static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
  {
 -      struct ocfs2_lock_res *lockres = opaque;
 +      struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
        struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
        unsigned long flags;
        int status;
                return;
        }
  
 +      mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
 +           "level %d => %d\n", lockres->l_name, lockres->l_action,
 +           lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
 +
        switch(lockres->l_action) {
        case OCFS2_AST_ATTACH:
                ocfs2_generic_handle_attach_action(lockres);
                ocfs2_generic_handle_downconvert_action(lockres);
                break;
        default:
 -              mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
 -                   "lockres flags = 0x%lx, unlock action: %u\n",
 +              mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
 +                   "flags 0x%lx, unlock: %u\n",
                     lockres->l_name, lockres->l_action, lockres->l_flags,
                     lockres->l_unlock_action);
                BUG();
        spin_unlock_irqrestore(&lockres->l_lock, flags);
  }
  
 +static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
 +{
 +      struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
 +      unsigned long flags;
 +
 +      mlog_entry_void();
 +
 +      mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
 +           lockres->l_name, lockres->l_unlock_action);
 +
 +      spin_lock_irqsave(&lockres->l_lock, flags);
 +      if (error) {
 +              mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
 +                   "unlock_action %d\n", error, lockres->l_name,
 +                   lockres->l_unlock_action);
 +              spin_unlock_irqrestore(&lockres->l_lock, flags);
 +              mlog_exit_void();
 +              return;
 +      }
 +
 +      switch(lockres->l_unlock_action) {
 +      case OCFS2_UNLOCK_CANCEL_CONVERT:
 +              mlog(0, "Cancel convert success for %s\n", lockres->l_name);
 +              lockres->l_action = OCFS2_AST_INVALID;
 +              /* Downconvert thread may have requeued this lock, we
 +               * need to wake it. */
 +              if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
 +                      ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
 +              break;
 +      case OCFS2_UNLOCK_DROP_LOCK:
 +              lockres->l_level = DLM_LOCK_IV;
 +              break;
 +      default:
 +              BUG();
 +      }
 +
 +      lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 +      lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
 +      wake_up(&lockres->l_event);
 +      spin_unlock_irqrestore(&lockres->l_lock, flags);
 +
 +      mlog_exit_void();
 +}
 +
 +/*
 + * This is the filesystem locking protocol.  It provides the lock handling
 + * hooks for the underlying DLM.  It has a maximum version number.
 + * The version number allows interoperability with systems running at
 + * the same major number and an equal or smaller minor number.
 + *
 + * Whenever the filesystem does new things with locks (adds or removes a
 + * lock, orders them differently, does different things underneath a lock),
 + * the version must be changed.  The protocol is negotiated when joining
 + * the dlm domain.  A node may join the domain if its major version is
 + * identical to all other nodes and its minor version is greater than
 + * or equal to all other nodes.  When its minor version is greater than
 + * the other nodes, it will run at the minor version specified by the
 + * other nodes.
 + *
 + * If a locking change is made that will not be compatible with older
 + * versions, the major number must be increased and the minor version set
 + * to zero.  If a change merely adds a behavior that can be disabled when
 + * speaking to older versions, the minor version must be increased.  If a
 + * change adds a fully backwards compatible change (eg, LVB changes that
 + * are just ignored by older versions), the version does not need to be
 + * updated.
 + */
 +static struct ocfs2_locking_protocol lproto = {
 +      .lp_max_version = {
 +              .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
 +              .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
 +      },
 +      .lp_lock_ast            = ocfs2_locking_ast,
 +      .lp_blocking_ast        = ocfs2_blocking_ast,
 +      .lp_unlock_ast          = ocfs2_unlock_ast,
 +};
 +
 +void ocfs2_set_locking_protocol(void)
 +{
 +      ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
 +}
 +
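The join rule described in the comment reduces to one comparison; here is a hedged sketch with hypothetical types (the real negotiation happens in the stack glue, which this diff only calls into):

struct proto_version { unsigned char pv_major, pv_minor; };

/* A node may join only if its major version matches the domain's and
 * its minor is at least the domain's; it then runs at the domain's
 * (equal or smaller) minor. Returns 0 on success, -1 on refusal. */
static int may_join(const struct proto_version *domain,
                    struct proto_version *joiner)
{
        if (joiner->pv_major != domain->pv_major ||
            joiner->pv_minor <  domain->pv_minor)
                return -1;
        joiner->pv_minor = domain->pv_minor;    /* negotiated level */
        return 0;
}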
  static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
                                                int convert)
  {
        mlog_entry_void();
        spin_lock_irqsave(&lockres->l_lock, flags);
        lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 +      lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
        if (convert)
                lockres->l_action = OCFS2_AST_INVALID;
        else
@@@ -1283,7 -1179,8 +1283,7 @@@ static int ocfs2_lock_create(struct ocf
                             &lockres->l_lksb,
                             dlm_flags,
                             lockres->l_name,
 -                           OCFS2_LOCK_ID_MAX_LEN - 1,
 -                           lockres);
 +                           OCFS2_LOCK_ID_MAX_LEN - 1);
        lockres_clear_pending(lockres, gen, osb);
        if (ret) {
                ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@@ -1426,13 -1323,13 +1426,13 @@@ static int __ocfs2_cluster_lock(struct 
  again:
        wait = 0;
  
 +      spin_lock_irqsave(&lockres->l_lock, flags);
 +
        if (catch_signals && signal_pending(current)) {
                ret = -ERESTARTSYS;
 -              goto out;
 +              goto unlock;
        }
  
 -      spin_lock_irqsave(&lockres->l_lock, flags);
 -
        mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
                        "Cluster lock called on freeing lockres %s! flags "
                        "0x%lx\n", lockres->l_name, lockres->l_flags);
                goto unlock;
        }
  
 +      if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
 +              /*
 +               * We've upconverted. If the lock now has a level we can
 +               * work with, we take it. If, however, the lock is not at the
 +               * required level, we go thru the full cycle. One way this could
 +               * happen is if a process requesting an upconvert to PR is
 +               * closely followed by another requesting upconvert to an EX.
 +               * If the process requesting EX lands here, we want it to
 +               * continue attempting to upconvert and let the process
 +               * requesting PR take the lock.
 +               * If multiple processes request upconvert to PR, the first one
 +               * here will take the lock. The others will have to go thru the
 +               * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
 +               * downconvert request.
 +               */
 +              if (level <= lockres->l_level)
 +                      goto update_holders;
 +      }
 +
        if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
            !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
                /* is the lock is currently blocked on behalf of
                BUG_ON(level == DLM_LOCK_IV);
                BUG_ON(level == DLM_LOCK_NL);
  
 -              mlog(0, "lock %s, convert from %d to level = %d\n",
 +              mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
                     lockres->l_name, lockres->l_level, level);
  
                /* call dlm_lock to upgrade lock now */
                                     &lockres->l_lksb,
                                     lkm_flags,
                                     lockres->l_name,
 -                                   OCFS2_LOCK_ID_MAX_LEN - 1,
 -                                   lockres);
 +                                   OCFS2_LOCK_ID_MAX_LEN - 1);
                lockres_clear_pending(lockres, gen, osb);
                if (ret) {
                        if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
                goto again;
        }
  
 +update_holders:
        /* Ok, if we get here then we're good to go. */
        ocfs2_inc_holders(lockres, level);
  
        ret = 0;
  unlock:
 +      lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
 +
        spin_unlock_irqrestore(&lockres->l_lock, flags);
  out:
        /*
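The long comment in this hunk describes a compact decision: a waiter whose requested level is already covered by the just-finished upconvert takes the lock straight away, while anyone needing more goes through the full conversion cycle. A sketch of the predicate, hypothetical names only:

#define LOCK_UPCONVERT_FINISHING  0x02UL

/* Caller holds the lockres spinlock. Nonzero means the waiter may
 * jump to update_holders and simply count itself as a holder. */
static int upconvert_covers_request(unsigned long flags,
                                    int cur_level, int want_level)
{
        return (flags & LOCK_UPCONVERT_FINISHING) && want_level <= cur_level;
}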
@@@ -1881,7 -1757,7 +1881,7 @@@ out
   * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
   * flock() calls. The locking approach this requires is sufficiently
   * different from all other cluster lock types that we implement a
-  * seperate path to the "low-level" dlm calls. In particular:
+  * separate path to the "low-level" dlm calls. In particular:
   *
   * - No optimization of lock levels is done - we take exactly
   *   what's been requested.
@@@ -1951,7 -1827,8 +1951,7 @@@ int ocfs2_file_lock(struct file *file, 
        spin_unlock_irqrestore(&lockres->l_lock, flags);
  
        ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
 -                           lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
 -                           lockres);
 +                           lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
        if (ret) {
                if (!trylock || (ret != -EAGAIN)) {
                        ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@@ -3080,7 -2957,7 +3080,7 @@@ int ocfs2_dlm_init(struct ocfs2_super *
        status = ocfs2_cluster_connect(osb->osb_cluster_stack,
                                       osb->uuid_str,
                                       strlen(osb->uuid_str),
 -                                     ocfs2_do_node_down, osb,
 +                                     &lproto, ocfs2_do_node_down, osb,
                                       &conn);
        if (status) {
                mlog_errno(status);
@@@ -3147,6 -3024,50 +3147,6 @@@ void ocfs2_dlm_shutdown(struct ocfs2_su
        mlog_exit_void();
  }
  
 -static void ocfs2_unlock_ast(void *opaque, int error)
 -{
 -      struct ocfs2_lock_res *lockres = opaque;
 -      unsigned long flags;
 -
 -      mlog_entry_void();
 -
 -      mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
 -           lockres->l_unlock_action);
 -
 -      spin_lock_irqsave(&lockres->l_lock, flags);
 -      if (error) {
 -              mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
 -                   "unlock_action %d\n", error, lockres->l_name,
 -                   lockres->l_unlock_action);
 -              spin_unlock_irqrestore(&lockres->l_lock, flags);
 -              mlog_exit_void();
 -              return;
 -      }
 -
 -      switch(lockres->l_unlock_action) {
 -      case OCFS2_UNLOCK_CANCEL_CONVERT:
 -              mlog(0, "Cancel convert success for %s\n", lockres->l_name);
 -              lockres->l_action = OCFS2_AST_INVALID;
 -              /* Downconvert thread may have requeued this lock, we
 -               * need to wake it. */
 -              if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
 -                      ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
 -              break;
 -      case OCFS2_UNLOCK_DROP_LOCK:
 -              lockres->l_level = DLM_LOCK_IV;
 -              break;
 -      default:
 -              BUG();
 -      }
 -
 -      lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 -      lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
 -      wake_up(&lockres->l_event);
 -      spin_unlock_irqrestore(&lockres->l_lock, flags);
 -
 -      mlog_exit_void();
 -}
 -
  static int ocfs2_drop_lock(struct ocfs2_super *osb,
                           struct ocfs2_lock_res *lockres)
  {
  
        mlog(0, "lock %s\n", lockres->l_name);
  
 -      ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags,
 -                             lockres);
 +      ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
        if (ret) {
                ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
                mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
@@@ -3233,7 -3155,7 +3233,7 @@@ out
  /* Mark the lockres as being dropped. It will no longer be
   * queued if blocking, but we still may have to wait on it
   * being dequeued from the downconvert thread before we can consider
 - * it safe to drop. 
 + * it safe to drop.
   *
   * You can *not* attempt to call cluster_lock on this lockres anymore. */
  void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@@ -3322,20 -3244,13 +3322,20 @@@ static unsigned int ocfs2_prepare_downc
        BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
  
        if (lockres->l_level <= new_level) {
 -              mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n",
 -                   lockres->l_level, new_level);
 +              mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
 +                   "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
 +                   "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
 +                   new_level, list_empty(&lockres->l_blocked_list),
 +                   list_empty(&lockres->l_mask_waiters), lockres->l_type,
 +                   lockres->l_flags, lockres->l_ro_holders,
 +                   lockres->l_ex_holders, lockres->l_action,
 +                   lockres->l_unlock_action, lockres->l_requested,
 +                   lockres->l_blocking, lockres->l_pending_gen);
                BUG();
        }
  
 -      mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
 -           lockres->l_name, new_level, lockres->l_blocking);
 +      mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
 +           lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
  
        lockres->l_action = OCFS2_AST_DOWNCONVERT;
        lockres->l_requested = new_level;
@@@ -3354,9 -3269,6 +3354,9 @@@ static int ocfs2_downconvert_lock(struc
  
        mlog_entry_void();
  
 +      mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
 +           lockres->l_level, new_level);
 +
        if (lvb)
                dlm_flags |= DLM_LKF_VALBLK;
  
                             &lockres->l_lksb,
                             dlm_flags,
                             lockres->l_name,
 -                           OCFS2_LOCK_ID_MAX_LEN - 1,
 -                           lockres);
 +                           OCFS2_LOCK_ID_MAX_LEN - 1);
        lockres_clear_pending(lockres, generation, osb);
        if (ret) {
                ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@@ -3386,12 -3299,14 +3386,12 @@@ static int ocfs2_prepare_cancel_convert
        assert_spin_locked(&lockres->l_lock);
  
        mlog_entry_void();
 -      mlog(0, "lock %s\n", lockres->l_name);
  
        if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
                /* If we're already trying to cancel a lock conversion
                 * then just drop the spinlock and allow the caller to
                 * requeue this lock. */
 -
 -              mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
 +              mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
                return 0;
        }
  
                        "lock %s, invalid flags: 0x%lx\n",
                        lockres->l_name, lockres->l_flags);
  
 +      mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
 +
        return 1;
  }
  
@@@ -3417,15 -3330,16 +3417,15 @@@ static int ocfs2_cancel_convert(struct 
        int ret;
  
        mlog_entry_void();
 -      mlog(0, "lock %s\n", lockres->l_name);
  
        ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
 -                             DLM_LKF_CANCEL, lockres);
 +                             DLM_LKF_CANCEL);
        if (ret) {
                ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
                ocfs2_recover_from_dlm_error(lockres, 0);
        }
  
 -      mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
 +      mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
  
        mlog_exit(ret);
        return ret;
@@@ -3438,7 -3352,6 +3438,7 @@@ static int ocfs2_unblock_lock(struct oc
        unsigned long flags;
        int blocking;
        int new_level;
 +      int level;
        int ret = 0;
        int set_lvb = 0;
        unsigned int gen;
  
        spin_lock_irqsave(&lockres->l_lock, flags);
  
 -      BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
 -
  recheck:
 +      /*
 +       * Is it still blocking? If not, we have no more work to do.
 +       */
 +      if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
 +              BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
 +              spin_unlock_irqrestore(&lockres->l_lock, flags);
 +              ret = 0;
 +              goto leave;
 +      }
 +
        if (lockres->l_flags & OCFS2_LOCK_BUSY) {
                /* XXX
                 * This is a *big* race.  The OCFS2_LOCK_PENDING flag
                 * at the same time they set OCFS2_DLM_BUSY.  They must
                 * clear OCFS2_DLM_PENDING after dlm_lock() returns.
                 */
 -              if (lockres->l_flags & OCFS2_LOCK_PENDING)
 +              if (lockres->l_flags & OCFS2_LOCK_PENDING) {
 +                      mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
 +                           lockres->l_name);
                        goto leave_requeue;
 +              }
  
                ctl->requeue = 1;
                ret = ocfs2_prepare_cancel_convert(osb, lockres);
                goto leave;
        }
  
 +      /*
 +       * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
 +       * set when the ast is received for an upconvert just before the
 +       * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
 +       * on the heels of the ast, we want to delay the downconvert just
 +       * enough to allow the up requestor to do its task. Because this
 +       * lock is in the blocked queue, the lock will be downconverted
 +       * as soon as the requestor is done with the lock.
 +       */
 +      if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
 +              goto leave_requeue;
 +
 +      /*
 +       * How can we block and yet be at NL?  We were trying to upconvert
 +       * from NL and got canceled.  The code comes back here, and now
 +       * we notice and clear BLOCKING.
 +       */
 +      if (lockres->l_level == DLM_LOCK_NL) {
 +              BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
 +              mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
 +              lockres->l_blocking = DLM_LOCK_NL;
 +              lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
 +              spin_unlock_irqrestore(&lockres->l_lock, flags);
 +              goto leave;
 +      }
 +
        /* if we're blocking an exclusive and we have *any* holders,
         * then requeue. */
        if ((lockres->l_blocking == DLM_LOCK_EX)
 -          && (lockres->l_ex_holders || lockres->l_ro_holders))
 +          && (lockres->l_ex_holders || lockres->l_ro_holders)) {
 +              mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
 +                   lockres->l_name, lockres->l_ex_holders,
 +                   lockres->l_ro_holders);
                goto leave_requeue;
 +      }
  
        /* If it's a PR we're blocking, then only
         * requeue if we've got any EX holders */
        if (lockres->l_blocking == DLM_LOCK_PR &&
 -          lockres->l_ex_holders)
 +          lockres->l_ex_holders) {
 +              mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
 +                   lockres->l_name, lockres->l_ex_holders);
                goto leave_requeue;
 +      }
  
        /*
         * Can we get a lock in this state if the holder counts are
         * zero? The meta data unblock code used to check this.
         */
        if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
 -          && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
 +          && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
 +              mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
 +                   lockres->l_name);
                goto leave_requeue;
 +      }
  
        new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
  
        if (lockres->l_ops->check_downconvert
 -          && !lockres->l_ops->check_downconvert(lockres, new_level))
 +          && !lockres->l_ops->check_downconvert(lockres, new_level)) {
 +              mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
 +                   lockres->l_name);
                goto leave_requeue;
 +      }
  
        /* If we get here, then we know that there are no more
         * incompatible holders (and anyone asking for an incompatible
         * may sleep, so we save off a copy of what we're blocking as
         * it may change while we're not holding the spin lock. */
        blocking = lockres->l_blocking;
 +      level = lockres->l_level;
        spin_unlock_irqrestore(&lockres->l_lock, flags);
  
        ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
  
 -      if (ctl->unblock_action == UNBLOCK_STOP_POST)
 +      if (ctl->unblock_action == UNBLOCK_STOP_POST) {
 +              mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
 +                   lockres->l_name);
                goto leave;
 +      }
  
        spin_lock_irqsave(&lockres->l_lock, flags);
 -      if (blocking != lockres->l_blocking) {
 +      if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
                /* If this changed underneath us, then we can't drop
                 * it just yet. */
 +              mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
 +                   "Recheck\n", lockres->l_name, blocking,
 +                   lockres->l_blocking, level, lockres->l_level);
                goto recheck;
        }
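The tail of this hunk also shows the pattern behind the new level snapshot: record the fields you depend on, drop the spinlock to run the (possibly sleeping) worker, retake the lock, and loop back if either snapshot went stale. A generic sketch of that shape, with hypothetical names and pthreads in place of the spinlock:

#include <pthread.h>

struct toy_lock { pthread_mutex_t mu; int blocking, level; };

static void downconvert_worker(int blocking)
{
        (void)blocking;         /* may sleep; the lock is not held here */
}

static void unblock(struct toy_lock *l)
{
        int blocking, level;

        pthread_mutex_lock(&l->mu);
recheck:
        blocking = l->blocking;           /* snapshot under the lock */
        level    = l->level;
        pthread_mutex_unlock(&l->mu);

        downconvert_worker(blocking);

        pthread_mutex_lock(&l->mu);
        if (blocking != l->blocking || level != l->level)
                goto recheck;             /* changed underneath us */
        /* ... safe to complete the downconvert ... */
        pthread_mutex_unlock(&l->mu);
}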
  
@@@ -3987,6 -3843,45 +3987,6 @@@ void ocfs2_refcount_unlock(struct ocfs2
                ocfs2_cluster_unlock(osb, lockres, level);
  }
  
 -/*
 - * This is the filesystem locking protocol.  It provides the lock handling
 - * hooks for the underlying DLM.  It has a maximum version number.
 - * The version number allows interoperability with systems running at
 - * the same major number and an equal or smaller minor number.
 - *
 - * Whenever the filesystem does new things with locks (adds or removes a
 - * lock, orders them differently, does different things underneath a lock),
 - * the version must be changed.  The protocol is negotiated when joining
 - * the dlm domain.  A node may join the domain if its major version is
 - * identical to all other nodes and its minor version is greater than
 - * or equal to all other nodes.  When its minor version is greater than
 - * the other nodes, it will run at the minor version specified by the
 - * other nodes.
 - *
 - * If a locking change is made that will not be compatible with older
 - * versions, the major number must be increased and the minor version set
 - * to zero.  If a change merely adds a behavior that can be disabled when
 - * speaking to older versions, the minor version must be increased.  If a
 - * change adds a fully backwards compatible change (eg, LVB changes that
 - * are just ignored by older versions), the version does not need to be
 - * updated.
 - */
 -static struct ocfs2_locking_protocol lproto = {
 -      .lp_max_version = {
 -              .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
 -              .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
 -      },
 -      .lp_lock_ast            = ocfs2_locking_ast,
 -      .lp_blocking_ast        = ocfs2_blocking_ast,
 -      .lp_unlock_ast          = ocfs2_unlock_ast,
 -};
 -
 -void ocfs2_set_locking_protocol(void)
 -{
 -      ocfs2_stack_glue_set_locking_protocol(&lproto);
 -}
 -
 -
  static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
                                       struct ocfs2_lock_res *lockres)
  {
        BUG_ON(!lockres);
        BUG_ON(!lockres->l_ops);
  
 -      mlog(0, "lockres %s blocked.\n", lockres->l_name);
 +      mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
  
        /* Detect whether a lock has been marked as going away while
         * the downconvert thread was processing other things. A lock can
@@@ -4026,7 -3921,7 +4026,7 @@@ unqueue
        } else
                ocfs2_schedule_blocked_lock(osb, lockres);
  
 -      mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
 +      mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
             ctl.requeue ? "yes" : "no");
        spin_unlock_irqrestore(&lockres->l_lock, flags);
  
@@@ -4048,7 -3943,7 +4048,7 @@@ static void ocfs2_schedule_blocked_lock
                /* Do not schedule a lock for downconvert when it's on
                 * the way to destruction - any nodes wanting access
                 * to the resource will get it soon. */
 -              mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
 +              mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
                     lockres->l_name, lockres->l_flags);
                return;
        }
diff --combined fs/ocfs2/extent_map.c
index 5328529e7fd289e963da8cfa7b66d5706babb00c,83e9b1249aed21c7303c2eb6b19c7e10bfbb933c..c562a7581cf93b709e79d70be22034f1e5be7a63
@@@ -192,7 -192,7 +192,7 @@@ static int ocfs2_try_to_merge_extent_ma
                emi->ei_clusters += ins->ei_clusters;
                return 1;
        } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
 -                 (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys &&
 +                 (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
                   ins->ei_flags == emi->ei_flags) {
                emi->ei_phys = ins->ei_phys;
                emi->ei_cpos = ins->ei_cpos;
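The one-character change above is a genuine bug fix: front-merging is only legal when the inserted extent is contiguous with the cached one in both address spaces, physical (ei_phys) and logical (ei_cpos). The old test compared a logical end against a physical start, so it could merge ranges that are adjacent on disk but not in the file. A restatement of the intended check, with hypothetical names:

struct toy_extent { unsigned int cpos, phys, clusters; };

/* ins may be glued onto the front of emi only if it ends exactly
 * where emi begins, both on disk (phys) and in the file (cpos). */
static int front_mergeable(const struct toy_extent *ins,
                           const struct toy_extent *emi)
{
        return ins->phys + ins->clusters == emi->phys &&
               ins->cpos + ins->clusters == emi->cpos;
}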
@@@ -453,7 -453,7 +453,7 @@@ static int ocfs2_get_clusters_nocache(s
        if (i == -1) {
                /*
                 * Holes can be larger than the maximum size of an
-                * extent, so we return their lengths in a seperate
+                * extent, so we return their lengths in a separate
                 * field.
                 */
                if (hole_len) {
diff --combined fs/reiserfs/bitmap.c
index dc014f7def0523869e970574c722473d74f1e316,ecc04b5ede574108f3158755749baf13c4c0456f..483442e66ed6d2066869fa770ec63c6808a3b283
@@@ -169,7 -169,7 +169,7 @@@ static int scan_bitmap_block(struct rei
                        return 0;       // No free blocks in this bitmap
                }
  
-               /* search for a first zero bit -- beggining of a window */
+               /* search for a first zero bit -- beginning of a window */
                *beg = reiserfs_find_next_zero_le_bit
                    ((unsigned long *)(bh->b_data), boundary, *beg);
  
@@@ -425,7 -425,7 +425,7 @@@ static void _reiserfs_free_block(struc
  
        journal_mark_dirty(th, s, sbh);
        if (for_unformatted)
 -              vfs_dq_free_block_nodirty(inode, 1);
 +              dquot_free_block_nodirty(inode, 1);
  }
  
  void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@@ -1049,7 -1049,7 +1049,7 @@@ static inline int blocknrs_and_prealloc
                               amount_needed, hint->inode->i_uid);
  #endif
                quota_ret =
 -                  vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
 +                  dquot_alloc_block_nodirty(hint->inode, amount_needed);
                if (quota_ret)  /* Quota exceeded? */
                        return QUOTA_EXCEEDED;
                if (hint->preallocate && hint->prealloc_size) {
                                       "reiserquota: allocating (prealloc) %d blocks id=%u",
                                       hint->prealloc_size, hint->inode->i_uid);
  #endif
 -                      quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
 +                      quota_ret = dquot_prealloc_block_nodirty(hint->inode,
                                                         hint->prealloc_size);
                        if (quota_ret)
                                hint->preallocate = hint->prealloc_size = 0;
                                               hint->inode->i_uid);
  #endif
                                /* Free not allocated blocks */
 -                              vfs_dq_free_block_nodirty(hint->inode,
 +                              dquot_free_block_nodirty(hint->inode,
                                        amount_needed + hint->prealloc_size -
                                        nr_allocated);
                        }
                               REISERFS_I(hint->inode)->i_prealloc_count,
                               hint->inode->i_uid);
  #endif
 -              vfs_dq_free_block_nodirty(hint->inode, amount_needed +
 +              dquot_free_block_nodirty(hint->inode, amount_needed +
                                         hint->prealloc_size - nr_allocated -
                                         REISERFS_I(hint->inode)->
                                         i_prealloc_count);
diff --combined fs/udf/inode.c
index b57ab0402d8971bfd1770c083381c1eabb7c115b,772a4fa557f2e4dfcec646562b5366837e319762..86f0ccb807658c35136d739429e25959818f6b1f
@@@ -36,7 -36,6 +36,7 @@@
  #include <linux/pagemap.h>
  #include <linux/buffer_head.h>
  #include <linux/writeback.h>
 +#include <linux/quotaops.h>
  #include <linux/slab.h>
  #include <linux/crc-itu-t.h>
  
@@@ -71,9 -70,6 +71,9 @@@ static int udf_get_block(struct inode *
  
  void udf_delete_inode(struct inode *inode)
  {
 +      if (!is_bad_inode(inode))
 +              dquot_initialize(inode);
 +
        truncate_inode_pages(&inode->i_data, 0);
  
        if (is_bad_inode(inode))
@@@ -106,14 -102,12 +106,14 @@@ void udf_clear_inode(struct inode *inod
        if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
            inode->i_size != iinfo->i_lenExtents) {
                printk(KERN_WARNING "UDF-fs (%s): Inode %lu (mode %o) has "
-                       "inode size %llu different from extent lenght %llu. "
+                       "inode size %llu different from extent length %llu. "
                        "Filesystem need not be standards compliant.\n",
                        inode->i_sb->s_id, inode->i_ino, inode->i_mode,
                        (unsigned long long)inode->i_size,
                        (unsigned long long)iinfo->i_lenExtents);
        }
 +
 +      dquot_drop(inode);
        kfree(iinfo->i_ext.i_data);
        iinfo->i_ext.i_data = NULL;
  }
@@@ -1379,12 -1373,12 +1379,12 @@@ static mode_t udf_convert_permissions(s
        return mode;
  }
  
 -int udf_write_inode(struct inode *inode, int sync)
 +int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
  {
        int ret;
  
        lock_kernel();
 -      ret = udf_update_inode(inode, sync);
 +      ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
        unlock_kernel();
  
        return ret;
@@@ -1678,7 -1672,7 +1678,7 @@@ int8_t udf_add_aext(struct inode *inode
                return -1;
  
        if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
 -              char *sptr, *dptr;
 +              unsigned char *sptr, *dptr;
                struct buffer_head *nbh;
                int err, loffset;
                struct kernel_lb_addr obloc = epos->block;
diff --combined include/linux/mmzone.h
index bc209d8b7b5cf8772fd1dea25c0e0d2570b01548,e60a340fe890622020d5791e6cc7fb64e163cee4..cf9e458e96b0e4237260a533cad7a7c8e9d26f1a
@@@ -184,7 -184,13 +184,7 @@@ struct per_cpu_pageset 
        s8 stat_threshold;
        s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
  #endif
 -} ____cacheline_aligned_in_smp;
 -
 -#ifdef CONFIG_NUMA
 -#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
 -#else
 -#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 -#endif
 +};
  
  #endif /* !__GENERATING_BOUNDS.H */
  
@@@ -300,13 -306,14 +300,13 @@@ struct zone 
         */
        unsigned long           min_unmapped_pages;
        unsigned long           min_slab_pages;
 -      struct per_cpu_pageset  *pageset[NR_CPUS];
 -#else
 -      struct per_cpu_pageset  pageset[NR_CPUS];
  #endif
 +      struct per_cpu_pageset __percpu *pageset;
        /*
         * free areas of different sizes
         */
        spinlock_t              lock;
 +      int                     all_unreclaimable; /* All pages pinned */
  #ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
         * prev_priority holds the scanning priority for this zone.  It is
         * defined as the scanning priority at which we achieved our reclaim
         * target at the previous try_to_free_pages() or balance_pgdat()
-        * invokation.
+        * invocation.
         *
         * We use prev_priority as a measure of how much stress page reclaim is
         * under - it drives the swappiness decision: whether to unmap mapped
  } ____cacheline_internodealigned_in_smp;
  
  typedef enum {
 -      ZONE_ALL_UNRECLAIMABLE,         /* all pages pinned */
        ZONE_RECLAIM_LOCKED,            /* prevents concurrent reclaim */
        ZONE_OOM_LOCKED,                /* zone is in OOM killer zonelist */
  } zone_flags_t;
@@@ -437,6 -445,11 +437,6 @@@ static inline void zone_clear_flag(stru
        clear_bit(flag, &zone->flags);
  }
  
 -static inline int zone_is_all_unreclaimable(const struct zone *zone)
 -{
 -      return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
 -}
 -
  static inline int zone_is_reclaim_locked(const struct zone *zone)
  {
        return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
@@@ -607,9 -620,7 +607,9 @@@ typedef struct pglist_data 
        struct page_cgroup *node_page_cgroup;
  #endif
  #endif
 +#ifndef CONFIG_NO_BOOTMEM
        struct bootmem_data *bdata;
 +#endif
  #ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
diff --combined include/linux/sched.h
index 46c6f8d5dc06d2dce20f0f6f52e1884519403651,a70957b138ed3ee6fffb7bfb01e4521908a40be5..c0ead33af5d0affcbf7b52561126b74b0f7c27a2
@@@ -97,7 -97,7 +97,7 @@@ struct sched_param 
  struct exec_domain;
  struct futex_pi_state;
  struct robust_list_head;
 -struct bio;
 +struct bio_list;
  struct fs_struct;
  struct bts_context;
  struct perf_event_context;
@@@ -310,7 -310,6 +310,7 @@@ extern void sched_show_task(struct task
  #ifdef CONFIG_DETECT_SOFTLOCKUP
  extern void softlockup_tick(void);
  extern void touch_softlockup_watchdog(void);
 +extern void touch_softlockup_watchdog_sync(void);
  extern void touch_all_softlockup_watchdogs(void);
  extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
                                    void __user *buffer,
@@@ -324,9 -323,6 +324,9 @@@ static inline void softlockup_tick(void
  static inline void touch_softlockup_watchdog(void)
  {
  }
 +static inline void touch_softlockup_watchdog_sync(void)
 +{
 +}
  static inline void touch_all_softlockup_watchdogs(void)
  {
  }
@@@ -396,6 -392,60 +396,6 @@@ extern void arch_unmap_area_topdown(str
  static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
  #endif
  
 -#if USE_SPLIT_PTLOCKS
 -/*
 - * The mm counters are not protected by its page_table_lock,
 - * so must be incremented atomically.
 - */
 -#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
 -#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
 -#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
 -#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 -#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 -
 -#else  /* !USE_SPLIT_PTLOCKS */
 -/*
 - * The mm counters are protected by its page_table_lock,
 - * so can be incremented directly.
 - */
 -#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
 -#define get_mm_counter(mm, member) ((mm)->_##member)
 -#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
 -#define inc_mm_counter(mm, member) (mm)->_##member++
 -#define dec_mm_counter(mm, member) (mm)->_##member--
 -
 -#endif /* !USE_SPLIT_PTLOCKS */
 -
 -#define get_mm_rss(mm)                                        \
 -      (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
 -#define update_hiwater_rss(mm)        do {                    \
 -      unsigned long _rss = get_mm_rss(mm);            \
 -      if ((mm)->hiwater_rss < _rss)                   \
 -              (mm)->hiwater_rss = _rss;               \
 -} while (0)
 -#define update_hiwater_vm(mm) do {                    \
 -      if ((mm)->hiwater_vm < (mm)->total_vm)          \
 -              (mm)->hiwater_vm = (mm)->total_vm;      \
 -} while (0)
 -
 -static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
 -{
 -      return max(mm->hiwater_rss, get_mm_rss(mm));
 -}
 -
 -static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
 -                                       struct mm_struct *mm)
 -{
 -      unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
 -
 -      if (*maxrss < hiwater_rss)
 -              *maxrss = hiwater_rss;
 -}
 -
 -static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
 -{
 -      return max(mm->hiwater_vm, mm->total_vm);
 -}
  
  extern void set_dumpable(struct mm_struct *mm, int value);
  extern int get_dumpable(struct mm_struct *mm);
@@@ -686,6 -736,14 +686,6 @@@ struct user_struct 
        uid_t uid;
        struct user_namespace *user_ns;
  
 -#ifdef CONFIG_USER_SCHED
 -      struct task_group *tg;
 -#ifdef CONFIG_SYSFS
 -      struct kobject kobj;
 -      struct delayed_work work;
 -#endif
 -#endif
 -
  #ifdef CONFIG_PERF_EVENTS
        atomic_long_t locked_vm;
  #endif
@@@ -816,10 -874,7 +816,10 @@@ static inline int sd_balance_for_mc_pow
        if (sched_smt_power_savings)
                return SD_POWERSAVINGS_BALANCE;
  
 -      return SD_PREFER_SIBLING;
 +      if (!sched_mc_power_savings)
 +              return SD_PREFER_SIBLING;
 +
 +      return 0;
  }
  
  static inline int sd_balance_for_package_power(void)
@@@ -1025,8 -1080,7 +1025,8 @@@ struct sched_domain
  struct sched_class {
        const struct sched_class *next;
  
 -      void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 +      void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
 +                            bool head);
        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
        void (*yield_task) (struct rq *rq);
  
  #ifdef CONFIG_SMP
        int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
  
 -      unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 -                      struct rq *busiest, unsigned long max_load_move,
 -                      struct sched_domain *sd, enum cpu_idle_type idle,
 -                      int *all_pinned, int *this_best_prio);
 -
 -      int (*move_one_task) (struct rq *this_rq, int this_cpu,
 -                            struct rq *busiest, struct sched_domain *sd,
 -                            enum cpu_idle_type idle);
        void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
        void (*post_schedule) (struct rq *this_rq);
        void (*task_waking) (struct rq *this_rq, struct task_struct *task);
@@@ -1220,9 -1282,7 +1220,9 @@@ struct task_struct 
        struct plist_node pushable_tasks;
  
        struct mm_struct *mm, *active_mm;
 -
 +#if defined(SPLIT_RSS_COUNTING)
 +      struct task_rss_stat    rss_stat;
 +#endif
  /* task state */
        int exit_state;
        int exit_code, exit_signal;
        void *journal_info;
  
  /* stacked block device info */
 -      struct bio *bio_list, **bio_tail;
 +      struct bio_list *bio_list;
  
  /* VM state */
        struct reclaim_state *reclaim_state;
  
        struct list_head        *scm_work_list;
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       /* Index of current stored adress in ret_stack */
+       /* Index of current stored address in ret_stack */
        int curr_ret_stack;
        /* Stack of return addresses for return function tracing */
        struct ftrace_ret_stack *ret_stack;
@@@ -2453,9 -2513,13 +2453,9 @@@ extern long sched_getaffinity(pid_t pid
  
  extern void normalize_rt_tasks(void);
  
 -#ifdef CONFIG_GROUP_SCHED
 +#ifdef CONFIG_CGROUP_SCHED
  
  extern struct task_group init_task_group;
 -#ifdef CONFIG_USER_SCHED
 -extern struct task_group root_task_group;
 -extern void set_tg_uid(struct user_struct *user);
 -#endif
  
  extern struct task_group *sched_create_group(struct task_group *parent);
  extern void sched_destroy_group(struct task_group *tg);
diff --combined kernel/irq/chip.c
index d70394f12ee914e313057e2e560f96088a136af0,ec8a96382461b161f5991fef48ad3af79b0310dc..42ec11b2af8af5205c09523a9b18b3bf6fdd98ef
  
  #include "internals.h"
  
 -/**
 - *    dynamic_irq_init - initialize a dynamically allocated irq
 - *    @irq:   irq number to initialize
 - */
 -void dynamic_irq_init(unsigned int irq)
 +static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
  {
        struct irq_desc *desc;
        unsigned long flags;
@@@ -37,8 -41,7 +37,8 @@@
        desc->depth = 1;
        desc->msi_desc = NULL;
        desc->handler_data = NULL;
 -      desc->chip_data = NULL;
 +      if (!keep_chip_data)
 +              desc->chip_data = NULL;
        desc->action = NULL;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
  }
  
  /**
 - *    dynamic_irq_cleanup - cleanup a dynamically allocated irq
 + *    dynamic_irq_init - initialize a dynamically allocated irq
   *    @irq:   irq number to initialize
   */
 -void dynamic_irq_cleanup(unsigned int irq)
 +void dynamic_irq_init(unsigned int irq)
 +{
 +      dynamic_irq_init_x(irq, false);
 +}
 +
 +/**
 + *    dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
 + *    @irq:   irq number to initialize
 + *
 + *    does not set irq_to_desc(irq)->chip_data to NULL
 + */
 +void dynamic_irq_init_keep_chip_data(unsigned int irq)
 +{
 +      dynamic_irq_init_x(irq, true);
 +}
 +
 +static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
  {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
@@@ -90,8 -77,7 +90,8 @@@
        }
        desc->msi_desc = NULL;
        desc->handler_data = NULL;
 -      desc->chip_data = NULL;
 +      if (!keep_chip_data)
 +              desc->chip_data = NULL;
        desc->handle_irq = handle_bad_irq;
        desc->chip = &no_irq_chip;
        desc->name = NULL;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
  }
  
 +/**
 + *    dynamic_irq_cleanup - cleanup a dynamically allocated irq
 + *    @irq:   irq number to clean up
 + */
 +void dynamic_irq_cleanup(unsigned int irq)
 +{
 +      dynamic_irq_cleanup_x(irq, false);
 +}
 +
 +/**
 + *    dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
 + *    @irq:   irq number to clean up
 + *
 + *    does not set irq_to_desc(irq)->chip_data to NULL
 + */
 +void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
 +{
 +      dynamic_irq_cleanup_x(irq, true);
 +}
 +
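The refactor above is a common kernel idiom: collapse two nearly identical functions into one static _x() helper that takes a bool, then keep the original names as thin wrappers so no existing caller has to change. The shape, reduced to a hypothetical minimum:

#include <stdbool.h>
#include <stddef.h>

struct toy_desc { int depth; void *chip_data; };

static void toy_irq_init_x(struct toy_desc *d, bool keep_chip_data)
{
        d->depth = 1;
        if (!keep_chip_data)
                d->chip_data = NULL;    /* the only divergent step */
}

void toy_irq_init(struct toy_desc *d)
{
        toy_irq_init_x(d, false);
}

void toy_irq_init_keep_chip_data(struct toy_desc *d)
{
        toy_irq_init_x(d, true);
}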
  
  /**
   *    set_irq_chip - set the irq chip for an irq
@@@ -554,7 -520,7 +554,7 @@@ out
   *    signal. The occurrence is latched into the irq controller hardware
   *    and must be acked in order to be reenabled. After the ack another
   *    interrupt can happen on the same source even before the first one
-  *    is handled by the assosiacted event handler. If this happens it
+  *    is handled by the associated event handler. If this happens it
   *    might be necessary to disable (mask) the interrupt depending on the
   *    controller hardware. This requires reenabling the interrupt inside
   *    the loop which handles the interrupts which have arrived while
diff --combined kernel/ksysfs.c
index 6b1ccc3f020585a06d454178bf050bce4b25b805,ac08efca54c354e049c4f302d97bb5c6c234a840..21fe3c426948bb426be3f3c6adea8c53bca4049a
@@@ -33,7 -33,7 +33,7 @@@ static ssize_t uevent_seqnum_show(struc
  }
  KERNEL_ATTR_RO(uevent_seqnum);
  
- /* uevent helper program, used during early boo */
+ /* uevent helper program, used during early boot */
  static ssize_t uevent_helper_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
  {
@@@ -197,8 -197,16 +197,8 @@@ static int __init ksysfs_init(void
                        goto group_exit;
        }
  
 -      /* create the /sys/kernel/uids/ directory */
 -      error = uids_sysfs_init();
 -      if (error)
 -              goto notes_exit;
 -
        return 0;
  
 -notes_exit:
 -      if (notes_size > 0)
 -              sysfs_remove_bin_file(kernel_kobj, &notes_attr);
  group_exit:
        sysfs_remove_group(kernel_kobj, &kernel_attr_group);
  kset_exit:
diff --combined kernel/params.c
index 8d95f5451b227d17cb40e1290dea72b028ca0ec9,2278ce244cf810b53e6fe5ce3658bc04e6a4b40b..0c4fba6640902cba629a46b94cefee6541a06fc3
@@@ -24,6 -24,7 +24,6 @@@
  #include <linux/err.h>
  #include <linux/slab.h>
  #include <linux/ctype.h>
 -#include <linux/string.h>
  
  #if 0
  #define DEBUGP printk
@@@ -401,8 -402,8 +401,8 @@@ int param_get_string(char *buffer, stru
  }
  
  /* sysfs output in /sys/modules/XYZ/parameters/ */
- #define to_module_attr(n) container_of(n, struct module_attribute, attr);
- #define to_module_kobject(n) container_of(n, struct module_kobject, kobj);
+ #define to_module_attr(n) container_of(n, struct module_attribute, attr)
+ #define to_module_kobject(n) container_of(n, struct module_kobject, kobj)
  
  extern struct kernel_param __start___param[], __stop___param[];
  
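The semicolons dropped from these macros (and from to_param_attr in the next hunk) were latent bugs: a trailing ';' in the body turns every expansion into a statement, so the macro breaks the moment it is used inside an expression. A standalone illustration, assuming the usual container_of() definition:

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int y; };
struct outer { int x; struct inner in; };

/* With a stray semicolon in the body --
 *     #define to_outer(n) container_of(n, struct outer, in);
 * -- the return statement below would expand with a ';' in the middle
 * of the expression and fail to compile. */
#define to_outer(n) container_of(n, struct outer, in)

int read_x(struct inner *p)
{
        return to_outer(p)->x;
}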
@@@ -420,7 -421,7 +420,7 @@@ struct module_param_attr
  };
  
  #ifdef CONFIG_SYSFS
- #define to_param_attr(n) container_of(n, struct param_attribute, mattr);
+ #define to_param_attr(n) container_of(n, struct param_attribute, mattr)
  
  static ssize_t param_attr_show(struct module_attribute *mattr,
                               struct module *mod, char *buf)
diff --combined kernel/sched_cpupri.c
index 82095bf2099f7c5e535757ad655bb9a68e1f290d,3db4b1a0e921f168d3a6018ce548c6e6be6eb4cf..fccf9fbb0d7bc9bd6c2ca6dceadd8560d2dbc492
@@@ -47,7 -47,9 +47,7 @@@ static int convert_prio(int prio
  }
  
  #define for_each_cpupri_active(array, idx)                    \
 -  for (idx = find_first_bit(array, CPUPRI_NR_PRIORITIES);     \
 -       idx < CPUPRI_NR_PRIORITIES;                            \
 -       idx = find_next_bit(array, CPUPRI_NR_PRIORITIES, idx+1))
 +      for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)
  
  /**
   * cpupri_find - find the best (lowest-pri) CPU in the system
@@@ -56,7 -58,7 +56,7 @@@
   * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
   *
   * Note: This function returns the recommended CPUs as calculated during the
-  * current invokation.  By the time the call returns, the CPUs may have in
+  * current invocation.  By the time the call returns, the CPUs may have in
   * fact changed priorities any number of times.  While not ideal, it is not
   * an issue of correctness since the normal rebalancer logic will correct
   * any discrepancies created by racing against the uncertainty of the current
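The deleted open-coded loop and the new for_each_set_bit() are equivalent; the macro just hides the find_first_bit()/find_next_bit() bookkeeping. For readers unfamiliar with it, a tiny user-space equivalent over a single word (the kernel macro handles arbitrary-length bitmaps):

#include <stdio.h>

int main(void)
{
        unsigned long bits = 0x2d;      /* bits 0, 2, 3 and 5 set */
        unsigned int idx;

        /* morally what for_each_set_bit(idx, &bits, 64) expands to */
        for (idx = 0; idx < 8 * sizeof(bits); idx++)
                if (bits & (1UL << idx))
                        printf("bit %u is set\n", idx);
        return 0;
}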
diff --combined kernel/trace/ring_buffer.c
index 0287f9f52f5ae48098ce2694053121a640e5b3a2,9ab578f1bb65d423e50ce679376fa5921e70a245..a2f0fe9518318fd7813f47e05dab8a5bacb2ed50
@@@ -20,7 -20,6 +20,7 @@@
  #include <linux/cpu.h>
  #include <linux/fs.h>
  
 +#include <asm/local.h>
  #include "trace.h"
  
  /*
@@@ -2542,7 -2541,7 +2542,7 @@@ EXPORT_SYMBOL_GPL(ring_buffer_record_di
   * @buffer: The ring buffer to enable writes
   *
   * Note, multiple disables will need the same number of enables
-  * to truely enable the writing (much like preempt_disable).
+  * to truly enable the writing (much like preempt_disable).
   */
  void ring_buffer_record_enable(struct ring_buffer *buffer)
  {
@@@ -2578,7 -2577,7 +2578,7 @@@ EXPORT_SYMBOL_GPL(ring_buffer_record_di
   * @cpu: The CPU to enable.
   *
   * Note, multiple disables will need the same number of enables
-  * to truely enable the writing (much like preempt_disable).
+  * to truly enable the writing (much like preempt_disable).
   */
  void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
  {
diff --combined kernel/trace/trace.h
index fd05bcaf91b06cc8c979d02bb3decb1f3696df3a,e4b32c8aa85f4e34e2578fc7c0e23c14e8595693..09b39112a5e2ecbf04942760719891f2db289fd5
@@@ -497,7 -497,6 +497,7 @@@ trace_print_graph_duration(unsigned lon
  #ifdef CONFIG_DYNAMIC_FTRACE
  /* TODO: make this variable */
  #define FTRACE_GRAPH_MAX_FUNCS                32
 +extern int ftrace_graph_filter_enabled;
  extern int ftrace_graph_count;
  extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
  
@@@ -505,7 -504,7 +505,7 @@@ static inline int ftrace_graph_addr(uns
  {
        int i;
  
 -      if (!ftrace_graph_count || test_tsk_trace_graph(current))
 +      if (!ftrace_graph_filter_enabled)
                return 1;
  
        for (i = 0; i < ftrace_graph_count; i++) {
@@@ -550,7 -549,7 +550,7 @@@ static inline int ftrace_trace_task(str
   * struct trace_parser - serves for reading the user input separated by spaces
   * @cont: set if the input is not complete - no final space char was found
   * @buffer: holds the parsed user input
-  * @idx: user input lenght
+  * @idx: user input length
   * @size: buffer size
   */
  struct trace_parser {
@@@ -792,8 -791,7 +792,8 @@@ extern const char *__stop___trace_bprin
  
  #undef FTRACE_ENTRY
  #define FTRACE_ENTRY(call, struct_name, id, tstruct, print)           \
 -      extern struct ftrace_event_call event_##call;
 +      extern struct ftrace_event_call                                 \
 +      __attribute__((__aligned__(4))) event_##call;
  #undef FTRACE_ENTRY_DUP
  #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)               \
        FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
diff --combined mm/slub.c
index 0bfd3863d521b29dfd25888bdd3692ef74ea4495,00e0961b11fe2e60a4114680dcd32d9b3603a412..75f9b0c1d8839860aba3da366490b2276a48cec7
+++ b/mm/slub.c
   * Set of flags that will prevent slab merging
   */
  #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 -              SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 +              SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
 +              SLAB_FAILSLAB)
  
  #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                SLAB_CACHE_DMA | SLAB_NOTRACK)
@@@ -218,10 -217,10 +218,10 @@@ static inline void sysfs_slab_remove(st
  
  #endif
  
 -static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
 +static inline void stat(struct kmem_cache *s, enum stat_item si)
  {
  #ifdef CONFIG_SLUB_STATS
 -      c->stat[si]++;
 +      __this_cpu_inc(s->cpu_slab->stat[si]);
  #endif
  }
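The rewrite above replaces an explicit per-CPU lookup (the old get_cpu_slab(s, cpu)->stat[si]++) with __this_cpu_inc(), which folds the address computation and the increment into a single operation on the local CPU's copy. A user-space analogue of the idea using GCC's __thread storage class (hypothetical names; summing the per-thread slots for a reader is omitted):

enum toy_stat_item { ALLOC_FASTPATH, FREE_FASTPATH, NR_TOY_STATS };

/* One private counter array per thread: the hot path bumps its own
 * slot with no atomics and no cache-line bouncing between CPUs. */
static __thread unsigned long toy_stats[NR_TOY_STATS];

static inline void toy_stat(enum toy_stat_item si)
{
        toy_stats[si]++;
}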
  
@@@ -243,6 -242,15 +243,6 @@@ static inline struct kmem_cache_node *g
  #endif
  }
  
 -static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 -{
 -#ifdef CONFIG_SMP
 -      return s->cpu_slab[cpu];
 -#else
 -      return &s->cpu_slab;
 -#endif
 -}
 -
  /* Verify that a pointer has an address that is valid within a slab page */
  static inline int check_valid_pointer(struct kmem_cache *s,
                                struct page *page, const void *object)
        return 1;
  }
  
 -/*
 - * Slow version of get and set free pointer.
 - *
 - * This version requires touching the cache lines of kmem_cache which
 - * we avoid to do in the fast alloc free paths. There we obtain the offset
 - * from the page struct.
 - */
  static inline void *get_freepointer(struct kmem_cache *s, void *object)
  {
        return *(void **)(object + s->offset);
@@@ -1005,9 -1020,6 +1005,9 @@@ static int __init setup_slub_debug(cha
                case 't':
                        slub_debug |= SLAB_TRACE;
                        break;
 +              case 'a':
 +                      slub_debug |= SLAB_FAILSLAB;
 +                      break;
                default:
                        printk(KERN_ERR "slub_debug option '%c' "
                                "unknown. skipped\n", *str);
@@@ -1112,7 -1124,7 +1112,7 @@@ static struct page *allocate_slab(struc
                if (!page)
                        return NULL;
  
 -              stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
 +              stat(s, ORDER_FALLBACK);
        }
  
        if (kmemcheck_enabled
@@@ -1410,22 -1422,23 +1410,22 @@@ static struct page *get_partial(struct 
  static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
  {
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 -      struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
  
        __ClearPageSlubFrozen(page);
        if (page->inuse) {
  
                if (page->freelist) {
                        add_partial(n, page, tail);
 -                      stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 +                      stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
                } else {
 -                      stat(c, DEACTIVATE_FULL);
 +                      stat(s, DEACTIVATE_FULL);
                        if (SLABDEBUG && PageSlubDebug(page) &&
                                                (s->flags & SLAB_STORE_USER))
                                add_full(n, page);
                }
                slab_unlock(page);
        } else {
 -              stat(c, DEACTIVATE_EMPTY);
 +              stat(s, DEACTIVATE_EMPTY);
                if (n->nr_partial < s->min_partial) {
                        /*
                         * Adding an empty slab to the partial slabs in order
                        slab_unlock(page);
                } else {
                        slab_unlock(page);
 -                      stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
 +                      stat(s, FREE_SLAB);
                        discard_slab(s, page);
                }
        }
@@@ -1456,7 -1469,7 +1456,7 @@@ static void deactivate_slab(struct kmem
        int tail = 1;
  
        if (page->freelist)
 -              stat(c, DEACTIVATE_REMOTE_FREES);
 +              stat(s, DEACTIVATE_REMOTE_FREES);
        /*
         * Merge cpu freelist into slab freelist. Typically we get here
         * because both freelists are empty. So this is unlikely
  
                /* Retrieve object from cpu_freelist */
                object = c->freelist;
 -              c->freelist = c->freelist[c->offset];
 +              c->freelist = get_freepointer(s, c->freelist);
  
                /* And put onto the regular freelist */
 -              object[c->offset] = page->freelist;
 +              set_freepointer(s, object, page->freelist);
                page->freelist = object;
                page->inuse--;
        }
  
  static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
  {
 -      stat(c, CPUSLAB_FLUSH);
 +      stat(s, CPUSLAB_FLUSH);
        slab_lock(c->page);
        deactivate_slab(s, c);
  }
   */
  static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
  {
 -      struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 +      struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
  
        if (likely(c && c->page))
                flush_slab(s, c);
@@@ -1622,7 -1635,7 +1622,7 @@@ static void *__slab_alloc(struct kmem_c
        if (unlikely(!node_match(c, node)))
                goto another_slab;
  
 -      stat(c, ALLOC_REFILL);
 +      stat(s, ALLOC_REFILL);
  
  load_freelist:
        object = c->page->freelist;
        if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
                goto debug;
  
 -      c->freelist = object[c->offset];
 +      c->freelist = get_freepointer(s, object);
        c->page->inuse = c->page->objects;
        c->page->freelist = NULL;
        c->node = page_to_nid(c->page);
  unlock_out:
        slab_unlock(c->page);
 -      stat(c, ALLOC_SLOWPATH);
 +      stat(s, ALLOC_SLOWPATH);
        return object;
  
  another_slab:
@@@ -1647,7 -1660,7 +1647,7 @@@ new_slab
        new = get_partial(s, gfpflags, node);
        if (new) {
                c->page = new;
 -              stat(c, ALLOC_FROM_PARTIAL);
 +              stat(s, ALLOC_FROM_PARTIAL);
                goto load_freelist;
        }
  
                local_irq_disable();
  
        if (new) {
 -              c = get_cpu_slab(s, smp_processor_id());
 -              stat(c, ALLOC_SLAB);
 +              c = __this_cpu_ptr(s->cpu_slab);
 +              stat(s, ALLOC_SLAB);
                if (c->page)
                        flush_slab(s, c);
                slab_lock(new);
@@@ -1677,7 -1690,7 +1677,7 @@@ debug
                goto another_slab;
  
        c->page->inuse++;
 -      c->page->freelist = object[c->offset];
 +      c->page->freelist = get_freepointer(s, object);
        c->node = -1;
        goto unlock_out;
  }
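
The hunks above swap the open-coded `object[c->offset]` loads and stores for get_freepointer()/set_freepointer(). A minimal sketch of those helpers, assuming the free pointer sits s->offset bytes into each free object:

/* Sketch of the helpers used above, assuming the free pointer is
 * stored s->offset bytes into each free object. */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}

Centralizing the offset arithmetic is what lets struct kmem_cache_cpu drop its cached offset field in the hunks that follow.
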
@@@ -1698,33 -1711,35 +1698,33 @@@ static __always_inline void *slab_alloc
        void **object;
        struct kmem_cache_cpu *c;
        unsigned long flags;
 -      unsigned int objsize;
  
        gfpflags &= gfp_allowed_mask;
  
        lockdep_trace_alloc(gfpflags);
        might_sleep_if(gfpflags & __GFP_WAIT);
  
 -      if (should_failslab(s->objsize, gfpflags))
 +      if (should_failslab(s->objsize, gfpflags, s->flags))
                return NULL;
  
        local_irq_save(flags);
 -      c = get_cpu_slab(s, smp_processor_id());
 -      objsize = c->objsize;
 -      if (unlikely(!c->freelist || !node_match(c, node)))
 +      c = __this_cpu_ptr(s->cpu_slab);
 +      object = c->freelist;
 +      if (unlikely(!object || !node_match(c, node)))
  
                object = __slab_alloc(s, gfpflags, node, addr, c);
  
        else {
 -              object = c->freelist;
 -              c->freelist = object[c->offset];
 -              stat(c, ALLOC_FASTPATH);
 +              c->freelist = get_freepointer(s, object);
 +              stat(s, ALLOC_FASTPATH);
        }
        local_irq_restore(flags);
  
        if (unlikely(gfpflags & __GFP_ZERO) && object)
 -              memset(object, 0, objsize);
 +              memset(object, 0, s->objsize);
  
 -      kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
 -      kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
 +      kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
 +      kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
  
        return object;
  }
@@@ -1779,25 -1794,26 +1779,25 @@@ EXPORT_SYMBOL(kmem_cache_alloc_node_not
   * handling required then we can return immediately.
   */
  static void __slab_free(struct kmem_cache *s, struct page *page,
 -                      void *x, unsigned long addr, unsigned int offset)
 +                      void *x, unsigned long addr)
  {
        void *prior;
        void **object = (void *)x;
 -      struct kmem_cache_cpu *c;
  
 -      c = get_cpu_slab(s, raw_smp_processor_id());
 -      stat(c, FREE_SLOWPATH);
 +      stat(s, FREE_SLOWPATH);
        slab_lock(page);
  
        if (unlikely(SLABDEBUG && PageSlubDebug(page)))
                goto debug;
  
  checks_ok:
 -      prior = object[offset] = page->freelist;
 +      prior = page->freelist;
 +      set_freepointer(s, object, prior);
        page->freelist = object;
        page->inuse--;
  
        if (unlikely(PageSlubFrozen(page))) {
 -              stat(c, FREE_FROZEN);
 +              stat(s, FREE_FROZEN);
                goto out_unlock;
        }
  
         */
        if (unlikely(!prior)) {
                add_partial(get_node(s, page_to_nid(page)), page, 1);
 -              stat(c, FREE_ADD_PARTIAL);
 +              stat(s, FREE_ADD_PARTIAL);
        }
  
  out_unlock:
@@@ -1823,10 -1839,10 +1823,10 @@@ slab_empty
                 * Slab still on the partial list.
                 */
                remove_partial(s, page);
 -              stat(c, FREE_REMOVE_PARTIAL);
 +              stat(s, FREE_REMOVE_PARTIAL);
        }
        slab_unlock(page);
 -      stat(c, FREE_SLAB);
 +      stat(s, FREE_SLAB);
        discard_slab(s, page);
        return;
  
@@@ -1856,17 -1872,17 +1856,17 @@@ static __always_inline void slab_free(s
  
        kmemleak_free_recursive(x, s->flags);
        local_irq_save(flags);
 -      c = get_cpu_slab(s, smp_processor_id());
 -      kmemcheck_slab_free(s, object, c->objsize);
 -      debug_check_no_locks_freed(object, c->objsize);
 +      c = __this_cpu_ptr(s->cpu_slab);
 +      kmemcheck_slab_free(s, object, s->objsize);
 +      debug_check_no_locks_freed(object, s->objsize);
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
 -              debug_check_no_obj_freed(object, c->objsize);
 +              debug_check_no_obj_freed(object, s->objsize);
        if (likely(page == c->page && c->node >= 0)) {
 -              object[c->offset] = c->freelist;
 +              set_freepointer(s, object, c->freelist);
                c->freelist = object;
 -              stat(c, FREE_FASTPATH);
 +              stat(s, FREE_FASTPATH);
        } else
 -              __slab_free(s, page, x, addr, c->offset);
 +              __slab_free(s, page, x, addr);
  
        local_irq_restore(flags);
  }
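
With the kmem_cache_cpu pointer gone from these call sites, stat() can resolve the executing CPU's counters itself. A plausible shape for the reworked helper, assuming s->cpu_slab is the percpu pointer set up later in this patch and CONFIG_SLUB_STATS gates the counters:

/* Sketch: bump this CPU's event counter through the percpu pointer;
 * compiles away entirely when CONFIG_SLUB_STATS is off. */
static inline void stat(struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	__this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
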
@@@ -2053,6 -2069,19 +2053,6 @@@ static unsigned long calculate_alignmen
        return ALIGN(align, sizeof(void *));
  }
  
 -static void init_kmem_cache_cpu(struct kmem_cache *s,
 -                      struct kmem_cache_cpu *c)
 -{
 -      c->page = NULL;
 -      c->freelist = NULL;
 -      c->node = 0;
 -      c->offset = s->offset / sizeof(void *);
 -      c->objsize = s->objsize;
 -#ifdef CONFIG_SLUB_STATS
 -      memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
 -#endif
 -}
 -
  static void
  init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
  {
  #endif
  }
  
 -#ifdef CONFIG_SMP
 -/*
 - * Per cpu array for per cpu structures.
 - *
 - * The per cpu array places all kmem_cache_cpu structures from one processor
 - * close together meaning that it becomes possible that multiple per cpu
 - * structures are contained in one cacheline. This may be particularly
 - * beneficial for the kmalloc caches.
 - *
 - * A desktop system typically has around 60-80 slabs. With 100 here we are
 - * likely able to get per cpu structures for all caches from the array defined
 - * here. We must be able to cover all kmalloc caches during bootstrap.
 - *
 - * If the per cpu array is exhausted then fall back to kmalloc
 - * of individual cachelines. No sharing is possible then.
 - */
 -#define NR_KMEM_CACHE_CPU 100
 -
 -static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
 -                    kmem_cache_cpu);
 -
 -static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
 -static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
 -
 -static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 -                                                      int cpu, gfp_t flags)
 -{
 -      struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
 -
 -      if (c)
 -              per_cpu(kmem_cache_cpu_free, cpu) =
 -                              (void *)c->freelist;
 -      else {
 -              /* Table overflow: So allocate ourselves */
 -              c = kmalloc_node(
 -                      ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
 -                      flags, cpu_to_node(cpu));
 -              if (!c)
 -                      return NULL;
 -      }
 -
 -      init_kmem_cache_cpu(s, c);
 -      return c;
 -}
 -
 -static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
 -{
 -      if (c < per_cpu(kmem_cache_cpu, cpu) ||
 -                      c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
 -              kfree(c);
 -              return;
 -      }
 -      c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
 -      per_cpu(kmem_cache_cpu_free, cpu) = c;
 -}
 -
 -static void free_kmem_cache_cpus(struct kmem_cache *s)
 -{
 -      int cpu;
 -
 -      for_each_online_cpu(cpu) {
 -              struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 -
 -              if (c) {
 -                      s->cpu_slab[cpu] = NULL;
 -                      free_kmem_cache_cpu(c, cpu);
 -              }
 -      }
 -}
 -
 -static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 -{
 -      int cpu;
 -
 -      for_each_online_cpu(cpu) {
 -              struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 -
 -              if (c)
 -                      continue;
 -
 -              c = alloc_kmem_cache_cpu(s, cpu, flags);
 -              if (!c) {
 -                      free_kmem_cache_cpus(s);
 -                      return 0;
 -              }
 -              s->cpu_slab[cpu] = c;
 -      }
 -      return 1;
 -}
 -
 -/*
 - * Initialize the per cpu array.
 - */
 -static void init_alloc_cpu_cpu(int cpu)
 -{
 -      int i;
 +static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
  
 -      if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
 -              return;
 -
 -      for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
 -              free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
 -
 -      cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
 -}
 -
 -static void __init init_alloc_cpu(void)
 +static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  {
 -      int cpu;
 -
 -      for_each_online_cpu(cpu)
 -              init_alloc_cpu_cpu(cpu);
 -  }
 +      if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
 +              /*
 +               * Boot time creation of the kmalloc array. Use static per cpu data
 +               * since the per cpu allocator is not available yet.
 +               */
 +              s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
 +      else
 +              s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
  
 -#else
 -static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
 -static inline void init_alloc_cpu(void) {}
 +      if (!s->cpu_slab)
 +              return 0;
  
 -static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 -{
 -      init_kmem_cache_cpu(s, &s->cpu_slab);
        return 1;
  }
 -#endif
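
The hand-rolled NR_KMEM_CACHE_CPU array and its freelist are replaced by the generic percpu allocator, with the static kmalloc_percpu fallback reserved for boot-time creation of the kmalloc caches. For reference, a self-contained usage sketch of that API; the example_* names are illustrative, not part of this patch:

/* Usage sketch of alloc_percpu()/per_cpu_ptr()/free_percpu();
 * the example_counter structure is hypothetical. */
struct example_counter {
	unsigned long hits;
};

static struct example_counter *example_ctr;

static int example_init(void)
{
	int cpu;

	example_ctr = alloc_percpu(struct example_counter);
	if (!example_ctr)
		return -ENOMEM;

	/* alloc_percpu() returns zeroed memory; loop shown for form */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(example_ctr, cpu)->hits = 0;
	return 0;
}

static void example_exit(void)
{
	free_percpu(example_ctr);
}
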
  
  #ifdef CONFIG_NUMA
  /*
@@@ -2152,8 -2287,7 +2152,8 @@@ static int init_kmem_cache_nodes(struc
        int node;
        int local_node;
  
 -      if (slab_state >= UP)
 +      if (slab_state >= UP && (s < kmalloc_caches ||
 +                      s > kmalloc_caches + KMALLOC_CACHES))
                local_node = page_to_nid(virt_to_page(s));
        else
                local_node = 0;
@@@ -2368,7 -2502,6 +2368,7 @@@ static int kmem_cache_open(struct kmem_
  
        if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
                return 1;
 +
        free_kmem_cache_nodes(s);
  error:
        if (flags & SLAB_PANIC)
@@@ -2476,8 -2609,9 +2476,8 @@@ static inline int kmem_cache_close(stru
        int node;
  
        flush_all(s);
 -
 +      free_percpu(s->cpu_slab);
        /* Attempt to free all objects */
 -      free_kmem_cache_cpus(s);
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = get_node(s, node);
  
@@@ -2517,7 -2651,7 +2517,7 @@@ EXPORT_SYMBOL(kmem_cache_destroy)
   *            Kmalloc subsystem
   *******************************************************************/
  
 -struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 +struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
  EXPORT_SYMBOL(kmalloc_caches);
  
  static int __init setup_slub_min_order(char *str)
@@@ -2607,7 -2741,6 +2607,7 @@@ static noinline struct kmem_cache *dma_
        char *text;
        size_t realsize;
        unsigned long slabflags;
 +      int i;
  
        s = kmalloc_caches_dma[index];
        if (s)
        realsize = kmalloc_caches[index].objsize;
        text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
                         (unsigned int)realsize);
 -      s = kmalloc(kmem_size, flags & ~SLUB_DMA);
 +
 +      s = NULL;
 +      for (i = 0; i < KMALLOC_CACHES; i++)
 +              if (!kmalloc_caches[i].size)
 +                      break;
 +
 +      BUG_ON(i >= KMALLOC_CACHES);
 +      s = kmalloc_caches + i;
  
        /*
         * Must defer sysfs creation to a workqueue because we don't know
        if (slab_state >= SYSFS)
                slabflags |= __SYSFS_ADD_DEFERRED;
  
 -      if (!s || !text || !kmem_cache_open(s, flags, text,
 +      if (!text || !kmem_cache_open(s, flags, text,
                        realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
 -              kfree(s);
 +              s->size = 0;
                kfree(text);
                goto unlock_out;
        }
@@@ -2960,7 -3086,7 +2960,7 @@@ static void slab_mem_offline_callback(v
                        /*
                         * if n->nr_slabs > 0, slabs still exist on the node
                         * that is going down. We were unable to free them,
-                        * and offline_pages() function shoudn't call this
+                        * and offline_pages() function shouldn't call this
                         * callback. So, we must fail.
                         */
                        BUG_ON(slabs_node(s, offline_node));
@@@ -3050,6 -3176,8 +3050,6 @@@ void __init kmem_cache_init(void
        int i;
        int caches = 0;
  
 -      init_alloc_cpu();
 -
  #ifdef CONFIG_NUMA
        /*
         * Must first have the slab cache available for the allocations of the
  
  #ifdef CONFIG_SMP
        register_cpu_notifier(&slab_notifier);
 -      kmem_size = offsetof(struct kmem_cache, cpu_slab) +
 -                              nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
 +#endif
 +#ifdef CONFIG_NUMA
 +      kmem_size = offsetof(struct kmem_cache, node) +
 +                              nr_node_ids * sizeof(struct kmem_cache_node *);
  #else
        kmem_size = sizeof(struct kmem_cache);
  #endif
@@@ -3225,12 -3351,22 +3225,12 @@@ struct kmem_cache *kmem_cache_create(co
        down_write(&slub_lock);
        s = find_mergeable(size, align, flags, name, ctor);
        if (s) {
 -              int cpu;
 -
                s->refcount++;
                /*
                 * Adjust the object sizes so that we clear
                 * the complete object on kzalloc.
                 */
                s->objsize = max(s->objsize, (int)size);
 -
 -              /*
 -               * And then we need to update the object size in the
 -               * per cpu structures
 -               */
 -              for_each_online_cpu(cpu)
 -                      get_cpu_slab(s, cpu)->objsize = s->objsize;
 -
                s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
                up_write(&slub_lock);
  
@@@ -3284,15 -3420,29 +3284,15 @@@ static int __cpuinit slab_cpuup_callbac
        unsigned long flags;
  
        switch (action) {
 -      case CPU_UP_PREPARE:
 -      case CPU_UP_PREPARE_FROZEN:
 -              init_alloc_cpu_cpu(cpu);
 -              down_read(&slub_lock);
 -              list_for_each_entry(s, &slab_caches, list)
 -                      s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
 -                                                      GFP_KERNEL);
 -              up_read(&slub_lock);
 -              break;
 -
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                down_read(&slub_lock);
                list_for_each_entry(s, &slab_caches, list) {
 -                      struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 -
                        local_irq_save(flags);
                        __flush_cpu_slab(s, cpu);
                        local_irq_restore(flags);
 -                      free_kmem_cache_cpu(c, cpu);
 -                      s->cpu_slab[cpu] = NULL;
                }
                up_read(&slub_lock);
                break;
@@@ -3778,7 -3928,7 +3778,7 @@@ static ssize_t show_slab_objects(struc
                int cpu;
  
                for_each_possible_cpu(cpu) {
 -                      struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 +                      struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
  
                        if (!c || c->node < 0)
                                continue;
@@@ -4021,23 -4171,6 +4021,23 @@@ static ssize_t trace_store(struct kmem_
  }
  SLAB_ATTR(trace);
  
 +#ifdef CONFIG_FAILSLAB
 +static ssize_t failslab_show(struct kmem_cache *s, char *buf)
 +{
 +      return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
 +}
 +
 +static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
 +                                                      size_t length)
 +{
 +      s->flags &= ~SLAB_FAILSLAB;
 +      if (buf[0] == '1')
 +              s->flags |= SLAB_FAILSLAB;
 +      return length;
 +}
 +SLAB_ATTR(failslab);
 +#endif
 +
  static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
  {
        return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
@@@ -4220,7 -4353,7 +4220,7 @@@ static int show_stat(struct kmem_cache 
                return -ENOMEM;
  
        for_each_online_cpu(cpu) {
 -              unsigned x = get_cpu_slab(s, cpu)->stat[si];
 +              unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
  
                data[cpu] = x;
                sum += x;
@@@ -4243,7 -4376,7 +4243,7 @@@ static void clear_stat(struct kmem_cach
        int cpu;
  
        for_each_online_cpu(cpu)
 -              get_cpu_slab(s, cpu)->stat[si] = 0;
 +              per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
  }
  
  #define STAT_ATTR(si, text)                                   \
@@@ -4334,10 -4467,6 +4334,10 @@@ static struct attribute *slab_attrs[] 
        &deactivate_remote_frees_attr.attr,
        &order_fallback_attr.attr,
  #endif
 +#ifdef CONFIG_FAILSLAB
 +      &failslab_attr.attr,
 +#endif
 +
        NULL
  };
  
diff --combined net/ipv4/tcp_timer.c
index a17629b8912ee7134eaaa780957222b287bad6da,aff48d657181f64e402c23ab3560a0d8683cfacc..b2e6bbccaee17eef52a3aca0466db51f89ea0d48
@@@ -29,7 -29,6 +29,7 @@@ int sysctl_tcp_keepalive_intvl __read_m
  int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
  int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
  int sysctl_tcp_orphan_retries __read_mostly;
 +int sysctl_tcp_thin_linear_timeouts __read_mostly;
  
  static void tcp_write_timer(unsigned long);
  static void tcp_delack_timer(unsigned long);
@@@ -134,7 -133,7 +134,7 @@@ static void tcp_mtu_probing(struct inet
  }
  
  /* This function calculates a "timeout" which is equivalent to the timeout of a
-  * TCP connection after "boundary" unsucessful, exponentially backed-off
+  * TCP connection after "boundary" unsuccessful, exponentially backed-off
   * retransmissions with an initial RTO of TCP_RTO_MIN.
   */
  static bool retransmits_timed_out(struct sock *sk,
@@@ -416,25 -415,7 +416,25 @@@ void tcp_retransmit_timer(struct sock *
        icsk->icsk_retransmits++;
  
  out_reset_timer:
 -      icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
 +      /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
 +       * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as it
 +       * might have been increased if the stream oscillates between thin and
 +       * thick; the old value might then be too high compared to the value
 +       * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
 +       * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
 +       * exponential backoff behaviour, to avoid continuing to hammer
 +       * linear-timeout retransmissions into a black hole.
 +       */
 +      if (sk->sk_state == TCP_ESTABLISHED &&
 +          (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
 +          tcp_stream_is_thin(tp) &&
 +          icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
 +              icsk->icsk_backoff = 0;
 +              icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
 +      } else {
 +              /* Use normal (exponential) backoff */
 +              icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
 +      }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
        if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
                __sk_dst_reset(sk);
@@@ -493,12 -474,6 +493,12 @@@ static void tcp_synack_timer(struct soc
                                   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
  }
  
 +void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
 +{
 +      NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
 +}
 +EXPORT_SYMBOL(tcp_syn_ack_timeout);
 +
  void tcp_set_keepalive(struct sock *sk, int val)
  {
        if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
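
For context on the knob wired up above: a stream can opt in per socket (tp->thin_lto) as well as via the new sysctl. A hedged userspace sketch, assuming the TCP_THIN_LINEAR_TIMEOUTS socket option added by the same patch series; the fallback value is an assumption based on the uapi headers of this era:

/* Userspace sketch: opt one socket into linear retransmission
 * timeouts. Check your linux/tcp.h for the option value. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_THIN_LINEAR_TIMEOUTS
#define TCP_THIN_LINEAR_TIMEOUTS 16	/* assumed uapi value */
#endif

static int enable_thin_lto(int fd)
{
	int one = 1;

	return setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
			  &one, sizeof(one));
}

The global equivalent is the sysctl declared at the top of this file, net.ipv4.tcp_thin_linear_timeouts.
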
index bc4e20e57ff566e0d67d7c3ad7f4347094ad1c55,54e4c8bb23e7406dacf0f68c609841d06fd57ca5..1a29c4a8139e0ce7d3338a887e5d50836004ae1d
@@@ -102,7 -102,7 +102,7 @@@ static struct sta_info *mesh_plink_allo
        if (local->num_sta >= MESH_MAX_PLINKS)
                return NULL;
  
 -      sta = sta_info_alloc(sdata, hw_addr, GFP_ATOMIC);
 +      sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
        if (!sta)
                return NULL;
  
@@@ -169,7 -169,7 +169,7 @@@ static int mesh_plink_frame_tx(struct i
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
        memcpy(mgmt->da, da, ETH_ALEN);
 -      memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
 +      memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
        /* BSSID is left zeroed, wildcard value */
        mgmt->u.action.category = MESH_PLINK_CATEGORY;
        mgmt->u.action.u.plink_action.action_code = action;
@@@ -234,14 -234,14 +234,14 @@@ void mesh_neighbour_update(u8 *hw_addr
  
        rcu_read_lock();
  
 -      sta = sta_info_get(local, hw_addr);
 +      sta = sta_info_get(sdata, hw_addr);
        if (!sta) {
 +              rcu_read_unlock();
 +
                sta = mesh_plink_alloc(sdata, hw_addr, rates);
 -              if (!sta) {
 -                      rcu_read_unlock();
 +              if (!sta)
                        return;
 -              }
 -              if (sta_info_insert(sta)) {
 +              if (sta_info_insert_rcu(sta)) {
                        rcu_read_unlock();
                        return;
                }
@@@ -455,7 -455,7 +455,7 @@@ void mesh_rx_plink_frame(struct ieee802
  
        rcu_read_lock();
  
 -      sta = sta_info_get(local, mgmt->sa);
 +      sta = sta_info_get(sdata, mgmt->sa);
        if (!sta && ftype != PLINK_OPEN) {
                mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
                rcu_read_unlock();
        } else if (!sta) {
                /* ftype == PLINK_OPEN */
                u32 rates;
 +
 +              rcu_read_unlock();
 +
                if (!mesh_plink_free_count(sdata)) {
                        mpl_dbg("Mesh plink error: no more free plinks\n");
 -                      rcu_read_unlock();
                        return;
                }
  
                sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
                if (!sta) {
                        mpl_dbg("Mesh plink error: plink table full\n");
 -                      rcu_read_unlock();
                        return;
                }
 -              if (sta_info_insert(sta)) {
 +              if (sta_info_insert_rcu(sta)) {
                        rcu_read_unlock();
                        return;
                }
                break;
        default:
                /* should not get here, PLINK_BLOCKED is dealt with at the
-                * beggining of the function
+                * beginning of the function
                 */
                spin_unlock_bh(&sta->lock);
                break;
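
The reshuffling above follows one pattern: sta_info_alloc() now uses GFP_KERNEL, which may sleep, so the allocation has to move outside the RCU read-side critical section; sta_info_insert_rcu() is assumed to return with the RCU read lock held, which is why the failure paths unlock before returning. Distilled as an illustrative sketch mirroring mesh_neighbour_update(), not new API:

/* Illustrative distillation of the locking pattern above. */
static void example_neighbour_update(struct ieee80211_sub_if_data *sdata,
				     u8 *hw_addr, u32 rates)
{
	struct sta_info *sta;

	rcu_read_lock();
	sta = sta_info_get(sdata, hw_addr);
	if (!sta) {
		rcu_read_unlock();	/* sleeping alloc is illegal under RCU */

		sta = mesh_plink_alloc(sdata, hw_addr, rates); /* GFP_KERNEL */
		if (!sta)
			return;
		if (sta_info_insert_rcu(sta)) {	/* returns with RCU held */
			rcu_read_unlock();
			return;
		}
	}
	/* ... update peer-link state under RCU ... */
	rcu_read_unlock();
}
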
index 8dd75d90efc0821932e94d7824267237e06bfdab,fbe94adee7ac73c9089492696205a8e424d775f6..c6cd1b84eddd4e2f8a48f892282384dbc6515de0
  #include <linux/inet.h>
  #include <linux/in.h>
  #include <linux/udp.h>
 +#include <linux/tcp.h>
  #include <linux/netfilter.h>
  
  #include <net/netfilter/nf_conntrack.h>
  #include <net/netfilter/nf_conntrack_core.h>
  #include <net/netfilter/nf_conntrack_expect.h>
  #include <net/netfilter/nf_conntrack_helper.h>
 +#include <net/netfilter/nf_conntrack_zones.h>
  #include <linux/netfilter/nf_conntrack_sip.h>
  
  MODULE_LICENSE("GPL");
@@@ -52,16 -50,12 +52,16 @@@ module_param(sip_direct_media, int, 060
  MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
                                   "endpoints only (default 1)");
  
 -unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
 +unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff,
                                const char **dptr,
                                unsigned int *datalen) __read_mostly;
  EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
  
 +void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly;
 +EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
 +
  unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
 +                                     unsigned int dataoff,
                                       const char **dptr,
                                       unsigned int *datalen,
                                       struct nf_conntrack_expect *exp,
                                       unsigned int matchlen) __read_mostly;
  EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
  
 -unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
 +unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
                                     const char **dptr,
 -                                   unsigned int dataoff,
                                     unsigned int *datalen,
 +                                   unsigned int sdpoff,
                                     enum sdp_header_types type,
                                     enum sdp_header_types term,
                                     const union nf_inet_addr *addr)
                                     __read_mostly;
  EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
  
 -unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
 +unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
                                     const char **dptr,
                                     unsigned int *datalen,
                                     unsigned int matchoff,
  EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
  
  unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
 -                                      const char **dptr,
                                        unsigned int dataoff,
 +                                      const char **dptr,
                                        unsigned int *datalen,
 +                                      unsigned int sdpoff,
                                        const union nf_inet_addr *addr)
                                        __read_mostly;
  EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
  
 -unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
 +unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff,
                                      const char **dptr,
                                      unsigned int *datalen,
                                      struct nf_conntrack_expect *rtp_exp,
@@@ -243,13 -236,12 +243,13 @@@ int ct_sip_parse_request(const struct n
                return 0;
  
        /* Find SIP URI */
 -      limit -= strlen("sip:");
 -      for (; dptr < limit; dptr++) {
 +      for (; dptr < limit - strlen("sip:"); dptr++) {
                if (*dptr == '\r' || *dptr == '\n')
                        return -1;
 -              if (strnicmp(dptr, "sip:", strlen("sip:")) == 0)
 +              if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) {
 +                      dptr += strlen("sip:");
                        break;
 +              }
        }
        if (!skp_epaddr_len(ct, dptr, limit, &shift))
                return 0;
@@@ -284,7 -276,7 +284,7 @@@ EXPORT_SYMBOL_GPL(ct_sip_parse_request)
   * tabs, spaces and continuation lines, which are treated as a single whitespace
   * character.
   *
-  * Some headers may appear multiple times. A comma seperated list of values is
+  * Some headers may appear multiple times. A comma separated list of values is
   * equivalent to multiple headers.
   */
  static const struct sip_header ct_sip_hdrs[] = {
        [SIP_HDR_FROM]                  = SIP_HDR("From", "f", "sip:", skp_epaddr_len),
        [SIP_HDR_TO]                    = SIP_HDR("To", "t", "sip:", skp_epaddr_len),
        [SIP_HDR_CONTACT]               = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len),
 -      [SIP_HDR_VIA]                   = SIP_HDR("Via", "v", "UDP ", epaddr_len),
 +      [SIP_HDR_VIA_UDP]               = SIP_HDR("Via", "v", "UDP ", epaddr_len),
 +      [SIP_HDR_VIA_TCP]               = SIP_HDR("Via", "v", "TCP ", epaddr_len),
        [SIP_HDR_EXPIRES]               = SIP_HDR("Expires", NULL, NULL, digits_len),
        [SIP_HDR_CONTENT_LENGTH]        = SIP_HDR("Content-Length", "l", NULL, digits_len),
  };
@@@ -421,7 -412,7 +421,7 @@@ int ct_sip_get_header(const struct nf_c
  }
  EXPORT_SYMBOL_GPL(ct_sip_get_header);
  
- /* Get next header field in a list of comma seperated values */
+ /* Get next header field in a list of comma separated values */
  static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr,
                              unsigned int dataoff, unsigned int datalen,
                              enum sip_header_types type,
@@@ -525,33 -516,6 +525,33 @@@ int ct_sip_parse_header_uri(const struc
  }
  EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri);
  
 +static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr,
 +                            unsigned int dataoff, unsigned int datalen,
 +                            const char *name,
 +                            unsigned int *matchoff, unsigned int *matchlen)
 +{
 +      const char *limit = dptr + datalen;
 +      const char *start;
 +      const char *end;
 +
 +      limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
 +      if (!limit)
 +              limit = dptr + datalen;
 +
 +      start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
 +      if (!start)
 +              return 0;
 +      start += strlen(name);
 +
 +      end = ct_sip_header_search(start, limit, ";", strlen(";"));
 +      if (!end)
 +              end = limit;
 +
 +      *matchoff = start - dptr;
 +      *matchlen = end - start;
 +      return 1;
 +}
 +
  /* Parse address from header parameter and return address, offset and length */
  int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
                               unsigned int dataoff, unsigned int datalen,
@@@ -610,29 -574,6 +610,29 @@@ int ct_sip_parse_numerical_param(const 
  }
  EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param);
  
 +static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
 +                                unsigned int dataoff, unsigned int datalen,
 +                                u8 *proto)
 +{
 +      unsigned int matchoff, matchlen;
 +
 +      if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=",
 +                             &matchoff, &matchlen)) {
 +              if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP")))
 +                      *proto = IPPROTO_TCP;
 +              else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP")))
 +                      *proto = IPPROTO_UDP;
 +              else
 +                      return 0;
 +
 +              if (*proto != nf_ct_protonum(ct))
 +                      return 0;
 +      } else
 +              *proto = nf_ct_protonum(ct);
 +
 +      return 1;
 +}
 +
  /* SDP header parsing: a SDP session description contains an ordered set of
   * headers, starting with a section containing general session parameters,
   * optionally followed by multiple media descriptions.
@@@ -741,7 -682,7 +741,7 @@@ static int ct_sip_parse_sdp_addr(const 
  
  static int refresh_signalling_expectation(struct nf_conn *ct,
                                          union nf_inet_addr *addr,
 -                                        __be16 port,
 +                                        u8 proto, __be16 port,
                                          unsigned int expires)
  {
        struct nf_conn_help *help = nfct_help(ct);
        hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
                if (exp->class != SIP_EXPECT_SIGNALLING ||
                    !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
 +                  exp->tuple.dst.protonum != proto ||
                    exp->tuple.dst.u.udp.port != port)
                        continue;
                if (!del_timer(&exp->timeout))
@@@ -788,7 -728,7 +788,7 @@@ static void flush_expectations(struct n
        spin_unlock_bh(&nf_conntrack_lock);
  }
  
 -static int set_expected_rtp_rtcp(struct sk_buff *skb,
 +static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
                                 const char **dptr, unsigned int *datalen,
                                 union nf_inet_addr *daddr, __be16 port,
                                 enum sip_expectation_classes class,
  
        rcu_read_lock();
        do {
 -              exp = __nf_ct_expect_find(net, &tuple);
 +              exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
  
                if (!exp || exp->master == ct ||
                    nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
        if (direct_rtp) {
                nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
                if (nf_nat_sdp_port &&
 -                  !nf_nat_sdp_port(skb, dptr, datalen,
 +                  !nf_nat_sdp_port(skb, dataoff, dptr, datalen,
                                     mediaoff, medialen, ntohs(rtp_port)))
                        goto err1;
        }
  
        nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
        if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
 -              ret = nf_nat_sdp_media(skb, dptr, datalen, rtp_exp, rtcp_exp,
 +              ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen,
 +                                     rtp_exp, rtcp_exp,
                                       mediaoff, medialen, daddr);
        else {
                if (nf_ct_expect_related(rtp_exp) == 0) {
@@@ -908,7 -847,6 +908,7 @@@ err1
  static const struct sdp_media_type sdp_media_types[] = {
        SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO),
        SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO),
 +      SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE),
  };
  
  static const struct sdp_media_type *sdp_media_type(const char *dptr,
        return NULL;
  }
  
 -static int process_sdp(struct sk_buff *skb,
 +static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
                       const char **dptr, unsigned int *datalen,
                       unsigned int cseq)
  {
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 -      struct nf_conn_help *help = nfct_help(ct);
        unsigned int matchoff, matchlen;
        unsigned int mediaoff, medialen;
        unsigned int sdpoff;
                else
                        return NF_DROP;
  
 -              ret = set_expected_rtp_rtcp(skb, dptr, datalen,
 +              ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen,
                                            &rtp_addr, htons(port), t->class,
                                            mediaoff, medialen);
                if (ret != NF_ACCEPT)
  
                /* Update media connection address if present */
                if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
 -                      ret = nf_nat_sdp_addr(skb, dptr, mediaoff, datalen,
 -                                            c_hdr, SDP_HDR_MEDIA, &rtp_addr);
 +                      ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen,
 +                                            mediaoff, c_hdr, SDP_HDR_MEDIA,
 +                                            &rtp_addr);
                        if (ret != NF_ACCEPT)
                                return ret;
                }
        /* Update session connection and owner addresses */
        nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
        if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
 -              ret = nf_nat_sdp_session(skb, dptr, sdpoff, datalen, &rtp_addr);
 -
 -      if (ret == NF_ACCEPT && i > 0)
 -              help->help.ct_sip_info.invite_cseq = cseq;
 +              ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff,
 +                                       &rtp_addr);
  
        return ret;
  }
 -static int process_invite_response(struct sk_buff *skb,
 +static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
                                   const char **dptr, unsigned int *datalen,
                                   unsigned int cseq, unsigned int code)
  {
  
        if ((code >= 100 && code <= 199) ||
            (code >= 200 && code <= 299))
 -              return process_sdp(skb, dptr, datalen, cseq);
 +              return process_sdp(skb, dataoff, dptr, datalen, cseq);
        else if (help->help.ct_sip_info.invite_cseq == cseq)
                flush_expectations(ct, true);
        return NF_ACCEPT;
  }
  
 -static int process_update_response(struct sk_buff *skb,
 +static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
                                   const char **dptr, unsigned int *datalen,
                                   unsigned int cseq, unsigned int code)
  {
  
        if ((code >= 100 && code <= 199) ||
            (code >= 200 && code <= 299))
 -              return process_sdp(skb, dptr, datalen, cseq);
 +              return process_sdp(skb, dataoff, dptr, datalen, cseq);
        else if (help->help.ct_sip_info.invite_cseq == cseq)
                flush_expectations(ct, true);
        return NF_ACCEPT;
  }
  
 -static int process_prack_response(struct sk_buff *skb,
 +static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
                                  const char **dptr, unsigned int *datalen,
                                  unsigned int cseq, unsigned int code)
  {
  
        if ((code >= 100 && code <= 199) ||
            (code >= 200 && code <= 299))
 -              return process_sdp(skb, dptr, datalen, cseq);
 +              return process_sdp(skb, dataoff, dptr, datalen, cseq);
        else if (help->help.ct_sip_info.invite_cseq == cseq)
                flush_expectations(ct, true);
        return NF_ACCEPT;
  }
  
 -static int process_bye_request(struct sk_buff *skb,
 +static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
 +                                const char **dptr, unsigned int *datalen,
 +                                unsigned int cseq)
 +{
 +      enum ip_conntrack_info ctinfo;
 +      struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 +      struct nf_conn_help *help = nfct_help(ct);
 +      unsigned int ret;
 +
 +      flush_expectations(ct, true);
 +      ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
 +      if (ret == NF_ACCEPT)
 +              help->help.ct_sip_info.invite_cseq = cseq;
 +      return ret;
 +}
 +
 +static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
                               const char **dptr, unsigned int *datalen,
                               unsigned int cseq)
  {
   * signalling connections. The expectation is marked inactive and is activated
   * when receiving a response indicating success from the registrar.
   */
 -static int process_register_request(struct sk_buff *skb,
 +static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
                                    const char **dptr, unsigned int *datalen,
                                    unsigned int cseq)
  {
        struct nf_conntrack_expect *exp;
        union nf_inet_addr *saddr, daddr;
        __be16 port;
 +      u8 proto;
        unsigned int expires = 0;
        int ret;
        typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
        if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr))
                return NF_ACCEPT;
  
 +      if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen,
 +                                 &proto) == 0)
 +              return NF_ACCEPT;
 +
        if (ct_sip_parse_numerical_param(ct, *dptr,
                                         matchoff + matchlen, *datalen,
                                         "expires=", NULL, NULL, &expires) < 0)
                saddr = &ct->tuplehash[!dir].tuple.src.u3;
  
        nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
 -                        saddr, &daddr, IPPROTO_UDP, NULL, &port);
 +                        saddr, &daddr, proto, NULL, &port);
        exp->timeout.expires = sip_timeout * HZ;
        exp->helper = nfct_help(ct)->helper;
        exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
  
        nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
        if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
 -              ret = nf_nat_sip_expect(skb, dptr, datalen, exp,
 +              ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp,
                                        matchoff, matchlen);
        else {
                if (nf_ct_expect_related(exp) != 0)
@@@ -1197,7 -1116,7 +1197,7 @@@ store_cseq
        return ret;
  }
  
 -static int process_register_response(struct sk_buff *skb,
 +static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
                                     const char **dptr, unsigned int *datalen,
                                     unsigned int cseq, unsigned int code)
  {
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        union nf_inet_addr addr;
        __be16 port;
 -      unsigned int matchoff, matchlen, dataoff = 0;
 +      u8 proto;
 +      unsigned int matchoff, matchlen, coff = 0;
        unsigned int expires = 0;
        int in_contact = 0, ret;
  
        while (1) {
                unsigned int c_expires = expires;
  
 -              ret = ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen,
 +              ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
                                              SIP_HDR_CONTACT, &in_contact,
                                              &matchoff, &matchlen,
                                              &addr, &port);
                if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr))
                        continue;
  
 +              if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen,
 +                                         *datalen, &proto) == 0)
 +                      continue;
 +
                ret = ct_sip_parse_numerical_param(ct, *dptr,
                                                   matchoff + matchlen,
                                                   *datalen, "expires=",
                        return NF_DROP;
                if (c_expires == 0)
                        break;
 -              if (refresh_signalling_expectation(ct, &addr, port, c_expires))
 +              if (refresh_signalling_expectation(ct, &addr, proto, port,
 +                                                 c_expires))
                        return NF_ACCEPT;
        }
  
@@@ -1271,7 -1184,7 +1271,7 @@@ flush
  }
  
  static const struct sip_handler sip_handlers[] = {
 -      SIP_HANDLER("INVITE", process_sdp, process_invite_response),
 +      SIP_HANDLER("INVITE", process_invite_request, process_invite_response),
        SIP_HANDLER("UPDATE", process_sdp, process_update_response),
        SIP_HANDLER("ACK", process_sdp, NULL),
        SIP_HANDLER("PRACK", process_sdp, process_prack_response),
        SIP_HANDLER("REGISTER", process_register_request, process_register_response),
  };
  
 -static int process_sip_response(struct sk_buff *skb,
 +static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
                                const char **dptr, unsigned int *datalen)
  {
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 -      unsigned int matchoff, matchlen;
 -      unsigned int code, cseq, dataoff, i;
 +      unsigned int matchoff, matchlen, matchend;
 +      unsigned int code, cseq, i;
  
        if (*datalen < strlen("SIP/2.0 200"))
                return NF_ACCEPT;
        cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
        if (!cseq)
                return NF_DROP;
 -      dataoff = matchoff + matchlen + 1;
 +      matchend = matchoff + matchlen + 1;
  
        for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
                const struct sip_handler *handler;
                handler = &sip_handlers[i];
                if (handler->response == NULL)
                        continue;
 -              if (*datalen < dataoff + handler->len ||
 -                  strnicmp(*dptr + dataoff, handler->method, handler->len))
 +              if (*datalen < matchend + handler->len ||
 +                  strnicmp(*dptr + matchend, handler->method, handler->len))
                        continue;
 -              return handler->response(skb, dptr, datalen, cseq, code);
 +              return handler->response(skb, dataoff, dptr, datalen,
 +                                       cseq, code);
        }
        return NF_ACCEPT;
  }
  
 -static int process_sip_request(struct sk_buff *skb,
 +static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
                               const char **dptr, unsigned int *datalen)
  {
        enum ip_conntrack_info ctinfo;
                if (!cseq)
                        return NF_DROP;
  
 -              return handler->request(skb, dptr, datalen, cseq);
 +              return handler->request(skb, dataoff, dptr, datalen, cseq);
        }
        return NF_ACCEPT;
  }
  
 -static int sip_help(struct sk_buff *skb,
 -                  unsigned int protoff,
 -                  struct nf_conn *ct,
 -                  enum ip_conntrack_info ctinfo)
 +static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
 +                         unsigned int dataoff, const char **dptr,
 +                         unsigned int *datalen)
 +{
 +      typeof(nf_nat_sip_hook) nf_nat_sip;
 +      int ret;
 +
 +      if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
 +              ret = process_sip_request(skb, dataoff, dptr, datalen);
 +      else
 +              ret = process_sip_response(skb, dataoff, dptr, datalen);
 +
 +      if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
 +              nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
 +              if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen))
 +                      ret = NF_DROP;
 +      }
 +
 +      return ret;
 +}
 +
 +static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
 +                      struct nf_conn *ct, enum ip_conntrack_info ctinfo)
  {
 +      struct tcphdr *th, _tcph;
        unsigned int dataoff, datalen;
 -      const char *dptr;
 +      unsigned int matchoff, matchlen, clen;
 +      unsigned int msglen, origlen;
 +      const char *dptr, *end;
 +      s16 diff, tdiff = 0;
        int ret;
 -      typeof(nf_nat_sip_hook) nf_nat_sip;
 +      typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
 +
 +      if (ctinfo != IP_CT_ESTABLISHED &&
 +          ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
 +              return NF_ACCEPT;
  
        /* No Data ? */
 -      dataoff = protoff + sizeof(struct udphdr);
 +      th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
 +      if (th == NULL)
 +              return NF_ACCEPT;
 +      dataoff = protoff + th->doff * 4;
        if (dataoff >= skb->len)
                return NF_ACCEPT;
  
        nf_ct_refresh(ct, skb, sip_timeout * HZ);
  
 -      if (!skb_is_nonlinear(skb))
 -              dptr = skb->data + dataoff;
 -      else {
 +      if (skb_is_nonlinear(skb)) {
                pr_debug("Copy of skbuff not supported yet.\n");
                return NF_ACCEPT;
        }
  
 +      dptr = skb->data + dataoff;
        datalen = skb->len - dataoff;
        if (datalen < strlen("SIP/2.0 200"))
                return NF_ACCEPT;
  
 -      if (strnicmp(dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
 -              ret = process_sip_request(skb, &dptr, &datalen);
 -      else
 -              ret = process_sip_response(skb, &dptr, &datalen);
 +      while (1) {
 +              if (ct_sip_get_header(ct, dptr, 0, datalen,
 +                                    SIP_HDR_CONTENT_LENGTH,
 +                                    &matchoff, &matchlen) <= 0)
 +                      break;
 +
 +              clen = simple_strtoul(dptr + matchoff, (char **)&end, 10);
 +              if (dptr + matchoff == end)
 +                      break;
 +
 +              if (end + strlen("\r\n\r\n") > dptr + datalen)
 +                      break;
 +              if (end[0] != '\r' || end[1] != '\n' ||
 +                  end[2] != '\r' || end[3] != '\n')
 +                      break;
 +              end += strlen("\r\n\r\n") + clen;
 +
 +              msglen = origlen = end - dptr;
 +
 +              ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
 +              if (ret != NF_ACCEPT)
 +                      break;
 +              diff     = msglen - origlen;
 +              tdiff   += diff;
 +
 +              dataoff += msglen;
 +              dptr    += msglen;
 +              datalen  = datalen + diff - msglen;
 +      }
  
        if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
 -              nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
 -              if (nf_nat_sip && !nf_nat_sip(skb, &dptr, &datalen))
 -                      ret = NF_DROP;
 +              nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
 +              if (nf_nat_sip_seq_adjust)
 +                      nf_nat_sip_seq_adjust(skb, tdiff);
        }
  
        return ret;
  }
  
 -static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly;
 -static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly;
 +static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
 +                      struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 +{
 +      unsigned int dataoff, datalen;
 +      const char *dptr;
 +
 +      /* No Data ? */
 +      dataoff = protoff + sizeof(struct udphdr);
 +      if (dataoff >= skb->len)
 +              return NF_ACCEPT;
 +
 +      nf_ct_refresh(ct, skb, sip_timeout * HZ);
 +
 +      if (skb_is_nonlinear(skb)) {
 +              pr_debug("Copy of skbuff not supported yet.\n");
 +              return NF_ACCEPT;
 +      }
 +
 +      dptr = skb->data + dataoff;
 +      datalen = skb->len - dataoff;
 +      if (datalen < strlen("SIP/2.0 200"))
 +              return NF_ACCEPT;
 +
 +      return process_sip_msg(skb, ct, dataoff, &dptr, &datalen);
 +}
 +
 +static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
 +static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;
  
  static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
        [SIP_EXPECT_SIGNALLING] = {
 +              .name           = "signalling",
                .max_expected   = 1,
                .timeout        = 3 * 60,
        },
        [SIP_EXPECT_AUDIO] = {
 +              .name           = "audio",
                .max_expected   = 2 * IP_CT_DIR_MAX,
                .timeout        = 3 * 60,
        },
        [SIP_EXPECT_VIDEO] = {
 +              .name           = "video",
                .max_expected   = 2 * IP_CT_DIR_MAX,
                .timeout        = 3 * 60,
        },
 +      [SIP_EXPECT_IMAGE] = {
 +              .name           = "image",
 +              .max_expected   = IP_CT_DIR_MAX,
 +              .timeout        = 3 * 60,
 +      },
  };
  
  static void nf_conntrack_sip_fini(void)
        int i, j;
  
        for (i = 0; i < ports_c; i++) {
 -              for (j = 0; j < 2; j++) {
 +              for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
                        if (sip[i][j].me == NULL)
                                continue;
                        nf_conntrack_helper_unregister(&sip[i][j]);
@@@ -1519,24 -1343,14 +1519,24 @@@ static int __init nf_conntrack_sip_init
                memset(&sip[i], 0, sizeof(sip[i]));
  
                sip[i][0].tuple.src.l3num = AF_INET;
 -              sip[i][1].tuple.src.l3num = AF_INET6;
 -              for (j = 0; j < 2; j++) {
 -                      sip[i][j].tuple.dst.protonum = IPPROTO_UDP;
 +              sip[i][0].tuple.dst.protonum = IPPROTO_UDP;
 +              sip[i][0].help = sip_help_udp;
 +              sip[i][1].tuple.src.l3num = AF_INET;
 +              sip[i][1].tuple.dst.protonum = IPPROTO_TCP;
 +              sip[i][1].help = sip_help_tcp;
 +
 +              sip[i][2].tuple.src.l3num = AF_INET6;
 +              sip[i][2].tuple.dst.protonum = IPPROTO_UDP;
 +              sip[i][2].help = sip_help_udp;
 +              sip[i][3].tuple.src.l3num = AF_INET6;
 +              sip[i][3].tuple.dst.protonum = IPPROTO_TCP;
 +              sip[i][3].help = sip_help_tcp;
 +
 +              for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
                        sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
                        sip[i][j].expect_policy = sip_exp_policy;
                        sip[i][j].expect_class_max = SIP_EXPECT_MAX;
                        sip[i][j].me = THIS_MODULE;
 -                      sip[i][j].help = sip_help;
  
                        tmpname = &sip_names[i][j][0];
                        if (ports[i] == SIP_PORT)
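
For reference, the framing rule sip_help_tcp() applies above, as a standalone userspace sketch: locate Content-Length, require the header block to terminate in "\r\n\r\n" immediately after the length value (the helper above makes the same last-header simplification), and skip the body. memmem() is a GNU extension:

#define _GNU_SOURCE
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Return the length of the first complete SIP message in buf, or 0 if
 * more data is needed. Assumes buf starts at a message boundary and,
 * like the loop above, that Content-Length is the final header. */
static size_t sip_msg_len(const char *buf, size_t len)
{
	const char *hdr, *eoh;
	unsigned long clen;

	hdr = memmem(buf, len, "Content-Length:", 15);
	if (!hdr)
		return 0;
	clen = strtoul(hdr + 15, (char **)&eoh, 10);
	if (eoh == hdr + 15)
		return 0;		/* no digits after the header name */
	if (eoh + 4 > buf + len || memcmp(eoh, "\r\n\r\n", 4) != 0)
		return 0;		/* header block not terminated yet */
	if ((size_t)(eoh - buf) + 4 + clen > len)
		return 0;		/* body not fully buffered yet */
	return (size_t)(eoh - buf) + 4 + clen;
}
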
index d952806b64690f3a46756b6025d170bdd002764d,cbaac92dad5939b70e097c9279e6c7585d5bb029..9e9c48963942ad39b722be5790c8a4b01750b1d4
@@@ -1,6 -1,6 +1,6 @@@
  /*
   *    xt_hashlimit - Netfilter module to limit the number of packets per time
-  *    seperately for each hashbucket (sourceip/sourceport/dstip/dstport)
+  *    separately for each hashbucket (sourceip/sourceport/dstip/dstport)
   *
   *    (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
   *    Copyright Â© CC Computer Consultants GmbH, 2007 - 2008
@@@ -26,7 -26,6 +26,7 @@@
  #endif
  
  #include <net/net_namespace.h>
 +#include <net/netns/generic.h>
  
  #include <linux/netfilter/x_tables.h>
  #include <linux/netfilter_ipv4/ip_tables.h>
@@@ -41,19 -40,9 +41,19 @@@ MODULE_DESCRIPTION("Xtables: per hash-b
  MODULE_ALIAS("ipt_hashlimit");
  MODULE_ALIAS("ip6t_hashlimit");
  
 +struct hashlimit_net {
 +      struct hlist_head       htables;
 +      struct proc_dir_entry   *ipt_hashlimit;
 +      struct proc_dir_entry   *ip6t_hashlimit;
 +};
 +
 +static int hashlimit_net_id;
 +static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
 +{
 +      return net_generic(net, hashlimit_net_id);
 +}
 +
  /* need to declare this at the top */
 -static struct proc_dir_entry *hashlimit_procdir4;
 -static struct proc_dir_entry *hashlimit_procdir6;
  static const struct file_operations dl_file_ops;
  
  /* hash table crap */
@@@ -90,26 -79,27 +90,26 @@@ struct dsthash_ent 
  
  struct xt_hashlimit_htable {
        struct hlist_node node;         /* global list of all htables */
 -      atomic_t use;
 +      int use;
        u_int8_t family;
 +      bool rnd_initialized;
  
        struct hashlimit_cfg1 cfg;      /* config */
  
        /* used internally */
        spinlock_t lock;                /* lock for list_head */
        u_int32_t rnd;                  /* random seed for hash */
 -      int rnd_initialized;
        unsigned int count;             /* number entries in table */
        struct timer_list timer;        /* timer for gc */
  
        /* seq_file stuff */
        struct proc_dir_entry *pde;
 +      struct net *net;
  
        struct hlist_head hash[0];      /* hashtable itself */
  };
  
 -static DEFINE_SPINLOCK(hashlimit_lock);       /* protects htables list */
 -static DEFINE_MUTEX(hlimit_mutex);    /* additional checkentry protection */
 -static HLIST_HEAD(hashlimit_htables);
 +static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */
  static struct kmem_cache *hashlimit_cachep __read_mostly;
  
  static inline bool dst_cmp(const struct dsthash_ent *ent,
@@@ -160,7 -150,7 +160,7 @@@ dsthash_alloc_init(struct xt_hashlimit_
         * the first hashtable entry */
        if (!ht->rnd_initialized) {
                get_random_bytes(&ht->rnd, sizeof(ht->rnd));
 -              ht->rnd_initialized = 1;
 +              ht->rnd_initialized = true;
        }
  
        if (ht->cfg.max && ht->count >= ht->cfg.max) {
@@@ -195,9 -185,8 +195,9 @@@ dsthash_free(struct xt_hashlimit_htabl
  }
  static void htable_gc(unsigned long htlong);
  
 -static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
 +static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family)
  {
 +      struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
        struct xt_hashlimit_htable *hinfo;
        unsigned int size;
        unsigned int i;
        for (i = 0; i < hinfo->cfg.size; i++)
                INIT_HLIST_HEAD(&hinfo->hash[i]);
  
 -      atomic_set(&hinfo->use, 1);
 +      hinfo->use = 1;
        hinfo->count = 0;
        hinfo->family = family;
 -      hinfo->rnd_initialized = 0;
 +      hinfo->rnd_initialized = false;
        spin_lock_init(&hinfo->lock);
        hinfo->pde = proc_create_data(minfo->name, 0,
                (family == NFPROTO_IPV4) ?
 -              hashlimit_procdir4 : hashlimit_procdir6,
 +              hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
                &dl_file_ops, hinfo);
        if (!hinfo->pde) {
                vfree(hinfo);
                return -1;
        }
 +      hinfo->net = net;
  
        setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo);
        hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
        add_timer(&hinfo->timer);
  
 -      spin_lock_bh(&hashlimit_lock);
 -      hlist_add_head(&hinfo->node, &hashlimit_htables);
 -      spin_unlock_bh(&hashlimit_lock);
 +      hlist_add_head(&hinfo->node, &hashlimit_net->htables);
  
        return 0;
  }
  
 -static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
 +static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
 +                       u_int8_t family)
  {
 +      struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
        struct xt_hashlimit_htable *hinfo;
        unsigned int size;
        unsigned int i;
        for (i = 0; i < hinfo->cfg.size; i++)
                INIT_HLIST_HEAD(&hinfo->hash[i]);
  
 -      atomic_set(&hinfo->use, 1);
 +      hinfo->use = 1;
        hinfo->count = 0;
        hinfo->family = family;
 -      hinfo->rnd_initialized = 0;
 +      hinfo->rnd_initialized = false;
        spin_lock_init(&hinfo->lock);
  
        hinfo->pde = proc_create_data(minfo->name, 0,
                (family == NFPROTO_IPV4) ?
 -              hashlimit_procdir4 : hashlimit_procdir6,
 +              hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
                &dl_file_ops, hinfo);
        if (hinfo->pde == NULL) {
                vfree(hinfo);
                return -1;
        }
 +      hinfo->net = net;
  
        setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
        hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
        add_timer(&hinfo->timer);
  
 -      spin_lock_bh(&hashlimit_lock);
 -      hlist_add_head(&hinfo->node, &hashlimit_htables);
 -      spin_unlock_bh(&hashlimit_lock);
 +      hlist_add_head(&hinfo->node, &hashlimit_net->htables);
  
        return 0;
  }
@@@ -375,46 -364,43 +375,46 @@@ static void htable_gc(unsigned long htl
  
  static void htable_destroy(struct xt_hashlimit_htable *hinfo)
  {
 +      struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
 +      struct proc_dir_entry *parent;
 +
        del_timer_sync(&hinfo->timer);
  
 -      /* remove proc entry */
 -      remove_proc_entry(hinfo->pde->name,
 -                        hinfo->family == NFPROTO_IPV4 ? hashlimit_procdir4 :
 -                                                   hashlimit_procdir6);
 +      if (hinfo->family == NFPROTO_IPV4)
 +              parent = hashlimit_net->ipt_hashlimit;
 +      else
 +              parent = hashlimit_net->ip6t_hashlimit;
 +      remove_proc_entry(hinfo->pde->name, parent);
        htable_selective_cleanup(hinfo, select_all);
        vfree(hinfo);
  }
  
 -static struct xt_hashlimit_htable *htable_find_get(const char *name,
 +static struct xt_hashlimit_htable *htable_find_get(struct net *net,
 +                                                 const char *name,
                                                   u_int8_t family)
  {
 +      struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
        struct xt_hashlimit_htable *hinfo;
        struct hlist_node *pos;
  
 -      spin_lock_bh(&hashlimit_lock);
 -      hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
 +      hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
                if (!strcmp(name, hinfo->pde->name) &&
                    hinfo->family == family) {
 -                      atomic_inc(&hinfo->use);
 -                      spin_unlock_bh(&hashlimit_lock);
 +                      hinfo->use++;
                        return hinfo;
                }
        }
 -      spin_unlock_bh(&hashlimit_lock);
        return NULL;
  }
  
  static void htable_put(struct xt_hashlimit_htable *hinfo)
  {
 -      if (atomic_dec_and_test(&hinfo->use)) {
 -              spin_lock_bh(&hashlimit_lock);
 +      mutex_lock(&hashlimit_mutex);
 +      if (--hinfo->use == 0) {
                hlist_del(&hinfo->node);
 -              spin_unlock_bh(&hashlimit_lock);
                htable_destroy(hinfo);
        }
 +      mutex_unlock(&hashlimit_mutex);
  }
  
  /* The algorithm used is the Simple Token Bucket Filter (TBF)
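For readers following the hashlimit logic, the rate decision itself is the Simple Token Bucket Filter named in the comment above: credit accumulates with elapsed time up to a burst cap, and each matching packet spends a fixed cost. A minimal standalone sketch of that idea follows; the helper name is hypothetical and the rate constant is a placeholder (the real module derives its per-jiffy credit from the configured average rate):

#include <linux/types.h>

#define CREDITS_PER_JIFFY 128	/* placeholder; derived from cfg.avg in the real module */

struct tbf_sketch {
	unsigned long prev;	/* last update time, in jiffies */
	u_int32_t credit;	/* tokens currently available */
	u_int32_t credit_cap;	/* burst ceiling */
	u_int32_t cost;		/* tokens spent per matching packet */
};

static bool tbf_below_limit(struct tbf_sketch *t, unsigned long now)
{
	/* refill: elapsed jiffies earn credit, clamped to the cap */
	t->credit += (now - t->prev) * CREDITS_PER_JIFFY;
	if (t->credit > t->credit_cap)
		t->credit = t->credit_cap;
	t->prev = now;

	if (t->credit >= t->cost) {
		t->credit -= t->cost;	/* below limit: consume and match */
		return true;
	}
	return false;			/* above limit */
}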
@@@ -679,7 -665,6 +679,7 @@@ hashlimit_mt(const struct sk_buff *skb
  
  static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
  {
 +      struct net *net = par->net;
        struct xt_hashlimit_info *r = par->matchinfo;
  
        /* Check for overflow. */
        if (r->name[sizeof(r->name) - 1] != '\0')
                return false;
  
 -      /* This is the best we've got: We cannot release and re-grab lock,
 -       * since checkentry() is called before x_tables.c grabs xt_mutex.
 -       * We also cannot grab the hashtable spinlock, since htable_create will
 -       * call vmalloc, and that can sleep.  And we cannot just re-search
 -       * the list of htable's in htable_create(), since then we would
 -       * create duplicate proc files. -HW */
 -      mutex_lock(&hlimit_mutex);
 -      r->hinfo = htable_find_get(r->name, par->match->family);
 -      if (!r->hinfo && htable_create_v0(r, par->match->family) != 0) {
 -              mutex_unlock(&hlimit_mutex);
 +      mutex_lock(&hashlimit_mutex);
 +      r->hinfo = htable_find_get(net, r->name, par->match->family);
 +      if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) {
 +              mutex_unlock(&hashlimit_mutex);
                return false;
        }
 -      mutex_unlock(&hlimit_mutex);
 +      mutex_unlock(&hashlimit_mutex);
  
        return true;
  }
  
  static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
  {
 +      struct net *net = par->net;
        struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
  
        /* Check for overflow. */
                        return false;
        }
  
 -      /* This is the best we've got: We cannot release and re-grab lock,
 -       * since checkentry() is called before x_tables.c grabs xt_mutex.
 -       * We also cannot grab the hashtable spinlock, since htable_create will
 -       * call vmalloc, and that can sleep.  And we cannot just re-search
 -       * the list of htable's in htable_create(), since then we would
 -       * create duplicate proc files. -HW */
 -      mutex_lock(&hlimit_mutex);
 -      info->hinfo = htable_find_get(info->name, par->match->family);
 -      if (!info->hinfo && htable_create(info, par->match->family) != 0) {
 -              mutex_unlock(&hlimit_mutex);
 +      mutex_lock(&hashlimit_mutex);
 +      info->hinfo = htable_find_get(net, info->name, par->match->family);
 +      if (!info->hinfo && htable_create(net, info, par->match->family) != 0) {
 +              mutex_unlock(&hashlimit_mutex);
                return false;
        }
 -      mutex_unlock(&hlimit_mutex);
 +      mutex_unlock(&hashlimit_mutex);
        return true;
  }
  
@@@ -771,7 -767,7 +771,7 @@@ struct compat_xt_hashlimit_info 
        compat_uptr_t master;
  };
  
 -static void hashlimit_mt_compat_from_user(void *dst, void *src)
 +static void hashlimit_mt_compat_from_user(void *dst, const void *src)
  {
        int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
  
        memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
  }
  
 -static int hashlimit_mt_compat_to_user(void __user *dst, void *src)
 +static int hashlimit_mt_compat_to_user(void __user *dst, const void *src)
  {
        int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
  
@@@ -845,7 -841,8 +845,7 @@@ static struct xt_match hashlimit_mt_reg
  static void *dl_seq_start(struct seq_file *s, loff_t *pos)
        __acquires(htable->lock)
  {
 -      struct proc_dir_entry *pde = s->private;
 -      struct xt_hashlimit_htable *htable = pde->data;
 +      struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket;
  
        spin_lock_bh(&htable->lock);
  
  static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
  {
 -      struct proc_dir_entry *pde = s->private;
 -      struct xt_hashlimit_htable *htable = pde->data;
 +      struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket = (unsigned int *)v;
  
        *pos = ++(*bucket);
  static void dl_seq_stop(struct seq_file *s, void *v)
        __releases(htable->lock)
  {
 -      struct proc_dir_entry *pde = s->private;
 -      struct xt_hashlimit_htable *htable = pde->data;
 +      struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket = (unsigned int *)v;
  
        kfree(bucket);
@@@ -918,7 -917,8 +918,7 @@@ static int dl_seq_real_show(struct dsth
  
  static int dl_seq_show(struct seq_file *s, void *v)
  {
 -      struct proc_dir_entry *pde = s->private;
 -      struct xt_hashlimit_htable *htable = pde->data;
 +      struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket = (unsigned int *)v;
        struct dsthash_ent *ent;
        struct hlist_node *pos;
@@@ -944,7 -944,7 +944,7 @@@ static int dl_proc_open(struct inode *i
  
        if (!ret) {
                struct seq_file *sf = file->private_data;
 -              sf->private = PDE(inode);
 +              sf->private = PDE(inode)->data;
        }
        return ret;
  }
@@@ -957,61 -957,10 +957,61 @@@ static const struct file_operations dl_
        .release = seq_release
  };
  
 +static int __net_init hashlimit_proc_net_init(struct net *net)
 +{
 +      struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
 +
 +      hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
 +      if (!hashlimit_net->ipt_hashlimit)
 +              return -ENOMEM;
 +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
 +      hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
 +      if (!hashlimit_net->ip6t_hashlimit) {
 +              proc_net_remove(net, "ipt_hashlimit");
 +              return -ENOMEM;
 +      }
 +#endif
 +      return 0;
 +}
 +
 +static void __net_exit hashlimit_proc_net_exit(struct net *net)
 +{
 +      proc_net_remove(net, "ipt_hashlimit");
 +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
 +      proc_net_remove(net, "ip6t_hashlimit");
 +#endif
 +}
 +
 +static int __net_init hashlimit_net_init(struct net *net)
 +{
 +      struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
 +
 +      INIT_HLIST_HEAD(&hashlimit_net->htables);
 +      return hashlimit_proc_net_init(net);
 +}
 +
 +static void __net_exit hashlimit_net_exit(struct net *net)
 +{
 +      struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
 +
 +      BUG_ON(!hlist_empty(&hashlimit_net->htables));
 +      hashlimit_proc_net_exit(net);
 +}
 +
 +static struct pernet_operations hashlimit_net_ops = {
 +      .init   = hashlimit_net_init,
 +      .exit   = hashlimit_net_exit,
 +      .id     = &hashlimit_net_id,
 +      .size   = sizeof(struct hashlimit_net),
 +};
 +
  static int __init hashlimit_mt_init(void)
  {
        int err;
  
 +      err = register_pernet_subsys(&hashlimit_net_ops);
 +      if (err < 0)
 +              return err;
        err = xt_register_matches(hashlimit_mt_reg,
              ARRAY_SIZE(hashlimit_mt_reg));
        if (err < 0)
                printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
                goto err2;
        }
 -      hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", init_net.proc_net);
 -      if (!hashlimit_procdir4) {
 -              printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
 -                              "entry\n");
 -              goto err3;
 -      }
 -      err = 0;
 -#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
 -      hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", init_net.proc_net);
 -      if (!hashlimit_procdir6) {
 -              printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
 -                              "entry\n");
 -              err = -ENOMEM;
 -      }
 -#endif
 -      if (!err)
 -              return 0;
 -      remove_proc_entry("ipt_hashlimit", init_net.proc_net);
 -err3:
 -      kmem_cache_destroy(hashlimit_cachep);
 +      return 0;
 +
  err2:
        xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
  err1:
 +      unregister_pernet_subsys(&hashlimit_net_ops);
        return err;
  
  }
  
  static void __exit hashlimit_mt_exit(void)
  {
 -      remove_proc_entry("ipt_hashlimit", init_net.proc_net);
 -#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
 -      remove_proc_entry("ip6t_hashlimit", init_net.proc_net);
 -#endif
        kmem_cache_destroy(hashlimit_cachep);
        xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
 +      unregister_pernet_subsys(&hashlimit_net_ops);
  }
  
  module_init(hashlimit_mt_init);
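Taken together, the xt_hashlimit hunks above replace the module-global proc directories and htable list with per-network-namespace state. The pattern is the one the diff registers via register_pernet_subsys(): setting .id and .size makes the core allocate a per-net struct, which net_generic() later retrieves. A minimal sketch under hypothetical example_* names:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int example_net_id;

struct example_net {
	struct hlist_head objects;	/* per-namespace object list */
};

static int __net_init example_net_init(struct net *net)
{
	struct example_net *en = net_generic(net, example_net_id);

	INIT_HLIST_HEAD(&en->objects);	/* storage itself was allocated by the core */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* nothing to free here: .size-allocated storage is released by the core */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};

/* registered once at module init, mirroring hashlimit_mt_init() above:
 *	err = register_pernet_subsys(&example_net_ops);
 */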
diff --combined security/selinux/avc.c
index db0fd9f334994fe5a29ae7dfc31f1cef6fd263bb,3328b1f4504737a92fddb59e7f9eec3215f82c6c..989fef82563a4a10de2b8f47e90852dd0ee324fc
@@@ -337,7 -337,7 +337,7 @@@ static inline struct avc_node *avc_sear
   * Look up an AVC entry that is valid for the
   * (@ssid, @tsid), interpreting the permissions
   * based on @tclass.  If a valid AVC entry exists,
-  * then this function return the avc_node.
+  * then this function returns the avc_node.
   * Otherwise, this function returns NULL.
   */
  static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
@@@ -489,14 -489,17 +489,14 @@@ void avc_audit(u32 ssid, u32 tsid
        struct common_audit_data stack_data;
        u32 denied, audited;
        denied = requested & ~avd->allowed;
 -      if (denied) {
 -              audited = denied;
 -              if (!(audited & avd->auditdeny))
 -                      return;
 -      } else if (result) {
 +      if (denied)
 +              audited = denied & avd->auditdeny;
 +      else if (result)
                audited = denied = requested;
 -      } else {
 -              audited = requested;
 -              if (!(audited & avd->auditallow))
 -                      return;
 -      }
 +      else
 +              audited = requested & avd->auditallow;
 +      if (!audited)
 +              return;
        if (!a) {
                a = &stack_data;
                memset(a, 0, sizeof(*a));
   * @perms: permissions
   *
   * Register a callback function for events in the set @events
-  * related to the SID pair (@ssid, @tsid) and
+  * related to the SID pair (@ssid, @tsid) 
   * and the permissions @perms, interpreting
   * @perms based on @tclass.  Returns %0 on success or
   * -%ENOMEM if insufficient memory exists to add the callback.
@@@ -568,7 -571,7 +568,7 @@@ static inline int avc_sidcmp(u32 x, u3
   *
   * if a valid AVC entry doesn't exist, this function returns -ENOENT.
   * if kmalloc() called internally returns NULL, this function returns -ENOMEM.
-  * otherwise, this function update the AVC entry. The original AVC-entry object
+  * otherwise, this function updates the AVC entry. The original AVC-entry object
   * will release later by RCU.
   */
  static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
@@@ -743,7 -746,9 +743,7 @@@ int avc_has_perm_noaudit(u32 ssid, u32 
                else
                        avd = &avd_entry;
  
 -              rc = security_compute_av(ssid, tsid, tclass, requested, avd);
 -              if (rc)
 -                      goto out;
 +              security_compute_av(ssid, tsid, tclass, avd);
                rcu_read_lock();
                node = avc_insert(ssid, tsid, tclass, avd);
        } else {
        }
  
        rcu_read_unlock();
 -out:
        return rc;
  }
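One note on the avc_audit() hunk earlier in this file: the rework folds three early-return branches into a single mask computation that is tested once. Restated as a standalone helper (hypothetical name, illustrative only), the decision is:

#include <linux/types.h>

/* Which requested permission bits get audited, per outcome */
static u32 audited_bits(u32 requested, u32 denied, int result,
			u32 auditdeny, u32 auditallow)
{
	if (denied)
		return denied & auditdeny;	/* denial: only bits flagged for audit */
	if (result)
		return requested;		/* non-AVC failure: audit the whole request */
	return requested & auditallow;		/* success: only auditallow bits */
}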
  
diff --combined sound/pci/rme9652/hdspm.c
index 3d72c1effeef7937e70ba72f2f01be235c8c0a0a,db0ed1cbd9824a69d6b15afc5cfd180e7d5c65fc..547b713d720449a7bdd746ca04fbf5a6cbe84931
@@@ -512,7 -512,7 +512,7 @@@ static char channel_map_madi_ss[HDSPM_M
  };
  
  
 -static struct pci_device_id snd_hdspm_ids[] __devinitdata = {
 +static DEFINE_PCI_DEVICE_TABLE(snd_hdspm_ids) = {
        {
         .vendor = PCI_VENDOR_ID_XILINX,
         .device = PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI,
@@@ -2479,7 -2479,7 +2479,7 @@@ static int snd_hdspm_put_qs_wire(struc
     on MADICARD 
    - playback mixer matrix: [channelout+64] [output] [value]
    - input(thru) mixer matrix: [channelin] [output] [value]
-   (better do 2 kontrols for seperation ?)
+   (better do 2 kontrols for separation ?)
  */
  
  #define HDSPM_MIXER(xname, xindex) \
diff --combined sound/soc/codecs/wm8990.c
index a54dc77b7f343f649b24ff58b9f401a6e8333235,427614a2762bc3b10135d32b4a3f427b0a9f2f48..056b787b6ee09d1f9948c67d0d7acb2d9abbf887
@@@ -990,7 -990,7 +990,7 @@@ static int wm8990_set_dai_pll(struct sn
                reg = snd_soc_read(codec, WM8990_CLOCKING_2);
                snd_soc_write(codec, WM8990_CLOCKING_2, reg | WM8990_SYSCLK_SRC);
  
-               /* set up N , fractional mode and pre-divisor if neccessary */
+               /* set up N , fractional mode and pre-divisor if necessary */
                snd_soc_write(codec, WM8990_PLL1, pll_div.n | WM8990_SDM |
                        (pll_div.div2?WM8990_PRESCALE:0));
                snd_soc_write(codec, WM8990_PLL2, (u8)(pll_div.k>>8));
@@@ -1319,6 -1319,10 +1319,6 @@@ static int wm8990_suspend(struct platfo
        struct snd_soc_device *socdev = platform_get_drvdata(pdev);
        struct snd_soc_codec *codec = socdev->card->codec;
  
 -      /* we only need to suspend if we are a valid card */
 -      if (!codec->card)
 -              return 0;
 -
        wm8990_set_bias_level(codec, SND_SOC_BIAS_OFF);
        return 0;
  }
@@@ -1331,6 -1335,10 +1331,6 @@@ static int wm8990_resume(struct platfor
        u8 data[2];
        u16 *cache = codec->reg_cache;
  
 -      /* we only need to resume if we are a valid card */
 -      if (!codec->card)
 -              return 0;
 -
        /* Sync reg_cache with the hardware */
        for (i = 0; i < ARRAY_SIZE(wm8990_reg); i++) {
                if (i + 1 == WM8990_RESET)
diff --combined tools/perf/perf.c
index 57cb107c1f13291a7c94272d473bfea5862f6281,89eae67a358ee41fbc806df623d85402ebd947a9..cd32c200cdb32169c03369e0f505b8e974822306
@@@ -48,8 -48,7 +48,8 @@@ int check_pager_config(const char *cmd
        return c.val;
  }
  
 -static void commit_pager_choice(void) {
 +static void commit_pager_choice(void)
 +{
        switch (use_pager) {
        case 0:
                setenv("PERF_PAGER", "cat", 1);
@@@ -71,7 -70,7 +71,7 @@@ static void set_debugfs_path(void
                 "tracing/events");
  }
  
 -static int handle_options(const char*** argv, int* argc, int* envchanged)
 +static int handle_options(const char ***argv, int *argc, int *envchanged)
  {
        int handled = 0;
  
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--perf-dir")) {
                        if (*argc < 2) {
 -                              fprintf(stderr, "No directory given for --perf-dir.\n" );
 +                              fprintf(stderr, "No directory given for --perf-dir.\n");
                                usage(perf_usage_string);
                        }
                        setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--work-tree")) {
                        if (*argc < 2) {
 -                              fprintf(stderr, "No directory given for --work-tree.\n" );
 +                              fprintf(stderr, "No directory given for --work-tree.\n");
                                usage(perf_usage_string);
                        }
                        setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
@@@ -169,7 -168,7 +169,7 @@@ static int handle_alias(int *argcp, con
  {
        int envchanged = 0, ret = 0, saved_errno = errno;
        int count, option_count;
 -      const char** new_argv;
 +      const char **new_argv;
        const char *alias_command;
        char *alias_string;
  
                if (!strcmp(alias_command, new_argv[0]))
                        die("recursive alias: %s", alias_command);
  
 -              new_argv = realloc(new_argv, sizeof(char*) *
 +              new_argv = realloc(new_argv, sizeof(char *) *
                                    (count + *argcp + 1));
                /* insert after command name */
 -              memcpy(new_argv + count, *argv + 1, sizeof(char*) * *argcp);
 -              new_argv[count+*argcp] = NULL;
 +              memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
 +              new_argv[count + *argcp] = NULL;
  
                *argv = new_argv;
                *argcp += count - 1;
@@@ -286,7 -285,6 +286,7 @@@ static void handle_internal_command(in
  {
        const char *cmd = argv[0];
        static struct cmd_struct commands[] = {
 +              { "buildid-cache", cmd_buildid_cache, 0 },
                { "buildid-list", cmd_buildid_list, 0 },
                { "diff",       cmd_diff,       0 },
                { "help",       cmd_help,       0 },
                { "sched",      cmd_sched,      0 },
                { "probe",      cmd_probe,      0 },
                { "kmem",       cmd_kmem,       0 },
 +              { "lock",       cmd_lock,       0 },
        };
        unsigned int i;
        static const char ext[] = STRIP_EXTENSION;
@@@ -391,7 -388,7 +391,7 @@@ static int run_argv(int *argcp, const c
  /* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
  static void get_debugfs_mntpt(void)
  {
 -      const char *path = debugfs_find_mountpoint();
 +      const char *path = debugfs_mount(NULL);
  
        if (path)
                strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt));
@@@ -445,15 -442,15 +445,15 @@@ int main(int argc, const char **argv
  
        /*
         * We use PATH to find perf commands, but we prepend some higher
-        * precidence paths: the "--exec-path" option, the PERF_EXEC_PATH
+        * precedence paths: the "--exec-path" option, the PERF_EXEC_PATH
         * environment, and the $(perfexecdir) from the Makefile at build
         * time.
         */
        setup_path();
  
        while (1) {
 -              static int done_help = 0;
 -              static int was_alias = 0;
 +              static int done_help;
 +              static int was_alias;
  
                was_alias = run_argv(&argc, &argv);
                if (errno != ENOENT)