Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless
author John W. Linville <linville@tuxdriver.com>
Mon, 18 Mar 2013 13:39:21 +0000 (09:39 -0400)
committer John W. Linville <linville@tuxdriver.com>
Mon, 18 Mar 2013 13:39:21 +0000 (09:39 -0400)
Conflicts:
net/nfc/llcp/llcp.c

101 files changed:
Documentation/device-mapper/dm-raid.txt
Documentation/networking/tuntap.txt
MAINTAINERS
arch/powerpc/crypto/sha1-powerpc-asm.S
arch/powerpc/include/asm/bitops.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/platforms/pseries/hvcserver.c
drivers/bcma/driver_pci_host.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/char/hw_random/core.c
drivers/connector/cn_proc.c
drivers/gpio/gpio-ich.c
drivers/gpio/gpiolib.c
drivers/isdn/hisax/st5481_usb.c
drivers/md/Kconfig
drivers/md/dm-raid.c
drivers/md/md.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid10.h
drivers/md/raid5.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_hw.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/regs.h
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_hwmon.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/phy/micrel.c
drivers/net/phy/phy_device.c
drivers/net/usb/Kconfig
drivers/net/usb/Makefile
drivers/net/usb/asix_devices.c
drivers/net/usb/ax88179_178a.c [new file with mode: 0644]
drivers/net/usb/cdc_ncm.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/tty/hvc/hvcs.c
include/linux/hardirq.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/smpboot.h
include/net/tcp.h
kernel/smpboot.c
kernel/softirq.c
kernel/stop_machine.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/core/dev.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/tcp_input.c
net/ipv6/ip6_input.c
net/ipv6/route.c
net/irda/ircomm/ircomm_tty.c
net/irda/iriap.c
net/l2tp/l2tp_ppp.c
net/netfilter/ipset/ip_set_core.c
net/nfc/llcp/llcp.c
net/nfc/llcp/sock.c
net/rds/message.c
net/sched/sch_qfq.c
net/sctp/endpointola.c
net/sctp/socket.c
net/sctp/ssnmap.c
net/sctp/tsnmap.c
net/sctp/ulpqueue.c

diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 56fb62b09fc59ad757fc81de6ad6e478b3b0184b..b428556197c99a0eea19d3b040bfbe3c0ffcf06e 100644
@@ -30,6 +30,7 @@ The target is named "raid" and it accepts the following parameters:
   raid10        Various RAID10 inspired algorithms chosen by additional params
                - RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
                - RAID1E: Integrated Adjacent Stripe Mirroring
+               - RAID1E: Integrated Offset Stripe Mirroring
                -  and other similar RAID10 variants
 
   Reference: Chapter 4 of
@@ -64,15 +65,15 @@ The target is named "raid" and it accepts the following parameters:
                synchronisation state for each region.
 
         [raid10_copies   <# copies>]
-        [raid10_format   near]
+        [raid10_format   <near|far|offset>]
                These two options are used to alter the default layout of
                a RAID10 configuration.  The number of copies is can be
-               specified, but the default is 2.  There are other variations
-               to how the copies are laid down - the default and only current
-               option is "near".  Near copies are what most people think of
-               with respect to mirroring.  If these options are left
-               unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
-               are given, then the layouts for 2, 3 and 4 devices are:
+               specified, but the default is 2.  There are also three
+               variations to how the copies are laid down - the default
+               is "near".  Near copies are what most people think of with
+               respect to mirroring.  If these options are left unspecified,
+               or 'raid10_copies 2' and/or 'raid10_format near' are given,
+               then the layouts for 2, 3 and 4 devices are:
                2 drives         3 drives          4 drives
                --------         ----------        --------------
                A1  A1           A1  A1  A2        A1  A1  A2  A2
@@ -85,6 +86,33 @@ The target is named "raid" and it accepts the following parameters:
                3-device layout is what might be called a 'RAID1E - Integrated
                Adjacent Stripe Mirroring'.
 
+               If 'raid10_copies 2' and 'raid10_format far', then the layouts
+               for 2, 3 and 4 devices are:
+               2 drives             3 drives             4 drives
+               --------             --------------       --------------------
+               A1  A2               A1   A2   A3         A1   A2   A3   A4
+               A3  A4               A4   A5   A6         A5   A6   A7   A8
+               A5  A6               A7   A8   A9         A9   A10  A11  A12
+               ..  ..               ..   ..   ..         ..   ..   ..   ..
+               A2  A1               A3   A1   A2         A2   A1   A4   A3
+               A4  A3               A6   A4   A5         A6   A5   A8   A7
+               A6  A5               A9   A7   A8         A10  A9   A12  A11
+               ..  ..               ..   ..   ..         ..   ..   ..   ..
+
+               If 'raid10_copies 2' and 'raid10_format offset', then the
+               layouts for 2, 3 and 4 devices are:
+               2 drives       3 drives           4 drives
+               --------       ------------       -----------------
+               A1  A2         A1  A2  A3         A1  A2  A3  A4
+               A2  A1         A3  A1  A2         A2  A1  A4  A3
+               A3  A4         A4  A5  A6         A5  A6  A7  A8
+               A4  A3         A6  A4  A5         A6  A5  A8  A7
+               A5  A6         A7  A8  A9         A9  A10 A11 A12
+               A6  A5         A9  A7  A8         A10 A9  A12 A11
+               ..  ..         ..  ..  ..         ..  ..  ..  ..
+               Here we see layouts closely akin to 'RAID1E - Integrated
+               Offset Stripe Mirroring'.
+
 <#raid_devs>: The number of devices composing the array.
        Each device consists of two entries.  The first is the device
        containing the metadata (if any); the second is the one containing the
@@ -142,3 +170,5 @@ Version History
 1.3.0  Added support for RAID 10
 1.3.1  Allow device replacement/rebuild for RAID 10
 1.3.2   Fix/improve redundancy checking for RAID10
+1.4.0  Non-functional change.  Removes arg from mapping function.
+1.4.1   Add RAID10 "far" and "offset" algorithm support.
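
As a quick illustration of the options documented above, a dm-raid table line
for a 4-device RAID10 array using the new "far" format could look like the
sketch below; the device names, the 41943040-sector (20 GiB) length and the
2048-sector (1 MiB) chunk size are placeholders, not values taken from this
change:

    0 41943040 raid raid10 5 2048 raid10_copies 2 raid10_format far \
        4 - /dev/sda1 - /dev/sdb1 - /dev/sdc1 - /dev/sdd1

The five raid parameters are the chunk size plus the two optional raid10_*
option pairs, and the '-' entries indicate that no separate metadata devices
are used.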
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index c0aab985bad9ac4a159ea9f96e837bee28475291..949d5dcdd9a348fd646f89c62f604d9d29433d2c 100644
@@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
      Proto [2 bytes]
      Raw protocol(IP, IPv6, etc) frame.
 
+  3.3 Multiqueue tuntap interface:
+
+  From Linux 3.8 onward, the kernel supports multiqueue tuntap, which can use
+  multiple file descriptors (queues) to parallelize packet sending and
+  receiving. The device allocation is the same as before, but if the user
+  wants to create multiple queues, TUNSETIFF must be called many times with
+  the same device name and with the IFF_MULTI_QUEUE flag set.
+
+  char *dev should be the name of the device, queues is the number of queues
+  to be created, and fds is used to return the created file descriptors
+  (queues) to the caller. Each file descriptor serves as the interface to a
+  queue that can be accessed by userspace.
+
+  #include <linux/if.h>
+  #include <linux/if_tun.h>
+
+  int tun_alloc_mq(char *dev, int queues, int *fds)
+  {
+      struct ifreq ifr;
+      int fd, err, i;
+
+      if (!dev)
+          return -1;
+
+      memset(&ifr, 0, sizeof(ifr));
+      /* Flags: IFF_TUN   - TUN device (no Ethernet headers)
+       *        IFF_TAP   - TAP device
+       *
+       *        IFF_NO_PI - Do not provide packet information
+       *        IFF_MULTI_QUEUE - Create a queue of multiqueue device
+       */
+      ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
+      strcpy(ifr.ifr_name, dev);
+
+      for (i = 0; i < queues; i++) {
+          if ((fd = open("/dev/net/tun", O_RDWR)) < 0)
+             goto err;
+          err = ioctl(fd, TUNSETIFF, (void *)&ifr);
+          if (err) {
+             close(fd);
+             goto err;
+          }
+          fds[i] = fd;
+      }
+
+      return 0;
+  err:
+      for (--i; i >= 0; i--)
+          close(fds[i]);
+      return err;
+  }
+
+  A new ioctl(TUNSETQUEUE) was introduced to enable or disable a queue. When
+  it is called with the IFF_DETACH_QUEUE flag, the queue is disabled; when it
+  is called with the IFF_ATTACH_QUEUE flag, the queue is enabled. A queue is
+  enabled by default after it is created through TUNSETIFF.
+
+  fd is the file descriptor (queue) that we want to enable or disable; when
+  enable is true we enable it, otherwise we disable it.
+
+  #include <linux/if.h>
+  #include <linux/if_tun.h>
+
+  int tun_set_queue(int fd, int enable)
+  {
+      struct ifreq ifr;
+
+      memset(&ifr, 0, sizeof(ifr));
+
+      if (enable)
+         ifr.ifr_flags = IFF_ATTACH_QUEUE;
+      else
+         ifr.ifr_flags = IFF_DETACH_QUEUE;
+
+      return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
+  }
+
 Universal TUN/TAP device driver Frequently Asked Question.
    
 1. What platforms are supported by TUN/TAP driver ?
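
  A minimal userspace sketch showing how the two helpers above might be used
  together follows. It assumes tun_alloc_mq() and tun_set_queue() are compiled
  into the same program (together with the <string.h>, <fcntl.h>,
  <sys/ioctl.h> and <unistd.h> headers they rely on); the "tap0" name and the
  queue count of 4 are arbitrary examples.

  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
      char dev[IFNAMSIZ] = "tap0";   /* arbitrary interface name */
      int fds[4];
      int i;

      /* Create a multiqueue tap device backed by 4 queues. */
      if (tun_alloc_mq(dev, 4, fds) != 0) {
          perror("tun_alloc_mq");
          return 1;
      }

      /* Temporarily detach queue 0, then attach it again. */
      if (tun_set_queue(fds[0], 0) < 0)
          perror("TUNSETQUEUE (detach)");
      if (tun_set_queue(fds[0], 1) < 0)
          perror("TUNSETQUEUE (attach)");

      for (i = 0; i < 4; i++)
          close(fds[i]);
      return 0;
  }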
diff --git a/MAINTAINERS b/MAINTAINERS
index 7f68b5e0ce6b2818220ab50ee816a8b710096f44..143507e7f6dffbe589915d0eccf1eb9d56847a9d 100644
@@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)
 
                -----------------------------------
 
-3C505 NETWORK DRIVER
-M:     Philip Blundell <philb@gnu.org>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/ethernet/i825xx/3c505*
-
 3C59X NETWORK DRIVER
 M:     Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
 L:     netdev@vger.kernel.org
@@ -2361,12 +2355,6 @@ W:       http://www.arm.linux.org.uk/
 S:     Maintained
 F:     drivers/video/cyber2000fb.*
 
-CYCLADES 2X SYNC CARD DRIVER
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-W:     http://oops.ghostprotocols.net:81/blog
-S:     Maintained
-F:     drivers/net/wan/cycx*
-
 CYCLADES ASYNC MUX DRIVER
 W:     http://www.cyclades.com/
 S:     Orphan
@@ -3067,12 +3055,6 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
 F:     drivers/video/s1d13xxxfb.c
 F:     include/video/s1d13xxxfb.h
 
-ETHEREXPRESS-16 NETWORK DRIVER
-M:     Philip Blundell <philb@gnu.org>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/ethernet/i825xx/eexpress.*
-
 ETHERNET BRIDGE
 M:     Stephen Hemminger <stephen@networkplumber.org>
 L:     bridge@lists.linux-foundation.org
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index a5f8264d2d3c3c58d5755af56d5e911fc3670212..125e16520061289aff815417fd7aa18ec41e3df7 100644
        STEPUP4((t)+16, fn)
 
 _GLOBAL(powerpc_sha_transform)
-       PPC_STLU r1,-STACKFRAMESIZE(r1)
+       PPC_STLU r1,-INT_FRAME_SIZE(r1)
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
 
@@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
 
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)
-       addi    r1,r1,STACKFRAMESIZE
+       addi    r1,r1,INT_FRAME_SIZE
        blr
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index ef918a2328bba044173e61a799d23f8ae3cf2576..08bd299c75b113d1dd55ea827853bb616fdb5899 100644
@@ -52,8 +52,6 @@
 #define smp_mb__before_clear_bit()     smp_mb()
 #define smp_mb__after_clear_bit()      smp_mb()
 
-#define BITOP_LE_SWIZZLE       ((BITS_PER_LONG-1) & ~0x7)
-
 /* Macro for generating the ***_bits() functions */
 #define DEFINE_BITOP(fn, op, prefix, postfix)  \
 static __inline__ void fn(unsigned long mask,  \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e6658612203010aec0d3ba6ebb1f9c646e639575..c9c67fc888c93d229b23a617d0fd9c2aa7a71e79 100644
 #define SPRN_HSRR0     0x13A   /* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1     0x13B   /* Hypervisor Save/Restore 1 */
 #define SPRN_FSCR      0x099   /* Facility Status & Control Register */
-#define FSCR_TAR       (1<<8)  /* Enable Target Adress Register */
+#define   FSCR_TAR     (1 << (63-55)) /* Enable Target Address Register */
+#define   FSCR_DSCR    (1 << (63-61)) /* Enable Data Stream Control Register */
 #define SPRN_TAR       0x32f   /* Target Address Register */
 #define SPRN_LPCR      0x13E   /* LPAR Control Register */
 #define   LPCR_VPM0    (1ul << (63-0))
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 535b6d8a41ccae912590c0ec14d2fb355f0f4800..ebbec52d21bd69c634a547a82d707f1d7244b2e0 100644
@@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
 COMPAT_SYS(process_vm_readv)
 COMPAT_SYS(process_vm_writev)
 SYSCALL(finit_module)
+SYSCALL(ni_syscall) /* sys_kcmp */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f25b5c45c4359632d2b5259438173f493bfb6810..1487f0f12293bfd31ecdc1816072c52fc12d1b11 100644
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          354
+#define __NR_syscalls          355
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 8c478c6c6b1e41c7090b06a8b52815f7d537ff89..74cb4d72d6739baafba86eeb4fe1d591f5dd99fd 100644
 #define __NR_process_vm_readv  351
 #define __NR_process_vm_writev 352
 #define __NR_finit_module      353
+#define __NR_kcmp              354
 
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index d29facbf9a288490a7552331e498eea259eb713f..ea847abb0d0a5fae6738c1bed0dc863d78358ded 100644
@@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
 
 _GLOBAL(__setup_cpu_power8)
        mflr    r11
+       bl      __init_FSCR
        bl      __init_hvmode_206
        mtlr    r11
        beqlr
@@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
        mfspr   r3,SPRN_LPCR
        oris    r3, r3, LPCR_AIL_3@h
        bl      __init_LPCR
-       bl      __init_FSCR
        bl      __init_TLB
        mtlr    r11
        blr
 
 _GLOBAL(__restore_cpu_power8)
        mflr    r11
+       bl      __init_FSCR
        mfmsr   r3
        rldicl. r0,r3,4,63
        beqlr
@@ -115,7 +116,7 @@ __init_LPCR:
 
 __init_FSCR:
        mfspr   r3,SPRN_FSCR
-       ori     r3,r3,FSCR_TAR
+       ori     r3,r3,FSCR_TAR|FSCR_DSCR
        mtspr   SPRN_FSCR,r3
        blr
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a8a5361fb70c716b74fe6abd880ce877be52c10a..87ef8f5ee5bc52242af82aec893b274a6a8ec47d 100644
@@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                              \
        mflr    r10 ;                                           \
        ld      r12,PACAKBASE(r13) ;                            \
        LOAD_HANDLER(r12, system_call_entry_direct) ;           \
-       mtlr    r12 ;                                           \
+       mtctr   r12 ;                                           \
        mfspr   r12,SPRN_SRR1 ;                                 \
        /* Re-use of r13... No spare regs to do this */ \
        li      r13,MSR_RI ;                                    \
        mtmsrd  r13,1 ;                                         \
        GET_PACA(r13) ; /* get r13 back */                      \
-       blr ;
+       bctr ;
 #else
        /* We can branch directly */
 #define SYSCALL_PSERIES_2_DIRECT                               \
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index fcf4b4cbeaf331aa7986b0b705bc4bbe2148ee7b..4557e91626c43bd58d95a1730456557eab3aee2f 100644
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 
 #include <asm/hvcall.h>
 #include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
                        = (unsigned int)last_p_partition_ID;
 
                /* copy the Null-term char too */
-               strncpy(&next_partner_info->location_code[0],
+               strlcpy(&next_partner_info->location_code[0],
                        (char *)&pi_buff[2],
-                       strlen((char *)&pi_buff[2]) + 1);
+                       sizeof(next_partner_info->location_code));
 
                list_add_tail(&(next_partner_info->node), head);
                next_partner_info = NULL;
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index d3bde6cec927643bdf02445e387e97d4d5f29d69..30629a3d44cc517b62ea2fff5f8a65e505266b81 100644
@@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
                return;
        }
 
+       spin_lock_init(&pc_host->cfgspace_lock);
+
        pc->host_controller = pc_host;
        pc_host->pci_controller.io_resource = &pc_host->io_resource;
        pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a8a41e07a221695684fd78770980749b25ff2b5a..b282af181b44dc974daab7ef4941e9373b833506 100644
@@ -74,8 +74,10 @@ static struct usb_device_id ath3k_table[] = {
 
        /* Atheros AR3012 with sflash firmware*/
        { USB_DEVICE(0x0CF3, 0x3004) },
+       { USB_DEVICE(0x0CF3, 0x3008) },
        { USB_DEVICE(0x0CF3, 0x311D) },
        { USB_DEVICE(0x13d3, 0x3375) },
+       { USB_DEVICE(0x04CA, 0x3004) },
        { USB_DEVICE(0x04CA, 0x3005) },
        { USB_DEVICE(0x04CA, 0x3006) },
        { USB_DEVICE(0x04CA, 0x3008) },
@@ -106,8 +108,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
 
        /* Atheros AR3012 with sflash firmware*/
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7e351e345476c1bcc2141f95e41fe43a3e65e612..e547851870e74811d71aa7511d5be022a0a862ab 100644
@@ -132,8 +132,10 @@ static struct usb_device_id blacklist_table[] = {
 
        /* Atheros 3012 with sflash firmware */
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 1bafb40ec8a213480f355b480819f092fdfebd08..69ae5972713cf814e968c3c9d9ad8743d3dd240d 100644
@@ -40,6 +40,7 @@
 #include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 
 
@@ -52,8 +53,12 @@ static struct hwrng *current_rng;
 static LIST_HEAD(rng_list);
 static DEFINE_MUTEX(rng_mutex);
 static int data_avail;
-static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
-       __cacheline_aligned;
+static u8 *rng_buffer;
+
+static size_t rng_buffer_size(void)
+{
+       return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
+}
 
 static inline int hwrng_init(struct hwrng *rng)
 {
@@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 
                if (!data_avail) {
                        bytes_read = rng_get_data(current_rng, rng_buffer,
-                               sizeof(rng_buffer),
+                               rng_buffer_size(),
                                !(filp->f_flags & O_NONBLOCK));
                        if (bytes_read < 0) {
                                err = bytes_read;
@@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
 
        mutex_lock(&rng_mutex);
 
+       /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+       err = -ENOMEM;
+       if (!rng_buffer) {
+               rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+               if (!rng_buffer)
+                       goto out_unlock;
+       }
+
        /* Must not register two RNGs with the same name. */
        err = -EEXIST;
        list_for_each_entry(tmp, &rng_list, list) {
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index fce2000eec31d658efb52251b7f6df60822d1798..1110478dd0fdb83b6bbdbae5fa1f40c0a2406655 100644
@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
            (task_active_pid_ns(current) != &init_pid_ns))
                return;
 
+       /* Can only change if privileged. */
+       if (!capable(CAP_NET_ADMIN)) {
+               err = EPERM;
+               goto out;
+       }
+
        mc_op = (enum proc_cn_mcast_op *)msg->data;
        switch (*mc_op) {
        case PROC_CN_MCAST_LISTEN:
@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
                err = EINVAL;
                break;
        }
+
+out:
        cn_proc_ack(err, msg->seq, msg->ack);
 }
 
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 6f2306db85915eca2690895eda7e8f6b375091ab..f9dbd503fc40fb0f1bccd0fa8f0995816a06b538 100644
@@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
        return data & (1 << bit) ? 1 : 0;
 }
 
-static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
+static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
 {
-       return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO;
+       return ichx_priv.use_gpio & (1 << (nr / 32));
 }
 
 static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index fff9786cdc643120f933154366237a8d6b332e8d..c2534d62911cfd18434c9b2bb172cad265603e0d 100644
@@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
 static void gpiod_free(struct gpio_desc *desc);
 static int gpiod_direction_input(struct gpio_desc *desc);
 static int gpiod_direction_output(struct gpio_desc *desc, int value);
+static int gpiod_get_direction(const struct gpio_desc *desc);
 static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
-static int gpiod_get_value_cansleep(struct gpio_desc *desc);
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
 static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-static int gpiod_get_value(struct gpio_desc *desc);
+static int gpiod_get_value(const struct gpio_desc *desc);
 static void gpiod_set_value(struct gpio_desc *desc, int value);
-static int gpiod_cansleep(struct gpio_desc *desc);
-static int gpiod_to_irq(struct gpio_desc *desc);
+static int gpiod_cansleep(const struct gpio_desc *desc);
+static int gpiod_to_irq(const struct gpio_desc *desc);
 static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
 static int gpiod_export_link(struct device *dev, const char *name,
                             struct gpio_desc *desc);
@@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
        return 0;
 }
 
-/* caller holds gpio_lock *OR* gpio is marked as requested */
-static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
+static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
 {
-       return desc->chip;
+       return desc ? desc->chip : NULL;
 }
 
+/* caller holds gpio_lock *OR* gpio is marked as requested */
 struct gpio_chip *gpio_to_chip(unsigned gpio)
 {
        return gpiod_to_chip(gpio_to_desc(gpio));
@@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
 }
 
 /* caller ensures gpio is valid and requested, chip->get_direction may sleep  */
-static int gpiod_get_direction(struct gpio_desc *desc)
+static int gpiod_get_direction(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        unsigned                offset;
@@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
        if (status > 0) {
                /* GPIOF_DIR_IN, or other positive */
                status = 1;
-               clear_bit(FLAG_IS_OUT, &desc->flags);
+               /* FLAG_IS_OUT is just a cache of the result of get_direction(),
+                * so it does not affect constness per se */
+               clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
        }
        if (status == 0) {
                /* GPIOF_DIR_OUT */
-               set_bit(FLAG_IS_OUT, &desc->flags);
+               set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
        }
        return status;
 }
@@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
 static ssize_t gpio_direction_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct gpio_desc        *desc = dev_get_drvdata(dev);
+       const struct gpio_desc  *desc = dev_get_drvdata(dev);
        ssize_t                 status;
 
        mutex_lock(&sysfs_lock);
@@ -654,6 +657,11 @@ static ssize_t export_store(struct class *class,
                goto done;
 
        desc = gpio_to_desc(gpio);
+       /* reject invalid GPIOs */
+       if (!desc) {
+               pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+               return -EINVAL;
+       }
 
        /* No extra locking here; FLAG_SYSFS just signifies that the
         * request and export were done by on behalf of userspace, so
@@ -690,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
        if (status < 0)
                goto done;
 
-       status = -EINVAL;
-
        desc = gpio_to_desc(gpio);
        /* reject bogus commands (gpio_unexport ignores them) */
-       if (!desc)
-               goto done;
+       if (!desc) {
+               pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+               return -EINVAL;
+       }
+
+       status = -EINVAL;
 
        /* No extra locking here; FLAG_SYSFS just signifies that the
         * request and export were done by on behalf of userspace, so
@@ -846,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
 {
        int                     status = -EINVAL;
 
-       if (!desc)
-               goto done;
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
 
        mutex_lock(&sysfs_lock);
 
@@ -865,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,
 
        mutex_unlock(&sysfs_lock);
 
-done:
        if (status)
                pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
                         status);
@@ -896,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
        struct device           *dev = NULL;
        int                     status = -EINVAL;
 
-       if (!desc)
-               goto done;
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
 
        mutex_lock(&sysfs_lock);
 
@@ -914,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
 unlock:
        mutex_unlock(&sysfs_lock);
 
-done:
        if (status)
                pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
                         status);
@@ -940,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
        struct device           *dev = NULL;
 
        if (!desc) {
-               status = -EINVAL;
-               goto done;
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return;
        }
 
        mutex_lock(&sysfs_lock);
@@ -962,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
                device_unregister(dev);
                put_device(dev);
        }
-done:
+
        if (status)
                pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
                         status);
@@ -1384,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
        int                     status = -EPROBE_DEFER;
        unsigned long           flags;
 
-       spin_lock_irqsave(&gpio_lock, flags);
-
        if (!desc) {
-               status = -EINVAL;
-               goto done;
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
        }
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
        chip = desc->chip;
        if (chip == NULL)
                goto done;
@@ -1432,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
 done:
        if (status)
                pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
-                        desc ? desc_to_gpio(desc) : -1,
-                        label ? : "?", status);
+                        desc_to_gpio(desc), label ? : "?", status);
        spin_unlock_irqrestore(&gpio_lock, flags);
        return status;
 }
@@ -1616,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
        int                     status = -EINVAL;
        int                     offset;
 
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
+
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (!desc)
-               goto fail;
        chip = desc->chip;
        if (!chip || !chip->get || !chip->direction_input)
                goto fail;
@@ -1655,13 +1670,9 @@ lose:
        return status;
 fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       if (status) {
-               int gpio = -1;
-               if (desc)
-                       gpio = desc_to_gpio(desc);
-               pr_debug("%s: gpio-%d status %d\n",
-                       __func__, gpio, status);
-       }
+       if (status)
+               pr_debug("%s: gpio-%d status %d\n", __func__,
+                        desc_to_gpio(desc), status);
        return status;
 }
 
@@ -1678,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
        int                     status = -EINVAL;
        int offset;
 
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
+
        /* Open drain pin should not be driven to 1 */
        if (value && test_bit(FLAG_OPEN_DRAIN,  &desc->flags))
                return gpiod_direction_input(desc);
@@ -1688,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
 
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (!desc)
-               goto fail;
        chip = desc->chip;
        if (!chip || !chip->set || !chip->direction_output)
                goto fail;
@@ -1725,13 +1739,9 @@ lose:
        return status;
 fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       if (status) {
-               int gpio = -1;
-               if (desc)
-                       gpio = desc_to_gpio(desc);
-               pr_debug("%s: gpio-%d status %d\n",
-                       __func__, gpio, status);
-       }
+       if (status)
+               pr_debug("%s: gpio-%d status %d\n", __func__,
+                        desc_to_gpio(desc), status);
        return status;
 }
 
@@ -1753,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
        int                     status = -EINVAL;
        int                     offset;
 
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
+
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (!desc)
-               goto fail;
        chip = desc->chip;
        if (!chip || !chip->set || !chip->set_debounce)
                goto fail;
@@ -1776,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 
 fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       if (status) {
-               int gpio = -1;
-               if (desc)
-                       gpio = desc_to_gpio(desc);
-               pr_debug("%s: gpio-%d status %d\n",
-                       __func__, gpio, status);
-       }
+       if (status)
+               pr_debug("%s: gpio-%d status %d\n", __func__,
+                        desc_to_gpio(desc), status);
 
        return status;
 }
@@ -1824,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
  * It returns the zero or nonzero value provided by the associated
  * gpio_chip.get() method; or zero if no such method is provided.
  */
-static int gpiod_get_value(struct gpio_desc *desc)
+static int gpiod_get_value(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        int value;
        int offset;
 
+       if (!desc)
+               return 0;
        chip = desc->chip;
        offset = gpio_chip_hwgpio(desc);
        /* Should be using gpio_get_value_cansleep() */
@@ -1912,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
 {
        struct gpio_chip        *chip;
 
+       if (!desc)
+               return;
        chip = desc->chip;
        /* Should be using gpio_set_value_cansleep() */
        WARN_ON(chip->can_sleep);
@@ -1938,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
  * This is used directly or indirectly to implement gpio_cansleep().  It
  * returns nonzero if access reading or writing the GPIO value can sleep.
  */
-static int gpiod_cansleep(struct gpio_desc *desc)
+static int gpiod_cansleep(const struct gpio_desc *desc)
 {
+       if (!desc)
+               return 0;
        /* only call this on GPIOs that are valid! */
        return desc->chip->can_sleep;
 }
@@ -1959,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
  * It returns the number of the IRQ signaled by this (input) GPIO,
  * or a negative errno.
  */
-static int gpiod_to_irq(struct gpio_desc *desc)
+static int gpiod_to_irq(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        int                     offset;
 
+       if (!desc)
+               return -EINVAL;
        chip = desc->chip;
        offset = gpio_chip_hwgpio(desc);
        return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
@@ -1980,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
  * Common examples include ones connected to I2C or SPI chips.
  */
 
-static int gpiod_get_value_cansleep(struct gpio_desc *desc)
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        int value;
        int offset;
 
        might_sleep_if(extra_checks);
+       if (!desc)
+               return 0;
        chip = desc->chip;
        offset = gpio_chip_hwgpio(desc);
        value = chip->get ? chip->get(chip, offset) : 0;
@@ -2005,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
        struct gpio_chip        *chip;
 
        might_sleep_if(extra_checks);
+       if (!desc)
+               return;
        chip = desc->chip;
        trace_gpio_value(desc_to_gpio(desc), 0, value);
        if (test_bit(FLAG_OPEN_DRAIN,  &desc->flags))
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 017c67ea3f4c7681ef5fb0fbd66b7009b89ed14d..ead0a4fb7448643faa66cb8c3fceee3c2b62d9fd 100644
@@ -294,13 +294,13 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
        // Allocate URBs and buffers for interrupt endpoint
        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb) {
-               return -ENOMEM;
+               goto err1;
        }
        intr->urb = urb;
 
        buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
        if (!buf) {
-               return -ENOMEM;
+               goto err2;
        }
 
        endpoint = &altsetting->endpoint[EP_INT-1];
@@ -313,6 +313,14 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
                         endpoint->desc.bInterval);
 
        return 0;
+err2:
+       usb_free_urb(intr->urb);
+       intr->urb = NULL;
+err1:
+       usb_free_urb(ctrl->urb);
+       ctrl->urb = NULL;
+
+       return -ENOMEM;
 }
 
 /*
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index e30b490055aa8ce3ef3edf2229b6cd2724bf794a..4d8d90b4fe7812ea5169619d3945b0722d46f1ec 100644
@@ -154,17 +154,6 @@ config MD_RAID456
 
          If unsure, say Y.
 
-config MULTICORE_RAID456
-       bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
-       depends on MD_RAID456
-       depends on SMP
-       depends on EXPERIMENTAL
-       ---help---
-         Enable the raid456 module to dispatch per-stripe raid operations to a
-         thread pool.
-
-         If unsure, say N.
-
 config MD_MULTIPATH
        tristate "Multipath I/O support"
        depends on BLK_DEV_MD
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9a01d1e4c78302a0fef677e2c516f2bf3daeca70..311e3d35b272e4ae30fa32b3bca9f567394896ef 100644
@@ -91,15 +91,44 @@ static struct raid_type {
        {"raid6_nc", "RAID6 (N continue)",              2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
 };
 
+static char *raid10_md_layout_to_format(int layout)
+{
+       /*
+        * Bit 16 and 17 stand for "offset" and "use_far_sets"
+        * Refer to MD's raid10.c for details
+        */
+       if ((layout & 0x10000) && (layout & 0x20000))
+               return "offset";
+
+       if ((layout & 0xFF) > 1)
+               return "near";
+
+       return "far";
+}
+
 static unsigned raid10_md_layout_to_copies(int layout)
 {
-       return layout & 0xFF;
+       if ((layout & 0xFF) > 1)
+               return layout & 0xFF;
+       return (layout >> 8) & 0xFF;
 }
 
 static int raid10_format_to_md_layout(char *format, unsigned copies)
 {
-       /* 1 "far" copy, and 'copies' "near" copies */
-       return (1 << 8) | (copies & 0xFF);
+       unsigned n = 1, f = 1;
+
+       if (!strcmp("near", format))
+               n = copies;
+       else
+               f = copies;
+
+       if (!strcmp("offset", format))
+               return 0x30000 | (f << 8) | n;
+
+       if (!strcmp("far", format))
+               return 0x20000 | (f << 8) | n;
+
+       return (f << 8) | n;
 }
 
 static struct raid_type *get_raid_type(char *name)
@@ -352,6 +381,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
 {
        unsigned i, rebuild_cnt = 0;
        unsigned rebuilds_per_group, copies, d;
+       unsigned group_size, last_group_start;
 
        for (i = 0; i < rs->md.raid_disks; i++)
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
@@ -379,9 +409,6 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 * as long as the failed devices occur in different mirror
                 * groups (i.e. different stripes).
                 *
-                * Right now, we only allow for "near" copies.  When other
-                * formats are added, we will have to check those too.
-                *
                 * When checking "near" format, make sure no adjacent devices
                 * have failed beyond what can be handled.  In addition to the
                 * simple case where the number of devices is a multiple of the
@@ -391,14 +418,41 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 *          A    A    B    B    C
                 *          C    D    D    E    E
                 */
-               for (i = 0; i < rs->md.raid_disks * copies; i++) {
-                       if (!(i % copies))
+               if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
+                       for (i = 0; i < rs->md.raid_disks * copies; i++) {
+                               if (!(i % copies))
+                                       rebuilds_per_group = 0;
+                               d = i % rs->md.raid_disks;
+                               if ((!rs->dev[d].rdev.sb_page ||
+                                    !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+                                   (++rebuilds_per_group >= copies))
+                                       goto too_many;
+                       }
+                       break;
+               }
+
+               /*
+                * When checking "far" and "offset" formats, we need to ensure
+                * that the device that holds its copy is not also dead or
+                * being rebuilt.  (Note that "far" and "offset" formats only
+                * support two copies right now.  These formats also only ever
+                * use the 'use_far_sets' variant.)
+                *
+                * This check is somewhat complicated by the need to account
+                * for arrays that are not a multiple of (far) copies.  This
+                * results in the need to treat the last (potentially larger)
+                * set differently.
+                */
+               group_size = (rs->md.raid_disks / copies);
+               last_group_start = (rs->md.raid_disks / group_size) - 1;
+               last_group_start *= group_size;
+               for (i = 0; i < rs->md.raid_disks; i++) {
+                       if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
-                       d = i % rs->md.raid_disks;
-                       if ((!rs->dev[d].rdev.sb_page ||
-                            !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+                       if ((!rs->dev[i].rdev.sb_page ||
+                            !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
                            (++rebuilds_per_group >= copies))
-                               goto too_many;
+                                       goto too_many;
                }
                break;
        default:
@@ -433,7 +487,7 @@ too_many:
  *
  * RAID10-only options:
  *    [raid10_copies <# copies>]        Number of copies.  (Default: 2)
- *    [raid10_format <near>]            Layout algorithm.  (Default: near)
+ *    [raid10_format <near|far|offset>] Layout algorithm.  (Default: near)
  */
 static int parse_raid_params(struct raid_set *rs, char **argv,
                             unsigned num_raid_params)
@@ -520,7 +574,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                                rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
                                return -EINVAL;
                        }
-                       if (strcmp("near", argv[i])) {
+                       if (strcmp("near", argv[i]) &&
+                           strcmp("far", argv[i]) &&
+                           strcmp("offset", argv[i])) {
                                rs->ti->error = "Invalid 'raid10_format' value given";
                                return -EINVAL;
                        }
@@ -644,6 +700,15 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                        return -EINVAL;
                }
 
+               /*
+                * If the format is not "near", we only support
+                * two copies at the moment.
+                */
+               if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
+                       rs->ti->error = "Too many copies for given RAID10 format.";
+                       return -EINVAL;
+               }
+
                /* (Len * #mirrors) / #devices */
                sectors_per_dev = rs->ti->len * raid10_copies;
                sector_div(sectors_per_dev, rs->md.raid_disks);
@@ -854,17 +919,30 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
        /*
         * Reshaping is not currently allowed
         */
-       if ((le32_to_cpu(sb->level) != mddev->level) ||
-           (le32_to_cpu(sb->layout) != mddev->layout) ||
-           (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
-               DMERR("Reshaping arrays not yet supported.");
+       if (le32_to_cpu(sb->level) != mddev->level) {
+               DMERR("Reshaping arrays not yet supported. (RAID level change)");
+               return -EINVAL;
+       }
+       if (le32_to_cpu(sb->layout) != mddev->layout) {
+               DMERR("Reshaping arrays not yet supported. (RAID layout change)");
+               DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
+               DMERR("  Old layout: %s w/ %d copies",
+                     raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
+                     raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
+               DMERR("  New layout: %s w/ %d copies",
+                     raid10_md_layout_to_format(mddev->layout),
+                     raid10_md_layout_to_copies(mddev->layout));
+               return -EINVAL;
+       }
+       if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+               DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
                return -EINVAL;
        }
 
        /* We can only change the number of devices in RAID1 right now */
        if ((rs->raid_type->level != 1) &&
            (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
-               DMERR("Reshaping arrays not yet supported.");
+               DMERR("Reshaping arrays not yet supported. (device count change)");
                return -EINVAL;
        }
 
@@ -1329,7 +1407,8 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                               raid10_md_layout_to_copies(rs->md.layout));
 
                if (rs->print_flags & DMPF_RAID10_FORMAT)
-                       DMEMIT(" raid10_format near");
+                       DMEMIT(" raid10_format %s",
+                              raid10_md_layout_to_format(rs->md.layout));
 
                DMEMIT(" %d", rs->md.raid_disks);
                for (i = 0; i < rs->md.raid_disks; i++) {
@@ -1418,6 +1497,10 @@ static struct target_type raid_target = {
 
 static int __init dm_raid_init(void)
 {
+       DMINFO("Loading target version %u.%u.%u",
+              raid_target.version[0],
+              raid_target.version[1],
+              raid_target.version[2]);
        return dm_register_target(&raid_target);
 }
 
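
For reference, the raid10 layout word produced by raid10_format_to_md_layout()
above packs the near-copy count into byte 0, the far-copy count into byte 1,
the "offset" flag into bit 16 and "use_far_sets" into bit 17.  A small,
self-contained userspace sketch (illustration only, not kernel code) that
mirrors this encoding:

	#include <stdio.h>
	#include <string.h>

	/* Userspace re-implementation of the encoding above. */
	static int format_to_layout(const char *format, unsigned copies)
	{
		unsigned n = 1, f = 1;

		if (!strcmp("near", format))
			n = copies;
		else
			f = copies;

		if (!strcmp("offset", format))
			return 0x30000 | (f << 8) | n;	/* use_far_sets + far_offset */

		if (!strcmp("far", format))
			return 0x20000 | (f << 8) | n;	/* use_far_sets, no offset */

		return (f << 8) | n;			/* plain "near" layout */
	}

	int main(void)
	{
		/* With two copies this prints 0x102, 0x20201 and 0x30201. */
		printf("near:   0x%x\n", format_to_layout("near", 2));
		printf("far:    0x%x\n", format_to_layout("far", 2));
		printf("offset: 0x%x\n", format_to_layout("offset", 2));
		return 0;
	}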
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3db3d1b271f7ef18650a227a8dd07dbc2b8fefbc..fcb878f88796c8ab884da058b36ea5f287fe2eab 100644
@@ -307,6 +307,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
                bio_io_error(bio);
                return;
        }
+       if (mddev->ro == 1 && unlikely(rw == WRITE)) {
+               bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
+               return;
+       }
        smp_rmb(); /* Ensure implications of  'active' are visible */
        rcu_read_lock();
        if (mddev->suspended) {
@@ -2994,6 +2998,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
                } else if (!sectors)
                        sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
                                rdev->data_offset;
+               if (!my_mddev->pers->resize)
+                       /* Cannot change size for RAID0 or Linear etc */
+                       return -EINVAL;
        }
        if (sectors < my_mddev->dev_sectors)
                return -EINVAL; /* component must fit device */
@@ -6525,7 +6532,17 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        mddev->ro = 0;
                        sysfs_notify_dirent_safe(mddev->sysfs_state);
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                       md_wakeup_thread(mddev->thread);
+                       /* mddev_unlock will wake thread */
+                       /* If a device failed while we were read-only, we
+                        * need to make sure the metadata is updated now.
+                        */
+                       if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+                               mddev_unlock(mddev);
+                               wait_event(mddev->sb_wait,
+                                          !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
+                                          !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+                               mddev_lock(mddev);
+                       }
                } else {
                        err = -EROFS;
                        goto abort_unlock;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 24b359717a7e8917a4955e36651e6eb4009dc393..0505452de8d6ee2b3533c7930d62df544636b0c3 100644
@@ -175,7 +175,13 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
                        rdev1->new_raid_disk = j;
                }
 
-               if (j < 0 || j >= mddev->raid_disks) {
+               if (j < 0) {
+                       printk(KERN_ERR
+                              "md/raid0:%s: remove inactive devices before converting to RAID0\n",
+                              mdname(mddev));
+                       goto abort;
+               }
+               if (j >= mddev->raid_disks) {
                        printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
                               "aborting!\n", mdname(mddev), j);
                        goto abort;
@@ -289,7 +295,7 @@ abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
-       *private_conf = NULL;
+       *private_conf = ERR_PTR(err);
        return err;
 }
 
@@ -411,7 +417,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
                  "%s does not support generic reshape\n", __func__);
 
        rdev_for_each(rdev, mddev)
-               array_sectors += rdev->sectors;
+               array_sectors += (rdev->sectors &
+                                 ~(sector_t)(mddev->chunk_sectors-1));
 
        return array_sectors;
 }
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d5bddfc4010e4f77ea6d28a3f8fde5ff304bcc6e..fd86b372692db6d6bd5c6afdd498ae16be49f2c3 100644
@@ -967,6 +967,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
                spin_unlock_irq(&conf->device_lock);
+               wake_up(&conf->wait_barrier);
                md_wakeup_thread(mddev->thread);
                kfree(plug);
                return;
@@ -1000,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
+       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
        struct raid1_plug_cb *plug = NULL;
@@ -1301,7 +1303,8 @@ read_again:
                                   conf->mirrors[i].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
-               mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
+               mbio->bi_rw =
+                       WRITE | do_flush_fua | do_sync | do_discard | do_same;
                mbio->bi_private = r1_bio;
 
                atomic_inc(&r1_bio->remaining);
@@ -2818,6 +2821,9 @@ static int run(struct mddev *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
+       if (mddev->queue)
+               blk_queue_max_write_same_sectors(mddev->queue,
+                                                mddev->chunk_sectors);
        rdev_for_each(rdev, mddev) {
                if (!mddev->gendisk)
                        continue;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 64d48249c03bf09f73580f4465b59192faa005ab..77b562d18a90c4d27d9a5c43e82518e174ed84c7 100644
  *    near_copies (stored in low byte of layout)
  *    far_copies (stored in second byte of layout)
  *    far_offset (stored in bit 16 of layout )
+ *    use_far_sets (stored in bit 17 of layout )
  *
- * The data to be stored is divided into chunks using chunksize.
- * Each device is divided into far_copies sections.
- * In each section, chunks are laid out in a style similar to raid0, but
- * near_copies copies of each chunk is stored (each on a different drive).
- * The starting device for each section is offset near_copies from the starting
- * device of the previous section.
- * Thus they are (near_copies*far_copies) of each chunk, and each is on a different
- * drive.
- * near_copies and far_copies must be at least one, and their product is at most
- * raid_disks.
+ * The data to be stored is divided into chunks using chunksize.  Each device
+ * is divided into far_copies sections.   In each section, chunks are laid out
+ * in a style similar to raid0, but near_copies copies of each chunk is stored
+ * (each on a different drive).  The starting device for each section is offset
+ * near_copies from the starting device of the previous section.  Thus there
+ * are (near_copies * far_copies) of each chunk, and each is on a different
+ * drive.  near_copies and far_copies must be at least one, and their product
+ * is at most raid_disks.
  *
  * If far_offset is true, then the far_copies are handled a bit differently.
- * The copies are still in different stripes, but instead of be very far apart
- * on disk, there are adjacent stripes.
+ * The copies are still in different stripes, but instead of being very far
+ * apart on disk, there are adjacent stripes.
+ *
+ * The far and offset algorithms are handled slightly differently if
+ * 'use_far_sets' is true.  In this case, the array's devices are grouped into
+ * sets that are (near_copies * far_copies) in size.  The far copied stripes
+ * are still shifted by 'near_copies' devices, but this shifting stays confined
+ * to the set rather than the entire array.  This is done to improve the number
+ * of device combinations that can fail without causing the array to fail.
+ * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
+ * on a device):
+ *    A B C D    A B C D E
+ *      ...         ...
+ *    D A B C    E A B C D
+ * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
+ *    [A B] [C D]    [A B] [C D E]
+ *    |...| |...|    |...| | ... |
+ *    [B A] [D C]    [B A] [E C D]
  */
 
 /*
@@ -535,6 +550,13 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
        sector_t stripe;
        int dev;
        int slot = 0;
+       int last_far_set_start, last_far_set_size;
+
+       last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+       last_far_set_start *= geo->far_set_size;
+
+       last_far_set_size = geo->far_set_size;
+       last_far_set_size += (geo->raid_disks % geo->far_set_size);
 
        /* now calculate first sector/dev */
        chunk = r10bio->sector >> geo->chunk_shift;
@@ -551,15 +573,25 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
        /* and calculate all the others */
        for (n = 0; n < geo->near_copies; n++) {
                int d = dev;
+               int set;
                sector_t s = sector;
-               r10bio->devs[slot].addr = sector;
                r10bio->devs[slot].devnum = d;
+               r10bio->devs[slot].addr = s;
                slot++;
 
                for (f = 1; f < geo->far_copies; f++) {
+                       set = d / geo->far_set_size;
                        d += geo->near_copies;
-                       if (d >= geo->raid_disks)
-                               d -= geo->raid_disks;
+
+                       if ((geo->raid_disks % geo->far_set_size) &&
+                           (d > last_far_set_start)) {
+                               d -= last_far_set_start;
+                               d %= last_far_set_size;
+                               d += last_far_set_start;
+                       } else {
+                               d %= geo->far_set_size;
+                               d += geo->far_set_size * set;
+                       }
                        s += geo->stride;
                        r10bio->devs[slot].devnum = d;
                        r10bio->devs[slot].addr = s;
@@ -595,6 +627,20 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
         * or recovery, so reshape isn't happening
         */
        struct geom *geo = &conf->geo;
+       int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
+       int far_set_size = geo->far_set_size;
+       int last_far_set_start;
+
+       if (geo->raid_disks % geo->far_set_size) {
+               last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+               last_far_set_start *= geo->far_set_size;
+
+               if (dev >= last_far_set_start) {
+                       far_set_size = geo->far_set_size;
+                       far_set_size += (geo->raid_disks % geo->far_set_size);
+                       far_set_start = last_far_set_start;
+               }
+       }
 
        offset = sector & geo->chunk_mask;
        if (geo->far_offset) {
@@ -602,13 +648,13 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
                chunk = sector >> geo->chunk_shift;
                fc = sector_div(chunk, geo->far_copies);
                dev -= fc * geo->near_copies;
-               if (dev < 0)
-                       dev += geo->raid_disks;
+               if (dev < far_set_start)
+                       dev += far_set_size;
        } else {
                while (sector >= geo->stride) {
                        sector -= geo->stride;
-                       if (dev < geo->near_copies)
-                               dev += geo->raid_disks - geo->near_copies;
+                       if (dev < (geo->near_copies + far_set_start))
+                               dev += far_set_size - geo->near_copies;
                        else
                                dev -= geo->near_copies;
                }
@@ -1073,6 +1119,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
                spin_unlock_irq(&conf->device_lock);
+               wake_up(&conf->wait_barrier);
                md_wakeup_thread(mddev->thread);
                kfree(plug);
                return;
@@ -1105,6 +1152,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
+       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
@@ -1460,7 +1508,8 @@ retry_write:
                                                              rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_rw =
+                               WRITE | do_sync | do_fua | do_discard | do_same;
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
@@ -1502,7 +1551,8 @@ retry_write:
                                                   r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_rw =
+                               WRITE | do_sync | do_fua | do_discard | do_same;
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
@@ -3436,7 +3486,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
                disks = mddev->raid_disks + mddev->delta_disks;
                break;
        }
-       if (layout >> 17)
+       if (layout >> 18)
                return -1;
        if (chunk < (PAGE_SIZE >> 9) ||
            !is_power_of_2(chunk))
@@ -3448,6 +3498,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
        geo->near_copies = nc;
        geo->far_copies = fc;
        geo->far_offset = fo;
+       geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
        geo->chunk_mask = chunk - 1;
        geo->chunk_shift = ffz(~chunk);
        return nc*fc;
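
[Editorial aside, not part of the patch: setup_geo() above parses the RAID10
"layout" word.  By the conventional md encoding (an assumption stated here,
since the extraction of nc/fc/fo sits outside this hunk), the low byte holds
the near-copy count, the next byte the far-copy count and bit 16 the
far_offset flag; the hunk adds bit 17 to request the new far-set grouping
and rejects any higher bits via "layout >> 18".  The stand-alone decoder
below only illustrates that reading.]

    #include <stdio.h>

    /* Hypothetical decoder of an md RAID10 layout word, following the
     * assumed encoding above plus the new bit 17 from this patch. */
    static void decode_layout(unsigned int layout, int disks)
    {
            int nc = layout & 255;           /* near copies  */
            int fc = (layout >> 8) & 255;    /* far copies   */
            int fo = !!(layout & (1 << 16)); /* far_offset   */
            int fs;                          /* far_set_size */

            if ((layout >> 18) || !nc || !fc) {
                    printf("0x%x: invalid layout\n", layout);
                    return;
            }
            fs = (layout & (1 << 17)) ? disks / fc : disks;
            printf("0x%x: near=%d far=%d offset=%d far_set_size=%d\n",
                   layout, nc, fc, fo, fs);
    }

    int main(void)
    {
            decode_layout(0x102, 10);   /* classic: 2 near copies, 1 far copy */
            decode_layout(0x20201, 10); /* 1 near, 2 far, bit 17 -> sets of 5 */
            decode_layout(0x40000, 10); /* a bit above 17 is set -> rejected  */
            return 0;
    }
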
@@ -3569,6 +3620,8 @@ static int run(struct mddev *mddev)
        if (mddev->queue) {
                blk_queue_max_discard_sectors(mddev->queue,
                                              mddev->chunk_sectors);
+               blk_queue_max_write_same_sectors(mddev->queue,
+                                                mddev->chunk_sectors);
                blk_queue_io_min(mddev->queue, chunk_size);
                if (conf->geo.raid_disks % conf->geo.near_copies)
                        blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
index 1054cf602345250f059ef81fae8bf9ef330cccd1..157d69e83ff401972f395db9bb281faa8df75a65 100644 (file)
@@ -33,6 +33,11 @@ struct r10conf {
                                               * far_offset, in which case it is
                                               * 1 stripe.
                                               */
+               int             far_set_size; /* The number of devices in a set,
+                                              * where a 'set' are devices that
+                                              * contain far/offset copies of
+                                              * each other.
+                                              */
                int             chunk_shift; /* shift from chunks to sectors */
                sector_t        chunk_mask;
        } prev, geo;
index 5af2d270908178b2628a3db42c508417fc4f5579..3ee2912889e7110274acabc28df3c6664abcb0f7 100644 (file)
@@ -1403,7 +1403,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
 }
 
-static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 {
        int overlap_clear = 0, i, disks = sh->disks;
        struct dma_async_tx_descriptor *tx = NULL;
@@ -1468,36 +1468,6 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
        put_cpu();
 }
 
-#ifdef CONFIG_MULTICORE_RAID456
-static void async_run_ops(void *param, async_cookie_t cookie)
-{
-       struct stripe_head *sh = param;
-       unsigned long ops_request = sh->ops.request;
-
-       clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
-       wake_up(&sh->ops.wait_for_ops);
-
-       __raid_run_ops(sh, ops_request);
-       release_stripe(sh);
-}
-
-static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
-{
-       /* since handle_stripe can be called outside of raid5d context
-        * we need to ensure sh->ops.request is de-staged before another
-        * request arrives
-        */
-       wait_event(sh->ops.wait_for_ops,
-                  !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
-       sh->ops.request = ops_request;
-
-       atomic_inc(&sh->count);
-       async_schedule(async_run_ops, sh);
-}
-#else
-#define raid_run_ops __raid_run_ops
-#endif
-
 static int grow_one_stripe(struct r5conf *conf)
 {
        struct stripe_head *sh;
@@ -1506,9 +1476,6 @@ static int grow_one_stripe(struct r5conf *conf)
                return 0;
 
        sh->raid_conf = conf;
-       #ifdef CONFIG_MULTICORE_RAID456
-       init_waitqueue_head(&sh->ops.wait_for_ops);
-       #endif
 
        spin_lock_init(&sh->stripe_lock);
 
@@ -1627,9 +1594,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
                        break;
 
                nsh->raid_conf = conf;
-               #ifdef CONFIG_MULTICORE_RAID456
-               init_waitqueue_head(&nsh->ops.wait_for_ops);
-               #endif
                spin_lock_init(&nsh->stripe_lock);
 
                list_add(&nsh->lru, &newstripes);
index 11d01d67b3f510d7e5d159784aee36f6c612cb12..7bd068a6056a3ef3c66bde04f108d1bbf5dcd926 100644 (file)
@@ -1629,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        /* If this is the first slave, then we need to set the master's hardware
         * address to be the same as the slave's. */
-       if (bond->dev_addr_from_first)
+       if (bond->slave_cnt == 0 && bond->dev_addr_from_first)
                bond_set_dev_addr(bond->dev, slave_dev);
 
        new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
index 639049d7e92de5bda8cd901a711f7cc4626d1302..da5f4397f87c2744e13d4cb3bf0c99fb85d4666a 100644 (file)
@@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
                        bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
                                  ring->start);
                } else {
+                       /* Omit CRC. */
+                       len -= ETH_FCS_LEN;
+
                        new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
                        if (new_skb) {
                                skb_put(new_skb, len);
                                skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
                                                                 new_skb->data,
                                                                 len);
+                               skb_checksum_none_assert(skb);
                                new_skb->protocol =
                                        eth_type_trans(new_skb, bgmac->net_dev);
                                netif_receive_skb(new_skb);
index ecac04a3687c6b2967ccc3e8e1ab78bf2107e4e9..a923bc4d5a1f5540704423215a8f772bd3a8831a 100644 (file)
@@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
                tsum = ~csum_fold(csum_add((__force __wsum) csum,
                                  csum_partial(t_header, -fix, 0)));
 
-       return bswab16(csum);
+       return bswab16(tsum);
 }
 
 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
index 9a674b14b403dc605bfb10c448ea8a175a2614ae..edfa67adf2f975d6b5d6dc382e4cf6d5617a2b3b 100644 (file)
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
                if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
                        cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+               if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+                       cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
        }
 
        cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                                                ADVERTISED_10000baseKR_Full))
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+                       if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
                }
        } else { /* forced speed */
                /* advertise the requested speed and duplex if supported */
index 1663e0b6b5a01f0baee17e6584dc6b65cf07344c..31c5787970dbc342ad2ea4fc959d81baa187d356 100644 (file)
@@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
                                         0x0);
+                       if (phy->type ==
+                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+                               /* Disable MI_INT interrupt before setting LED4
+                                * source to constant off.
+                                */
+                               if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+                                          params->port*4) &
+                                   NIG_MASK_MI_INT) {
+                                       params->link_flags |=
+                                       LINK_FLAGS_INT_DISABLED;
+
+                                       bnx2x_bits_dis(
+                                               bp,
+                                               NIG_REG_MASK_INTERRUPT_PORT0 +
+                                               params->port*4,
+                                               NIG_MASK_MI_INT);
+                               }
+                               bnx2x_cl45_write(bp, phy,
+                                                MDIO_PMA_DEVAD,
+                                                MDIO_PMA_REG_8481_SIGNAL_MASK,
+                                                0x0);
+                       }
                }
                break;
        case LED_MODE_ON:
@@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
                                         0x20);
+                       if (phy->type ==
+                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+                               /* Disable MI_INT interrupt before setting LED4
+                                * source to constant on.
+                                */
+                               if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+                                          params->port*4) &
+                                   NIG_MASK_MI_INT) {
+                                       params->link_flags |=
+                                       LINK_FLAGS_INT_DISABLED;
+
+                                       bnx2x_bits_dis(
+                                               bp,
+                                               NIG_REG_MASK_INTERRUPT_PORT0 +
+                                               params->port*4,
+                                               NIG_MASK_MI_INT);
+                               }
+                               bnx2x_cl45_write(bp, phy,
+                                                MDIO_PMA_DEVAD,
+                                                MDIO_PMA_REG_8481_SIGNAL_MASK,
+                                                0x20);
+                       }
                }
                break;
 
@@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LINK_SIGNAL,
                                         val);
+                       if (phy->type ==
+                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+                               /* Restore LED4 source to external link,
+                                * and re-enable interrupts.
+                                */
+                               bnx2x_cl45_write(bp, phy,
+                                                MDIO_PMA_DEVAD,
+                                                MDIO_PMA_REG_8481_SIGNAL_MASK,
+                                                0x40);
+                               if (params->link_flags &
+                                   LINK_FLAGS_INT_DISABLED) {
+                                       bnx2x_link_int_enable(params);
+                                       params->link_flags &=
+                                               ~LINK_FLAGS_INT_DISABLED;
+                               }
+                       }
                }
                break;
        }
@@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                        phy->media_type = ETH_PHY_KR;
                        phy->flags |= FLAGS_WC_DUAL_MODE;
                        phy->supported &= (SUPPORTED_20000baseKR2_Full |
+                                          SUPPORTED_10000baseT_Full |
+                                          SUPPORTED_1000baseT_Full |
                                           SUPPORTED_Autoneg |
                                           SUPPORTED_FIBRE |
                                           SUPPORTED_Pause |
@@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
                struct bnx2x_phy *phy = &params->phy[INT_PHY];
                bnx2x_set_aer_mmd(params, phy);
                if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
-                   (phy->speed_cap_mask & SPEED_20000))
+                   (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
                        bnx2x_check_kr2_wa(params, vars, phy);
                bnx2x_check_over_curr(params, vars);
                if (vars->rx_tx_asic_rst)
index d25c7d79787a1bacdfb0dc526bc9814fdfab8d2d..be5c195d03dd01addf2706c81896d1872b94d96c 100644 (file)
@@ -307,7 +307,8 @@ struct link_params {
        struct bnx2x *bp;
        u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
                                req_flow_ctrl is set to AUTO */
-       u16 rsrv1;
+       u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED                (1<<0)
        u32 lfa_base;
 };
 
index 28ceb84141851e26e8d49f1175099235a512d54b..29aff55f2eea414bf8dec52d61bee70a034fc12c 100644 (file)
@@ -349,6 +349,7 @@ struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;
 
+       u8 __iomem *csr;        /* CSR BAR used only for BE2/3 */
        u8 __iomem *db;         /* Door Bell */
 
        struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
index 071aea79d218f01eb166d910a310ef0f49efa470..3c9b4f12e3e516ff8775c208a3b138c9acd5c109 100644 (file)
@@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
        return 0;
 }
 
-static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
+static u16 be_POST_stage_get(struct be_adapter *adapter)
 {
        u32 sem;
-       u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
-                                         SLIPORT_SEMAPHORE_OFFSET_BE;
 
-       pci_read_config_dword(adapter->pdev, reg, &sem);
-       *stage = sem & POST_STAGE_MASK;
-
-       if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
-               return -1;
+       if (BEx_chip(adapter))
+               sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
        else
-               return 0;
+               pci_read_config_dword(adapter->pdev,
+                                     SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
+
+       return sem & POST_STAGE_MASK;
 }
 
 int lancer_wait_ready(struct be_adapter *adapter)
@@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
        }
 
        do {
-               status = be_POST_stage_get(adapter, &stage);
-               if (status) {
-                       dev_err(dev, "POST error; stage=0x%x\n", stage);
-                       return -1;
-               } else if (stage != POST_STAGE_ARMFW_RDY) {
-                       if (msleep_interruptible(2000)) {
-                               dev_err(dev, "Waiting for POST aborted\n");
-                               return -EINTR;
-                       }
-                       timeout += 2;
-               } else {
+               stage = be_POST_stage_get(adapter);
+               if (stage == POST_STAGE_ARMFW_RDY)
                        return 0;
+
+               dev_info(dev, "Waiting for POST, %ds elapsed\n",
+                        timeout);
+               if (msleep_interruptible(2000)) {
+                       dev_err(dev, "Waiting for POST aborted\n");
+                       return -EINTR;
                }
+               timeout += 2;
        } while (timeout < 60);
 
        dev_err(dev, "POST timeout; stage=0x%x\n", stage);
index 541d4530d5bfadb2d038b3aa4e233a069cf9c94a..62dc220695f724999c8df269531274ba38ec0b4a 100644 (file)
@@ -32,8 +32,8 @@
 #define MPU_EP_CONTROL                 0
 
 /********** MPU semaphore: used for SH & BE  *************/
-#define SLIPORT_SEMAPHORE_OFFSET_BE            0x7c
-#define SLIPORT_SEMAPHORE_OFFSET_SH            0x94
+#define SLIPORT_SEMAPHORE_OFFSET_BEx           0xac  /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_SH            0x94  /* PCI-CFG offset */
 #define POST_STAGE_MASK                                0x0000FFFF
 #define POST_ERR_MASK                          0x1
 #define POST_ERR_SHIFT                         31
index 3860888ac711a7fe7630cad2c06bb3819eb1f3a0..08e54f3d288bc9a2e23faa8bb954dcb4bd8cf584 100644 (file)
@@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
 {
+       if (adapter->csr)
+               pci_iounmap(adapter->pdev, adapter->csr);
        if (adapter->db)
                pci_iounmap(adapter->pdev, adapter->db);
 }
@@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
        adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                SLI_INTF_IF_TYPE_SHIFT;
 
+       if (BEx_chip(adapter) && be_physfn(adapter)) {
+               adapter->csr = pci_iomap(adapter->pdev, 2, 0);
+               if (adapter->csr == NULL)
+                       return -ENOMEM;
+       }
+
        addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
        if (addr == NULL)
                goto pci_map_err;
@@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
        pci_restore_state(pdev);
 
        /* Check if card is ok and fw is ready */
+       dev_info(&adapter->pdev->dev,
+                "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;
index fccc3bf2141d7a757f97ac307a9f3d5678a4b57c..069a155d16ed40c57da5e70a8088eef767d0690b 100644 (file)
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct bufdesc *bdp;
        void *bufaddr;
        unsigned short  status;
-       unsigned long flags;
+       unsigned int index;
 
        if (!fep->link) {
                /* Link is down or autonegotiation is in progress. */
                return NETDEV_TX_BUSY;
        }
 
-       spin_lock_irqsave(&fep->hw_lock, flags);
        /* Fill in a Tx ring entry */
        bdp = fep->cur_tx;
 
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                 * This should not happen, since ndev->tbusy should be set.
                 */
                printk("%s: tx queue full!.\n", ndev->name);
-               spin_unlock_irqrestore(&fep->hw_lock, flags);
                return NETDEV_TX_BUSY;
        }
 
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         * 4-byte boundaries. Use bounce buffers to copy data
         * and get it aligned. Ugh.
         */
+       if (fep->bufdesc_ex)
+               index = (struct bufdesc_ex *)bdp -
+                       (struct bufdesc_ex *)fep->tx_bd_base;
+       else
+               index = bdp - fep->tx_bd_base;
+
        if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-               unsigned int index;
-               if (fep->bufdesc_ex)
-                       index = (struct bufdesc_ex *)bdp -
-                               (struct bufdesc_ex *)fep->tx_bd_base;
-               else
-                       index = bdp - fep->tx_bd_base;
                memcpy(fep->tx_bounce[index], skb->data, skb->len);
                bufaddr = fep->tx_bounce[index];
        }
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                swap_buffer(bufaddr, skb->len);
 
        /* Save skb pointer */
-       fep->tx_skbuff[fep->skb_cur] = skb;
-
-       ndev->stats.tx_bytes += skb->len;
-       fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+       fep->tx_skbuff[index] = skb;
 
        /* Push the data cache so the CPM does not get stale memory
         * data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        ebdp->cbd_esc = BD_ENET_TX_INT;
                }
        }
-       /* Trigger transmission start */
-       writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
        /* If this was the last BD in the ring, start at the beginning again. */
        if (status & BD_ENET_TX_WRAP)
                bdp = fep->tx_bd_base;
        else
                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
-       if (bdp == fep->dirty_tx) {
-               fep->tx_full = 1;
+       fep->cur_tx = bdp;
+
+       if (fep->cur_tx == fep->dirty_tx)
                netif_stop_queue(ndev);
-       }
 
-       fep->cur_tx = bdp;
+       /* Trigger transmission start */
+       writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
        skb_tx_timestamp(skb);
 
-       spin_unlock_irqrestore(&fep->hw_lock, flags);
-
        return NETDEV_TX_OK;
 }
 
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-       fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
        fep->cur_rx = fep->rx_bd_base;
 
-       /* Reset SKB transmit buffers. */
-       fep->skb_cur = fep->skb_dirty = 0;
        for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                if (fep->tx_skbuff[i]) {
                        dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
+       int     index = 0;
 
        fep = netdev_priv(ndev);
-       spin_lock(&fep->hw_lock);
        bdp = fep->dirty_tx;
 
+       /* get next bdp of dirty_tx */
+       if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+               bdp = fep->tx_bd_base;
+       else
+               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
        while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-               if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+               /* current queue is empty */
+               if (bdp == fep->cur_tx)
                        break;
 
+               if (fep->bufdesc_ex)
+                       index = (struct bufdesc_ex *)bdp -
+                               (struct bufdesc_ex *)fep->tx_bd_base;
+               else
+                       index = bdp - fep->tx_bd_base;
+
                dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
                bdp->cbd_bufaddr = 0;
 
-               skb = fep->tx_skbuff[fep->skb_dirty];
+               skb = fep->tx_skbuff[index];
+
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                   BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
-               fep->tx_skbuff[fep->skb_dirty] = NULL;
-               fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+               fep->tx_skbuff[index] = NULL;
+
+               fep->dirty_tx = bdp;
 
                /* Update pointer to next buffer descriptor to be transmitted */
                if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
 
                /* Since we have freed up a buffer, the ring is no longer full
                 */
-               if (fep->tx_full) {
-                       fep->tx_full = 0;
+               if (fep->dirty_tx != fep->cur_tx) {
                        if (netif_queue_stopped(ndev))
                                netif_wake_queue(ndev);
                }
        }
-       fep->dirty_tx = bdp;
-       spin_unlock(&fep->hw_lock);
+       return;
 }
 
 
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
                int_events = readl(fep->hwp + FEC_IEVENT);
                writel(int_events, fep->hwp + FEC_IEVENT);
 
-               if (int_events & FEC_ENET_RXF) {
+               if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
                        ret = IRQ_HANDLED;
 
                        /* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
                        }
                }
 
-               /* Transmit OK, or non-fatal error. Update the buffer
-                * descriptors. FEC handles all errors, we just discover
-                * them as part of the transmit process.
-                */
-               if (int_events & FEC_ENET_TXF) {
-                       ret = IRQ_HANDLED;
-                       fec_enet_tx(ndev);
-               }
-
                if (int_events & FEC_ENET_MII) {
                        ret = IRQ_HANDLED;
                        complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
        int pkts = fec_enet_rx(ndev, budget);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       fec_enet_tx(ndev);
+
        if (pkts < budget) {
                napi_complete(napi);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
 
        /* ...and the same for transmit */
        bdp = fep->tx_bd_base;
+       fep->cur_tx = bdp;
        for (i = 0; i < TX_RING_SIZE; i++) {
 
                /* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
        /* Set the last buffer to wrap */
        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
        bdp->cbd_sc |= BD_SC_WRAP;
+       fep->dirty_tx = bdp;
 
        fec_restart(ndev, 0);
 
index 01579b8e37c4f979ea668b28538be4e22e255baa..f5390071efd0622832686d753f501c49039906c2 100644 (file)
@@ -97,6 +97,13 @@ struct bufdesc {
        unsigned short cbd_sc;  /* Control and status info */
        unsigned long cbd_bufaddr;      /* Buffer address */
 };
+#else
+struct bufdesc {
+       unsigned short  cbd_sc;                 /* Control and status info */
+       unsigned short  cbd_datlen;             /* Data length */
+       unsigned long   cbd_bufaddr;            /* Buffer address */
+};
+#endif
 
 struct bufdesc_ex {
        struct bufdesc desc;
@@ -107,14 +114,6 @@ struct bufdesc_ex {
        unsigned short res0[4];
 };
 
-#else
-struct bufdesc {
-       unsigned short  cbd_sc;                 /* Control and status info */
-       unsigned short  cbd_datlen;             /* Data length */
-       unsigned long   cbd_bufaddr;            /* Buffer address */
-};
-#endif
-
 /*
  *     The following definitions courtesy of commproc.h, which were
  *     Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@@ -214,8 +213,6 @@ struct fec_enet_private {
        unsigned char *tx_bounce[TX_RING_SIZE];
        struct  sk_buff *tx_skbuff[TX_RING_SIZE];
        struct  sk_buff *rx_skbuff[RX_RING_SIZE];
-       ushort  skb_cur;
-       ushort  skb_dirty;
 
        /* CPM dual port RAM relative addresses */
        dma_addr_t      bd_dma;
@@ -227,7 +224,6 @@ struct fec_enet_private {
        /* The ring entries to be free()ed */
        struct bufdesc  *dirty_tx;
 
-       uint    tx_full;
        /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
        spinlock_t hw_lock;
 
index dff7bff8b8e0f1d5ee4e6ca4af510516b3d7fd49..121a865c7fbd12855b32b5dff2883332f3b9485b 100644 (file)
@@ -781,6 +781,59 @@ release:
        return ret_val;
 }
 
+/**
+ *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
+ *  @hw:   pointer to the HW structure
+ *  @link: link up bool flag
+ *
+ *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications,
+ *  preventing further DMA write requests.  Work around the issue by disabling
+ *  the de-assertion of the clock request when in 1Gbps mode.
+ **/
+static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
+{
+       u32 fextnvm6 = er32(FEXTNVM6);
+       s32 ret_val = 0;
+
+       if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
+               u16 kmrn_reg;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       return ret_val;
+
+               ret_val =
+                   e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+                                               &kmrn_reg);
+               if (ret_val)
+                       goto release;
+
+               ret_val =
+                   e1000e_write_kmrn_reg_locked(hw,
+                                                E1000_KMRNCTRLSTA_K1_CONFIG,
+                                                kmrn_reg &
+                                                ~E1000_KMRNCTRLSTA_K1_ENABLE);
+               if (ret_val)
+                       goto release;
+
+               usleep_range(10, 20);
+
+               ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
+
+               ret_val =
+                   e1000e_write_kmrn_reg_locked(hw,
+                                                E1000_KMRNCTRLSTA_K1_CONFIG,
+                                                kmrn_reg);
+release:
+               hw->phy.ops.release(hw);
+       } else {
+               /* clear FEXTNVM6 bit 8 on link down or 10/100 */
+               ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+       }
+
+       return ret_val;
+}
+
 /**
  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
  *  @hw: pointer to the HW structure
@@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        return ret_val;
        }
 
+       /* Work-around I218 hang issue */
+       if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+               ret_val = e1000_k1_workaround_lpt_lp(hw, link);
+               if (ret_val)
+                       return ret_val;
+       }
+
        /* Clear link partner's EEE ability */
        hw->dev_spec.ich8lan.eee_lp_ability = 0;
 
@@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 
        phy_ctrl = er32(PHY_CTRL);
        phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
        if (hw->phy.type == e1000_phy_i217) {
-               u16 phy_reg;
+               u16 phy_reg, device_id = hw->adapter->pdev->device;
+
+               if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+                   (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+                       u32 fextnvm6 = er32(FEXTNVM6);
+
+                       ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+               }
 
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
index b6d3174d7d2d737ff599b2f3f17d6bb7bb1b7cc6..8bf4655c2e17f25a26b70bb6c0ad268034bf8c04 100644 (file)
@@ -92,6 +92,8 @@
 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
 
+#define E1000_FEXTNVM6_REQ_PLL_CLK     0x00000100
+
 #define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES  7
index 794fe14976667059b3cb33924f49dec84cde2aef..a7e6a3e37257b34f200ba01c3525fd3269964943 100644 (file)
@@ -42,6 +42,7 @@
 #define E1000_FEXTNVM  0x00028 /* Future Extended NVM - RW */
 #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
 #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
 #define E1000_FCT      0x00030 /* Flow Control Type - RW */
 #define E1000_VET      0x00038 /* VLAN Ether Type - RW */
index 84e7e0909def4bb866411851b2a8ceecffeb1212..b64542acfa3449bda2ee5c793d83de6cda1f097c 100644 (file)
@@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
        switch (hw->phy.type) {
        case e1000_phy_i210:
        case e1000_phy_m88:
-               if (hw->phy.id == I347AT4_E_PHY_ID ||
-                   hw->phy.id == M88E1112_E_PHY_ID)
+               switch (hw->phy.id) {
+               case I347AT4_E_PHY_ID:
+               case M88E1112_E_PHY_ID:
+               case I210_I_PHY_ID:
                        ret_val = igb_copper_link_setup_m88_gen2(hw);
-               else
+                       break;
+               default:
                        ret_val = igb_copper_link_setup_m88(hw);
+                       break;
+               }
                break;
        case e1000_phy_igp_3:
                ret_val = igb_copper_link_setup_igp(hw);
index d27edbc63923a2f2429ef85d3e7713c50f8e8469..25151401c2abe54dcec7fe1fe6f48791e0660a64 100644 (file)
@@ -447,7 +447,7 @@ struct igb_adapter {
 #endif
        struct i2c_algo_bit_data i2c_algo;
        struct i2c_adapter i2c_adap;
-       struct igb_i2c_client_list *i2c_clients;
+       struct i2c_client *i2c_client;
 };
 
 #define IGB_FLAG_HAS_MSI               (1 << 0)
index 0a9b073d0b033ea7e03f745e4983e048804daf1f..4623502054d5347b2723811fe1cdca0d1e93ae0e 100644 (file)
 #include <linux/pci.h>
 
 #ifdef CONFIG_IGB_HWMON
+struct i2c_board_info i350_sensor_info = {
+       I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
+};
+
 /* hwmon callback functions */
 static ssize_t igb_hwmon_show_location(struct device *dev,
                                         struct device_attribute *attr,
@@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
        unsigned int i;
        int n_attrs;
        int rc = 0;
+       struct i2c_client *client = NULL;
 
        /* If this method isn't defined we don't support thermals */
        if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
                if (rc)
                        goto exit;
 
+       /* init i2c_client */
+       client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+       if (client == NULL) {
+               dev_info(&adapter->pdev->dev,
+                       "Failed to create new i2c device..\n");
+               goto exit;
+       }
+       adapter->i2c_client = client;
+
        /* Allocation space for max attributes
         * max num sensors * values (loc, temp, max, caution)
         */
index ed79a1c53b59b0c101f1e5a2c2363f67f1fe0b04..4dbd62968c7a18090a61029249c04f6ee3b1335e 100644 (file)
@@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
        return;
 }
 
-static const struct i2c_board_info i350_sensor_info = {
-       I2C_BOARD_INFO("i350bb", 0Xf8),
-};
-
 /*  igb_init_i2c - Init I2C interface
  *  @adapter: pointer to adapter structure
  *
@@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
        /* If we spanned a buffer we have a huge mess so test for it */
        BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
 
-       /* Guarantee this function can be used by verifying buffer sizes */
-       BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
-                                                       NET_IP_ALIGN +
-                                                       IGB_TS_HDR_LEN +
-                                                       ETH_FRAME_LEN +
-                                                       ETH_FCS_LEN));
-
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
        page = rx_buffer->page;
        prefetchw(page);
@@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
        }
 }
 
-static DEFINE_SPINLOCK(i2c_clients_lock);
-
-/*  igb_get_i2c_client - returns matching client
- *  in adapters's client list.
- *  @adapter: adapter struct
- *  @dev_addr: device address of i2c needed.
- */
-static struct i2c_client *
-igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
-{
-       ulong flags;
-       struct igb_i2c_client_list *client_list;
-       struct i2c_client *client = NULL;
-       struct i2c_board_info client_info = {
-               I2C_BOARD_INFO("igb", 0x00),
-       };
-
-       spin_lock_irqsave(&i2c_clients_lock, flags);
-       client_list = adapter->i2c_clients;
-
-       /* See if we already have an i2c_client */
-       while (client_list) {
-               if (client_list->client->addr == (dev_addr >> 1)) {
-                       client = client_list->client;
-                       goto exit;
-               } else {
-                       client_list = client_list->next;
-               }
-       }
-
-       /* no client_list found, create a new one */
-       client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
-       if (client_list == NULL)
-               goto exit;
-
-       /* dev_addr passed to us is left-shifted by 1 bit
-        * i2c_new_device call expects it to be flush to the right.
-        */
-       client_info.addr = dev_addr >> 1;
-       client_info.platform_data = adapter;
-       client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
-       if (client_list->client == NULL) {
-               dev_info(&adapter->pdev->dev,
-                       "Failed to create new i2c device..\n");
-               goto err_no_client;
-       }
-
-       /* insert new client at head of list */
-       client_list->next = adapter->i2c_clients;
-       adapter->i2c_clients = client_list;
-
-       client = client_list->client;
-       goto exit;
-
-err_no_client:
-       kfree(client_list);
-exit:
-       spin_unlock_irqrestore(&i2c_clients_lock, flags);
-       return client;
-}
-
 /*  igb_read_i2c_byte - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to read
@@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                                u8 dev_addr, u8 *data)
 {
        struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
-       struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+       struct i2c_client *this_client = adapter->i2c_client;
        s32 status;
        u16 swfw_mask = 0;
 
@@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                                 u8 dev_addr, u8 data)
 {
        struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
-       struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+       struct i2c_client *this_client = adapter->i2c_client;
        s32 status;
        u16 swfw_mask = E1000_SWFW_PHY0_SM;
 
index 8900398ba103929135278c1046e50126d627eabd..28fb50a1e9c350851b3844592bf85adea877f604 100644 (file)
@@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 
-       rtl_tx_performance_tweak(pdev,
-               (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+       if (tp->dev->mtu <= ETH_DATA_LEN) {
+               rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
+                                        PCI_EXP_DEVCTL_NOSNOOP_EN);
+       }
 }
 
 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
 
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        rtl_disable_clock_request(pdev);
 
@@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
 
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
 
        rtl_csi_access_enable_1(tp);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
@@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
@@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
index bf57b3cb16abdbc1e89e8aaa83c99a9c47888552..0bc00991d31010cf1ec4efd95ec95afba8aa5d53 100644 (file)
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
                                                tx_queue->txd.entries);
        }
 
+       efx_device_detach_sync(efx);
        efx_stop_all(efx);
        efx_stop_interrupts(efx, true);
 
@@ -832,6 +833,7 @@ out:
 
        efx_start_interrupts(efx, true);
        efx_start_all(efx);
+       netif_device_attach(efx->net_dev);
        return rc;
 
 rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
        /* Flush efx_mac_work(), refill_workqueue, monitor_work */
        efx_flush_all(efx);
 
-       /* Stop the kernel transmit interface late, so the watchdog
-        * timer isn't ticking over the flush */
+       /* Stop the kernel transmit interface.  This is only valid if
+        * the device is stopped or detached; otherwise the watchdog
+        * may fire immediately.
+        */
+       WARN_ON(netif_running(efx->net_dev) &&
+               netif_device_present(efx->net_dev));
        netif_tx_disable(efx->net_dev);
 
        efx_stop_datapath(efx);
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
        if (new_mtu > EFX_MAX_MTU)
                return -EINVAL;
 
-       efx_stop_all(efx);
-
        netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
+       efx_device_detach_sync(efx);
+       efx_stop_all(efx);
+
        mutex_lock(&efx->mac_lock);
        net_dev->mtu = new_mtu;
        efx->type->reconfigure_mac(efx);
        mutex_unlock(&efx->mac_lock);
 
        efx_start_all(efx);
+       netif_device_attach(efx->net_dev);
        return 0;
 }
 
index 50247dfe8f574d2a7184c8ea675b5ecabdcc37cc..d2f790df6dcbf6872084c49d74a8a63efb58f569 100644 (file)
@@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
         * TX scheduler is stopped when we're done and before
         * netif_device_present() becomes false.
         */
-       netif_tx_lock(dev);
+       netif_tx_lock_bh(dev);
        netif_device_detach(dev);
-       netif_tx_unlock(dev);
+       netif_tx_unlock_bh(dev);
 }
 
 #endif /* EFX_EFX_H */
index 2d756c1d71425823100f572065e224a59ca16813..0a90abd2421b62540d7e2fb2888caa0a4a3abf3a 100644 (file)
@@ -210,6 +210,7 @@ struct efx_tx_queue {
  *     Will be %NULL if the buffer slot is currently free.
  * @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
  *     Will be %NULL if the buffer slot is currently free.
+ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
  * @len: Buffer length, in bytes.
  * @flags: Flags for buffer and packet state.
  */
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
                struct sk_buff *skb;
                struct page *page;
        } u;
-       unsigned int len;
+       u16 page_offset;
+       u16 len;
        u16 flags;
 };
 #define EFX_RX_BUF_PAGE                0x0001
index d780a0d096b4c5b0b87036c6404089309592d43b..bb579a6128c8ce883ac7156ea24ac541d4901e38 100644 (file)
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
                                             struct efx_rx_buffer *buf)
 {
-       /* Offset is always within one page, so we don't need to consider
-        * the page order.
-        */
-       return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
-               efx->type->rx_buffer_hash_size;
+       return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
+       unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                state->dma_addr = dma_addr;
 
                dma_addr += sizeof(struct efx_rx_page_state);
+               page_offset = sizeof(struct efx_rx_page_state);
 
        split:
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->u.page = page;
+               rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                rx_buf->flags = EFX_RX_BUF_PAGE;
                ++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                        /* Use the second half of the page */
                        get_page(page);
                        dma_addr += (PAGE_SIZE >> 1);
+                       page_offset += (PAGE_SIZE >> 1);
                        ++count;
                        goto split;
                }
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-                               struct efx_rx_buffer *rx_buf)
+                               struct efx_rx_buffer *rx_buf,
+                               unsigned int used_len)
 {
        if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       DMA_FROM_DEVICE);
+               } else if (used_len) {
+                       dma_sync_single_for_cpu(&efx->pci_dev->dev,
+                                               rx_buf->dma_addr, used_len,
+                                               DMA_FROM_DEVICE);
                }
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
 {
-       efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+       efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                goto out;
        }
 
-       /* Release card resources - assumes all RX buffers consumed in-order
-        * per RX queue
+       /* Release and/or sync DMA mapping - assumes all RX buffers
+        * consumed in-order per RX queue
         */
-       efx_unmap_rx_buffer(efx, rx_buf);
+       efx_unmap_rx_buffer(efx, rx_buf, len);
 
        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
index 7e93df6585e7fd7cce9db2ba3f9ed3187f2b699c..01ffbc48698298d1678b478599c1b46f2ee55ef9 100644 (file)
@@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
 
        writel(vlan, &priv->host_port_regs->port_vlan);
 
-       for (i = 0; i < 2; i++)
+       for (i = 0; i < priv->data.slaves; i++)
                slave_write(priv->slaves + i, vlan, reg);
 
        cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
index 29934446436ac0e6dd7df8d80797a3b4351d77ac..abf7b6153d00b8527b997e4e947b48a63449af4b 100644 (file)
@@ -257,8 +257,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9021,
        .phy_id_mask    = 0x000ffffe,
        .name           = "Micrel KSZ9021 Gigabit PHY",
-       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
-                               | SUPPORTED_Asym_Pause),
+       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
index 9930f999956172fa20ad5e5a34d3db5297fe9c12..3657b4a29124b57bc335417a1389f792aaf2ce79 100644 (file)
@@ -44,13 +44,13 @@ MODULE_LICENSE("GPL");
 
 void phy_device_free(struct phy_device *phydev)
 {
-       kfree(phydev);
+       put_device(&phydev->dev);
 }
 EXPORT_SYMBOL(phy_device_free);
 
 static void phy_device_release(struct device *dev)
 {
-       phy_device_free(to_phy_device(dev));
+       kfree(to_phy_device(dev));
 }
 
 static struct phy_driver genphy_driver;
@@ -201,6 +201,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
           there's no driver _already_ loaded. */
        request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
 
+       device_initialize(&dev->dev);
+
        return dev;
 }
 EXPORT_SYMBOL(phy_device_create);
@@ -363,9 +365,9 @@ int phy_device_register(struct phy_device *phydev)
        /* Run all of the fixups for this PHY */
        phy_scan_fixups(phydev);
 
-       err = device_register(&phydev->dev);
+       err = device_add(&phydev->dev);
        if (err) {
-               pr_err("phy %d failed to register\n", phydev->addr);
+               pr_err("PHY %d failed to add\n", phydev->addr);
                goto out;
        }
 
index da92ed3797aa32e763c1496a5dce9cca4a273bd5..3b6e9b83342db08c28f318c38253d315ae60317c 100644 (file)
@@ -156,6 +156,24 @@ config USB_NET_AX8817X
          This driver creates an interface named "ethX", where X depends on
          what other networking devices you have in use.
 
+config USB_NET_AX88179_178A
+       tristate "ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet"
+       depends on USB_USBNET
+       select CRC32
+       select PHYLIB
+       default y
+       help
+         This option adds support for ASIX AX88179 based USB 3.0/2.0
+         to Gigabit Ethernet adapters.
+
+         This driver should work with at least the following devices:
+           * ASIX AX88179
+           * ASIX AX88178A
+           * Sitecom LN-032
+
+         This driver creates an interface named "ethX", where X depends on
+         what other networking devices you have in use.
+
 config USB_NET_CDCETHER
        tristate "CDC Ethernet support (smart devices such as cable modems)"
        depends on USB_USBNET

index 478691326f37fc3303199a07dd9e83ae087615be..119b06c9aa167235571ccc8649e25191497e505d 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150)       += rtl8150.o
 obj-$(CONFIG_USB_HSO)          += hso.o
 obj-$(CONFIG_USB_NET_AX8817X)  += asix.o
 asix-y := asix_devices.o asix_common.o ax88172a.o
+obj-$(CONFIG_USB_NET_AX88179_178A)      += ax88179_178a.o
 obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
 obj-$(CONFIG_USB_NET_CDC_EEM)  += cdc_eem.o
 obj-$(CONFIG_USB_NET_DM9601)   += dm9601.o
index 2205dbc8d32fc2bbcb74e5c82047913edc3fdd24..709753469099c032aad55c3b98da120a0f930b2e 100644 (file)
@@ -924,6 +924,29 @@ static const struct driver_info ax88178_info = {
        .tx_fixup = asix_tx_fixup,
 };
 
+/*
+ * USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
+ * no-name packaging.
+ * USB device strings are:
+ *   1: Manufacturer: USBLINK
+ *   2: Product: HG20F9 USB2.0
+ *   3: Serial: 000003
+ * Appears to be compatible with Asix 88772B.
+ */
+static const struct driver_info hg20f9_info = {
+       .description = "HG20F9 USB 2.0 Ethernet",
+       .bind = ax88772_bind,
+       .unbind = ax88772_unbind,
+       .status = asix_status,
+       .link_reset = ax88772_link_reset,
+       .reset = ax88772_reset,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+                FLAG_MULTI_PACKET,
+       .rx_fixup = asix_rx_fixup_common,
+       .tx_fixup = asix_tx_fixup,
+       .data = FLAG_EEPROM_MAC,
+};
+
 extern const struct driver_info ax88172a_info;
 
 static const struct usb_device_id      products [] = {
@@ -1063,6 +1086,14 @@ static const struct usb_device_id        products [] = {
        /* ASIX 88172a demo board */
        USB_DEVICE(0x0b95, 0x172a),
        .driver_info = (unsigned long) &ax88172a_info,
+}, {
+       /*
+        * USBLINK HG20F9 "USB 2.0 LAN"
+        * Appears to have gazumped Linksys's manufacturer ID but
+        * doesn't (yet) conflict with any known Linksys product.
+        */
+       USB_DEVICE(0x066b, 0x20f9),
+       .driver_info = (unsigned long) &hg20f9_info,
 },
        { },            // END
 };
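
For context on the new HG20F9 entry, usbnet-based drivers recover the per-device configuration from the matched ID table entry at probe time. A rough, illustrative sketch (example_probe is not part of this patch; it assumes <linux/usb.h> and <linux/usb/usbnet.h>):

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *prod)
{
	/* driver_info holds the pointer stored in the table, e.g.
	 * &hg20f9_info for VID 0x066b / PID 0x20f9 */
	const struct driver_info *info =
		(const struct driver_info *)prod->driver_info;

	dev_info(&intf->dev, "matched %s\n", info->description);
	return usbnet_probe(intf, prod);
}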
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
new file mode 100644 (file)
index 0000000..71c27d8
--- /dev/null
@@ -0,0 +1,1448 @@
+/*
+ * ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet Devices
+ *
+ * Copyright (C) 2011-2013 ASIX
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+
+#define AX88179_PHY_ID                         0x03
+#define AX_EEPROM_LEN                          0x100
+#define AX88179_EEPROM_MAGIC                   0x17900b95
+#define AX_MCAST_FLTSIZE                       8
+#define AX_MAX_MCAST                           64
+#define AX_INT_PPLS_LINK                       ((u32)BIT(16))
+#define AX_RXHDR_L4_TYPE_MASK                  0x1c
+#define AX_RXHDR_L4_TYPE_UDP                   4
+#define AX_RXHDR_L4_TYPE_TCP                   16
+#define AX_RXHDR_L3CSUM_ERR                    2
+#define AX_RXHDR_L4CSUM_ERR                    1
+#define AX_RXHDR_CRC_ERR                       ((u32)BIT(31))
+#define AX_RXHDR_DROP_ERR                      ((u32)BIT(30))
+#define AX_ACCESS_MAC                          0x01
+#define AX_ACCESS_PHY                          0x02
+#define AX_ACCESS_EEPROM                       0x04
+#define AX_ACCESS_EFUS                         0x05
+#define AX_PAUSE_WATERLVL_HIGH                 0x54
+#define AX_PAUSE_WATERLVL_LOW                  0x55
+
+#define PHYSICAL_LINK_STATUS                   0x02
+       #define AX_USB_SS               0x04
+       #define AX_USB_HS               0x02
+
+#define GENERAL_STATUS                         0x03
+/* Check AX88179 version. UA1:Bit2 = 0,  UA2:Bit2 = 1 */
+       #define AX_SECLD                0x04
+
+#define AX_SROM_ADDR                           0x07
+#define AX_SROM_CMD                            0x0a
+       #define EEP_RD                  0x04
+       #define EEP_BUSY                0x10
+
+#define AX_SROM_DATA_LOW                       0x08
+#define AX_SROM_DATA_HIGH                      0x09
+
+#define AX_RX_CTL                              0x0b
+       #define AX_RX_CTL_DROPCRCERR    0x0100
+       #define AX_RX_CTL_IPE           0x0200
+       #define AX_RX_CTL_START         0x0080
+       #define AX_RX_CTL_AP            0x0020
+       #define AX_RX_CTL_AM            0x0010
+       #define AX_RX_CTL_AB            0x0008
+       #define AX_RX_CTL_AMALL         0x0002
+       #define AX_RX_CTL_PRO           0x0001
+       #define AX_RX_CTL_STOP          0x0000
+
+#define AX_NODE_ID                             0x10
+#define AX_MULFLTARY                           0x16
+
+#define AX_MEDIUM_STATUS_MODE                  0x22
+       #define AX_MEDIUM_GIGAMODE      0x01
+       #define AX_MEDIUM_FULL_DUPLEX   0x02
+       #define AX_MEDIUM_ALWAYS_ONE    0x04
+       #define AX_MEDIUM_EN_125MHZ     0x08
+       #define AX_MEDIUM_RXFLOW_CTRLEN 0x10
+       #define AX_MEDIUM_TXFLOW_CTRLEN 0x20
+       #define AX_MEDIUM_RECEIVE_EN    0x100
+       #define AX_MEDIUM_PS            0x200
+       #define AX_MEDIUM_JUMBO_EN      0x8040
+
+#define AX_MONITOR_MOD                         0x24
+       #define AX_MONITOR_MODE_RWLC    0x02
+       #define AX_MONITOR_MODE_RWMP    0x04
+       #define AX_MONITOR_MODE_PMEPOL  0x20
+       #define AX_MONITOR_MODE_PMETYPE 0x40
+
+#define AX_GPIO_CTRL                           0x25
+       #define AX_GPIO_CTRL_GPIO3EN    0x80
+       #define AX_GPIO_CTRL_GPIO2EN    0x40
+       #define AX_GPIO_CTRL_GPIO1EN    0x20
+
+#define AX_PHYPWR_RSTCTL                       0x26
+       #define AX_PHYPWR_RSTCTL_BZ     0x0010
+       #define AX_PHYPWR_RSTCTL_IPRL   0x0020
+       #define AX_PHYPWR_RSTCTL_AT     0x1000
+
+#define AX_RX_BULKIN_QCTRL                     0x2e
+#define AX_CLK_SELECT                          0x33
+       #define AX_CLK_SELECT_BCS       0x01
+       #define AX_CLK_SELECT_ACS       0x02
+       #define AX_CLK_SELECT_ULR       0x08
+
+#define AX_RXCOE_CTL                           0x34
+       #define AX_RXCOE_IP             0x01
+       #define AX_RXCOE_TCP            0x02
+       #define AX_RXCOE_UDP            0x04
+       #define AX_RXCOE_TCPV6          0x20
+       #define AX_RXCOE_UDPV6          0x40
+
+#define AX_TXCOE_CTL                           0x35
+       #define AX_TXCOE_IP             0x01
+       #define AX_TXCOE_TCP            0x02
+       #define AX_TXCOE_UDP            0x04
+       #define AX_TXCOE_TCPV6          0x20
+       #define AX_TXCOE_UDPV6          0x40
+
+#define AX_LEDCTRL                             0x73
+
+#define GMII_PHY_PHYSR                         0x11
+       #define GMII_PHY_PHYSR_SMASK    0xc000
+       #define GMII_PHY_PHYSR_GIGA     0x8000
+       #define GMII_PHY_PHYSR_100      0x4000
+       #define GMII_PHY_PHYSR_FULL     0x2000
+       #define GMII_PHY_PHYSR_LINK     0x400
+
+#define GMII_LED_ACT                           0x1a
+       #define GMII_LED_ACTIVE_MASK    0xff8f
+       #define GMII_LED0_ACTIVE        BIT(4)
+       #define GMII_LED1_ACTIVE        BIT(5)
+       #define GMII_LED2_ACTIVE        BIT(6)
+
+#define GMII_LED_LINK                          0x1c
+       #define GMII_LED_LINK_MASK      0xf888
+       #define GMII_LED0_LINK_10       BIT(0)
+       #define GMII_LED0_LINK_100      BIT(1)
+       #define GMII_LED0_LINK_1000     BIT(2)
+       #define GMII_LED1_LINK_10       BIT(4)
+       #define GMII_LED1_LINK_100      BIT(5)
+       #define GMII_LED1_LINK_1000     BIT(6)
+       #define GMII_LED2_LINK_10       BIT(8)
+       #define GMII_LED2_LINK_100      BIT(9)
+       #define GMII_LED2_LINK_1000     BIT(10)
+       #define LED0_ACTIVE             BIT(0)
+       #define LED0_LINK_10            BIT(1)
+       #define LED0_LINK_100           BIT(2)
+       #define LED0_LINK_1000          BIT(3)
+       #define LED0_FD                 BIT(4)
+       #define LED0_USB3_MASK          0x001f
+       #define LED1_ACTIVE             BIT(5)
+       #define LED1_LINK_10            BIT(6)
+       #define LED1_LINK_100           BIT(7)
+       #define LED1_LINK_1000          BIT(8)
+       #define LED1_FD                 BIT(9)
+       #define LED1_USB3_MASK          0x03e0
+       #define LED2_ACTIVE             BIT(10)
+       #define LED2_LINK_1000          BIT(13)
+       #define LED2_LINK_100           BIT(12)
+       #define LED2_LINK_10            BIT(11)
+       #define LED2_FD                 BIT(14)
+       #define LED_VALID               BIT(15)
+       #define LED2_USB3_MASK          0x7c00
+
+#define GMII_PHYPAGE                           0x1e
+#define GMII_PHY_PAGE_SELECT                   0x1f
+       #define GMII_PHY_PGSEL_EXT      0x0007
+       #define GMII_PHY_PGSEL_PAGE0    0x0000
+
+struct ax88179_data {
+       u16 rxctl;
+       u16 reserved;
+};
+
+struct ax88179_int_data {
+       __le32 intdata1;
+       __le32 intdata2;
+};
+
+static const struct {
+       unsigned char ctrl, timer_l, timer_h, size, ifg;
+} AX88179_BULKIN_SIZE[] =      {
+       {7, 0x4f, 0,    0x12, 0xff},
+       {7, 0x20, 3,    0x16, 0xff},
+       {7, 0xae, 7,    0x18, 0xff},
+       {7, 0xcc, 0x4c, 0x18, 8},
+};
+
+static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+                             u16 size, void *data, int in_pm)
+{
+       int ret;
+       int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
+
+       BUG_ON(!dev);
+
+       if (!in_pm)
+               fn = usbnet_read_cmd;
+       else
+               fn = usbnet_read_cmd_nopm;
+
+       ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                value, index, data, size);
+
+       if (unlikely(ret < 0))
+               netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
+                           index, ret);
+
+       return ret;
+}
+
+static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+                              u16 size, void *data, int in_pm)
+{
+       int ret;
+       int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
+
+       BUG_ON(!dev);
+
+       if (!in_pm)
+               fn = usbnet_write_cmd;
+       else
+               fn = usbnet_write_cmd_nopm;
+
+       ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                value, index, data, size);
+
+       if (unlikely(ret < 0))
+               netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
+                           index, ret);
+
+       return ret;
+}
+
+static void ax88179_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
+                                   u16 index, u16 size, void *data)
+{
+       u16 buf;
+
+       if (2 == size) {
+               buf = *((u16 *)data);
+               cpu_to_le16s(&buf);
+               usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+                                      USB_RECIP_DEVICE, value, index, &buf,
+                                      size);
+       } else {
+               usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+                                      USB_RECIP_DEVICE, value, index, data,
+                                      size);
+       }
+}
+
+static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+                                u16 index, u16 size, void *data)
+{
+       int ret;
+
+       if (2 == size) {
+               u16 buf;
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
+               le16_to_cpus(&buf);
+               *((u16 *)data) = buf;
+       } else if (4 == size) {
+               u32 buf;
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
+               le32_to_cpus(&buf);
+               *((u32 *)data) = buf;
+       } else {
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 1);
+       }
+
+       return ret;
+}
+
+static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+                                 u16 index, u16 size, void *data)
+{
+       int ret;
+
+       if (2 == size) {
+               u16 buf;
+               buf = *((u16 *)data);
+               cpu_to_le16s(&buf);
+               ret = __ax88179_write_cmd(dev, cmd, value, index,
+                                         size, &buf, 1);
+       } else {
+               ret = __ax88179_write_cmd(dev, cmd, value, index,
+                                         size, data, 1);
+       }
+
+       return ret;
+}
+
+static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+                           u16 size, void *data)
+{
+       int ret;
+
+       if (2 == size) {
+               u16 buf;
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+               le16_to_cpus(&buf);
+               *((u16 *)data) = buf;
+       } else if (4 == size) {
+               u32 buf;
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+               le32_to_cpus(&buf);
+               *((u32 *)data) = buf;
+       } else {
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 0);
+       }
+
+       return ret;
+}
+
+static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+                            u16 size, void *data)
+{
+       int ret;
+
+       if (2 == size) {
+               u16 buf;
+               buf = *((u16 *)data);
+               cpu_to_le16s(&buf);
+               ret = __ax88179_write_cmd(dev, cmd, value, index,
+                                         size, &buf, 0);
+       } else {
+               ret = __ax88179_write_cmd(dev, cmd, value, index,
+                                         size, data, 0);
+       }
+
+       return ret;
+}
+
+static void ax88179_status(struct usbnet *dev, struct urb *urb)
+{
+       struct ax88179_int_data *event;
+       u32 link;
+
+       if (urb->actual_length < 8)
+               return;
+
+       event = urb->transfer_buffer;
+       le32_to_cpus((void *)&event->intdata1);
+
+       link = (((__force u32)event->intdata1) & AX_INT_PPLS_LINK) >> 16;
+
+       if (netif_carrier_ok(dev->net) != link) {
+               if (link)
+                       usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+               else
+                       netif_carrier_off(dev->net);
+
+               netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
+       }
+}
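
AX_INT_PPLS_LINK is BIT(16), so the expression above reduces the interrupt payload to a 0/1 link indication. A tiny stand-alone illustration of that extraction (the sample value is made up, not captured from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t intdata1 = 0x00010000;			/* link bit set */
	uint32_t link = (intdata1 & (1u << 16)) >> 16;

	printf("link=%u\n", (unsigned)link);		/* prints link=1 */
	return 0;
}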
+
+static int ax88179_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       u16 res;
+
+       ax88179_read_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
+       return res;
+}
+
+static void ax88179_mdio_write(struct net_device *netdev, int phy_id, int loc,
+                              int val)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       u16 res = (u16) val;
+
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
+}
+
+static int ax88179_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       u16 tmp16;
+       u8 tmp8;
+
+       usbnet_suspend(intf, message);
+
+       /* Disable RX path */
+       ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                             2, 2, &tmp16);
+       tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                              2, 2, &tmp16);
+
+       /* Force bulk-in zero length */
+       ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+                             2, 2, &tmp16);
+
+       tmp16 |= AX_PHYPWR_RSTCTL_BZ | AX_PHYPWR_RSTCTL_IPRL;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+                              2, 2, &tmp16);
+
+       /* change clock */
+       tmp8 = 0;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+
+       /* Configure RX control register => stop operation */
+       tmp16 = AX_RX_CTL_STOP;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+       return 0;
+}
+
+/* Enable the auto-detach feature; whether it is enabled */
+/* is determined by offset 0x43 of the EEPROM */
+static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
+{
+       u16 tmp16;
+       u8 tmp8;
+       int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
+       int (*fnw)(struct usbnet *, u8, u16, u16, u16, void *);
+
+       if (!in_pm) {
+               fnr = ax88179_read_cmd;
+               fnw = ax88179_write_cmd;
+       } else {
+               fnr = ax88179_read_cmd_nopm;
+               fnw = ax88179_write_cmd_nopm;
+       }
+
+       if (fnr(dev, AX_ACCESS_EEPROM, 0x43, 1, 2, &tmp16) < 0)
+               return 0;
+
+       if ((tmp16 == 0xFFFF) || (!(tmp16 & 0x0100)))
+               return 0;
+
+       /* Enable Auto Detach bit */
+       tmp8 = 0;
+       fnr(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+       tmp8 |= AX_CLK_SELECT_ULR;
+       fnw(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+
+       fnr(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+       tmp16 |= AX_PHYPWR_RSTCTL_AT;
+       fnw(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+
+       return 0;
+}
+
+static int ax88179_resume(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       u16 tmp16;
+       u8 tmp8;
+
+       netif_carrier_off(dev->net);
+
+       /* Power up ethernet PHY */
+       tmp16 = 0;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+                              2, 2, &tmp16);
+       udelay(1000);
+
+       tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+                              2, 2, &tmp16);
+       msleep(200);
+
+       /* Ethernet PHY auto-detach */
+       ax88179_auto_detach(dev, 1);
+
+       /* Enable clock */
+       ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC,  AX_CLK_SELECT, 1, 1, &tmp8);
+       tmp8 |= AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+       msleep(100);
+
+       /* Configure RX control register => start operation */
+       tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
+               AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+       return usbnet_resume(intf);
+}
+
+static void
+ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       u8 opt;
+
+       if (ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+                            1, 1, &opt) < 0) {
+               wolinfo->supported = 0;
+               wolinfo->wolopts = 0;
+               return;
+       }
+
+       wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+       wolinfo->wolopts = 0;
+       if (opt & AX_MONITOR_MODE_RWLC)
+               wolinfo->wolopts |= WAKE_PHY;
+       if (opt & AX_MONITOR_MODE_RWMP)
+               wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int
+ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       u8 opt = 0;
+
+       if (wolinfo->wolopts & WAKE_PHY)
+               opt |= AX_MONITOR_MODE_RWLC;
+       if (wolinfo->wolopts & WAKE_MAGIC)
+               opt |= AX_MONITOR_MODE_RWMP;
+
+       if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+                             1, 1, &opt) < 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int ax88179_get_eeprom_len(struct net_device *net)
+{
+       return AX_EEPROM_LEN;
+}
+
+static int
+ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+                  u8 *data)
+{
+       struct usbnet *dev = netdev_priv(net);
+       u16 *eeprom_buff;
+       int first_word, last_word;
+       int i, ret;
+
+       if (eeprom->len == 0)
+               return -EINVAL;
+
+       eeprom->magic = AX88179_EEPROM_MAGIC;
+
+       first_word = eeprom->offset >> 1;
+       last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+       eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+                             GFP_KERNEL);
+       if (!eeprom_buff)
+               return -ENOMEM;
+
+       /* ax88179/178A returns 2 bytes from eeprom on read */
+       for (i = first_word; i <= last_word; i++) {
+               ret = __ax88179_read_cmd(dev, AX_ACCESS_EEPROM, i, 1, 2,
+                                        &eeprom_buff[i - first_word],
+                                        0);
+               if (ret < 0) {
+                       kfree(eeprom_buff);
+                       return -EIO;
+               }
+       }
+
+       memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+       kfree(eeprom_buff);
+       return 0;
+}
+
+static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+       struct usbnet *dev = netdev_priv(net);
+       return mii_ethtool_gset(&dev->mii, cmd);
+}
+
+static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+       struct usbnet *dev = netdev_priv(net);
+       return mii_ethtool_sset(&dev->mii, cmd);
+}
+
+
+static int ax88179_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+       struct usbnet *dev = netdev_priv(net);
+       return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static const struct ethtool_ops ax88179_ethtool_ops = {
+       .get_link               = ethtool_op_get_link,
+       .get_msglevel           = usbnet_get_msglevel,
+       .set_msglevel           = usbnet_set_msglevel,
+       .get_wol                = ax88179_get_wol,
+       .set_wol                = ax88179_set_wol,
+       .get_eeprom_len         = ax88179_get_eeprom_len,
+       .get_eeprom             = ax88179_get_eeprom,
+       .get_settings           = ax88179_get_settings,
+       .set_settings           = ax88179_set_settings,
+       .nway_reset             = usbnet_nway_reset,
+};
+
+static void ax88179_set_multicast(struct net_device *net)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct ax88179_data *data = (struct ax88179_data *)dev->data;
+       u8 *m_filter = ((u8 *)dev->data) + 12;
+
+       data->rxctl = (AX_RX_CTL_START | AX_RX_CTL_AB | AX_RX_CTL_IPE);
+
+       if (net->flags & IFF_PROMISC) {
+               data->rxctl |= AX_RX_CTL_PRO;
+       } else if (net->flags & IFF_ALLMULTI ||
+                  netdev_mc_count(net) > AX_MAX_MCAST) {
+               data->rxctl |= AX_RX_CTL_AMALL;
+       } else if (netdev_mc_empty(net)) {
+               /* just broadcast and directed */
+       } else {
+               /* We use the 20 byte dev->data for our 8 byte filter buffer
+                * to avoid allocating memory that is tricky to free later
+                */
+               u32 crc_bits;
+               struct netdev_hw_addr *ha;
+
+               memset(m_filter, 0, AX_MCAST_FLTSIZE);
+
+               netdev_for_each_mc_addr(ha, net) {
+                       crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+                       *(m_filter + (crc_bits >> 3)) |= (1 << (crc_bits & 7));
+               }
+
+               ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_MULFLTARY,
+                                       AX_MCAST_FLTSIZE, AX_MCAST_FLTSIZE,
+                                       m_filter);
+
+               data->rxctl |= AX_RX_CTL_AM;
+       }
+
+       ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_RX_CTL,
+                               2, 2, &data->rxctl);
+}
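
The hardware multicast filter written above is a 64-bit hash table: the top six bits of the Ethernet CRC of each address pick one bit in the 8-byte AX_MULFLTARY array. A minimal sketch of that bit-index computation, assuming a helper that returns the standard Ethernet CRC-32 (the kernel's ether_crc() plays that role in the driver; eth_crc32 here is a hypothetical stand-in):

#include <stdint.h>

/* hypothetical stand-in for the kernel's ether_crc() */
uint32_t eth_crc32(const uint8_t addr[6]);

static void set_filter_bit(uint8_t filter[8], const uint8_t addr[6])
{
	uint32_t crc_bits = eth_crc32(addr) >> 26;	/* index 0..63 */

	filter[crc_bits >> 3] |= 1u << (crc_bits & 7);	/* byte, then bit */
}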
+
+static int
+ax88179_set_features(struct net_device *net, netdev_features_t features)
+{
+       u8 tmp;
+       struct usbnet *dev = netdev_priv(net);
+       netdev_features_t changed = net->features ^ features;
+
+       if (changed & NETIF_F_IP_CSUM) {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+               tmp ^= AX_TXCOE_TCP | AX_TXCOE_UDP;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+       }
+
+       if (changed & NETIF_F_IPV6_CSUM) {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+               tmp ^= AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+       }
+
+       if (changed & NETIF_F_RXCSUM) {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
+               tmp ^= AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
+                      AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
+       }
+
+       return 0;
+}
+
+static int ax88179_change_mtu(struct net_device *net, int new_mtu)
+{
+       struct usbnet *dev = netdev_priv(net);
+       u16 tmp16;
+
+       if (new_mtu <= 0 || new_mtu > 4088)
+               return -EINVAL;
+
+       net->mtu = new_mtu;
+       dev->hard_mtu = net->mtu + net->hard_header_len;
+
+       if (net->mtu > 1500) {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                                2, 2, &tmp16);
+               tmp16 |= AX_MEDIUM_JUMBO_EN;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                                 2, 2, &tmp16);
+       } else {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                                2, 2, &tmp16);
+               tmp16 &= ~AX_MEDIUM_JUMBO_EN;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                                 2, 2, &tmp16);
+       }
+
+       return 0;
+}
+
+static int ax88179_set_mac_addr(struct net_device *net, void *p)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct sockaddr *addr = p;
+
+       if (netif_running(net))
+               return -EBUSY;
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+       /* Set the MAC address */
+       return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+                                ETH_ALEN, net->dev_addr);
+}
+
+static const struct net_device_ops ax88179_netdev_ops = {
+       .ndo_open               = usbnet_open,
+       .ndo_stop               = usbnet_stop,
+       .ndo_start_xmit         = usbnet_start_xmit,
+       .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_change_mtu         = ax88179_change_mtu,
+       .ndo_set_mac_address    = ax88179_set_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = ax88179_ioctl,
+       .ndo_set_rx_mode        = ax88179_set_multicast,
+       .ndo_set_features       = ax88179_set_features,
+};
+
+static int ax88179_check_eeprom(struct usbnet *dev)
+{
+       u8 i, buf, eeprom[20];
+       u16 csum, delay = HZ / 10;
+       unsigned long jtimeout;
+
+       /* Read EEPROM content */
+       for (i = 0; i < 6; i++) {
+               buf = i;
+               if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
+                                     1, 1, &buf) < 0)
+                       return -EINVAL;
+
+               buf = EEP_RD;
+               if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
+                                     1, 1, &buf) < 0)
+                       return -EINVAL;
+
+               jtimeout = jiffies + delay;
+               do {
+                       ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
+                                        1, 1, &buf);
+
+                       if (time_after(jiffies, jtimeout))
+                               return -EINVAL;
+
+               } while (buf & EEP_BUSY);
+
+               __ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
+                                  2, 2, &eeprom[i * 2], 0);
+
+               if ((i == 0) && (eeprom[0] == 0xFF))
+                       return -EINVAL;
+       }
+
+       csum = eeprom[6] + eeprom[7] + eeprom[8] + eeprom[9];
+       csum = (csum >> 8) + (csum & 0xff);
+       if ((csum + eeprom[10]) != 0xff)
+               return -EINVAL;
+
+       return 0;
+}
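
The EEPROM validity check above is a one-byte folded checksum: bytes 6 to 9 are summed, the carry is folded back into eight bits, and the result plus byte 10 must equal 0xFF. A small worked example with made-up byte values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t e6 = 0x12, e7 = 0x34, e8 = 0x56, e9 = 0x78, e10;
	uint16_t csum = e6 + e7 + e8 + e9;	/* 0x0114 */

	csum = (csum >> 8) + (csum & 0xff);	/* fold: 0x01 + 0x14 = 0x15 */
	e10 = 0xff - csum;			/* stored check byte: 0xEA */
	printf("valid=%d\n", (csum + e10) == 0xff);	/* prints valid=1 */
	return 0;
}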
+
+static int ax88179_check_efuse(struct usbnet *dev, u16 *ledmode)
+{
+       u8      i;
+       u8      efuse[64];
+       u16     csum = 0;
+
+       if (ax88179_read_cmd(dev, AX_ACCESS_EFUS, 0, 64, 64, efuse) < 0)
+               return -EINVAL;
+
+       if (*efuse == 0xFF)
+               return -EINVAL;
+
+       for (i = 0; i < 64; i++)
+               csum = csum + efuse[i];
+
+       while (csum > 255)
+               csum = (csum & 0x00FF) + ((csum >> 8) & 0x00FF);
+
+       if (csum != 0xFF)
+               return -EINVAL;
+
+       *ledmode = (efuse[51] << 8) | efuse[52];
+
+       return 0;
+}
+
+static int ax88179_convert_old_led(struct usbnet *dev, u16 *ledvalue)
+{
+       u16 led;
+
+       /* Load the old eFuse LED mode */
+       if (ax88179_read_cmd(dev, AX_ACCESS_EEPROM, 0x3C, 1, 2, &led) < 0)
+               return -EINVAL;
+
+       led >>= 8;
+       switch (led) {
+       case 0xFF:
+               led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
+                     LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
+                     LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
+               break;
+       case 0xFE:
+               led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 | LED_VALID;
+               break;
+       case 0xFD:
+               led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 |
+                     LED2_LINK_10 | LED_VALID;
+               break;
+       case 0xFC:
+               led = LED0_ACTIVE | LED1_ACTIVE | LED1_LINK_1000 | LED2_ACTIVE |
+                     LED2_LINK_100 | LED2_LINK_10 | LED_VALID;
+               break;
+       default:
+               led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
+                     LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
+                     LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
+               break;
+       }
+
+       *ledvalue = led;
+
+       return 0;
+}
+
+static int ax88179_led_setting(struct usbnet *dev)
+{
+       u8 ledfd, value = 0;
+       u16 tmp, ledact, ledlink, ledvalue = 0, delay = HZ / 10;
+       unsigned long jtimeout;
+
+       /* Check AX88179 version, UA1 or UA2 */
+       ax88179_read_cmd(dev, AX_ACCESS_MAC, GENERAL_STATUS, 1, 1, &value);
+
+       if (!(value & AX_SECLD)) {      /* UA1 */
+               value = AX_GPIO_CTRL_GPIO3EN | AX_GPIO_CTRL_GPIO2EN |
+                       AX_GPIO_CTRL_GPIO1EN;
+               if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_GPIO_CTRL,
+                                     1, 1, &value) < 0)
+                       return -EINVAL;
+       }
+
+       /* Check EEPROM */
+       if (!ax88179_check_eeprom(dev)) {
+               value = 0x42;
+               if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
+                                     1, 1, &value) < 0)
+                       return -EINVAL;
+
+               value = EEP_RD;
+               if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
+                                     1, 1, &value) < 0)
+                       return -EINVAL;
+
+               jtimeout = jiffies + delay;
+               do {
+                       ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
+                                        1, 1, &value);
+
+                       if (time_after(jiffies, jtimeout))
+                               return -EINVAL;
+
+               } while (value & EEP_BUSY);
+
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_HIGH,
+                                1, 1, &value);
+               ledvalue = (value << 8);
+
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
+                                1, 1, &value);
+               ledvalue |= value;
+
+               /* load internal ROM for default setting */
+               if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
+                       ax88179_convert_old_led(dev, &ledvalue);
+
+       } else if (!ax88179_check_efuse(dev, &ledvalue)) {
+               if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
+                       ax88179_convert_old_led(dev, &ledvalue);
+       } else {
+               ax88179_convert_old_led(dev, &ledvalue);
+       }
+
+       tmp = GMII_PHY_PGSEL_EXT;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_PHY_PAGE_SELECT, 2, &tmp);
+
+       tmp = 0x2c;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_PHYPAGE, 2, &tmp);
+
+       ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                        GMII_LED_ACT, 2, &ledact);
+
+       ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                        GMII_LED_LINK, 2, &ledlink);
+
+       ledact &= GMII_LED_ACTIVE_MASK;
+       ledlink &= GMII_LED_LINK_MASK;
+
+       if (ledvalue & LED0_ACTIVE)
+               ledact |= GMII_LED0_ACTIVE;
+
+       if (ledvalue & LED1_ACTIVE)
+               ledact |= GMII_LED1_ACTIVE;
+
+       if (ledvalue & LED2_ACTIVE)
+               ledact |= GMII_LED2_ACTIVE;
+
+       if (ledvalue & LED0_LINK_10)
+               ledlink |= GMII_LED0_LINK_10;
+
+       if (ledvalue & LED1_LINK_10)
+               ledlink |= GMII_LED1_LINK_10;
+
+       if (ledvalue & LED2_LINK_10)
+               ledlink |= GMII_LED2_LINK_10;
+
+       if (ledvalue & LED0_LINK_100)
+               ledlink |= GMII_LED0_LINK_100;
+
+       if (ledvalue & LED1_LINK_100)
+               ledlink |= GMII_LED1_LINK_100;
+
+       if (ledvalue & LED2_LINK_100)
+               ledlink |= GMII_LED2_LINK_100;
+
+       if (ledvalue & LED0_LINK_1000)
+               ledlink |= GMII_LED0_LINK_1000;
+
+       if (ledvalue & LED1_LINK_1000)
+               ledlink |= GMII_LED1_LINK_1000;
+
+       if (ledvalue & LED2_LINK_1000)
+               ledlink |= GMII_LED2_LINK_1000;
+
+       tmp = ledact;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_LED_ACT, 2, &tmp);
+
+       tmp = ledlink;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_LED_LINK, 2, &tmp);
+
+       tmp = GMII_PHY_PGSEL_PAGE0;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_PHY_PAGE_SELECT, 2, &tmp);
+
+       /* LED full duplex setting */
+       ledfd = 0;
+       if (ledvalue & LED0_FD)
+               ledfd |= 0x01;
+       else if ((ledvalue & LED0_USB3_MASK) == 0)
+               ledfd |= 0x02;
+
+       if (ledvalue & LED1_FD)
+               ledfd |= 0x04;
+       else if ((ledvalue & LED1_USB3_MASK) == 0)
+               ledfd |= 0x08;
+
+       if (ledvalue & LED2_FD)
+               ledfd |= 0x10;
+       else if ((ledvalue & LED2_USB3_MASK) == 0)
+               ledfd |= 0x20;
+
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_LEDCTRL, 1, 1, &ledfd);
+
+       return 0;
+}
+
+static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+       u8 buf[5];
+       u16 *tmp16;
+       u8 *tmp;
+       struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+
+       usbnet_get_endpoints(dev, intf);
+
+       tmp16 = (u16 *)buf;
+       tmp = (u8 *)buf;
+
+       memset(ax179_data, 0, sizeof(*ax179_data));
+
+       /* Power up ethernet PHY */
+       *tmp16 = 0;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+       *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+       msleep(200);
+
+       *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
+       msleep(100);
+
+       ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+                        ETH_ALEN, dev->net->dev_addr);
+       memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
+
+       /* RX bulk configuration */
+       memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
+
+       dev->rx_urb_size = 1024 * 20;
+
+       *tmp = 0x34;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
+
+       *tmp = 0x52;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
+                         1, 1, tmp);
+
+       dev->net->netdev_ops = &ax88179_netdev_ops;
+       dev->net->ethtool_ops = &ax88179_ethtool_ops;
+       dev->net->needed_headroom = 8;
+
+       /* Initialize MII structure */
+       dev->mii.dev = dev->net;
+       dev->mii.mdio_read = ax88179_mdio_read;
+       dev->mii.mdio_write = ax88179_mdio_write;
+       dev->mii.phy_id_mask = 0xff;
+       dev->mii.reg_num_mask = 0xff;
+       dev->mii.phy_id = 0x03;
+       dev->mii.supports_gmii = 1;
+
+       dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                             NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+
+       dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+
+       /* Enable checksum offload */
+       *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
+              AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
+
+       *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
+              AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
+
+       /* Configure RX control register => start operation */
+       *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
+                AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
+
+       *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
+              AX_MONITOR_MODE_RWMP;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
+
+       /* Configure default medium type => giga */
+       *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
+                AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
+                AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                         2, 2, tmp16);
+
+       ax88179_led_setting(dev);
+
+       /* Restart autoneg */
+       mii_nway_restart(&dev->mii);
+
+       netif_carrier_off(dev->net);
+
+       return 0;
+}
+
+static void ax88179_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+       u16 tmp16;
+
+       /* Configure RX control register => stop operation */
+       tmp16 = AX_RX_CTL_STOP;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+       tmp16 = 0;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp16);
+
+       /* Power down ethernet PHY */
+       tmp16 = 0;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+}
+
+static void
+ax88179_rx_checksum(struct sk_buff *skb, u32 *pkt_hdr)
+{
+       skb->ip_summed = CHECKSUM_NONE;
+
+       /* checksum error bit is set */
+       if ((*pkt_hdr & AX_RXHDR_L3CSUM_ERR) ||
+           (*pkt_hdr & AX_RXHDR_L4CSUM_ERR))
+               return;
+
+       /* It must be a TCP or UDP packet with a valid checksum */
+       if (((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_TCP) ||
+           ((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_UDP))
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+       struct sk_buff *ax_skb;
+       int pkt_cnt;
+       u32 rx_hdr;
+       u16 hdr_off;
+       u32 *pkt_hdr;
+
+       skb_trim(skb, skb->len - 4);
+       memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
+       le32_to_cpus(&rx_hdr);
+
+       pkt_cnt = (u16)rx_hdr;
+       hdr_off = (u16)(rx_hdr >> 16);
+       pkt_hdr = (u32 *)(skb->data + hdr_off);
+
+       while (pkt_cnt--) {
+               u16 pkt_len;
+
+               le32_to_cpus(pkt_hdr);
+               pkt_len = (*pkt_hdr >> 16) & 0x1fff;
+
+               /* Check CRC or runt packet */
+               if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
+                   (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
+                       skb_pull(skb, (pkt_len + 7) & 0xFFF8);
+                       pkt_hdr++;
+                       continue;
+               }
+
+               if (pkt_cnt == 0) {
+                       /* Skip IP alignment pseudo header */
+                       skb_pull(skb, 2);
+                       skb->len = pkt_len;
+                       skb_set_tail_pointer(skb, pkt_len);
+                       skb->truesize = pkt_len + sizeof(struct sk_buff);
+                       ax88179_rx_checksum(skb, pkt_hdr);
+                       return 1;
+               }
+
+               ax_skb = skb_clone(skb, GFP_ATOMIC);
+               if (ax_skb) {
+                       ax_skb->len = pkt_len;
+                       ax_skb->data = skb->data + 2;
+                       skb_set_tail_pointer(ax_skb, pkt_len);
+                       ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
+                       ax88179_rx_checksum(ax_skb, pkt_hdr);
+                       usbnet_skb_return(dev, ax_skb);
+               } else {
+                       return 0;
+               }
+
+               skb_pull(skb, (pkt_len + 7) & 0xFFF8);
+               pkt_hdr++;
+       }
+       return 1;
+}
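
Each aggregated bulk-in transfer handled above ends with a 4-byte descriptor: the low 16 bits carry the packet count and the high 16 bits the offset of the per-packet header array, where each 32-bit header stores the frame length in bits 16 to 28. A stand-alone sketch of that unpacking, using sample values rather than captured data:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rx_hdr = (64u << 16) | 3u;	/* hdr_off = 64, pkt_cnt = 3 */
	uint16_t pkt_cnt = (uint16_t)rx_hdr;
	uint16_t hdr_off = (uint16_t)(rx_hdr >> 16);
	uint32_t pkt_hdr = 0x05EA0000;		/* one sample packet header */
	uint16_t pkt_len = (pkt_hdr >> 16) & 0x1fff;	/* 1514 */

	printf("cnt=%u off=%u len=%u\n", (unsigned)pkt_cnt,
	       (unsigned)hdr_off, (unsigned)pkt_len);
	/* the driver then advances by (pkt_len + 7) & 0xFFF8, i.e. frames
	 * are padded to an 8-byte boundary inside the URB */
	return 0;
}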
+
+static struct sk_buff *
+ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+       u32 tx_hdr1, tx_hdr2;
+       int frame_size = dev->maxpacket;
+       int mss = skb_shinfo(skb)->gso_size;
+       int headroom;
+       int tailroom;
+
+       tx_hdr1 = skb->len;
+       tx_hdr2 = mss;
+       if (((skb->len + 8) % frame_size) == 0)
+               tx_hdr2 |= 0x80008000;  /* Enable padding */
+
+       skb_linearize(skb);
+       headroom = skb_headroom(skb);
+       tailroom = skb_tailroom(skb);
+
+       if (!skb_header_cloned(skb) &&
+           !skb_cloned(skb) &&
+           (headroom + tailroom) >= 8) {
+               if (headroom < 8) {
+                       skb->data = memmove(skb->head + 8, skb->data, skb->len);
+                       skb_set_tail_pointer(skb, skb->len);
+               }
+       } else {
+               struct sk_buff *skb2;
+
+               skb2 = skb_copy_expand(skb, 8, 0, flags);
+               dev_kfree_skb_any(skb);
+               skb = skb2;
+               if (!skb)
+                       return NULL;
+       }
+
+       skb_push(skb, 4);
+       cpu_to_le32s(&tx_hdr2);
+       skb_copy_to_linear_data(skb, &tx_hdr2, 4);
+
+       skb_push(skb, 4);
+       cpu_to_le32s(&tx_hdr1);
+       skb_copy_to_linear_data(skb, &tx_hdr1, 4);
+
+       return skb;
+}
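
On transmit the function above prepends an 8-byte header: the first little-endian word is the frame length, the second carries the TSO MSS, and 0x80008000 is OR-ed in to request padding whenever the transfer would otherwise end exactly on a bulk packet boundary. A short sketch of building those two words with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t skb_len = 504, mss = 0, frame_size = 512;	/* sample values */
	uint32_t tx_hdr1 = skb_len;
	uint32_t tx_hdr2 = mss;

	if (((skb_len + 8) % frame_size) == 0)
		tx_hdr2 |= 0x80008000;	/* 504 + 8 == 512, so pad */

	printf("hdr1=0x%08x hdr2=0x%08x\n",
	       (unsigned)tx_hdr1, (unsigned)tx_hdr2);
	return 0;
}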
+
+static int ax88179_link_reset(struct usbnet *dev)
+{
+       struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+       u8 tmp[5], link_sts;
+       u16 mode, tmp16, delay = HZ / 10;
+       u32 tmp32 = 0x40000000;
+       unsigned long jtimeout;
+
+       jtimeout = jiffies + delay;
+       while (tmp32 & 0x40000000) {
+               mode = 0;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &mode);
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2,
+                                 &ax179_data->rxctl);
+
+               /* link up; check USB device control TX FIFO full or empty */
+               ax88179_read_cmd(dev, 0x81, 0x8c, 0, 4, &tmp32);
+
+               if (time_after(jiffies, jtimeout))
+                       return 0;
+       }
+
+       mode = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
+              AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE;
+
+       ax88179_read_cmd(dev, AX_ACCESS_MAC, PHYSICAL_LINK_STATUS,
+                        1, 1, &link_sts);
+
+       ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                        GMII_PHY_PHYSR, 2, &tmp16);
+
+       if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
+               return 0;
+       } else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
+               mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
+               if (dev->net->mtu > 1500)
+                       mode |= AX_MEDIUM_JUMBO_EN;
+
+               if (link_sts & AX_USB_SS)
+                       memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
+               else if (link_sts & AX_USB_HS)
+                       memcpy(tmp, &AX88179_BULKIN_SIZE[1], 5);
+               else
+                       memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
+       } else if (GMII_PHY_PHYSR_100 == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
+               mode |= AX_MEDIUM_PS;
+
+               if (link_sts & (AX_USB_SS | AX_USB_HS))
+                       memcpy(tmp, &AX88179_BULKIN_SIZE[2], 5);
+               else
+                       memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
+       } else {
+               memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
+       }
+
+       /* RX bulk configuration */
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
+
+       dev->rx_urb_size = (1024 * (tmp[3] + 2));
+
+       if (tmp16 & GMII_PHY_PHYSR_FULL)
+               mode |= AX_MEDIUM_FULL_DUPLEX;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                         2, 2, &mode);
+
+       netif_carrier_on(dev->net);
+
+       return 0;
+}
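
The bulk-in tuning entry chosen above also sets the receive URB size: the driver uses 1024 * (size + 2) bytes, where size is the fourth field of the AX88179_BULKIN_SIZE table. A quick check of that arithmetic for the four table entries (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* 'size' fields copied from the AX88179_BULKIN_SIZE table above */
	unsigned char sizes[] = { 0x12, 0x16, 0x18, 0x18 };
	int i;

	for (i = 0; i < 4; i++)
		printf("entry %d: rx_urb_size = %u bytes\n",
		       i, 1024u * (sizes[i] + 2));
	/* entry 0 yields 20480, matching the 1024 * 20 default programmed
	 * in ax88179_bind() and ax88179_reset() */
	return 0;
}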
+
+static int ax88179_reset(struct usbnet *dev)
+{
+       u8 buf[5];
+       u16 *tmp16;
+       u8 *tmp;
+
+       tmp16 = (u16 *)buf;
+       tmp = (u8 *)buf;
+
+       /* Power up ethernet PHY */
+       *tmp16 = 0;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+
+       *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+       msleep(200);
+
+       *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
+       msleep(100);
+
+       /* Ethernet PHY auto-detach */
+       ax88179_auto_detach(dev, 0);
+
+       ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
+                        dev->net->dev_addr);
+       memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
+
+       /* RX bulk configuration */
+       memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
+
+       dev->rx_urb_size = 1024 * 20;
+
+       *tmp = 0x34;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
+
+       *tmp = 0x52;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
+                         1, 1, tmp);
+
+       dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                             NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+
+       dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+
+       /* Enable checksum offload */
+       *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
+              AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
+
+       *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
+              AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
+
+       /* Configure RX control register => start operation */
+       *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
+                AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
+
+       *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
+              AX_MONITOR_MODE_RWMP;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
+
+       /* Configure default medium type => giga */
+       *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
+                AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
+                AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                         2, 2, tmp16);
+
+       ax88179_led_setting(dev);
+
+       /* Restart autoneg */
+       mii_nway_restart(&dev->mii);
+
+       netif_carrier_off(dev->net);
+
+       return 0;
+}
+
+static int ax88179_stop(struct usbnet *dev)
+{
+       u16 tmp16;
+
+       ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                        2, 2, &tmp16);
+       tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                         2, 2, &tmp16);
+
+       return 0;
+}
+
+static const struct driver_info ax88179_info = {
+       .description = "ASIX AX88179 USB 3.0 Gigabit Ethernet",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info ax88178a_info = {
+       .description = "ASIX AX88178A USB 2.0 Gigabit Ethernet",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info sitecom_info = {
+       .description = "Sitecom USB 3.0 to Gigabit Adapter",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct usb_device_id products[] = {
+{
+       /* ASIX AX88179 10/100/1000 */
+       USB_DEVICE(0x0b95, 0x1790),
+       .driver_info = (unsigned long)&ax88179_info,
+}, {
+       /* ASIX AX88178A 10/100/1000 */
+       USB_DEVICE(0x0b95, 0x178a),
+       .driver_info = (unsigned long)&ax88178a_info,
+}, {
+       /* Sitecom USB 3.0 to Gigabit Adapter */
+       USB_DEVICE(0x0df6, 0x0072),
+       .driver_info = (unsigned long) &sitecom_info,
+},
+       { },
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver ax88179_178a_driver = {
+       .name =         "ax88179_178a",
+       .id_table =     products,
+       .probe =        usbnet_probe,
+       .suspend =      ax88179_suspend,
+       .resume =       ax88179_resume,
+       .disconnect =   usbnet_disconnect,
+       .supports_autosuspend = 1,
+       .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(ax88179_178a_driver);
+
+MODULE_DESCRIPTION("ASIX AX88179/178A based USB 3.0/2.0 Gigabit Ethernet Devices");
+MODULE_LICENSE("GPL");
index 4a8c25a222940d3061b61336976db5f1e80aef58..61b74a2b89ac4fbcf2ac494fc0045645efc379a0 100644 (file)
@@ -1213,6 +1213,14 @@ static const struct usb_device_id cdc_devs[] = {
          .driver_info = (unsigned long) &wwan_info,
        },
 
+       /* tag Huawei devices as wwan */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
+                                       USB_CLASS_COMM,
+                                       USB_CDC_SUBCLASS_NCM,
+                                       USB_CDC_PROTO_NONE),
+         .driver_info = (unsigned long)&wwan_info,
+       },
+
        /* Huawei NCM devices disguised as vendor specific */
        { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
          .driver_info = (unsigned long)&wwan_info,
index f10e58ac9c1b6078527ecbc88b2c59fe54d4ea66..c3e3d2929ee34cbdb0813dc93423e532a895a2a6 100644 (file)
@@ -961,6 +961,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
        iph->ttl        = ttl ? : ip4_dst_hoplimit(&rt->dst);
        tunnel_ip_select_ident(skb, old_iph, &rt->dst);
 
+       nf_reset(skb);
+
        vxlan_set_owner(dev, skb);
 
        /* See iptunnel_xmit() */
index eca95a0c389049257397949ea38b0db93dbbc5fa..6102476a65dea79e1958d76376fc112bbafde049 100644 (file)
@@ -27,7 +27,7 @@
 #define WME_MAX_BA              WME_BA_BMP_SIZE
 #define ATH_TID_MAX_BUFS        (2 * WME_MAX_BA)
 
-#define ATH_RSSI_DUMMY_MARKER   0x127
+#define ATH_RSSI_DUMMY_MARKER   127
 #define ATH_RSSI_LPF_LEN               10
 #define RSSI_LPF_THRESHOLD             -20
 #define ATH_RSSI_EP_MULTIPLIER     (1<<7)
index 96bfb18078fa14b0013d4b3e93e7544b16639746..d3b099d7898b692b181029c808d41969554035ee 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/firmware.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/leds.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
index 3ad1fd05c5e769fc429325377470c888edb354a2..bd8251c1c7494fe69a140d4089d6d2d586c0eaae 100644 (file)
@@ -1067,15 +1067,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
 
        last_rssi = priv->rx.last_rssi;
 
-       if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-               rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
-                                                    ATH_RSSI_EP_MULTIPLIER);
+       if (ieee80211_is_beacon(hdr->frame_control) &&
+           !is_zero_ether_addr(common->curbssid) &&
+           ether_addr_equal(hdr->addr3, common->curbssid)) {
+               s8 rssi = rxbuf->rxstatus.rs_rssi;
 
-       if (rxbuf->rxstatus.rs_rssi < 0)
-               rxbuf->rxstatus.rs_rssi = 0;
+               if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+                       rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
 
-       if (ieee80211_is_beacon(fc))
-               priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
+               if (rssi < 0)
+                       rssi = 0;
+
+               priv->ah->stats.avgbrssi = rssi;
+       }
 
        rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
        rx_status->band = hw->conf.channel->band;
index 767222f2ba5c067a0b76fc91b254ce169030ea78..4fa2bb1670505644146a33d15c93237287ec1582 100644 (file)
@@ -1463,7 +1463,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
                        reset_type = ATH9K_RESET_POWER_ON;
                else
                        reset_type = ATH9K_RESET_COLD;
-       }
+       } else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
+                  (REG_READ(ah, AR_CR) & AR_CR_RXE))
+               reset_type = ATH9K_RESET_COLD;
 
        if (!ath9k_hw_set_reset_reg(ah, reset_type))
                return false;
index 739309e70d8be7872e9272c9e25dcb7e3a5b2a4a..45578335e4200f2f47a3fc8284f9bd09127209ee 100644 (file)
@@ -825,6 +825,11 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
 
        sdio_release_host(func);
 
+       /* Set fw_ready before queuing any commands so that
+        * lbs_thread won't block them from being sent to firmware.
+        */
+       priv->fw_ready = 1;
+
        /*
         * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
         */
@@ -839,7 +844,6 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
                        netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
        }
 
-       priv->fw_ready = 1;
        wake_up(&card->pwron_waitq);
 
        if (!card->started) {
index 246aa62a48172d849cff8bd3d42947b28d7803a0..2fe0ceba4400ad017d99ca5324707aa96093d6be 100644 (file)
@@ -1117,10 +1117,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
                adhoc_join->bss_descriptor.bssid,
                adhoc_join->bss_descriptor.ssid);
 
-       for (i = 0; bss_desc->supported_rates[i] &&
-                       i < MWIFIEX_SUPPORTED_RATES;
-                       i++)
-                       ;
+       for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
+                   bss_desc->supported_rates[i]; i++)
+               ;
        rates_size = i;
 
        /* Copy Data Rates from the Rates recorded in scan response */
index b813a27ee6130b679dd587b2de7b254d643813d8..6283294398bf8cbc5b6c8f6891d83424897b2dfc 100644 (file)
@@ -302,7 +302,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
                i++;
                usleep_range(10, 20);
                /* 50ms max wait */
-               if (i == 50000)
+               if (i == 5000)
                        break;
        }
 
index 44d6ead433411eeddc801dc8e1eea0cce40d67e4..2bf4efa331867acaea139bb81dcca75a66532ef4 100644 (file)
@@ -55,10 +55,10 @@ config RT61PCI
 
 config RT2800PCI
        tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
-       depends on PCI || RALINK_RT288X || RALINK_RT305X
+       depends on PCI || SOC_RT288X || SOC_RT305X
        select RT2800_LIB
        select RT2X00_LIB_PCI if PCI
-       select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X
+       select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
        select RT2X00_LIB_FIRMWARE
        select RT2X00_LIB_CRYPTO
        select CRC_CCITT
index 48a01aa21f1c94039d9485d5e635926d58a17750..ded73da4de0b0d52cd6331d837c8c68e86b87749 100644 (file)
@@ -89,7 +89,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
        rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
 }
 
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
 static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
 {
        void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
@@ -107,7 +107,7 @@ static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
 {
        return -ENOMEM;
 }
-#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
+#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
 
 #ifdef CONFIG_PCI
 static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -1177,7 +1177,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
 #endif /* CONFIG_PCI */
 MODULE_LICENSE("GPL");
 
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
 static int rt2800soc_probe(struct platform_device *pdev)
 {
        return rt2x00soc_probe(pdev, &rt2800pci_ops);
@@ -1194,7 +1194,7 @@ static struct platform_driver rt2800soc_driver = {
        .suspend        = rt2x00soc_suspend,
        .resume         = rt2x00soc_resume,
 };
-#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
+#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
 
 #ifdef CONFIG_PCI
 static int rt2800pci_probe(struct pci_dev *pci_dev,
@@ -1217,7 +1217,7 @@ static int __init rt2800pci_init(void)
 {
        int ret = 0;
 
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
        ret = platform_driver_register(&rt2800soc_driver);
        if (ret)
                return ret;
@@ -1225,7 +1225,7 @@ static int __init rt2800pci_init(void)
 #ifdef CONFIG_PCI
        ret = pci_register_driver(&rt2800pci_driver);
        if (ret) {
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
                platform_driver_unregister(&rt2800soc_driver);
 #endif
                return ret;
@@ -1240,7 +1240,7 @@ static void __exit rt2800pci_exit(void)
 #ifdef CONFIG_PCI
        pci_unregister_driver(&rt2800pci_driver);
 #endif
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
        platform_driver_unregister(&rt2800soc_driver);
 #endif
 }
index 1031db66474a6403e67ab5a8f5789820343ac1a6..189744db65e073ec96ff396d7ae8fee46a3c3fcd 100644 (file)
@@ -1236,8 +1236,10 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
         */
        if_limit = &rt2x00dev->if_limits_ap;
        if_limit->max = rt2x00dev->ops->max_ap_intf;
-       if_limit->types = BIT(NL80211_IFTYPE_AP) |
-                       BIT(NL80211_IFTYPE_MESH_POINT);
+       if_limit->types = BIT(NL80211_IFTYPE_AP);
+#ifdef CONFIG_MAC80211_MESH
+       if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT);
+#endif
 
        /*
         * Build up AP interface combinations structure.
@@ -1309,7 +1311,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
                rt2x00dev->hw->wiphy->interface_modes |=
                    BIT(NL80211_IFTYPE_ADHOC) |
                    BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
                    BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
                    BIT(NL80211_IFTYPE_WDS);
 
        rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
index b1ccff474c7953f2431cb2cd0390f2e0ebf89591..c08d0f4c5f3d3fe8416195951ea6547a80429030 100644 (file)
@@ -1376,75 +1376,58 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
-{
-       /* dummy routine needed for callback from rtl_op_configure_filter() */
-}
-
-/*========================================================================== */
-
-static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
-                             enum nl80211_iftype type)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       u8 filterout_non_associated_bssid = false;
+       u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
 
-       switch (type) {
-       case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_STATION:
-               filterout_non_associated_bssid = true;
-               break;
-       case NL80211_IFTYPE_UNSPECIFIED:
-       case NL80211_IFTYPE_AP:
-       default:
-               break;
-       }
-       if (filterout_non_associated_bssid) {
+       if (rtlpriv->psc.rfpwr_state != ERFON)
+               return;
+
+       if (check_bssid) {
+               u8 tmp;
                if (IS_NORMAL_CHIP(rtlhal->version)) {
-                       switch (rtlphy->current_io_type) {
-                       case IO_CMD_RESUME_DM_BY_SCAN:
-                               reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
-                               rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                HW_VAR_RCR, (u8 *)(&reg_rcr));
-                               /* enable update TSF */
-                               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
-                               break;
-                       case IO_CMD_PAUSE_DM_BY_SCAN:
-                               reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
-                               rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                HW_VAR_RCR, (u8 *)(&reg_rcr));
-                               /* disable update TSF */
-                               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
-                               break;
-                       }
+                       reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+                       tmp = BIT(4);
                } else {
-                       reg_rcr |= (RCR_CBSSID);
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
-                                                     (u8 *)(&reg_rcr));
-                       _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
+                       reg_rcr |= RCR_CBSSID;
+                       tmp = BIT(4) | BIT(5);
                }
-       } else if (filterout_non_associated_bssid == false) {
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                             (u8 *) (&reg_rcr));
+               _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
+       } else {
+               u8 tmp;
                if (IS_NORMAL_CHIP(rtlhal->version)) {
-                       reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
-                                                     (u8 *)(&reg_rcr));
-                       _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+                       reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+                       tmp = BIT(4);
                } else {
-                       reg_rcr &= (~RCR_CBSSID);
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
-                                                     (u8 *)(&reg_rcr));
-                       _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
+                       reg_rcr &= ~RCR_CBSSID;
+                       tmp = BIT(4) | BIT(5);
                }
+               reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                             HW_VAR_RCR, (u8 *) (&reg_rcr));
+               _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0);
        }
 }
 
+/*========================================================================== */
+
 int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
 {
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
        if (_rtl92cu_set_media_status(hw, type))
                return -EOPNOTSUPP;
-       _rtl92cu_set_check_bssid(hw, type);
+
+       if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+               if (type != NL80211_IFTYPE_AP)
+                       rtl92cu_set_check_bssid(hw, true);
+       } else {
+               rtl92cu_set_check_bssid(hw, false);
+       }
+
        return 0;
 }
 
@@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
                               (shortgi_rate << 4) | (shortgi_rate);
        }
        rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
-       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
-                rtl_read_dword(rtlpriv, REG_ARFR0));
 }
 
 void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
index 1956593ee89d17b13d24e13c725eef8dd59cf7e7..81e939e90c4c44f7ead9bad36eac03f98f998421 100644 (file)
@@ -881,17 +881,12 @@ static struct vio_driver hvcs_vio_driver = {
 /* Only called from hvcs_get_pi please */
 static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
 {
-       int clclength;
-
        hvcsd->p_unit_address = pi->unit_address;
        hvcsd->p_partition_ID  = pi->partition_ID;
-       clclength = strlen(&pi->location_code[0]);
-       if (clclength > HVCS_CLC_LENGTH)
-               clclength = HVCS_CLC_LENGTH;
 
        /* copy the null-term char too */
-       strncpy(&hvcsd->p_location_code[0],
-                       &pi->location_code[0], clclength + 1);
+       strlcpy(&hvcsd->p_location_code[0],
+                       &pi->location_code[0], sizeof(hvcsd->p_location_code));
 }
 
 /*
index 29eb805ea4a6b2c98236bbd0f24cc00a45ec53df..c1d6555d2567468f86a6c1b6d745d927f1a25815 100644 (file)
 
 #ifdef CONFIG_PREEMPT_COUNT
 # define preemptible() (preempt_count() == 0 && !irqs_disabled())
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
 # define preemptible() 0
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
index ef9acd3c84506fcd3525166501387445181e92c0..01d25e6fc792472692f6e601de77a7b83fe69734 100644 (file)
@@ -854,6 +854,8 @@ type_pf_tresize(struct ip_set *set, bool retried)
 retry:
        ret = 0;
        htable_bits++;
+       pr_debug("attempt to resize set %s from %u to %u, t %p\n",
+                set->name, orig->htable_bits, htable_bits, orig);
        if (!htable_bits) {
                /* In case we have plenty of memory :-) */
                pr_warning("Cannot increase the hashsize of set %s further\n",
@@ -873,7 +875,7 @@ retry:
                        data = ahash_tdata(n, j);
                        m = hbucket(t, HKEY(data, h->initval, htable_bits));
                        ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
-                                               type_pf_data_timeout(data));
+                                               ip_set_timeout_get(type_pf_data_timeout(data)));
                        if (ret < 0) {
                                read_unlock_bh(&set->lock);
                                ahash_destroy(t);
index c65dee059913c8d429a614bf3c9b16e088369216..13e929679550ab4967b72a73e544dbf531f74004 100644 (file)
@@ -24,6 +24,9 @@ struct smpboot_thread_data;
  *                     parked (cpu offline)
  * @unpark:            Optional unpark function, called when the thread is
  *                     unparked (cpu online)
+ * @pre_unpark:                Optional unpark function, called before the thread is
+ *                     unparked (cpu online). This is not guaranteed to be
+ *                     called on the target cpu of the thread. Careful!
  * @selfparking:       Thread is not parked by the park function.
  * @thread_comm:       The base name of the thread
  */
@@ -37,6 +40,7 @@ struct smp_hotplug_thread {
        void                            (*cleanup)(unsigned int cpu, bool online);
        void                            (*park)(unsigned int cpu);
        void                            (*unpark)(unsigned int cpu);
+       void                            (*pre_unpark)(unsigned int cpu);
        bool                            selfparking;
        const char                      *thread_comm;
 };
index 23f2e98d4b654dbd497def51219cbe0806c23cb8..cf0694d4ad60f62be703eaec51db142b9c5c4e29 100644 (file)
@@ -1045,6 +1045,10 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
        if (sysctl_tcp_low_latency || !tp->ucopy.task)
                return false;
 
+       if (skb->len <= tcp_hdrlen(skb) &&
+           skb_queue_len(&tp->ucopy.prequeue) == 0)
+               return false;
+
        __skb_queue_tail(&tp->ucopy.prequeue, skb);
        tp->ucopy.memory += skb->truesize;
        if (tp->ucopy.memory > sk->sk_rcvbuf) {
index b9bde572782932f24d8a3a9f3a4f0670cb07b633..25d3d8b6e4e15795b21e63e3a0302027477cea58 100644 (file)
@@ -209,6 +209,8 @@ static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cp
 {
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 
+       if (ht->pre_unpark)
+               ht->pre_unpark(cpu);
        kthread_unpark(tsk);
 }
 
index b4d252fd195b927afd402977b64efb3faa7ed96f..14d7758074aadf4d1c43947ecef675e8bb6c044e 100644 (file)
@@ -323,18 +323,10 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-       if (!force_irqthreads) {
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+       if (!force_irqthreads)
                __do_softirq();
-#else
-               do_softirq();
-#endif
-       } else {
-               __local_bh_disable((unsigned long)__builtin_return_address(0),
-                               SOFTIRQ_OFFSET);
+       else
                wakeup_softirqd();
-               __local_bh_enable(SOFTIRQ_OFFSET);
-       }
 }
 
 /*
@@ -342,9 +334,15 @@ static inline void invoke_softirq(void)
  */
 void irq_exit(void)
 {
+#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
+       local_irq_disable();
+#else
+       WARN_ON_ONCE(!irqs_disabled());
+#endif
+
        account_irq_exit_time(current);
        trace_hardirq_exit();
-       sub_preempt_count(IRQ_EXIT_OFFSET);
+       sub_preempt_count(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
 
@@ -354,7 +352,6 @@ void irq_exit(void)
                tick_nohz_irq_exit();
 #endif
        rcu_irq_exit();
-       sched_preempt_enable_no_resched();
 }
 
 /*
index 95d178c62d5a8537c18fa2d6f1d947aed1a93448..c09f2955ae3055b42f1edde601ee1eb431bfc18a 100644 (file)
@@ -336,7 +336,7 @@ static struct smp_hotplug_thread cpu_stop_threads = {
        .create                 = cpu_stop_create,
        .setup                  = cpu_stop_unpark,
        .park                   = cpu_stop_park,
-       .unpark                 = cpu_stop_unpark,
+       .pre_unpark             = cpu_stop_unpark,
        .selfparking            = true,
 };
 
index 1ae1d9cb278d4047083214ba0336dd846f4822a1..21760f0089749217e03790e02b49998fc5a7958c 100644 (file)
@@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
        return NULL;
 }
 
-void caif_flow_cb(struct sk_buff *skb)
+static void caif_flow_cb(struct sk_buff *skb)
 {
        struct caif_device_entry *caifd;
        void (*dtor)(struct sk_buff *skb) = NULL;
index 3ebc8cbc91fff419097799784dad4aa374ced101..ef8ebaa993cf3ca9c915f8d2a230a3d04af79c34 100644 (file)
@@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                layr->up->ctrlcmd(layr->up, ctrl, layr->id);
 }
 
-struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
-                                       u8 braddr[ETH_ALEN])
+static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+                                     u8 braddr[ETH_ALEN])
 {
        struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
 
index a06a7a58dd1181b4134adf7cd95d4b1b5512cda3..8f152f904f706c9456bec2157b61e592819f3ef4 100644 (file)
@@ -4103,7 +4103,7 @@ static void net_rx_action(struct softirq_action *h)
                 * Allow this to run for 2 jiffies since which will allow
                 * an average latency of 1.5/HZ.
                 */
-               if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
+               if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
                        goto softnet_break;
 
                local_irq_enable();
@@ -4780,7 +4780,7 @@ EXPORT_SYMBOL(dev_set_mac_address);
 /**
  *     dev_change_carrier - Change device carrier
  *     @dev: device
- *     @new_carries: new value
+ *     @new_carrier: new value
  *
  *     Change device carrier
  */
index 87abd3e2bd329d7ee3630cbc3ed4770d35e6370e..2bdf802e28e270c4717c06bc64469eab073f068d 100644 (file)
@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
                                        icmp_send(skb, ICMP_DEST_UNREACH,
                                                  ICMP_PROT_UNREACH, 0);
                                }
-                       } else
+                               kfree_skb(skb);
+                       } else {
                                IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
-                       kfree_skb(skb);
+                               consume_skb(skb);
+                       }
                }
        }
  out:
index f6289bf6f3325edc2c78541eca88e546e5ec97c9..310a3647c83d948949e8c76ef8e89b68def338dd 100644 (file)
@@ -423,7 +423,7 @@ int ip_options_compile(struct net *net,
                                        put_unaligned_be32(midtime, timeptr);
                                        opt->is_changed = 1;
                                }
-                       } else {
+                       } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
                                unsigned int overflow = optptr[3]>>4;
                                if (overflow == 15) {
                                        pp_ptr = optptr + 3;
index a759e19496d2f57f508db27f21ab73b2b320523a..0d9bdacce99f46a77982d89c86a7764fe9ef15f8 100644 (file)
@@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                if (tcp_checksum_complete_user(sk, skb))
                                        goto csum_error;
 
+                               if ((int)skb->truesize > sk->sk_forward_alloc)
+                                       goto step5;
+
                                /* Predicted packet is in window by definition.
                                 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
                                 * Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                                tcp_rcv_rtt_measure_ts(sk, skb);
 
-                               if ((int)skb->truesize > sk->sk_forward_alloc)
-                                       goto step5;
-
                                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
                                /* Bulk data transfer: receiver */
index 5b10414e619e5020fa4285a1e20790bdf5a8e845..b1876e52091e1ea2568b5b1acf9411df5986ef84 100644 (file)
@@ -241,9 +241,11 @@ resubmit:
                                icmpv6_send(skb, ICMPV6_PARAMPROB,
                                            ICMPV6_UNK_NEXTHDR, nhoff);
                        }
-               } else
+                       kfree_skb(skb);
+               } else {
                        IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
-               kfree_skb(skb);
+                       consume_skb(skb);
+               }
        }
        rcu_read_unlock();
        return 0;
index 928266569689e7624cde4206f657df495a0bdd43..e5fe0041adfa389388060bffae7055db3508c407 100644 (file)
@@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net)
 restart:
        read_lock_bh(&table->tb6_lock);
        for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-               if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
+               if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+                   (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
                        dst_hold(&rt->dst);
                        read_unlock_bh(&table->tb6_lock);
                        ip6_del_rt(rt);
index 9a5fd3c3e530c5dc82c04ccf04abf0f93b0f3a28..362ba47968e41de822122f9c215e012ba566cd59 100644 (file)
@@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
        struct tty_port *port = &self->port;
        DECLARE_WAITQUEUE(wait, current);
        int             retval;
-       int             do_clocal = 0, extra_count = 0;
+       int             do_clocal = 0;
        unsigned long   flags;
 
        IRDA_DEBUG(2, "%s()\n", __func__ );
@@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
         * If non-blocking mode is set, or the port is not enabled,
         * then make the check up front and then exit.
         */
-       if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
-               /* nonblock mode is set or port is not enabled */
+       if (test_bit(TTY_IO_ERROR, &tty->flags)) {
+               port->flags |= ASYNC_NORMAL_ACTIVE;
+               return 0;
+       }
+
+       if (filp->f_flags & O_NONBLOCK) {
+               /* nonblock mode is set */
+               if (tty->termios.c_cflag & CBAUD)
+                       tty_port_raise_dtr_rts(port);
                port->flags |= ASYNC_NORMAL_ACTIVE;
                IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ );
                return 0;
@@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
              __FILE__, __LINE__, tty->driver->name, port->count);
 
        spin_lock_irqsave(&port->lock, flags);
-       if (!tty_hung_up_p(filp)) {
-               extra_count = 1;
+       if (!tty_hung_up_p(filp))
                port->count--;
-       }
-       spin_unlock_irqrestore(&port->lock, flags);
        port->blocked_open++;
+       spin_unlock_irqrestore(&port->lock, flags);
 
        while (1) {
                if (tty->termios.c_cflag & CBAUD)
                        tty_port_raise_dtr_rts(port);
 
-               current->state = TASK_INTERRUPTIBLE;
+               set_current_state(TASK_INTERRUPTIBLE);
 
                if (tty_hung_up_p(filp) ||
                    !test_bit(ASYNCB_INITIALIZED, &port->flags)) {
@@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&port->open_wait, &wait);
 
-       if (extra_count) {
-               /* ++ is not atomic, so this should be protected - Jean II */
-               spin_lock_irqsave(&port->lock, flags);
+       spin_lock_irqsave(&port->lock, flags);
+       if (!tty_hung_up_p(filp))
                port->count++;
-               spin_unlock_irqrestore(&port->lock, flags);
-       }
        port->blocked_open--;
+       spin_unlock_irqrestore(&port->lock, flags);
 
        IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
              __FILE__, __LINE__, tty->driver->name, port->count);
index e71e85ba2bf1c180b2038d62021283ddf7982b07..29340a9a6fb9937f9bc848ffda5c49c51cca47ee 100644 (file)
@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
 /*             case CS_ISO_8859_9: */
 /*             case CS_UNICODE: */
                default:
-                       IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
-                                  __func__, ias_charset_types[charset]);
+                       IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
+                                  __func__, charset,
+                                  charset < ARRAY_SIZE(ias_charset_types) ?
+                                       ias_charset_types[charset] :
+                                       "(unknown)");
 
                        /* Aborting, close connection! */
                        iriap_disconnect_request(self);
index 3f4e3afc191a524d57c29659324ddc0ac6cd9c1e..6a53371dba1f1b357f9cd4f34b7af166ed324eab 100644 (file)
@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
        l2tp_xmit_skb(session, skb, session->hdr_len);
 
        sock_put(ps->tunnel_sock);
+       sock_put(sk);
 
        return error;
 
index f82b2e606cfd53048d5e208855d0d18fb5539a74..1ba9dbc0e107cae96efdf58b4b18d7c4d38d9e6b 100644 (file)
@@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
        if (ret == -EAGAIN)
                ret = 1;
 
-       return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+       return (ret < 0 && ret != -ENOTEMPTY) ? ret :
+               ret > 0 ? 0 : -IPSET_ERR_EXIST;
 }
 
 /* Get headed data of a set */
index 3361170cb26224f75b35e06777e04d5dce3ca92f..bb67b98b979728e88624ea1bfb8349be190a183e 100644 (file)
@@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
        }
 }
 
-static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
+static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,
+                                   int err)
 {
        struct sock *sk;
        struct hlist_node *tmp;
@@ -100,7 +101,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 
                                nfc_llcp_accept_unlink(accept_sk);
 
+                               if (err)
+                                       accept_sk->sk_err = err;
                                accept_sk->sk_state = LLCP_CLOSED;
+                               accept_sk->sk_state_change(sk);
 
                                bh_unlock_sock(accept_sk);
 
@@ -123,7 +127,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
                        continue;
                }
 
+               if (err)
+                       sk->sk_err = err;
                sk->sk_state = LLCP_CLOSED;
+               sk->sk_state_change(sk);
 
                bh_unlock_sock(sk);
 
@@ -133,6 +140,36 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
        }
 
        write_unlock(&local->sockets.lock);
+
+       /*
+        * If we want to keep the listening sockets alive,
+        * we don't touch the RAW ones.
+        */
+       if (listen == true)
+               return;
+
+       write_lock(&local->raw_sockets.lock);
+
+       sk_for_each_safe(sk, tmp, &local->raw_sockets.head) {
+               llcp_sock = nfc_llcp_sock(sk);
+
+               bh_lock_sock(sk);
+
+               nfc_llcp_socket_purge(llcp_sock);
+
+               if (err)
+                       sk->sk_err = err;
+               sk->sk_state = LLCP_CLOSED;
+               sk->sk_state_change(sk);
+
+               bh_unlock_sock(sk);
+
+               sock_orphan(sk);
+
+               sk_del_node_init(sk);
+       }
+
+       write_unlock(&local->raw_sockets.lock);
 }
 
 struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
@@ -142,14 +179,9 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
        return local;
 }
 
-static void local_release(struct kref *ref)
+static void local_cleanup(struct nfc_llcp_local *local, bool listen)
 {
-       struct nfc_llcp_local *local;
-
-       local = container_of(ref, struct nfc_llcp_local, ref);
-
-       list_del(&local->list);
-       nfc_llcp_socket_release(local, false);
+       nfc_llcp_socket_release(local, listen, ENXIO);
        del_timer_sync(&local->link_timer);
        skb_queue_purge(&local->tx_queue);
        cancel_work_sync(&local->tx_work);
@@ -159,6 +191,16 @@ static void local_release(struct kref *ref)
        del_timer_sync(&local->sdreq_timer);
        cancel_work_sync(&local->sdreq_timeout_work);
        nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
+}
+
+static void local_release(struct kref *ref)
+{
+       struct nfc_llcp_local *local;
+
+       local = container_of(ref, struct nfc_llcp_local, ref);
+
+       list_del(&local->list);
+       local_cleanup(local, false);
        kfree(local);
 }
 
@@ -1433,7 +1475,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
                return;
 
        /* Close and purge all existing sockets */
-       nfc_llcp_socket_release(local, true);
+       nfc_llcp_socket_release(local, true, 0);
 }
 
 void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -1519,6 +1561,8 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
                return;
        }
 
+       local_cleanup(local, false);
+
        nfc_llcp_local_put(local);
 }
 
index 827d7d755d0951d880ad433fc524f0f401e4c882..f1b377e247fef0ee578c8a8a63139fd4ade88835 100644 (file)
@@ -396,6 +396,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
 
                        pr_debug("Returning sk state %d\n", sk->sk_state);
 
+                       sk_acceptq_removed(parent);
+
                        return sk;
                }
 
index f0a4658f3273e2e87bac1ae88058e1209ed3332a..aba232f9f3081968081edb0ed6ece382e954b653 100644 (file)
@@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm)
 void rds_message_put(struct rds_message *rm)
 {
        rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
-       if (atomic_read(&rm->m_refcount) == 0) {
-printk(KERN_CRIT "danger refcount zero on %p\n", rm);
-WARN_ON(1);
-       }
+       WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
        if (atomic_dec_and_test(&rm->m_refcount)) {
                BUG_ON(!list_empty(&rm->m_sock_item));
                BUG_ON(!list_empty(&rm->m_conn_item));
@@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
 {
        struct rds_message *rm;
 
+       if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
+               return NULL;
+
        rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
        if (!rm)
                goto out;
index e9a77f621c3dfc87d49ed534003a20ceb1d98f8d..d51852bba01c981c9f9834dad82cfbcfec904508 100644 (file)
@@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
            new_num_classes == q->max_agg_classes - 1) /* agg no more full */
                hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
 
+       /* The next assignment may let
+        * agg->initial_budget > agg->budgetmax
+        * hold, we will take it into account in charge_actual_service().
+        */
        agg->budgetmax = new_num_classes * agg->lmax;
        new_agg_weight = agg->class_weight * new_num_classes;
        agg->inv_w = ONE_FP/new_agg_weight;
@@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q)
        unsigned long old_vslot = q->oldV >> q->min_slot_shift;
 
        if (vslot != old_vslot) {
-               unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
+               unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1;
                qfq_move_groups(q, mask, IR, ER);
                qfq_move_groups(q, mask, IB, EB);
        }
@@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
 /* Update F according to the actual service received by the aggregate. */
 static inline void charge_actual_service(struct qfq_aggregate *agg)
 {
-       /* compute the service received by the aggregate */
-       u32 service_received = agg->initial_budget - agg->budget;
+       /* Compute the service received by the aggregate, taking into
+        * account that, after decreasing the number of classes in
+        * agg, it may happen that
+        * agg->initial_budget - agg->budget > agg->budgetmax
+        */
+       u32 service_received = min(agg->budgetmax,
+                                  agg->initial_budget - agg->budget);
 
        agg->F = agg->S + (u64)service_received * agg->inv_w;
 }
 
+static inline void qfq_update_agg_ts(struct qfq_sched *q,
+                                    struct qfq_aggregate *agg,
+                                    enum update_reason reason);
+
+static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
+
 static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
@@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
                in_serv_agg->initial_budget = in_serv_agg->budget =
                        in_serv_agg->budgetmax;
 
-               if (!list_empty(&in_serv_agg->active))
+               if (!list_empty(&in_serv_agg->active)) {
                        /*
                         * Still active: reschedule for
                         * service. Possible optimization: if no other
@@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
                         * handle it, we would need to maintain an
                         * extra num_active_aggs field.
                        */
-                       qfq_activate_agg(q, in_serv_agg, requeue);
-               else if (sch->q.qlen == 0) { /* no aggregate to serve */
+                       qfq_update_agg_ts(q, in_serv_agg, requeue);
+                       qfq_schedule_agg(q, in_serv_agg);
+               } else if (sch->q.qlen == 0) { /* no aggregate to serve */
                        q->in_serv_agg = NULL;
                        return NULL;
                }
@@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
        qdisc_bstats_update(sch, skb);
 
        agg_dequeue(in_serv_agg, cl, len);
-       in_serv_agg->budget -= len;
+       /* If lmax is lowered, through qfq_change_class, for a class
+        * owning pending packets with larger size than the new value
+        * of lmax, then the following condition may hold.
+        */
+       if (unlikely(in_serv_agg->budget < len))
+               in_serv_agg->budget = 0;
+       else
+               in_serv_agg->budget -= len;
+
        q->V += (u64)len * IWSUM;
        pr_debug("qfq dequeue: len %u F %lld now %lld\n",
                 len, (unsigned long long) in_serv_agg->F,
@@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        cl->deficit = agg->lmax;
        list_add_tail(&cl->alist, &agg->active);
 
-       if (list_first_entry(&agg->active, struct qfq_class, alist) != cl)
-               return err; /* aggregate was not empty, nothing else to do */
+       if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
+           q->in_serv_agg == agg)
+               return err; /* non-empty or in service, nothing else to do */
 
-       /* recharge budget */
-       agg->initial_budget = agg->budget = agg->budgetmax;
-
-       qfq_update_agg_ts(q, agg, enqueue);
-       if (q->in_serv_agg == NULL)
-               q->in_serv_agg = agg;
-       else if (agg != q->in_serv_agg)
-               qfq_schedule_agg(q, agg);
+       qfq_activate_agg(q, agg, enqueue);
 
        return err;
 }
@@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
                /* group was surely ineligible, remove */
                __clear_bit(grp->index, &q->bitmaps[IR]);
                __clear_bit(grp->index, &q->bitmaps[IB]);
-       } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
+       } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
+                  q->in_serv_agg == NULL)
                q->V = roundedS;
 
        grp->S = roundedS;
@@ -1284,8 +1303,15 @@ skip_update:
 static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
                             enum update_reason reason)
 {
+       agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */
+
        qfq_update_agg_ts(q, agg, reason);
-       qfq_schedule_agg(q, agg);
+       if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
+               q->in_serv_agg = agg; /* start serving this aggregate */
+                /* update V: to be in service, agg must be eligible */
+               q->oldV = q->V = agg->S;
+       } else if (agg != q->in_serv_agg)
+               qfq_schedule_agg(q, agg);
 }
 
 static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
@@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
                        __set_bit(grp->index, &q->bitmaps[s]);
                }
        }
-
-       qfq_update_eligible(q);
 }
 
 static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
index 2b3ef03c60984050d542834433bd9a1e0ef6645a..12ed45dbe75d6b779fd0dcf4d38f6126e23c03fb 100644 (file)
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
        /* SCTP-AUTH extensions*/
        INIT_LIST_HEAD(&ep->endpoint_shared_keys);
-       null_key = sctp_auth_shkey_create(0, GFP_KERNEL);
+       null_key = sctp_auth_shkey_create(0, gfp);
        if (!null_key)
                goto nomem;
 
index c99458df3f3fd458eac2d5f5388b6fba6aed4e3a..b9070736b8d9a24383be81b1764d191e854d3e63 100644 (file)
@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
        if (len < sizeof(sctp_assoc_t))
                return -EINVAL;
 
+       /* Allow the struct to grow and fill in as much as possible */
+       len = min_t(size_t, len, sizeof(sas));
+
        if (copy_from_user(&sas, optval, len))
                return -EFAULT;
 
@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
        /* Mark beginning of a new observation period */
        asoc->stats.max_obs_rto = asoc->rto_min;
 
-       /* Allow the struct to grow and fill in as much as possible */
-       len = min_t(size_t, len, sizeof(sas));
-
        if (put_user(len, optlen))
                return -EFAULT;
 
index 442ad4ed6315fab99ec5b78d012e65a8236e2fae..825ea94415b39818f637b54c5abe112394edc322 100644 (file)
@@ -41,8 +41,6 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-#define MAX_KMALLOC_SIZE       131072
-
 static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
                                            __u16 out);
 
@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
        int size;
 
        size = sctp_ssnmap_size(in, out);
-       if (size <= MAX_KMALLOC_SIZE)
+       if (size <= KMALLOC_MAX_SIZE)
                retval = kmalloc(size, gfp);
        else
                retval = (struct sctp_ssnmap *)
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
        return retval;
 
 fail_map:
-       if (size <= MAX_KMALLOC_SIZE)
+       if (size <= KMALLOC_MAX_SIZE)
                kfree(retval);
        else
                free_pages((unsigned long)retval, get_order(size));
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
                int size;
 
                size = sctp_ssnmap_size(map->in.len, map->out.len);
-               if (size <= MAX_KMALLOC_SIZE)
+               if (size <= KMALLOC_MAX_SIZE)
                        kfree(map);
                else
                        free_pages((unsigned long)map, get_order(size));
index 5f25e0c92c31e48460536cd2bb0fc29549172b93..396c45174e5b696d90c6940d7aa0ce511c70293c 100644 (file)
@@ -51,7 +51,7 @@
 static void sctp_tsnmap_update(struct sctp_tsnmap *map);
 static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
                                     __u16 len, __u16 *start, __u16 *end);
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
 
 /* Initialize a block of memory as a tsnmap.  */
 struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
 
        gap = tsn - map->base_tsn;
 
-       if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
+       if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
                return -ENOMEM;
 
        if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
        return ngaps;
 }
 
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
 {
        unsigned long *new;
        unsigned long inc;
        u16  len;
 
-       if (gap >= SCTP_TSN_MAP_SIZE)
+       if (size > SCTP_TSN_MAP_SIZE)
                return 0;
 
-       inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+       inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
        len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
 
        new = kzalloc(len>>3, GFP_ATOMIC);
        if (!new)
                return 0;
 
-       bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
+       bitmap_copy(new, map->tsn_map,
+               map->max_tsn_seen - map->cumulative_tsn_ack_point);
        kfree(map->tsn_map);
        map->tsn_map = new;
        map->len = len;
index ada17464b65bf23a089c036fba47989a2a1d4b8b..0fd5b3d2df03158d17f0d356825209e571e419d7 100644 (file)
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
        struct sk_buff_head temp;
        struct sctp_ulpevent *event;
+       int event_eor = 0;
 
        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        /* Send event to the ULP.  'event' is the sctp_ulpevent for
         * very first SKB on the 'temp' list.
         */
-       if (event)
+       if (event) {
+               event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                sctp_ulpq_tail_event(ulpq, event);
+       }
 
-       return 0;
+       return event_eor;
 }
 
 /* Add a new event for propagation to the ULP.  */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
                ctsn = cevent->tsn;
 
                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+               case SCTP_DATA_FIRST_FRAG:
+                       if (!first_frag)
+                               return NULL;
+                       goto done;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
-                       } else if (next_tsn == ctsn)
+                       } else if (next_tsn == ctsn) {
                                next_tsn++;
-                       else
+                               last_frag = pos;
+                       } else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
                        } else
                                goto done;
                        break;
+
+               case SCTP_DATA_LAST_FRAG:
+                       if (!first_frag)
+                               return NULL;
+                       else
+                               goto done;
+                       break;
+
                default:
                        return NULL;
                }
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                struct sk_buff_head *list, __u16 needed)
 {
        __u16 freed = 0;
-       __u32 tsn;
-       struct sk_buff *skb;
+       __u32 tsn, last_tsn;
+       struct sk_buff *skb, *flist, *last;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;
 
        tsnmap = &ulpq->asoc->peer.tsn_map;
 
-       while ((skb = __skb_dequeue_tail(list)) != NULL) {
-               freed += skb_headlen(skb);
+       while ((skb = skb_peek_tail(list)) != NULL) {
                event = sctp_skb2event(skb);
                tsn = event->tsn;
 
+               /* Don't renege below the Cumulative TSN ACK Point. */
+               if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+                       break;
+
+               /* Events in ordering queue may have multiple fragments
+                * corresponding to additional TSNs.  Sum the total
+                * freed space; find the last TSN.
+                */
+               freed += skb_headlen(skb);
+               flist = skb_shinfo(skb)->frag_list;
+               for (last = flist; flist; flist = flist->next) {
+                       last = flist;
+                       freed += skb_headlen(last);
+               }
+               if (last)
+                       last_tsn = sctp_skb2event(last)->tsn;
+               else
+                       last_tsn = tsn;
+
+               /* Unlink the event, then renege all applicable TSNs. */
+               __skb_unlink(skb, list);
                sctp_ulpevent_free(event);
-               sctp_tsnmap_renege(tsnmap, tsn);
+               while (TSN_lte(tsn, last_tsn)) {
+                       sctp_tsnmap_renege(tsnmap, tsn);
+                       tsn++;
+               }
                if (freed >= needed)
                        return freed;
        }
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;
+       __u32 ctsn;
+       struct sk_buff *skb;
 
        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);
 
        /* If the association is already in Partial Delivery mode
-        * we have noting to do.
+        * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;
 
+       /* Data must be at or below the Cumulative TSN ACK Point to
+        * start partial delivery.
+        */
+       skb = skb_peek(&asoc->ulpq.reasm);
+       if (skb != NULL) {
+               ctsn = sctp_skb2event(skb)->tsn;
+               if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+                       return;
+       }
+
        /* If the user enabled fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        }
        /* If able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
-               __u32 tsn;
-               tsn = ntohl(chunk->subh.data_hdr->tsn);
-               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-               sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-               sctp_ulpq_partial_delivery(ulpq, gfp);
+               int retval;
+               retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+               /*
+                * Enter partial delivery if chunk has not been
+                * delivered; otherwise, drain the reassembly queue.
+                */
+               if (retval <= 0)
+                       sctp_ulpq_partial_delivery(ulpq, gfp);
+               else if (retval == 1)
+                       sctp_ulpq_reasm_drain(ulpq);
        }
 
        sk_mem_reclaim(asoc->base.sk);