Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 3 May 2015 20:22:32 +0000 (13:22 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 3 May 2015 20:22:32 +0000 (13:22 -0700)
Pull SCSI fixes from James Bottomley:
 "This is three logical fixes (as 5 patches).

  The 3ware class of drivers were causing an oops with multiqueue by
  tearing down the command mappings after completing the command (where
  the variables in the command used to tear down the mapping were
  no-longer valid).  There's also a fix for the qnap iscsi target which
  was choking on us sending it commands that were too long and a fix for
  the reworked aha1542 allocating GFP_KERNEL under a lock"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  3w-9xxx: fix command completion race
  3w-xxxx: fix command completion race
  3w-sas: fix command completion race
  aha1542: Allocate memory before taking a lock
  SCSI: add 1024 max sectors black list flag

188 files changed:
Documentation/kernel-parameters.txt
Documentation/module-signing.txt
Documentation/networking/mpls-sysctl.txt
Documentation/networking/scaling.txt
Documentation/powerpc/transactional_memory.txt
MAINTAINERS
arch/arm64/Kconfig
arch/arm64/include/asm/barrier.h
arch/arm64/kernel/perf_event.c
arch/arm64/mm/dma-mapping.c
arch/powerpc/include/uapi/asm/tm.h
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/idle_power7.S
arch/powerpc/kvm/book3s_xics.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/pseries/dlpar.c
arch/s390/Kconfig
arch/s390/crypto/crypt_s390.h
arch/s390/crypto/prng.c
arch/s390/include/asm/kexec.h
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/pgtable.h
arch/s390/mm/hugetlbpage.c
arch/s390/mm/pgtable.c
arch/tile/kernel/setup.c
arch/x86/include/asm/pvclock.h
arch/x86/kernel/pvclock.c
arch/x86/kvm/x86.c
arch/x86/vdso/vclock_gettime.c
drivers/acpi/sbs.c
drivers/block/rbd.c
drivers/cpuidle/cpuidle.c
drivers/dma/Kconfig
drivers/dma/dmaengine.c
drivers/dma/sh/usb-dmac.c
drivers/md/dm-ioctl.c
drivers/md/dm.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bonding_priv.h [new file with mode: 0644]
drivers/net/can/Kconfig
drivers/net/can/usb/kvaser_usb.c
drivers/net/ethernet/8390/etherh.c
drivers/net/ethernet/altera/altera_msgdmahw.h
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/arc/Kconfig
drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mdio-mux-gpio.c
drivers/net/ppp/ppp_mppe.c
drivers/net/vxlan.c
drivers/s390/char/con3215.c
drivers/sh/pm_runtime.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/of_serial.c
drivers/tty/serial/samsung.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/uartlite.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/tty_ioctl.c
drivers/usb/chipidea/otg_fsm.c
drivers/usb/class/cdc-acm.c
drivers/usb/host/ehci-msm.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/uas.c
drivers/usb/storage/usb.c
fs/btrfs/delayed-inode.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/volumes.c
include/acpi/actypes.h
include/linux/kexec.h
include/linux/netdevice.h
include/linux/netfilter_bridge.h
include/linux/rhashtable.h
include/linux/rtnetlink.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/tty.h
include/linux/usb_usual.h
include/net/bonding.h
include/net/inet_connection_sock.h
include/net/request_sock.h
include/sound/designware_i2s.h
include/sound/emu10k1.h
include/sound/soc-dapm.h
include/sound/soc.h
include/sound/spear_dma.h
include/uapi/linux/virtio_ring.h
kernel/Makefile
kernel/bpf/core.c
kernel/kexec.c
kernel/sched/core.c
kernel/sched/idle.c
lib/rhashtable.c
net/bridge/br_mdb.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/core/dev.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dsa/dsa.c
net/ipv4/inet_connection_sock.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv6/ip6_gre.c
net/ipv6/tcp_ipv6.c
net/mpls/af_mpls.c
net/mpls/internal.h
net/netfilter/nf_tables_api.c
net/netfilter/nft_reject.c
net/netfilter/nft_reject_inet.c
net/netlink/af_netlink.c
net/sched/act_connmark.c
net/tipc/bearer.c
net/tipc/link.c
net/tipc/server.c
net/tipc/socket.c
net/unix/garbage.c
sound/pci/emu10k1/emu10k1.c
sound/pci/emu10k1/emu10k1_callback.c
sound/pci/emu10k1/emu10k1_main.c
sound/pci/emu10k1/emupcm.c
sound/pci/emu10k1/memory.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/thinkpad_helper.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5677.c
sound/soc/codecs/tfa9879.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/Makefile
sound/soc/intel/baytrail/sst-baytrail-ipc.c
sound/soc/intel/haswell/sst-haswell-ipc.c
sound/soc/qcom/lpass-cpu.c
sound/soc/samsung/s3c24xx-i2s.c
sound/soc/sh/rcar/dma.c
sound/synth/emux/emux_oss.c
sound/synth/emux/emux_seq.c
tools/testing/selftests/powerpc/pmu/Makefile
tools/testing/selftests/powerpc/tm/Makefile

index f6befa9855c128b193620cd83a0d654a9e5428df..61ab1628a057cc2c4d8b11d892d834f7e5f7773a 100644 (file)
@@ -3787,6 +3787,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                        READ_CAPACITY_16 command);
                                f = NO_REPORT_OPCODES (don't use report opcodes
                                        command, uas only);
+                               g = MAX_SECTORS_240 (don't transfer more than
+                                       240 sectors at a time, uas only);
                                h = CAPACITY_HEURISTICS (decrease the
                                        reported device capacity by one
                                        sector if the number is odd);
index 09c2382ad0556b196a3b4bb941b9e86597e8ebc5..c72702ec1ded8b0b5d728e6cd0519a5862d2d3c7 100644 (file)
@@ -119,9 +119,9 @@ Most notably, in the x509.genkey file, the req_distinguished_name section
 should be altered from the default:
 
        [ req_distinguished_name ]
-       O = Magrathea
-       CN = Glacier signing key
-       emailAddress = slartibartfast@magrathea.h2g2
+       #O = Unspecified company
+       CN = Build time autogenerated kernel key
+       #emailAddress = unspecified.user@unspecified.company
 
 The generated RSA key size can also be set with:
 
index 639ddf0ece9b5fdd20bcc858c3067d4e20407163..9ed15f86c17c86ffa69fa3527e932b62f4b9ee20 100644 (file)
@@ -18,3 +18,12 @@ platform_labels - INTEGER
 
        Possible values: 0 - 1048575
        Default: 0
+
+conf/<interface>/input - BOOL
+       Control whether packets can be input on this interface.
+
+       If disabled, packets will be discarded without further
+       processing.
+
+       0 - disabled (default)
+       not 0 - enabled
index cbfac0949635c1d109930b051e6c4d96dc4c74d1..59f4db2a0c85c02df4f6cee3176ceb173333cac1 100644 (file)
@@ -282,7 +282,7 @@ following is true:
 
 - The current CPU's queue head counter >= the recorded tail counter
   value in rps_dev_flow[i]
-- The current CPU is unset (equal to RPS_NO_CPU)
+- The current CPU is unset (>= nr_cpu_ids)
 - The current CPU is offline
 
 After this check, the packet is sent to the (possibly updated) current
index ba0a2a4a54ba1ffcb484786381b91f5113a62ad6..ded69794a5c09da762db7c4c61cc3d23e619d0fa 100644 (file)
@@ -74,23 +74,22 @@ Causes of transaction aborts
 Syscalls
 ========
 
-Syscalls made from within an active transaction will not be performed and the
-transaction will be doomed by the kernel with the failure code TM_CAUSE_SYSCALL
-| TM_CAUSE_PERSISTENT.
+Performing syscalls from within transaction is not recommended, and can lead
+to unpredictable results.
 
-Syscalls made from within a suspended transaction are performed as normal and
-the transaction is not explicitly doomed by the kernel.  However, what the
-kernel does to perform the syscall may result in the transaction being doomed
-by the hardware.  The syscall is performed in suspended mode so any side
-effects will be persistent, independent of transaction success or failure.  No
-guarantees are provided by the kernel about which syscalls will affect
-transaction success.
+Syscalls do not by design abort transactions, but beware: The kernel code will
+not be running in transactional state.  The effect of syscalls will always
+remain visible, but depending on the call they may abort your transaction as a
+side-effect, read soon-to-be-aborted transactional data that should not remain
+invisible, etc.  If you constantly retry a transaction that constantly aborts
+itself by calling a syscall, you'll have a livelock & make no progress.
 
-Care must be taken when relying on syscalls to abort during active transactions
-if the calls are made via a library.  Libraries may cache values (which may
-give the appearance of success) or perform operations that cause transaction
-failure before entering the kernel (which may produce different failure codes).
-Examples are glibc's getpid() and lazy symbol resolution.
+Simple syscalls (e.g. sigprocmask()) "could" be OK.  Even things like write()
+from, say, printf() should be OK as long as the kernel does not access any
+memory that was accessed transactionally.
+
+Consider any syscalls that happen to work as debug-only -- not recommended for
+production use.  Best to queue them up till after the transaction is over.
 
 
 Signals
@@ -177,7 +176,8 @@ kernel aborted a transaction:
  TM_CAUSE_RESCHED       Thread was rescheduled.
  TM_CAUSE_TLBI          Software TLB invalid.
  TM_CAUSE_FAC_UNAV      FP/VEC/VSX unavailable trap.
- TM_CAUSE_SYSCALL       Syscall from active transaction.
+ TM_CAUSE_SYSCALL       Currently unused; future syscalls that must abort
+                        transactions for consistency will use this.
  TM_CAUSE_SIGNAL        Signal delivered.
  TM_CAUSE_MISC          Currently unused.
  TM_CAUSE_ALIGNMENT     Alignment fault.
index 2e5bbc0d68b26e188e2d7e8154cf80e47fabdda1..16227759dfa81db156234b93b494bca6673e1fab 100644 (file)
@@ -10523,7 +10523,6 @@ F:      include/linux/virtio_console.h
 F:     include/uapi/linux/virtio_console.h
 
 VIRTIO CORE, NET AND BLOCK DRIVERS
-M:     Rusty Russell <rusty@rustcorp.com.au>
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 L:     virtualization@lists.linux-foundation.org
 S:     Maintained
index 4269dba63cf165d3590c16700896dc44726a898f..7796af4b1d6f65f6352ed40cd6dfcaf580cfd63d 100644 (file)
@@ -31,6 +31,7 @@ config ARM64
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
+       select GENERIC_IRQ_SHOW_LEVEL
        select GENERIC_PCI_IOMAP
        select GENERIC_SCHED_CLOCK
        select GENERIC_SMP_IDLE_THREAD
index a5abb0062d6e943d67ca4ff5479e887194482367..71f19c4dc0dee8c8a74bbdea252f73432e0b1aa5 100644 (file)
@@ -65,6 +65,14 @@ do {                                                                 \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile ("stlrb %w1, %0"                           \
+                               : "=Q" (*p) : "r" (v) : "memory");      \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile ("stlrh %w1, %0"                           \
+                               : "=Q" (*p) : "r" (v) : "memory");      \
+               break;                                                  \
        case 4:                                                         \
                asm volatile ("stlr %w1, %0"                            \
                                : "=Q" (*p) : "r" (v) : "memory");      \
@@ -81,6 +89,14 @@ do {                                                                 \
        typeof(*p) ___p1;                                               \
        compiletime_assert_atomic_type(*p);                             \
        switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile ("ldarb %w0, %1"                           \
+                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile ("ldarh %w0, %1"                           \
+                       : "=r" (___p1) : "Q" (*p) : "memory");          \
+               break;                                                  \
        case 4:                                                         \
                asm volatile ("ldar %w0, %1"                            \
                        : "=r" (___p1) : "Q" (*p) : "memory");          \
index 195991dadc3772c67a084396d28aa2a7c8b2019e..23f25acf43a9021ee9b4ad0f79e3b3e5d6ef49c4 100644 (file)
@@ -1310,7 +1310,7 @@ static const struct of_device_id armpmu_of_device_ids[] = {
 
 static int armpmu_device_probe(struct platform_device *pdev)
 {
-       int i, *irqs;
+       int i, irq, *irqs;
 
        if (!cpu_pmu)
                return -ENODEV;
@@ -1319,6 +1319,11 @@ static int armpmu_device_probe(struct platform_device *pdev)
        if (!irqs)
                return -ENOMEM;
 
+       /* Don't bother with PPIs; they're already affine */
+       irq = platform_get_irq(pdev, 0);
+       if (irq >= 0 && irq_is_percpu(irq))
+               return 0;
+
        for (i = 0; i < pdev->num_resources; ++i) {
                struct device_node *dn;
                int cpu;
@@ -1327,7 +1332,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
                                      i);
                if (!dn) {
                        pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
-                               of_node_full_name(dn), i);
+                               of_node_full_name(pdev->dev.of_node), i);
                        break;
                }
 
index ef7d112f5ce0df9ca1c1a793301c2c1b3874e23f..b0bd4e5fd5cf9c2599ddbc509bccb233838638a4 100644 (file)
@@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 
                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
-               if (flags & __GFP_ZERO)
-                       memset(ptr, 0, size);
+               memset(ptr, 0, size);
        }
 
        return ptr;
@@ -105,7 +104,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
                struct page *page;
                void *addr;
 
-               size = PAGE_ALIGN(size);
                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                        get_order(size));
                if (!page)
@@ -113,8 +111,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
-               if (flags & __GFP_ZERO)
-                       memset(addr, 0, size);
+               memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
@@ -195,6 +192,8 @@ static void __dma_free(struct device *dev, size_t size,
 {
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
+       size = PAGE_ALIGN(size);
+
        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
index 5047659815a54ffd69037b245eebd9f6c64a679d..5d836b7c1176242ae2c943c0bcc81cf6bcb031cf 100644 (file)
@@ -11,7 +11,7 @@
 #define TM_CAUSE_RESCHED       0xde
 #define TM_CAUSE_TLBI          0xdc
 #define TM_CAUSE_FAC_UNAV      0xda
-#define TM_CAUSE_SYSCALL       0xd8
+#define TM_CAUSE_SYSCALL       0xd8  /* future use */
 #define TM_CAUSE_MISC          0xd6  /* future use */
 #define TM_CAUSE_SIGNAL                0xd4
 #define TM_CAUSE_ALIGNMENT     0xd2
index 44b480e3a5afd1998ce7f92afac532c8926a7aba..9ee61d15653d6ec46546b93ea72601072176e4b7 100644 (file)
@@ -749,21 +749,24 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
                eeh_unfreeze_pe(pe, false);
                eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
                eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
+               eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
                break;
        case pcie_hot_reset:
+               eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
                eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
                eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
                eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
                eeh_ops->reset(pe, EEH_RESET_HOT);
                break;
        case pcie_warm_reset:
+               eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
                eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
                eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
                eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
                eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
                break;
        default:
-               eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+               eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED);
                return -EINVAL;
        };
 
@@ -1058,6 +1061,9 @@ void eeh_add_device_early(struct pci_dn *pdn)
        if (!edev || !eeh_enabled())
                return;
 
+       if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
+               return;
+
        /* USB Bus children of PCI devices will not have BUID's */
        phb = edev->phb;
        if (NULL == phb ||
@@ -1112,6 +1118,9 @@ void eeh_add_device_late(struct pci_dev *dev)
                return;
        }
 
+       if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+               eeh_ops->probe(pdn, NULL);
+
        /*
         * The EEH cache might not be removed correctly because of
         * unbalanced kref to the device during unplug time, which
index 8ca9434c40e6a705e1eccb8e8ddcca727bf68044..afbc20019c2efba2b81b7cd6298941753d9776b5 100644 (file)
@@ -34,7 +34,6 @@
 #include <asm/ftrace.h>
 #include <asm/hw_irq.h>
 #include <asm/context_tracking.h>
-#include <asm/tm.h>
 
 /*
  * System calls.
@@ -146,24 +145,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
        andi.   r11,r10,_TIF_SYSCALL_DOTRACE
        bne     syscall_dotrace
 .Lsyscall_dotrace_cont:
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-       b       1f
-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
-       extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-       beq+    1f
-
-       /* Doom the transaction and don't perform the syscall: */
-       mfmsr   r11
-       li      r12, 1
-       rldimi  r11, r12, MSR_TM_LG, 63-MSR_TM_LG
-       mtmsrd  r11, 0
-       li      r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-       TABORT(R11)
-
-       b       .Lsyscall_exit
-1:
-#endif
        cmpldi  0,r0,NR_syscalls
        bge-    syscall_enosys
 
index eeaa0d5f69d5e60271ccca1790aaf98559ca92fa..ccde8f084ce426ab382964ec5e2eba389317be44 100644 (file)
@@ -501,9 +501,11 @@ BEGIN_FTR_SECTION
        CHECK_HMI_INTERRUPT
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
        ld      r1,PACAR1(r13)
+       ld      r6,_CCR(r1)
        ld      r4,_MSR(r1)
        ld      r5,_NIP(r1)
        addi    r1,r1,INT_FRAME_SIZE
+       mtcr    r6
        mtspr   SPRN_SRR1,r4
        mtspr   SPRN_SRR0,r5
        rfid
index 8f3e6cc54d95e1a5b170e8c149db536cbe042308..c6ca7db646735428fb14bde6af9ed1eb68b98351 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/err.h>
 #include <linux/gfp.h>
 #include <linux/anon_inodes.h>
+#include <linux/spinlock.h>
 
 #include <asm/uaccess.h>
 #include <asm/kvm_book3s.h>
@@ -20,7 +21,6 @@
 #include <asm/xics.h>
 #include <asm/debug.h>
 #include <asm/time.h>
-#include <asm/spinlock.h>
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
index 920c252d1f49329616eddb2cf3ba4f2ec1f202b3..f8bc950efcae39a63f213290b65d36fec1667b60 100644 (file)
@@ -2693,7 +2693,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
                hose->last_busno = 0xff;
        }
        hose->private_data = phb;
-       hose->controller_ops = pnv_pci_controller_ops;
        phb->hub_id = hub_id;
        phb->opal_id = phb_id;
        phb->type = ioda_type;
@@ -2812,6 +2811,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
        pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook;
        pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment;
        pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus;
+       hose->controller_ops = pnv_pci_controller_ops;
 
 #ifdef CONFIG_PCI_IOV
        ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
index b4b11096ea8b74c3695c46deff77c25d2409f8f4..019d34aaf054bc843d6a2d5ac531e9207e6c42ab 100644 (file)
@@ -412,6 +412,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
        if (rc)
                return -EINVAL;
 
+       rc = dlpar_acquire_drc(drc_index);
+       if (rc)
+               return -EINVAL;
+
        parent = of_find_node_by_path("/cpus");
        if (!parent)
                return -ENODEV;
@@ -422,12 +426,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
 
        of_node_put(parent);
 
-       rc = dlpar_acquire_drc(drc_index);
-       if (rc) {
-               dlpar_free_cc_nodes(dn);
-               return -EINVAL;
-       }
-
        rc = dlpar_attach_node(dn);
        if (rc) {
                dlpar_release_drc(drc_index);
index 8e58c614c37d7203e39f389ab33e31b8542328c2..b06dc383926846c73e85a6c635ff0fac0a3ddcab 100644 (file)
@@ -115,7 +115,7 @@ config S390
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-       select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z9_109_FEATURES
+       select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
        select HAVE_CMPXCHG_DOUBLE
        select HAVE_CMPXCHG_LOCAL
        select HAVE_DEBUG_KMEMLEAK
index ba3b2aefddf55fa6dfa5a75f947273e0196a65e0..d9c4c313fbc61d5bbf73c8a3c7f88eb74c9d6cee 100644 (file)
@@ -3,9 +3,10 @@
  *
  * Support for s390 cryptographic instructions.
  *
- *   Copyright IBM Corp. 2003, 2007
+ *   Copyright IBM Corp. 2003, 2015
  *   Author(s): Thomas Spatzier
  *             Jan Glauber (jan.glauber@de.ibm.com)
+ *             Harald Freudenberger (freude@de.ibm.com)
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
 #define CRYPT_S390_MSA 0x1
 #define CRYPT_S390_MSA3        0x2
 #define CRYPT_S390_MSA4        0x4
+#define CRYPT_S390_MSA5        0x8
 
 /* s390 cryptographic operations */
 enum crypt_s390_operations {
-       CRYPT_S390_KM   = 0x0100,
-       CRYPT_S390_KMC  = 0x0200,
-       CRYPT_S390_KIMD = 0x0300,
-       CRYPT_S390_KLMD = 0x0400,
-       CRYPT_S390_KMAC = 0x0500,
-       CRYPT_S390_KMCTR = 0x0600
+       CRYPT_S390_KM    = 0x0100,
+       CRYPT_S390_KMC   = 0x0200,
+       CRYPT_S390_KIMD  = 0x0300,
+       CRYPT_S390_KLMD  = 0x0400,
+       CRYPT_S390_KMAC  = 0x0500,
+       CRYPT_S390_KMCTR = 0x0600,
+       CRYPT_S390_PPNO  = 0x0700
 };
 
 /*
@@ -138,6 +141,16 @@ enum crypt_s390_kmac_func {
        KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
 };
 
+/*
+ * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER
+ * OPERATION) instruction
+ */
+enum crypt_s390_ppno_func {
+       PPNO_QUERY            = CRYPT_S390_PPNO | 0,
+       PPNO_SHA512_DRNG_GEN  = CRYPT_S390_PPNO | 3,
+       PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83
+};
+
 /**
  * crypt_s390_km:
  * @func: the function code passed to KM; see crypt_s390_km_func
@@ -162,11 +175,11 @@ static inline int crypt_s390_km(long func, void *param,
        int ret;
 
        asm volatile(
-               "0:     .insn   rre,0xb92e0000,%3,%1 \n" /* KM opcode */
-               "1:     brc     1,0b \n" /* handle partial completion */
+               "0:     .insn   rre,0xb92e0000,%3,%1\n" /* KM opcode */
+               "1:     brc     1,0b\n" /* handle partial completion */
                "       la      %0,0\n"
                "2:\n"
-               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
                : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
                : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
        if (ret < 0)
@@ -198,11 +211,11 @@ static inline int crypt_s390_kmc(long func, void *param,
        int ret;
 
        asm volatile(
-               "0:     .insn   rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
-               "1:     brc     1,0b \n" /* handle partial completion */
+               "0:     .insn   rre,0xb92f0000,%3,%1\n" /* KMC opcode */
+               "1:     brc     1,0b\n" /* handle partial completion */
                "       la      %0,0\n"
                "2:\n"
-               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
                : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
                : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
        if (ret < 0)
@@ -233,11 +246,11 @@ static inline int crypt_s390_kimd(long func, void *param,
        int ret;
 
        asm volatile(
-               "0:     .insn   rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
-               "1:     brc     1,0b \n" /* handle partial completion */
+               "0:     .insn   rre,0xb93e0000,%1,%1\n" /* KIMD opcode */
+               "1:     brc     1,0b\n" /* handle partial completion */
                "       la      %0,0\n"
                "2:\n"
-               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
                : "=d" (ret), "+a" (__src), "+d" (__src_len)
                : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
        if (ret < 0)
@@ -267,11 +280,11 @@ static inline int crypt_s390_klmd(long func, void *param,
        int ret;
 
        asm volatile(
-               "0:     .insn   rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
-               "1:     brc     1,0b \n" /* handle partial completion */
+               "0:     .insn   rre,0xb93f0000,%1,%1\n" /* KLMD opcode */
+               "1:     brc     1,0b\n" /* handle partial completion */
                "       la      %0,0\n"
                "2:\n"
-               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
                : "=d" (ret), "+a" (__src), "+d" (__src_len)
                : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
        if (ret < 0)
@@ -302,11 +315,11 @@ static inline int crypt_s390_kmac(long func, void *param,
        int ret;
 
        asm volatile(
-               "0:     .insn   rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
-               "1:     brc     1,0b \n" /* handle partial completion */
+               "0:     .insn   rre,0xb91e0000,%1,%1\n" /* KLAC opcode */
+               "1:     brc     1,0b\n" /* handle partial completion */
                "       la      %0,0\n"
                "2:\n"
-               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
                : "=d" (ret), "+a" (__src), "+d" (__src_len)
                : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
        if (ret < 0)
@@ -340,11 +353,11 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
        int ret = -1;
 
        asm volatile(
-               "0:     .insn   rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */
-               "1:     brc     1,0b \n" /* handle partial completion */
+               "0:     .insn   rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */
+               "1:     brc     1,0b\n" /* handle partial completion */
                "       la      %0,0\n"
                "2:\n"
-               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
                : "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
                  "+a" (__ctr)
                : "d" (__func), "a" (__param) : "cc", "memory");
@@ -353,6 +366,47 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
        return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 
+/**
+ * crypt_s390_ppno:
+ * @func: the function code passed to PPNO; see crypt_s390_ppno_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @dest_len: size of destination memory area in bytes
+ * @seed: address of seed data
+ * @seed_len: size of seed data in bytes
+ *
+ * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
+ * operation of the CPU.
+ *
+ * Returns -1 for failure, 0 for the query func, number of random
+ * bytes stored in dest buffer for generate function
+ */
+static inline int crypt_s390_ppno(long func, void *param,
+                                 u8 *dest, long dest_len,
+                                 const u8 *seed, long seed_len)
+{
+       register long  __func     asm("0") = func & CRYPT_S390_FUNC_MASK;
+       register void *__param    asm("1") = param;    /* param block (240 bytes) */
+       register u8   *__dest     asm("2") = dest;     /* buf for recv random bytes */
+       register long  __dest_len asm("3") = dest_len; /* requested random bytes */
+       register const u8 *__seed asm("4") = seed;     /* buf with seed data */
+       register long  __seed_len asm("5") = seed_len; /* bytes in seed buf */
+       int ret = -1;
+
+       asm volatile (
+               "0:     .insn   rre,0xb93c0000,%1,%5\n" /* PPNO opcode */
+               "1:     brc     1,0b\n"   /* handle partial completion */
+               "       la      %0,0\n"
+               "2:\n"
+               EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+               : "+d" (ret), "+a"(__dest), "+d"(__dest_len)
+               : "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len)
+               : "cc", "memory");
+       if (ret < 0)
+               return ret;
+       return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0;
+}
+
 /**
  * crypt_s390_func_available:
  * @func: the function code of the specific function; 0 if op in general
@@ -373,6 +427,9 @@ static inline int crypt_s390_func_available(int func,
                return 0;
        if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
                return 0;
+       if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57))
+               return 0;
+
        switch (func & CRYPT_S390_OP_MASK) {
        case CRYPT_S390_KM:
                ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
@@ -390,8 +447,12 @@ static inline int crypt_s390_func_available(int func,
                ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
                break;
        case CRYPT_S390_KMCTR:
-               ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0,
-                                      NULL);
+               ret = crypt_s390_kmctr(KMCTR_QUERY, &status,
+                                      NULL, NULL, 0, NULL);
+               break;
+       case CRYPT_S390_PPNO:
+               ret = crypt_s390_ppno(PPNO_QUERY, &status,
+                                     NULL, 0, NULL, 0);
                break;
        default:
                return 0;
@@ -419,15 +480,14 @@ static inline int crypt_s390_pcc(long func, void *param)
        int ret = -1;
 
        asm volatile(
-               "0:     .insn   rre,0xb92c0000,0,0 \n" /* PCC opcode */
-               "1:     brc     1,0b \n" /* handle partial completion */
+               "0:     .insn   rre,0xb92c0000,0,0\n" /* PCC opcode */
+               "1:     brc     1,0b\n" /* handle partial completion */
                "       la      %0,0\n"
                "2:\n"
-               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+               EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
                : "+d" (ret)
                : "d" (__func), "a" (__param) : "cc", "memory");
        return ret;
 }
 
-
 #endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
index 94a35a4c1b486c02200f6be510ecafd8a59701c6..1f374b39a4ec9a933249b3924d393d118084112f 100644 (file)
 /*
- * Copyright IBM Corp. 2006, 2007
+ * Copyright IBM Corp. 2006, 2015
  * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ *           Harald Freudenberger <freude@de.ibm.com>
  * Driver for the s390 pseudo random number generator
  */
+
+#define KMSG_COMPONENT "prng"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/fs.h>
+#include <linux/fips.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/device.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/mutex.h>
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <asm/debug.h>
 #include <asm/uaccess.h>
+#include <asm/timex.h>
 
 #include "crypt_s390.h"
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>");
+MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("s390 PRNG interface");
 
-static int prng_chunk_size = 256;
-module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH);
+
+#define PRNG_MODE_AUTO   0
+#define PRNG_MODE_TDES   1
+#define PRNG_MODE_SHA512  2
+
+static unsigned int prng_mode = PRNG_MODE_AUTO;
+module_param_named(mode, prng_mode, int, 0);
+MODULE_PARM_DESC(prng_mode, "PRNG mode: 0 - auto, 1 - TDES, 2 - SHA512");
+
+
+#define PRNG_CHUNKSIZE_TDES_MIN   8
+#define PRNG_CHUNKSIZE_TDES_MAX   (64*1024)
+#define PRNG_CHUNKSIZE_SHA512_MIN 64
+#define PRNG_CHUNKSIZE_SHA512_MAX (64*1024)
+
+static unsigned int prng_chunk_size = 256;
+module_param_named(chunksize, prng_chunk_size, int, 0);
 MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
 
-static int prng_entropy_limit = 4096;
-module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);
-MODULE_PARM_DESC(prng_entropy_limit,
-       "PRNG add entropy after that much bytes were produced");
+
+#define PRNG_RESEED_LIMIT_TDES          4096
+#define PRNG_RESEED_LIMIT_TDES_LOWER    4096
+#define PRNG_RESEED_LIMIT_SHA512       100000
+#define PRNG_RESEED_LIMIT_SHA512_LOWER 10000
+
+static unsigned int prng_reseed_limit;
+module_param_named(reseed_limit, prng_reseed_limit, int, 0);
+MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
+
 
 /*
  * Any one who considers arithmetical methods of producing random digits is,
  * of course, in a state of sin. -- John von Neumann
  */
 
-struct s390_prng_data {
-       unsigned long count; /* how many bytes were produced */
-       char *buf;
+static int prng_errorflag;
+
+#define PRNG_GEN_ENTROPY_FAILED  1
+#define PRNG_SELFTEST_FAILED    2
+#define PRNG_INSTANTIATE_FAILED  3
+#define PRNG_SEED_FAILED        4
+#define PRNG_RESEED_FAILED      5
+#define PRNG_GEN_FAILED                 6
+
+struct prng_ws_s {
+       u8  parm_block[32];
+       u32 reseed_counter;
+       u64 byte_counter;
 };
 
-static struct s390_prng_data *p;
+struct ppno_ws_s {
+       u32 res;
+       u32 reseed_counter;
+       u64 stream_bytes;
+       u8  V[112];
+       u8  C[112];
+};
 
-/* copied from libica, use a non-zero initial parameter block */
-static unsigned char parm_block[32] = {
-0x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4,
-0x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0,
+struct prng_data_s {
+       struct mutex mutex;
+       union {
+               struct prng_ws_s prngws;
+               struct ppno_ws_s ppnows;
+       };
+       u8 *buf;
+       u32 rest;
+       u8 *prev;
 };
 
-static int prng_open(struct inode *inode, struct file *file)
+static struct prng_data_s *prng_data;
+
+/* initial parameter block for tdes mode, copied from libica */
+static const u8 initial_parm_block[32] __initconst = {
+       0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
+       0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
+       0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
+       0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0 };
+
+
+/*** helper functions ***/
+
+static int generate_entropy(u8 *ebuf, size_t nbytes)
 {
-       return nonseekable_open(inode, file);
+       int n, ret = 0;
+       u8 *pg, *h, hash[32];
+
+       pg = (u8 *) __get_free_page(GFP_KERNEL);
+       if (!pg) {
+               prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
+               return -ENOMEM;
+       }
+
+       while (nbytes) {
+               /* fill page with urandom bytes */
+               get_random_bytes(pg, PAGE_SIZE);
+               /* exor page with stckf values */
+               for (n = 0; n < PAGE_SIZE/sizeof(u64); n++) {
+                       u64 *p = ((u64 *)pg) + n;
+                       *p ^= get_tod_clock_fast();
+               }
+               n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash);
+               if (n < sizeof(hash))
+                       h = hash;
+               else
+                       h = ebuf;
+               /* generate sha256 from this page */
+               if (crypt_s390_kimd(KIMD_SHA_256, h,
+                                   pg, PAGE_SIZE) != PAGE_SIZE) {
+                       prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
+                       ret = -EIO;
+                       goto out;
+               }
+               if (n < sizeof(hash))
+                       memcpy(ebuf, hash, n);
+               ret += n;
+               ebuf += n;
+               nbytes -= n;
+       }
+
+out:
+       free_page((unsigned long)pg);
+       return ret;
 }
 
-static void prng_add_entropy(void)
+
+/*** tdes functions ***/
+
+static void prng_tdes_add_entropy(void)
 {
        __u64 entropy[4];
        unsigned int i;
        int ret;
 
        for (i = 0; i < 16; i++) {
-               ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy,
-                                    (char *)entropy, sizeof(entropy));
+               ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
+                                    (char *)entropy, (char *)entropy,
+                                    sizeof(entropy));
                BUG_ON(ret < 0 || ret != sizeof(entropy));
-               memcpy(parm_block, entropy, sizeof(entropy));
+               memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
        }
 }
 
-static void prng_seed(int nbytes)
+
+static void prng_tdes_seed(int nbytes)
 {
        char buf[16];
        int i = 0;
 
-       BUG_ON(nbytes > 16);
+       BUG_ON(nbytes > sizeof(buf));
+
        get_random_bytes(buf, nbytes);
 
        /* Add the entropy */
        while (nbytes >= 8) {
-               *((__u64 *)parm_block) ^= *((__u64 *)(buf+i));
-               prng_add_entropy();
+               *((__u64 *)prng_data->prngws.parm_block) ^= *((__u64 *)(buf+i));
+               prng_tdes_add_entropy();
                i += 8;
                nbytes -= 8;
        }
-       prng_add_entropy();
+       prng_tdes_add_entropy();
+       prng_data->prngws.reseed_counter = 0;
+}
+
+
+static int __init prng_tdes_instantiate(void)
+{
+       int datalen;
+
+       pr_debug("prng runs in TDES mode with "
+                "chunksize=%d and reseed_limit=%u\n",
+                prng_chunk_size, prng_reseed_limit);
+
+       /* memory allocation, prng_data struct init, mutex init */
+       datalen = sizeof(struct prng_data_s) + prng_chunk_size;
+       prng_data = kzalloc(datalen, GFP_KERNEL);
+       if (!prng_data) {
+               prng_errorflag = PRNG_INSTANTIATE_FAILED;
+               return -ENOMEM;
+       }
+       mutex_init(&prng_data->mutex);
+       prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
+       memcpy(prng_data->prngws.parm_block, initial_parm_block, 32);
+
+       /* initialize the PRNG, add 128 bits of entropy */
+       prng_tdes_seed(16);
+
+       return 0;
 }
 
-static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
-                        loff_t *ppos)
+
+static void prng_tdes_deinstantiate(void)
+{
+       pr_debug("The prng module stopped "
+                "after running in triple DES mode\n");
+       kzfree(prng_data);
+}
+
+
+/*** sha512 functions ***/
+
+static int __init prng_sha512_selftest(void)
 {
-       int chunk, n;
+       /* NIST DRBG testvector for Hash Drbg, Sha-512, Count #0 */
+       static const u8 seed[] __initconst = {
+               0x6b, 0x50, 0xa7, 0xd8, 0xf8, 0xa5, 0x5d, 0x7a,
+               0x3d, 0xf8, 0xbb, 0x40, 0xbc, 0xc3, 0xb7, 0x22,
+               0xd8, 0x70, 0x8d, 0xe6, 0x7f, 0xda, 0x01, 0x0b,
+               0x03, 0xc4, 0xc8, 0x4d, 0x72, 0x09, 0x6f, 0x8c,
+               0x3e, 0xc6, 0x49, 0xcc, 0x62, 0x56, 0xd9, 0xfa,
+               0x31, 0xdb, 0x7a, 0x29, 0x04, 0xaa, 0xf0, 0x25 };
+       static const u8 V0[] __initconst = {
+               0x00, 0xad, 0xe3, 0x6f, 0x9a, 0x01, 0xc7, 0x76,
+               0x61, 0x34, 0x35, 0xf5, 0x4e, 0x24, 0x74, 0x22,
+               0x21, 0x9a, 0x29, 0x89, 0xc7, 0x93, 0x2e, 0x60,
+               0x1e, 0xe8, 0x14, 0x24, 0x8d, 0xd5, 0x03, 0xf1,
+               0x65, 0x5d, 0x08, 0x22, 0x72, 0xd5, 0xad, 0x95,
+               0xe1, 0x23, 0x1e, 0x8a, 0xa7, 0x13, 0xd9, 0x2b,
+               0x5e, 0xbc, 0xbb, 0x80, 0xab, 0x8d, 0xe5, 0x79,
+               0xab, 0x5b, 0x47, 0x4e, 0xdd, 0xee, 0x6b, 0x03,
+               0x8f, 0x0f, 0x5c, 0x5e, 0xa9, 0x1a, 0x83, 0xdd,
+               0xd3, 0x88, 0xb2, 0x75, 0x4b, 0xce, 0x83, 0x36,
+               0x57, 0x4b, 0xf1, 0x5c, 0xca, 0x7e, 0x09, 0xc0,
+               0xd3, 0x89, 0xc6, 0xe0, 0xda, 0xc4, 0x81, 0x7e,
+               0x5b, 0xf9, 0xe1, 0x01, 0xc1, 0x92, 0x05, 0xea,
+               0xf5, 0x2f, 0xc6, 0xc6, 0xc7, 0x8f, 0xbc, 0xf4 };
+       static const u8 C0[] __initconst = {
+               0x00, 0xf4, 0xa3, 0xe5, 0xa0, 0x72, 0x63, 0x95,
+               0xc6, 0x4f, 0x48, 0xd0, 0x8b, 0x5b, 0x5f, 0x8e,
+               0x6b, 0x96, 0x1f, 0x16, 0xed, 0xbc, 0x66, 0x94,
+               0x45, 0x31, 0xd7, 0x47, 0x73, 0x22, 0xa5, 0x86,
+               0xce, 0xc0, 0x4c, 0xac, 0x63, 0xb8, 0x39, 0x50,
+               0xbf, 0xe6, 0x59, 0x6c, 0x38, 0x58, 0x99, 0x1f,
+               0x27, 0xa7, 0x9d, 0x71, 0x2a, 0xb3, 0x7b, 0xf9,
+               0xfb, 0x17, 0x86, 0xaa, 0x99, 0x81, 0xaa, 0x43,
+               0xe4, 0x37, 0xd3, 0x1e, 0x6e, 0xe5, 0xe6, 0xee,
+               0xc2, 0xed, 0x95, 0x4f, 0x53, 0x0e, 0x46, 0x8a,
+               0xcc, 0x45, 0xa5, 0xdb, 0x69, 0x0d, 0x81, 0xc9,
+               0x32, 0x92, 0xbc, 0x8f, 0x33, 0xe6, 0xf6, 0x09,
+               0x7c, 0x8e, 0x05, 0x19, 0x0d, 0xf1, 0xb6, 0xcc,
+               0xf3, 0x02, 0x21, 0x90, 0x25, 0xec, 0xed, 0x0e };
+       static const u8 random[] __initconst = {
+               0x95, 0xb7, 0xf1, 0x7e, 0x98, 0x02, 0xd3, 0x57,
+               0x73, 0x92, 0xc6, 0xa9, 0xc0, 0x80, 0x83, 0xb6,
+               0x7d, 0xd1, 0x29, 0x22, 0x65, 0xb5, 0xf4, 0x2d,
+               0x23, 0x7f, 0x1c, 0x55, 0xbb, 0x9b, 0x10, 0xbf,
+               0xcf, 0xd8, 0x2c, 0x77, 0xa3, 0x78, 0xb8, 0x26,
+               0x6a, 0x00, 0x99, 0x14, 0x3b, 0x3c, 0x2d, 0x64,
+               0x61, 0x1e, 0xee, 0xb6, 0x9a, 0xcd, 0xc0, 0x55,
+               0x95, 0x7c, 0x13, 0x9e, 0x8b, 0x19, 0x0c, 0x7a,
+               0x06, 0x95, 0x5f, 0x2c, 0x79, 0x7c, 0x27, 0x78,
+               0xde, 0x94, 0x03, 0x96, 0xa5, 0x01, 0xf4, 0x0e,
+               0x91, 0x39, 0x6a, 0xcf, 0x8d, 0x7e, 0x45, 0xeb,
+               0xdb, 0xb5, 0x3b, 0xbf, 0x8c, 0x97, 0x52, 0x30,
+               0xd2, 0xf0, 0xff, 0x91, 0x06, 0xc7, 0x61, 0x19,
+               0xae, 0x49, 0x8e, 0x7f, 0xbc, 0x03, 0xd9, 0x0f,
+               0x8e, 0x4c, 0x51, 0x62, 0x7a, 0xed, 0x5c, 0x8d,
+               0x42, 0x63, 0xd5, 0xd2, 0xb9, 0x78, 0x87, 0x3a,
+               0x0d, 0xe5, 0x96, 0xee, 0x6d, 0xc7, 0xf7, 0xc2,
+               0x9e, 0x37, 0xee, 0xe8, 0xb3, 0x4c, 0x90, 0xdd,
+               0x1c, 0xf6, 0xa9, 0xdd, 0xb2, 0x2b, 0x4c, 0xbd,
+               0x08, 0x6b, 0x14, 0xb3, 0x5d, 0xe9, 0x3d, 0xa2,
+               0xd5, 0xcb, 0x18, 0x06, 0x69, 0x8c, 0xbd, 0x7b,
+               0xbb, 0x67, 0xbf, 0xe3, 0xd3, 0x1f, 0xd2, 0xd1,
+               0xdb, 0xd2, 0xa1, 0xe0, 0x58, 0xa3, 0xeb, 0x99,
+               0xd7, 0xe5, 0x1f, 0x1a, 0x93, 0x8e, 0xed, 0x5e,
+               0x1c, 0x1d, 0xe2, 0x3a, 0x6b, 0x43, 0x45, 0xd3,
+               0x19, 0x14, 0x09, 0xf9, 0x2f, 0x39, 0xb3, 0x67,
+               0x0d, 0x8d, 0xbf, 0xb6, 0x35, 0xd8, 0xe6, 0xa3,
+               0x69, 0x32, 0xd8, 0x10, 0x33, 0xd1, 0x44, 0x8d,
+               0x63, 0xb4, 0x03, 0xdd, 0xf8, 0x8e, 0x12, 0x1b,
+               0x6e, 0x81, 0x9a, 0xc3, 0x81, 0x22, 0x6c, 0x13,
+               0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c,
+               0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 };
+
        int ret = 0;
-       int tmp;
+       u8 buf[sizeof(random)];
+       struct ppno_ws_s ws;
+
+       memset(&ws, 0, sizeof(ws));
+
+       /* initial seed */
+       ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+                             &ws, NULL, 0,
+                             seed, sizeof(seed));
+       if (ret < 0) {
+               pr_err("The prng self test seed operation for the "
+                      "SHA-512 mode failed with rc=%d\n", ret);
+               prng_errorflag = PRNG_SELFTEST_FAILED;
+               return -EIO;
+       }
+
+       /* check working states V and C */
+       if (memcmp(ws.V, V0, sizeof(V0)) != 0
+           || memcmp(ws.C, C0, sizeof(C0)) != 0) {
+               pr_err("The prng self test state test "
+                      "for the SHA-512 mode failed\n");
+               prng_errorflag = PRNG_SELFTEST_FAILED;
+               return -EIO;
+       }
+
+       /* generate random bytes */
+       ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+                             &ws, buf, sizeof(buf),
+                             NULL, 0);
+       if (ret < 0) {
+               pr_err("The prng self test generate operation for "
+                      "the SHA-512 mode failed with rc=%d\n", ret);
+               prng_errorflag = PRNG_SELFTEST_FAILED;
+               return -EIO;
+       }
+       ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+                             &ws, buf, sizeof(buf),
+                             NULL, 0);
+       if (ret < 0) {
+               pr_err("The prng self test generate operation for "
+                      "the SHA-512 mode failed with rc=%d\n", ret);
+               prng_errorflag = PRNG_SELFTEST_FAILED;
+               return -EIO;
+       }
+
+       /* check against expected data */
+       if (memcmp(buf, random, sizeof(random)) != 0) {
+               pr_err("The prng self test data test "
+                      "for the SHA-512 mode failed\n");
+               prng_errorflag = PRNG_SELFTEST_FAILED;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+
+static int __init prng_sha512_instantiate(void)
+{
+       int ret, datalen;
+       u8 seed[64];
+
+       pr_debug("prng runs in SHA-512 mode "
+                "with chunksize=%d and reseed_limit=%u\n",
+                prng_chunk_size, prng_reseed_limit);
+
+       /* memory allocation, prng_data struct init, mutex init */
+       datalen = sizeof(struct prng_data_s) + prng_chunk_size;
+       if (fips_enabled)
+               datalen += prng_chunk_size;
+       prng_data = kzalloc(datalen, GFP_KERNEL);
+       if (!prng_data) {
+               prng_errorflag = PRNG_INSTANTIATE_FAILED;
+               return -ENOMEM;
+       }
+       mutex_init(&prng_data->mutex);
+       prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
+
+       /* selftest */
+       ret = prng_sha512_selftest();
+       if (ret)
+               goto outfree;
+
+       /* generate initial seed bytestring, first 48 bytes of entropy */
+       ret = generate_entropy(seed, 48);
+       if (ret != 48)
+               goto outfree;
+       /* followed by 16 bytes of unique nonce */
+       get_tod_clock_ext(seed + 48);
+
+       /* initial seed of the ppno drng */
+       ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+                             &prng_data->ppnows, NULL, 0,
+                             seed, sizeof(seed));
+       if (ret < 0) {
+               prng_errorflag = PRNG_SEED_FAILED;
+               ret = -EIO;
+               goto outfree;
+       }
+
+       /* if fips mode is enabled, generate a first block of random
+          bytes for the FIPS 140-2 Conditional Self Test */
+       if (fips_enabled) {
+               prng_data->prev = prng_data->buf + prng_chunk_size;
+               ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+                                     &prng_data->ppnows,
+                                     prng_data->prev,
+                                     prng_chunk_size,
+                                     NULL, 0);
+               if (ret < 0 || ret != prng_chunk_size) {
+                       prng_errorflag = PRNG_GEN_FAILED;
+                       ret = -EIO;
+                       goto outfree;
+               }
+       }
+
+       return 0;
+
+outfree:
+       kfree(prng_data);
+       return ret;
+}
+
+
+static void prng_sha512_deinstantiate(void)
+{
+       pr_debug("The prng module stopped after running in SHA-512 mode\n");
+       kzfree(prng_data);
+}
+
+
+static int prng_sha512_reseed(void)
+{
+       int ret;
+       u8 seed[32];
+
+       /* generate 32 bytes of fresh entropy */
+       ret = generate_entropy(seed, sizeof(seed));
+       if (ret != sizeof(seed))
+               return ret;
+
+       /* do a reseed of the ppno drng with this bytestring */
+       ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+                             &prng_data->ppnows, NULL, 0,
+                             seed, sizeof(seed));
+       if (ret) {
+               prng_errorflag = PRNG_RESEED_FAILED;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+
+static int prng_sha512_generate(u8 *buf, size_t nbytes)
+{
+       int ret;
+
+       /* reseed needed ? */
+       if (prng_data->ppnows.reseed_counter > prng_reseed_limit) {
+               ret = prng_sha512_reseed();
+               if (ret)
+                       return ret;
+       }
+
+       /* PPNO generate */
+       ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+                             &prng_data->ppnows, buf, nbytes,
+                             NULL, 0);
+       if (ret < 0 || ret != nbytes) {
+               prng_errorflag = PRNG_GEN_FAILED;
+               return -EIO;
+       }
+
+       /* FIPS 140-2 Conditional Self Test */
+       if (fips_enabled) {
+               if (!memcmp(prng_data->prev, buf, nbytes)) {
+                       prng_errorflag = PRNG_GEN_FAILED;
+                       return -EILSEQ;
+               }
+               memcpy(prng_data->prev, buf, nbytes);
+       }
+
+       return ret;
+}
+
+
+/*** file io functions ***/
+
+static int prng_open(struct inode *inode, struct file *file)
+{
+       return nonseekable_open(inode, file);
+}
+
+
+static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
+                             size_t nbytes, loff_t *ppos)
+{
+       int chunk, n, tmp, ret = 0;
+
+       /* lock prng_data struct */
+       if (mutex_lock_interruptible(&prng_data->mutex))
+               return -ERESTARTSYS;
 
-       /* nbytes can be arbitrary length, we split it into chunks */
        while (nbytes) {
-               /* same as in extract_entropy_user in random.c */
                if (need_resched()) {
                        if (signal_pending(current)) {
                                if (ret == 0)
                                        ret = -ERESTARTSYS;
                                break;
                        }
+                       /* give mutex free before calling schedule() */
+                       mutex_unlock(&prng_data->mutex);
                        schedule();
+                       /* occupy mutex again */
+                       if (mutex_lock_interruptible(&prng_data->mutex)) {
+                               if (ret == 0)
+                                       ret = -ERESTARTSYS;
+                               return ret;
+                       }
                }
 
                /*
@@ -112,12 +535,11 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
                /* PRNG only likes multiples of 8 bytes */
                n = (chunk + 7) & -8;
 
-               if (p->count > prng_entropy_limit)
-                       prng_seed(8);
+               if (prng_data->prngws.reseed_counter > prng_reseed_limit)
+                       prng_tdes_seed(8);
 
                /* if the CPU supports PRNG stckf is present too */
-               asm volatile(".insn     s,0xb27c0000,%0"
-                            : "=m" (*((unsigned long long *)p->buf)) : : "cc");
+               *((unsigned long long *)prng_data->buf) = get_tod_clock_fast();
 
                /*
                 * Beside the STCKF the input for the TDES-EDE is the output
@@ -132,35 +554,259 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
                 * Note: you can still get strict X9.17 conformity by setting
                 * prng_chunk_size to 8 bytes.
                */
-               tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n);
-               BUG_ON((tmp < 0) || (tmp != n));
+               tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
+                                    prng_data->buf, prng_data->buf, n);
+               if (tmp < 0 || tmp != n) {
+                       ret = -EIO;
+                       break;
+               }
 
-               p->count += n;
+               prng_data->prngws.byte_counter += n;
+               prng_data->prngws.reseed_counter += n;
 
-               if (copy_to_user(ubuf, p->buf, chunk))
-                       return -EFAULT;
+               if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+                       ret = -EFAULT;
+                       break;
+               }
 
                nbytes -= chunk;
                ret += chunk;
                ubuf += chunk;
        }
+
+       /* unlock prng_data struct */
+       mutex_unlock(&prng_data->mutex);
+
        return ret;
 }
 
-static const struct file_operations prng_fops = {
+
+static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
+                               size_t nbytes, loff_t *ppos)
+{
+       int n, ret = 0;
+       u8 *p;
+
+       /* if errorflag is set do nothing and return 'broken pipe' */
+       if (prng_errorflag)
+               return -EPIPE;
+
+       /* lock prng_data struct */
+       if (mutex_lock_interruptible(&prng_data->mutex))
+               return -ERESTARTSYS;
+
+       while (nbytes) {
+               if (need_resched()) {
+                       if (signal_pending(current)) {
+                               if (ret == 0)
+                                       ret = -ERESTARTSYS;
+                               break;
+                       }
+                       /* give mutex free before calling schedule() */
+                       mutex_unlock(&prng_data->mutex);
+                       schedule();
+                       /* occupy mutex again */
+                       if (mutex_lock_interruptible(&prng_data->mutex)) {
+                               if (ret == 0)
+                                       ret = -ERESTARTSYS;
+                               return ret;
+                       }
+               }
+               if (prng_data->rest) {
+                       /* push left over random bytes from the previous read */
+                       p = prng_data->buf + prng_chunk_size - prng_data->rest;
+                       n = (nbytes < prng_data->rest) ?
+                               nbytes : prng_data->rest;
+                       prng_data->rest -= n;
+               } else {
+                       /* generate one chunk of random bytes into read buf */
+                       p = prng_data->buf;
+                       n = prng_sha512_generate(p, prng_chunk_size);
+                       if (n < 0) {
+                               ret = n;
+                               break;
+                       }
+                       if (nbytes < prng_chunk_size) {
+                               n = nbytes;
+                               prng_data->rest = prng_chunk_size - n;
+                       } else {
+                               n = prng_chunk_size;
+                               prng_data->rest = 0;
+                       }
+               }
+               if (copy_to_user(ubuf, p, n)) {
+                       ret = -EFAULT;
+                       break;
+               }
+               ubuf += n;
+               nbytes -= n;
+               ret += n;
+       }
+
+       /* unlock prng_data struct */
+       mutex_unlock(&prng_data->mutex);
+
+       return ret;
+}
+
+
+/*** sysfs stuff ***/
+
+static const struct file_operations prng_sha512_fops = {
+       .owner          = THIS_MODULE,
+       .open           = &prng_open,
+       .release        = NULL,
+       .read           = &prng_sha512_read,
+       .llseek         = noop_llseek,
+};
+static const struct file_operations prng_tdes_fops = {
        .owner          = THIS_MODULE,
        .open           = &prng_open,
        .release        = NULL,
-       .read           = &prng_read,
+       .read           = &prng_tdes_read,
        .llseek         = noop_llseek,
 };
 
-static struct miscdevice prng_dev = {
+static struct miscdevice prng_sha512_dev = {
+       .name   = "prandom",
+       .minor  = MISC_DYNAMIC_MINOR,
+       .fops   = &prng_sha512_fops,
+};
+static struct miscdevice prng_tdes_dev = {
        .name   = "prandom",
        .minor  = MISC_DYNAMIC_MINOR,
-       .fops   = &prng_fops,
+       .fops   = &prng_tdes_fops,
 };
 
+
+/* chunksize attribute (ro) */
+static ssize_t prng_chunksize_show(struct device *dev,
+                                  struct device_attribute *attr,
+                                  char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size);
+}
+static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL);
+
+/* counter attribute (ro) */
+static ssize_t prng_counter_show(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
+{
+       u64 counter;
+
+       if (mutex_lock_interruptible(&prng_data->mutex))
+               return -ERESTARTSYS;
+       if (prng_mode == PRNG_MODE_SHA512)
+               counter = prng_data->ppnows.stream_bytes;
+       else
+               counter = prng_data->prngws.byte_counter;
+       mutex_unlock(&prng_data->mutex);
+
+       return snprintf(buf, PAGE_SIZE, "%llu\n", counter);
+}
+static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL);
+
+/* errorflag attribute (ro) */
+static ssize_t prng_errorflag_show(struct device *dev,
+                                  struct device_attribute *attr,
+                                  char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag);
+}
+static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL);
+
+/* mode attribute (ro) */
+static ssize_t prng_mode_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       if (prng_mode == PRNG_MODE_TDES)
+               return snprintf(buf, PAGE_SIZE, "TDES\n");
+       else
+               return snprintf(buf, PAGE_SIZE, "SHA512\n");
+}
+static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL);
+
+/* reseed attribute (w) */
+static ssize_t prng_reseed_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       if (mutex_lock_interruptible(&prng_data->mutex))
+               return -ERESTARTSYS;
+       prng_sha512_reseed();
+       mutex_unlock(&prng_data->mutex);
+
+       return count;
+}
+static DEVICE_ATTR(reseed, 0200, NULL, prng_reseed_store);
+
+/* reseed limit attribute (rw) */
+static ssize_t prng_reseed_limit_show(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit);
+}
+static ssize_t prng_reseed_limit_store(struct device *dev,
+                                      struct device_attribute *attr,
+                                      const char *buf, size_t count)
+{
+       unsigned limit;
+
+       if (sscanf(buf, "%u\n", &limit) != 1)
+               return -EINVAL;
+
+       if (prng_mode == PRNG_MODE_SHA512) {
+               if (limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
+                       return -EINVAL;
+       } else {
+               if (limit < PRNG_RESEED_LIMIT_TDES_LOWER)
+                       return -EINVAL;
+       }
+
+       prng_reseed_limit = limit;
+
+       return count;
+}
+static DEVICE_ATTR(reseed_limit, 0644,
+                  prng_reseed_limit_show, prng_reseed_limit_store);
+
+/* strength attribute (ro) */
+static ssize_t prng_strength_show(struct device *dev,
+                                 struct device_attribute *attr,
+                                 char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "256\n");
+}
+static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL);
+
+static struct attribute *prng_sha512_dev_attrs[] = {
+       &dev_attr_errorflag.attr,
+       &dev_attr_chunksize.attr,
+       &dev_attr_byte_counter.attr,
+       &dev_attr_mode.attr,
+       &dev_attr_reseed.attr,
+       &dev_attr_reseed_limit.attr,
+       &dev_attr_strength.attr,
+       NULL
+};
+static struct attribute *prng_tdes_dev_attrs[] = {
+       &dev_attr_chunksize.attr,
+       &dev_attr_byte_counter.attr,
+       &dev_attr_mode.attr,
+       NULL
+};
+
+static struct attribute_group prng_sha512_dev_attr_group = {
+       .attrs = prng_sha512_dev_attrs
+};
+static struct attribute_group prng_tdes_dev_attr_group = {
+       .attrs = prng_tdes_dev_attrs
+};
+
+
+/*** module init and exit ***/
+
 static int __init prng_init(void)
 {
        int ret;
@@ -169,43 +815,105 @@ static int __init prng_init(void)
        if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
                return -EOPNOTSUPP;
 
-       if (prng_chunk_size < 8)
-               return -EINVAL;
+       /* choose prng mode */
+       if (prng_mode != PRNG_MODE_TDES) {
+               /* check for MSA5 support for PPNO operations */
+               if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN,
+                                              CRYPT_S390_MSA5)) {
+                       if (prng_mode == PRNG_MODE_SHA512) {
+                               pr_err("The prng module cannot "
+                                      "start in SHA-512 mode\n");
+                               return -EOPNOTSUPP;
+                       }
+                       prng_mode = PRNG_MODE_TDES;
+               } else
+                       prng_mode = PRNG_MODE_SHA512;
+       }
 
-       p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL);
-       if (!p)
-               return -ENOMEM;
-       p->count = 0;
+       if (prng_mode == PRNG_MODE_SHA512) {
 
-       p->buf = kmalloc(prng_chunk_size, GFP_KERNEL);
-       if (!p->buf) {
-               ret = -ENOMEM;
-               goto out_free;
-       }
+               /* SHA512 mode */
 
-       /* initialize the PRNG, add 128 bits of entropy */
-       prng_seed(16);
+               if (prng_chunk_size < PRNG_CHUNKSIZE_SHA512_MIN
+                   || prng_chunk_size > PRNG_CHUNKSIZE_SHA512_MAX)
+                       return -EINVAL;
+               prng_chunk_size = (prng_chunk_size + 0x3f) & ~0x3f;
 
-       ret = misc_register(&prng_dev);
-       if (ret)
-               goto out_buf;
-       return 0;
+               if (prng_reseed_limit == 0)
+                       prng_reseed_limit = PRNG_RESEED_LIMIT_SHA512;
+               else if (prng_reseed_limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
+                       return -EINVAL;
+
+               ret = prng_sha512_instantiate();
+               if (ret)
+                       goto out;
+
+               ret = misc_register(&prng_sha512_dev);
+               if (ret) {
+                       prng_sha512_deinstantiate();
+                       goto out;
+               }
+               ret = sysfs_create_group(&prng_sha512_dev.this_device->kobj,
+                                        &prng_sha512_dev_attr_group);
+               if (ret) {
+                       misc_deregister(&prng_sha512_dev);
+                       prng_sha512_deinstantiate();
+                       goto out;
+               }
 
-out_buf:
-       kfree(p->buf);
-out_free:
-       kfree(p);
+       } else {
+
+               /* TDES mode */
+
+               if (prng_chunk_size < PRNG_CHUNKSIZE_TDES_MIN
+                   || prng_chunk_size > PRNG_CHUNKSIZE_TDES_MAX)
+                       return -EINVAL;
+               prng_chunk_size = (prng_chunk_size + 0x07) & ~0x07;
+
+               if (prng_reseed_limit == 0)
+                       prng_reseed_limit = PRNG_RESEED_LIMIT_TDES;
+               else if (prng_reseed_limit < PRNG_RESEED_LIMIT_TDES_LOWER)
+                       return -EINVAL;
+
+               ret = prng_tdes_instantiate();
+               if (ret)
+                       goto out;
+
+               ret = misc_register(&prng_tdes_dev);
+               if (ret) {
+                       prng_tdes_deinstantiate();
+                       goto out;
+               }
+               ret = sysfs_create_group(&prng_tdes_dev.this_device->kobj,
+                                        &prng_tdes_dev_attr_group);
+               if (ret) {
+                       misc_deregister(&prng_tdes_dev);
+                       prng_tdes_deinstantiate();
+                       goto out;
+               }
+
+       }
+
+out:
        return ret;
 }
 
+
 static void __exit prng_exit(void)
 {
-       /* wipe me */
-       kzfree(p->buf);
-       kfree(p);
-
-       misc_deregister(&prng_dev);
+       if (prng_mode == PRNG_MODE_SHA512) {
+               sysfs_remove_group(&prng_sha512_dev.this_device->kobj,
+                                  &prng_sha512_dev_attr_group);
+               misc_deregister(&prng_sha512_dev);
+               prng_sha512_deinstantiate();
+       } else {
+               sysfs_remove_group(&prng_tdes_dev.this_device->kobj,
+                                  &prng_tdes_dev_attr_group);
+               misc_deregister(&prng_tdes_dev);
+               prng_tdes_deinstantiate();
+       }
 }
 
+
 module_init(prng_init);
 module_exit(prng_exit);
index 694bcd6bd927bff60bf76ceede7a222877e02999..2f924bc30e358542201bdde082ab6d0289f3403c 100644 (file)
@@ -26,6 +26,9 @@
 /* Not more than 2GB */
 #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
 
+/* Allocate control page with GFP_DMA */
+#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
+
 /* Maximum address we can use for the crash control pages */
 #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
 
index a5e656260a70183dd4f3768c3be082fed5988603..d29ad9545b4187a18c660e1ad62922b052447478 100644 (file)
@@ -14,7 +14,9 @@ typedef struct {
        unsigned long asce_bits;
        unsigned long asce_limit;
        unsigned long vdso_base;
-       /* The mmu context has extended page tables. */
+       /* The mmu context allocates 4K page tables. */
+       unsigned int alloc_pgste:1;
+       /* The mmu context uses extended page tables. */
        unsigned int has_pgste:1;
        /* The mmu context uses storage keys. */
        unsigned int use_skey:1;
index d25d9ff10ba8fe2e2d7e53fea8c2a73b16facf61..fb1b93ea3e3fead0efde9f30e819c55eecb88da9 100644 (file)
@@ -20,8 +20,11 @@ static inline int init_new_context(struct task_struct *tsk,
        mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
+#ifdef CONFIG_PGSTE
+       mm->context.alloc_pgste = page_table_allocate_pgste;
        mm->context.has_pgste = 0;
        mm->context.use_skey = 0;
+#endif
        mm->context.asce_limit = STACK_TOP_MAX;
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
index 51e7fb634ebc1e6bab2bfdac3655443096296ad3..7b7858f158b4574b549ab99648f977a3e249068c 100644 (file)
@@ -21,6 +21,7 @@ void crst_table_free(struct mm_struct *, unsigned long *);
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
+extern int page_table_allocate_pgste;
 
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned long key, bool nq);
index 989cfae9e202cf1d5adfdd894af3324997aec65a..fc642399b489d896f2b4494cc57456ab7bd2ddb9 100644 (file)
 #define _ASM_S390_PGTABLE_H
 
 /*
- * The Linux memory management assumes a three-level page table setup. For
- * s390 31 bit we "fold" the mid level into the top-level page table, so
- * that we physically have the same two-level page table as the s390 mmu
- * expects in 31 bit mode. For s390 64 bit we use three of the five levels
- * the hardware provides (region first and region second tables are not
- * used).
+ * The Linux memory management assumes a three-level page table setup.
+ * For s390 64 bit we use up to four of the five levels the hardware
+ * provides (region first tables are not used).
  *
  * The "pgd_xxx()" functions are trivial for a folded two-level
  * setup: the pgd is never bad, and a pmd always exists (as it's folded
@@ -101,8 +98,8 @@ extern unsigned long zero_page_mask;
 
 #ifndef __ASSEMBLY__
 /*
- * The vmalloc and module area will always be on the topmost area of the kernel
- * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
+ * The vmalloc and module area will always be on the topmost area of the
+ * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
  * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
  * modules will reside. That makes sure that inter module branches always
  * happen without trampolines and in addition the placement within a 2GB frame
@@ -131,38 +128,6 @@ static inline int is_module_addr(void *addr)
 }
 
 /*
- * A 31 bit pagetable entry of S390 has following format:
- *  |   PFRA          |    |  OS  |
- * 0                   0IP0
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * I Page-Invalid Bit:    Page is not available for address-translation
- * P Page-Protection Bit: Store access not possible for page
- *
- * A 31 bit segmenttable entry of S390 has following format:
- *  |   P-table origin      |  |PTL
- * 0                         IC
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * I Segment-Invalid Bit:    Segment is not available for address-translation
- * C Common-Segment Bit:     Segment is not private (PoP 3-30)
- * PTL Page-Table-Length:    Page-table length (PTL+1*16 entries -> up to 256)
- *
- * The 31 bit segmenttable origin of S390 has following format:
- *
- *  |S-table origin   |     | STL |
- * X                   **GPS
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * X Space-Switch event:
- * G Segment-Invalid Bit:     *
- * P Private-Space Bit:       Segment is not private (PoP 3-30)
- * S Storage-Alteration:
- * STL Segment-Table-Length:  Segment-table length (STL+1*16 entries -> up to 2048)
- *
  * A 64 bit pagetable entry of S390 has following format:
  * |                    PFRA                         |0IPC|  OS  |
  * 0000000000111111111122222222223333333333444444444455555555556666
@@ -220,7 +185,6 @@ static inline int is_module_addr(void *addr)
 
 /* Software bits in the page table entry */
 #define _PAGE_PRESENT  0x001           /* SW pte present bit */
-#define _PAGE_TYPE     0x002           /* SW pte type bit */
 #define _PAGE_YOUNG    0x004           /* SW pte young bit */
 #define _PAGE_DIRTY    0x008           /* SW pte dirty bit */
 #define _PAGE_READ     0x010           /* SW pte read bit */
@@ -240,31 +204,34 @@ static inline int is_module_addr(void *addr)
  * table lock held.
  *
  * The following table gives the different possible bit combinations for
- * the pte hardware and software bits in the last 12 bits of a pte:
+ * the pte hardware and software bits in the last 12 bits of a pte
+ * (. unassigned bit, x don't care, t swap type):
  *
  *                             842100000000
  *                             000084210000
  *                             000000008421
- *                             .IR...wrdytp
- * empty                       .10...000000
- * swap                                .10...xxxx10
- * file                                .11...xxxxx0
- * prot-none, clean, old       .11...000001
- * prot-none, clean, young     .11...000101
- * prot-none, dirty, old       .10...001001
- * prot-none, dirty, young     .10...001101
- * read-only, clean, old       .11...010001
- * read-only, clean, young     .01...010101
- * read-only, dirty, old       .11...011001
- * read-only, dirty, young     .01...011101
- * read-write, clean, old      .11...110001
- * read-write, clean, young    .01...110101
- * read-write, dirty, old      .10...111001
- * read-write, dirty, young    .00...111101
+ *                             .IR.uswrdy.p
+ * empty                       .10.00000000
+ * swap                                .11..ttttt.0
+ * prot-none, clean, old       .11.xx0000.1
+ * prot-none, clean, young     .11.xx0001.1
+ * prot-none, dirty, old       .10.xx0010.1
+ * prot-none, dirty, young     .10.xx0011.1
+ * read-only, clean, old       .11.xx0100.1
+ * read-only, clean, young     .01.xx0101.1
+ * read-only, dirty, old       .11.xx0110.1
+ * read-only, dirty, young     .01.xx0111.1
+ * read-write, clean, old      .11.xx1100.1
+ * read-write, clean, young    .01.xx1101.1
+ * read-write, dirty, old      .10.xx1110.1
+ * read-write, dirty, young    .00.xx1111.1
+ * HW-bits: R read-only, I invalid
+ * SW-bits: p present, y young, d dirty, r read, w write, s special,
+ *         u unused, l large
  *
- * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
- * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
- * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
+ * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
+ * pte_swap    is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200
+ * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
  */
 
 /* Bits in the segment/region table address-space-control-element */
@@ -335,6 +302,8 @@ static inline int is_module_addr(void *addr)
  * read-write, dirty, young    11..0...0...11
  * The segment table origin is used to distinguish empty (origin==0) from
  * read-write, old segment table entries (origin!=0)
+ * HW-bits: R read-only, I invalid
+ * SW-bits: y young, d dirty, r read, w write
  */
 
 #define _SEGMENT_ENTRY_SPLIT_BIT 11    /* THP splitting bit number */
@@ -423,6 +392,15 @@ static inline int mm_has_pgste(struct mm_struct *mm)
        return 0;
 }
 
+static inline int mm_alloc_pgste(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+       if (unlikely(mm->context.alloc_pgste))
+               return 1;
+#endif
+       return 0;
+}
+
 /*
  * In the case that a guest uses storage keys
  * faults should no longer be backed by zero pages
@@ -582,10 +560,9 @@ static inline int pte_none(pte_t pte)
 
 static inline int pte_swap(pte_t pte)
 {
-       /* Bit pattern: (pte & 0x603) == 0x402 */
-       return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
-                               _PAGE_TYPE | _PAGE_PRESENT))
-               == (_PAGE_INVALID | _PAGE_TYPE);
+       /* Bit pattern: (pte & 0x201) == 0x200 */
+       return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
+               == _PAGE_PROTECT;
 }
 
 static inline int pte_special(pte_t pte)
@@ -1586,51 +1563,51 @@ static inline int has_transparent_hugepage(void)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * 31 bit swap entry format:
- * A page-table entry has some bits we have to treat in a special way.
- * Bits 0, 20 and bit 23 have to be zero, otherwise an specification
- * exception will occur instead of a page translation exception. The
- * specifiation exception has the bad habit not to store necessary
- * information in the lowcore.
- * Bits 21, 22, 30 and 31 are used to indicate the page type.
- * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
- * This leaves the bits 1-19 and bits 24-29 to store type and offset.
- * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
- * plus 24 for the offset.
- * 0|     offset        |0110|o|type |00|
- * 0 0000000001111111111 2222 2 22222 33
- * 0 1234567890123456789 0123 4 56789 01
- *
  * 64 bit swap entry format:
  * A page-table entry has some bits we have to treat in a special way.
  * Bits 52 and bit 55 have to be zero, otherwise an specification
  * exception will occur instead of a page translation exception. The
  * specifiation exception has the bad habit not to store necessary
  * information in the lowcore.
- * Bits 53, 54, 62 and 63 are used to indicate the page type.
- * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
- * This leaves the bits 0-51 and bits 56-61 to store type and offset.
- * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
- * plus 56 for the offset.
- * |                      offset                        |0110|o|type |00|
- *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
- *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
+ * Bits 54 and 63 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
+ * This leaves the bits 0-51 and bits 56-62 to store type and offset.
+ * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
+ * for the offset.
+ * |                     offset                        |01100|type |00|
+ * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
+ * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
  */
 
-#define __SWP_OFFSET_MASK (~0UL >> 11)
+#define __SWP_OFFSET_MASK      ((1UL << 52) - 1)
+#define __SWP_OFFSET_SHIFT     12
+#define __SWP_TYPE_MASK                ((1UL << 5) - 1)
+#define __SWP_TYPE_SHIFT       2
 
 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 {
        pte_t pte;
-       offset &= __SWP_OFFSET_MASK;
-       pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
-               ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
+
+       pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
+       pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
+       pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
        return pte;
 }
 
-#define __swp_type(entry)      (((entry).val >> 2) & 0x1f)
-#define __swp_offset(entry)    (((entry).val >> 11) | (((entry).val >> 7) & 1))
-#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
+static inline unsigned long __swp_type(swp_entry_t entry)
+{
+       return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
+}
+
+static inline unsigned long __swp_offset(swp_entry_t entry)
+{
+       return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
+}
+
+static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
+{
+       return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
+}
 
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
index 210ffede0153130d40ce222a15c559057b114e1e..e617e74b7be22aade65168f2c103c32b18b18653 100644 (file)
@@ -14,20 +14,23 @@ static inline pmd_t __pte_to_pmd(pte_t pte)
 
        /*
         * Convert encoding               pte bits         pmd bits
-        *                              .IR...wrdytp    dy..R...I...wr
-        * empty                        .10...000000 -> 00..0...1...00
-        * prot-none, clean, old        .11...000001 -> 00..1...1...00
-        * prot-none, clean, young      .11...000101 -> 01..1...1...00
-        * prot-none, dirty, old        .10...001001 -> 10..1...1...00
-        * prot-none, dirty, young      .10...001101 -> 11..1...1...00
-        * read-only, clean, old        .11...010001 -> 00..1...1...01
-        * read-only, clean, young      .01...010101 -> 01..1...0...01
-        * read-only, dirty, old        .11...011001 -> 10..1...1...01
-        * read-only, dirty, young      .01...011101 -> 11..1...0...01
-        * read-write, clean, old       .11...110001 -> 00..0...1...11
-        * read-write, clean, young     .01...110101 -> 01..0...0...11
-        * read-write, dirty, old       .10...111001 -> 10..0...1...11
-        * read-write, dirty, young     .00...111101 -> 11..0...0...11
+        *                              lIR.uswrdy.p    dy..R...I...wr
+        * empty                        010.000000.0 -> 00..0...1...00
+        * prot-none, clean, old        111.000000.1 -> 00..1...1...00
+        * prot-none, clean, young      111.000001.1 -> 01..1...1...00
+        * prot-none, dirty, old        111.000010.1 -> 10..1...1...00
+        * prot-none, dirty, young      111.000011.1 -> 11..1...1...00
+        * read-only, clean, old        111.000100.1 -> 00..1...1...01
+        * read-only, clean, young      101.000101.1 -> 01..1...0...01
+        * read-only, dirty, old        111.000110.1 -> 10..1...1...01
+        * read-only, dirty, young      101.000111.1 -> 11..1...0...01
+        * read-write, clean, old       111.001100.1 -> 00..1...1...11
+        * read-write, clean, young     101.001101.1 -> 01..1...0...11
+        * read-write, dirty, old       110.001110.1 -> 10..0...1...11
+        * read-write, dirty, young     100.001111.1 -> 11..0...0...11
+        * HW-bits: R read-only, I invalid
+        * SW-bits: p present, y young, d dirty, r read, w write, s special,
+        *          u unused, l large
         */
        if (pte_present(pte)) {
                pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
@@ -48,20 +51,23 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 
        /*
         * Convert encoding                pmd bits         pte bits
-        *                              dy..R...I...wr    .IR...wrdytp
-        * empty                        00..0...1...00 -> .10...001100
-        * prot-none, clean, old        00..0...1...00 -> .10...000001
-        * prot-none, clean, young      01..0...1...00 -> .10...000101
-        * prot-none, dirty, old        10..0...1...00 -> .10...001001
-        * prot-none, dirty, young      11..0...1...00 -> .10...001101
-        * read-only, clean, old        00..1...1...01 -> .11...010001
-        * read-only, clean, young      01..1...1...01 -> .11...010101
-        * read-only, dirty, old        10..1...1...01 -> .11...011001
-        * read-only, dirty, young      11..1...1...01 -> .11...011101
-        * read-write, clean, old       00..0...1...11 -> .10...110001
-        * read-write, clean, young     01..0...1...11 -> .10...110101
-        * read-write, dirty, old       10..0...1...11 -> .10...111001
-        * read-write, dirty, young     11..0...1...11 -> .10...111101
+        *                              dy..R...I...wr    lIR.uswrdy.p
+        * empty                        00..0...1...00 -> 010.000000.0
+        * prot-none, clean, old        00..1...1...00 -> 111.000000.1
+        * prot-none, clean, young      01..1...1...00 -> 111.000001.1
+        * prot-none, dirty, old        10..1...1...00 -> 111.000010.1
+        * prot-none, dirty, young      11..1...1...00 -> 111.000011.1
+        * read-only, clean, old        00..1...1...01 -> 111.000100.1
+        * read-only, clean, young      01..1...0...01 -> 101.000101.1
+        * read-only, dirty, old        10..1...1...01 -> 111.000110.1
+        * read-only, dirty, young      11..1...0...01 -> 101.000111.1
+        * read-write, clean, old       00..1...1...11 -> 111.001100.1
+        * read-write, clean, young     01..1...0...11 -> 101.001101.1
+        * read-write, dirty, old       10..0...1...11 -> 110.001110.1
+        * read-write, dirty, young     11..0...0...11 -> 100.001111.1
+        * HW-bits: R read-only, I invalid
+        * SW-bits: p present, y young, d dirty, r read, w write, s special,
+        *          u unused, l large
         */
        if (pmd_present(pmd)) {
                pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
@@ -70,8 +76,8 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
                pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
-               pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
-               pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
+               pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
+               pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
        } else
                pte_val(pte) = _PAGE_INVALID;
        return pte;
index 33f5894591138ea75ec12b2e9d6cf64492b6ced3..b33f66110ca9401418f7d657b951d8bfb84d99c3 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/swapops.h>
+#include <linux/sysctl.h>
 #include <linux/ksm.h>
 #include <linux/mman.h>
 
@@ -920,6 +921,40 @@ unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
 }
 EXPORT_SYMBOL(get_guest_storage_key);
 
+static int page_table_allocate_pgste_min = 0;
+static int page_table_allocate_pgste_max = 1;
+int page_table_allocate_pgste = 0;
+EXPORT_SYMBOL(page_table_allocate_pgste);
+
+static struct ctl_table page_table_sysctl[] = {
+       {
+               .procname       = "allocate_pgste",
+               .data           = &page_table_allocate_pgste,
+               .maxlen         = sizeof(int),
+               .mode           = S_IRUGO | S_IWUSR,
+               .proc_handler   = proc_dointvec,
+               .extra1         = &page_table_allocate_pgste_min,
+               .extra2         = &page_table_allocate_pgste_max,
+       },
+       { }
+};
+
+static struct ctl_table page_table_sysctl_dir[] = {
+       {
+               .procname       = "vm",
+               .maxlen         = 0,
+               .mode           = 0555,
+               .child          = page_table_sysctl,
+       },
+       { }
+};
+
+static int __init page_table_register_sysctl(void)
+{
+       return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
+}
+__initcall(page_table_register_sysctl);
+
 #else /* CONFIG_PGSTE */
 
 static inline int page_table_with_pgste(struct page *page)
@@ -963,7 +998,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
        struct page *uninitialized_var(page);
        unsigned int mask, bit;
 
-       if (mm_has_pgste(mm))
+       if (mm_alloc_pgste(mm))
                return page_table_alloc_pgste(mm);
        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
@@ -1165,116 +1200,25 @@ static inline void thp_split_mm(struct mm_struct *mm)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
-                               struct mm_struct *mm, pud_t *pud,
-                               unsigned long addr, unsigned long end)
-{
-       unsigned long next, *table, *new;
-       struct page *page;
-       spinlock_t *ptl;
-       pmd_t *pmd;
-
-       pmd = pmd_offset(pud, addr);
-       do {
-               next = pmd_addr_end(addr, end);
-again:
-               if (pmd_none_or_clear_bad(pmd))
-                       continue;
-               table = (unsigned long *) pmd_deref(*pmd);
-               page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-               if (page_table_with_pgste(page))
-                       continue;
-               /* Allocate new page table with pgstes */
-               new = page_table_alloc_pgste(mm);
-               if (!new)
-                       return -ENOMEM;
-
-               ptl = pmd_lock(mm, pmd);
-               if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
-                       /* Nuke pmd entry pointing to the "short" page table */
-                       pmdp_flush_lazy(mm, addr, pmd);
-                       pmd_clear(pmd);
-                       /* Copy ptes from old table to new table */
-                       memcpy(new, table, PAGE_SIZE/2);
-                       clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
-                       /* Establish new table */
-                       pmd_populate(mm, pmd, (pte_t *) new);
-                       /* Free old table with rcu, there might be a walker! */
-                       page_table_free_rcu(tlb, table, addr);
-                       new = NULL;
-               }
-               spin_unlock(ptl);
-               if (new) {
-                       page_table_free_pgste(new);
-                       goto again;
-               }
-       } while (pmd++, addr = next, addr != end);
-
-       return addr;
-}
-
-static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
-                                  struct mm_struct *mm, pgd_t *pgd,
-                                  unsigned long addr, unsigned long end)
-{
-       unsigned long next;
-       pud_t *pud;
-
-       pud = pud_offset(pgd, addr);
-       do {
-               next = pud_addr_end(addr, end);
-               if (pud_none_or_clear_bad(pud))
-                       continue;
-               next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
-               if (unlikely(IS_ERR_VALUE(next)))
-                       return next;
-       } while (pud++, addr = next, addr != end);
-
-       return addr;
-}
-
-static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
-                                       unsigned long addr, unsigned long end)
-{
-       unsigned long next;
-       pgd_t *pgd;
-
-       pgd = pgd_offset(mm, addr);
-       do {
-               next = pgd_addr_end(addr, end);
-               if (pgd_none_or_clear_bad(pgd))
-                       continue;
-               next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
-               if (unlikely(IS_ERR_VALUE(next)))
-                       return next;
-       } while (pgd++, addr = next, addr != end);
-
-       return 0;
-}
-
 /*
  * switch on pgstes for its userspace process (for kvm)
  */
 int s390_enable_sie(void)
 {
-       struct task_struct *tsk = current;
-       struct mm_struct *mm = tsk->mm;
-       struct mmu_gather tlb;
+       struct mm_struct *mm = current->mm;
 
        /* Do we have pgstes? if yes, we are done */
-       if (mm_has_pgste(tsk->mm))
+       if (mm_has_pgste(mm))
                return 0;
-
+       /* Fail if the page tables are 2K */
+       if (!mm_alloc_pgste(mm))
+               return -EINVAL;
        down_write(&mm->mmap_sem);
+       mm->context.has_pgste = 1;
        /* split thp mappings and disable thp for future mappings */
        thp_split_mm(mm);
-       /* Reallocate the page tables with pgstes */
-       tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
-       if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
-               mm->context.has_pgste = 1;
-       tlb_finish_mmu(&tlb, 0, TASK_SIZE);
        up_write(&mm->mmap_sem);
-       return mm->context.has_pgste ? 0 : -ENOMEM;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
 
index 6873f006f7d04fb1e71f8e9be386854431acb513..d366675e4bf88ef10e18f53637abd314b69b3725 100644 (file)
@@ -774,7 +774,7 @@ static void __init zone_sizes_init(void)
                 * though, there'll be no lowmem, so we just alloc_bootmem
                 * the memmap.  There will be no percpu memory either.
                 */
-               if (i != 0 && cpumask_test_cpu(i, &isolnodes)) {
+               if (i != 0 && node_isset(i, isolnodes)) {
                        node_memmap_pfn[i] =
                                alloc_bootmem_pfn(0, memmap_size, 0);
                        BUG_ON(node_percpu[i] != 0);
index 25b1cc07d49668c8a40306bf2ec81e4e2a11988e..d6b078e9fa28a3f4588237cb9a122f5b5ce53162 100644 (file)
@@ -95,7 +95,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 
 struct pvclock_vsyscall_time_info {
        struct pvclock_vcpu_time_info pvti;
-       u32 migrate_count;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
index e5ecd20e72dd56d82447c94c17e6e85ae29eba90..2f355d229a587771680b28080d92fd06f345d7e7 100644 (file)
@@ -141,46 +141,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
        set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
 }
 
-static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
-
-static struct pvclock_vsyscall_time_info *
-pvclock_get_vsyscall_user_time_info(int cpu)
-{
-       if (!pvclock_vdso_info) {
-               BUG();
-               return NULL;
-       }
-
-       return &pvclock_vdso_info[cpu];
-}
-
-struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
-{
-       return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
-}
-
 #ifdef CONFIG_X86_64
-static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
-                               void *v)
-{
-       struct task_migration_notifier *mn = v;
-       struct pvclock_vsyscall_time_info *pvti;
-
-       pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
-
-       /* this is NULL when pvclock vsyscall is not initialized */
-       if (unlikely(pvti == NULL))
-               return NOTIFY_DONE;
-
-       pvti->migrate_count++;
-
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block pvclock_migrate = {
-       .notifier_call = pvclock_task_migrate,
-};
-
 /*
  * Initialize the generic pvclock vsyscall state.  This will allocate
  * a/some page(s) for the per-vcpu pvclock information, set up a
@@ -194,17 +155,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
 
        WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
 
-       pvclock_vdso_info = i;
-
        for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
                __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
                             __pa(i) + (idx*PAGE_SIZE),
                             PAGE_KERNEL_VVAR);
        }
 
-
-       register_task_migration_notifier(&pvclock_migrate);
-
        return 0;
 }
 #endif
index ed31c31b2485b1e06476f93ab7934b216ed1cacc..c73efcd03e294a2e5bc2ef276dc7fa4a6e9d837f 100644 (file)
@@ -1669,12 +1669,28 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                &guest_hv_clock, sizeof(guest_hv_clock))))
                return 0;
 
-       /*
-        * The interface expects us to write an even number signaling that the
-        * update is finished. Since the guest won't see the intermediate
-        * state, we just increase by 2 at the end.
+       /* This VCPU is paused, but it's legal for a guest to read another
+        * VCPU's kvmclock, so we really have to follow the specification where
+        * it says that version is odd if data is being modified, and even after
+        * it is consistent.
+        *
+        * Version field updates must be kept separate.  This is because
+        * kvm_write_guest_cached might use a "rep movs" instruction, and
+        * writes within a string instruction are weakly ordered.  So there
+        * are three writes overall.
+        *
+        * As a small optimization, only write the version field in the first
+        * and third write.  The vcpu->pv_time cache is still valid, because the
+        * version field is the first in the struct.
         */
-       vcpu->hv_clock.version = guest_hv_clock.version + 2;
+       BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+
+       vcpu->hv_clock.version = guest_hv_clock.version + 1;
+       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+                               &vcpu->hv_clock,
+                               sizeof(vcpu->hv_clock.version));
+
+       smp_wmb();
 
        /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
        pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
@@ -1695,6 +1711,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
                                &vcpu->hv_clock,
                                sizeof(vcpu->hv_clock));
+
+       smp_wmb();
+
+       vcpu->hv_clock.version++;
+       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+                               &vcpu->hv_clock,
+                               sizeof(vcpu->hv_clock.version));
        return 0;
 }
 
index 40d2473836c923acc5705018bf3aebf50cfb12b8..9793322751e02f63ddba0d1b8fef5f21b0a4d502 100644 (file)
@@ -82,15 +82,18 @@ static notrace cycle_t vread_pvclock(int *mode)
        cycle_t ret;
        u64 last;
        u32 version;
-       u32 migrate_count;
        u8 flags;
        unsigned cpu, cpu1;
 
 
        /*
-        * When looping to get a consistent (time-info, tsc) pair, we
-        * also need to deal with the possibility we can switch vcpus,
-        * so make sure we always re-fetch time-info for the current vcpu.
+        * Note: hypervisor must guarantee that:
+        * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
+        * 2. that per-CPU pvclock time info is updated if the
+        *    underlying CPU changes.
+        * 3. that version is increased whenever underlying CPU
+        *    changes.
+        *
         */
        do {
                cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -99,27 +102,20 @@ static notrace cycle_t vread_pvclock(int *mode)
                 * __getcpu() calls (Gleb).
                 */
 
-               /* Make sure migrate_count will change if we leave the VCPU. */
-               do {
-                       pvti = get_pvti(cpu);
-                       migrate_count = pvti->migrate_count;
-
-                       cpu1 = cpu;
-                       cpu = __getcpu() & VGETCPU_CPU_MASK;
-               } while (unlikely(cpu != cpu1));
+               pvti = get_pvti(cpu);
 
                version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
 
                /*
                 * Test we're still on the cpu as well as the version.
-                * - We must read TSC of pvti's VCPU.
-                * - KVM doesn't follow the versioning protocol, so data could
-                *   change before version if we left the VCPU.
+                * We could have been migrated just after the first
+                * vgetcpu but before fetching the version, so we
+                * wouldn't notice a version change.
                 */
-               smp_rmb();
-       } while (unlikely((pvti->pvti.version & 1) ||
-                         pvti->pvti.version != version ||
-                         pvti->migrate_count != migrate_count));
+               cpu1 = __getcpu() & VGETCPU_CPU_MASK;
+       } while (unlikely(cpu != cpu1 ||
+                         (pvti->pvti.version & 1) ||
+                         pvti->pvti.version != version));
 
        if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
                *mode = VCLOCK_NONE;
index cd827625cf079207f36a2e2f2b86e00ae649faa4..01504c819e8f6692382a2b8138fb72a40bb50780 100644 (file)
@@ -684,7 +684,7 @@ static int acpi_sbs_add(struct acpi_device *device)
        if (!sbs_manager_broken) {
                result = acpi_manager_get_info(sbs);
                if (!result) {
-                       sbs->manager_present = 0;
+                       sbs->manager_present = 1;
                        for (id = 0; id < MAX_SBS_BAT; ++id)
                                if ((sbs->batteries_supported & (1 << id)))
                                        acpi_battery_add(sbs, id);
index 812523330a78d438e1397b29803f309b8e360211..ec6c5c6e1ac94b2bcbe0619a7fe62b9e7d0ce4a5 100644 (file)
@@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
                        result, xferred);
                if (!img_request->result)
                        img_request->result = result;
+               /*
+                * Need to end I/O on the entire obj_request worth of
+                * bytes in case of error.
+                */
+               xferred = obj_request->length;
        }
 
        /* Image object requests don't own their page array */
index 7a73a279e179a52b9ea209e00b3f61f797c4d4cb..61c417b9e53f8795175b29d71dfd201f15be151c 100644 (file)
@@ -158,9 +158,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        int entered_state;
 
        struct cpuidle_state *target_state = &drv->states[index];
+       bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
        ktime_t time_start, time_end;
        s64 diff;
 
+       /*
+        * Tell the time framework to switch to a broadcast timer because our
+        * local timer will be shut down.  If a local timer is used from another
+        * CPU as a broadcast timer, this call may fail if it is not available.
+        */
+       if (broadcast && tick_broadcast_enter())
+               return -EBUSY;
+
        trace_cpu_idle_rcuidle(index, dev->cpu);
        time_start = ktime_get();
 
@@ -169,6 +178,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        time_end = ktime_get();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
+       if (broadcast) {
+               if (WARN_ON_ONCE(!irqs_disabled()))
+                       local_irq_disable();
+
+               tick_broadcast_exit();
+       }
+
        if (!cpuidle_state_is_coupled(dev, drv, entered_state))
                local_irq_enable();
 
index fd7ac13f2574ab0aab2ecf0c5f6b38019459da99..bda2cb06dc7a450c6e58e0145edba08acbc6f376 100644 (file)
@@ -437,6 +437,7 @@ config IMG_MDC_DMA
 
 config XGENE_DMA
        tristate "APM X-Gene DMA support"
+       depends on ARCH_XGENE || COMPILE_TEST
        select DMA_ENGINE
        select DMA_ENGINE_RAID
        select ASYNC_TX_ENABLE_CHANNEL_SWITCH
index 0e035a8cf40146ccf191d3e26418dc04ed7da0b8..2890d744bb1bb902cc095fb87841c492cef7542c 100644 (file)
@@ -571,11 +571,15 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 
        chan = private_candidate(&mask, device, NULL, NULL);
        if (chan) {
+               dma_cap_set(DMA_PRIVATE, device->cap_mask);
+               device->privatecnt++;
                err = dma_chan_get(chan);
                if (err) {
                        pr_debug("%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);
                        chan = NULL;
+                       if (--device->privatecnt == 0)
+                               dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                }
        }
 
index f705798ce3eb1129151a2d5472067b845f947781..ebd8a5f398b08ee2bc883ccb06a7752657eb4877 100644 (file)
@@ -673,6 +673,7 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
  * Power management
  */
 
+#ifdef CONFIG_PM
 static int usb_dmac_runtime_suspend(struct device *dev)
 {
        struct usb_dmac *dmac = dev_get_drvdata(dev);
@@ -690,6 +691,7 @@ static int usb_dmac_runtime_resume(struct device *dev)
 
        return usb_dmac_init(dmac);
 }
+#endif /* CONFIG_PM */
 
 static const struct dev_pm_ops usb_dmac_pm = {
        SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
index c8a18e4ee9dce262bb2c5bb87392018176b6ccfc..720ceeb7fa9b29118bea02dcb59e82a9f2638ee6 100644 (file)
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
                goto err_unlock_md_type;
        }
 
-       if (dm_get_md_type(md) == DM_TYPE_NONE)
+       if (dm_get_md_type(md) == DM_TYPE_NONE) {
                /* Initial table load: acquire type of table. */
                dm_set_md_type(md, dm_table_get_type(t));
-       else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+
+               /* setup md->queue to reflect md's type (may block) */
+               r = dm_setup_md_queue(md);
+               if (r) {
+                       DMWARN("unable to set up device queue for new table.");
+                       goto err_unlock_md_type;
+               }
+       } else if (dm_get_md_type(md) != dm_table_get_type(t)) {
                DMWARN("can't change device type after initial table load.");
                r = -EINVAL;
                goto err_unlock_md_type;
        }
 
-       /* setup md->queue to reflect md's type (may block) */
-       r = dm_setup_md_queue(md);
-       if (r) {
-               DMWARN("unable to set up device queue for new table.");
-               goto err_unlock_md_type;
-       }
        dm_unlock_md_type(md);
 
        /* stage inactive table */
index f8c7ca3e8947378484a6d3f9745c363c78ab2879..a930b72314ac985da702f8b47a8054a75b2e2ba8 100644 (file)
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
        dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
 
+       WARN_ON_ONCE(must_be_mapped && !clone->q);
+
        blk_rq_unprep_clone(clone);
 
-       if (clone->q->mq_ops)
+       if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+               /* stacked on blk-mq queue(s) */
                tio->ti->type->release_clone_rq(clone);
        else if (!md->queue->mq_ops)
                /* request_fn queue stacked on request_fn queue(s) */
                free_clone_request(md, clone);
+       /*
+        * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+        * no need to call free_clone_request() because we leverage blk-mq by
+        * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+        */
 
        if (!md->queue->mq_ops)
                free_rq_tio(tio);
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
                        rq->sense_len = clone->sense_len;
        }
 
-       free_rq_clone(clone);
+       free_rq_clone(clone, true);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
        }
 
        if (clone)
-               free_rq_clone(clone);
+               free_rq_clone(clone, false);
 }
 
 /*
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 {
        struct request_queue *q = NULL;
 
-       if (md->queue->elevator)
-               return 0;
-
        /* Fully initialize the queue */
        q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
        if (!q)
index 78dde56ae6e6fa9dd7d04e64ae1969023d285238..d5fe5d5f490f3efa70e022fdac9b64bd89311e22 100644 (file)
@@ -82,6 +82,8 @@
 #include <net/bond_3ad.h>
 #include <net/bond_alb.h>
 
+#include "bonding_priv.h"
+
 /*---------------------------- Module parameters ----------------------------*/
 
 /* monitor all links that often (in milliseconds). <=0 disables monitoring */
@@ -4542,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void)
 int bond_create(struct net *net, const char *name)
 {
        struct net_device *bond_dev;
+       struct bonding *bond;
+       struct alb_bond_info *bond_info;
        int res;
 
        rtnl_lock();
@@ -4555,6 +4559,14 @@ int bond_create(struct net *net, const char *name)
                return -ENOMEM;
        }
 
+       /*
+        * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
+        * It is set to 0 by default which is wrong.
+        */
+       bond = netdev_priv(bond_dev);
+       bond_info = &(BOND_ALB_INFO(bond));
+       bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
+
        dev_net_set(bond_dev, net);
        bond_dev->rtnl_link_ops = &bond_link_ops;
 
index 62694cfc05b6548aff5c4f7186021f8c2a4ee570..b20b35acb47d3465063cdde30a1018321b344b56 100644 (file)
@@ -4,6 +4,7 @@
 #include <net/netns/generic.h>
 #include <net/bonding.h>
 
+#include "bonding_priv.h"
 
 static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
new file mode 100644 (file)
index 0000000..5a4d81a
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
+ *
+ * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+ * NCM: Network and Communications Management, Inc.
+ *
+ * BUT, I'm the one who modified it for ethernet, so:
+ * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
+ *
+ *     This software may be used and distributed according to the terms
+ *     of the GNU Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef _BONDING_PRIV_H
+#define _BONDING_PRIV_H
+
+#define DRV_VERSION    "3.7.1"
+#define DRV_RELDATE    "April 27, 2011"
+#define DRV_NAME       "bonding"
+#define DRV_DESCRIPTION        "Ethernet Channel Bonding Driver"
+
+#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+
+#endif
index 58808f6514520c869631b356d6476f8cbc060a14..e8c96b8e86f48e66e68b2cd5285c0766865f067e 100644 (file)
@@ -112,7 +112,7 @@ config PCH_CAN
 
 config CAN_GRCAN
        tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
-       depends on OF
+       depends on OF && HAS_DMA
        ---help---
          Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
          Note that the driver supports little endian, even though little
index 4643914859b2c7894f7556cfe023e1903eeb9cc6..8b17a9065b0b193a0c5e5a93048c637f0f7fbad3 100644 (file)
@@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
 
        if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
                                         MSG_FLAG_NERR)) {
-               netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
+               netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
                           msg->u.rx_can_header.flag);
 
                stats->rx_errors++;
index b36ee9e0d220c0f03e0b62e5fc3a6d9014411fab..d686b9cac29f0b4ac2805455b6ba8f1f99017a74 100644 (file)
@@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec)
        char *s;
        
        if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
-               printk(KERN_ERR "%s: unable to read podule description string\n",
+               printk(KERN_ERR "%s: unable to read module description string\n",
                       dev_name(&ec->dev));
                goto no_addr;
        }
index eba070f1678265d413b94737db86ae89e57e8e54..89cd11d866420475ae9fd9bc2fa295afa08d4a36 100644 (file)
@@ -58,15 +58,12 @@ struct msgdma_extended_desc {
 /* Tx buffer control flags
  */
 #define MSGDMA_DESC_CTL_TX_FIRST       (MSGDMA_DESC_CTL_GEN_SOP |      \
-                                        MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)
 
-#define MSGDMA_DESC_CTL_TX_MIDDLE      (MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
-                                        MSGDMA_DESC_CTL_GO)
+#define MSGDMA_DESC_CTL_TX_MIDDLE      (MSGDMA_DESC_CTL_GO)
 
 #define MSGDMA_DESC_CTL_TX_LAST                (MSGDMA_DESC_CTL_GEN_EOP |      \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
-                                        MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)
 
 #define MSGDMA_DESC_CTL_TX_SINGLE      (MSGDMA_DESC_CTL_GEN_SOP |      \
index 90a76306ad0fafd441376fb524f1e00e1d202163..da48e66377b5ff42dc497a5ac4685ca1e16a1eb9 100644 (file)
@@ -391,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
                                   "RCV pktstatus %08X pktlength %08X\n",
                                   pktstatus, pktlength);
 
+               /* DMA trasfer from TSE starts with 2 aditional bytes for
+                * IP payload alignment. Status returned by get_rx_status()
+                * contains DMA transfer length. Packet is 2 bytes shorter.
+                */
+               pktlength -= 2;
+
                count++;
                next_entry = (++priv->rx_cons) % priv->rx_ring_size;
 
@@ -777,6 +783,8 @@ static int init_phy(struct net_device *dev)
        struct altera_tse_private *priv = netdev_priv(dev);
        struct phy_device *phydev;
        struct device_node *phynode;
+       bool fixed_link = false;
+       int rc = 0;
 
        /* Avoid init phy in case of no phy present */
        if (!priv->phy_iface)
@@ -789,13 +797,32 @@ static int init_phy(struct net_device *dev)
        phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
 
        if (!phynode) {
-               netdev_dbg(dev, "no phy-handle found\n");
-               if (!priv->mdio) {
-                       netdev_err(dev,
-                                  "No phy-handle nor local mdio specified\n");
-                       return -ENODEV;
+               /* check if a fixed-link is defined in device-tree */
+               if (of_phy_is_fixed_link(priv->device->of_node)) {
+                       rc = of_phy_register_fixed_link(priv->device->of_node);
+                       if (rc < 0) {
+                               netdev_err(dev, "cannot register fixed PHY\n");
+                               return rc;
+                       }
+
+                       /* In the case of a fixed PHY, the DT node associated
+                        * to the PHY is the Ethernet MAC DT node.
+                        */
+                       phynode = of_node_get(priv->device->of_node);
+                       fixed_link = true;
+
+                       netdev_dbg(dev, "fixed-link detected\n");
+                       phydev = of_phy_connect(dev, phynode,
+                                               &altera_tse_adjust_link,
+                                               0, priv->phy_iface);
+               } else {
+                       netdev_dbg(dev, "no phy-handle found\n");
+                       if (!priv->mdio) {
+                               netdev_err(dev, "No phy-handle nor local mdio specified\n");
+                               return -ENODEV;
+                       }
+                       phydev = connect_local_phy(dev);
                }
-               phydev = connect_local_phy(dev);
        } else {
                netdev_dbg(dev, "phy-handle found\n");
                phydev = of_phy_connect(dev, phynode,
@@ -819,10 +846,10 @@ static int init_phy(struct net_device *dev)
        /* Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
-        * device as well.
+        * device as well. If a fixed-link is used the phy_id is always 0.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
-       if (phydev->phy_id == 0) {
+       if ((phydev->phy_id == 0) && !fixed_link) {
                netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
                phy_disconnect(phydev);
                return -ENODEV;
index c638c85f3954bc685db3ccd9ac338d1375b8de02..089c269637b725da7876db7e4988baa77b5ec171 100644 (file)
@@ -179,7 +179,7 @@ config SUNLANCE
 
 config AMD_XGBE
        tristate "AMD 10GbE Ethernet driver"
-       depends on (OF_NET || ACPI) && HAS_IOMEM
+       depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
        select PHYLIB
        select AMD_XGBE_PHY
        select BITREVERSE
index 8e262e2b39b63fc5b1e26cca09c66c08b76169c9..dea29ee24da4a28ce1effc2a9c72a77b516021cc 100644 (file)
@@ -25,8 +25,7 @@ config ARC_EMAC_CORE
 config ARC_EMAC
        tristate "ARC EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ
-       depends on OF_NET
+       depends on OF_IRQ && OF_NET && HAS_DMA
        ---help---
          On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
          non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -35,7 +34,7 @@ config ARC_EMAC
 config EMAC_ROCKCHIP
        tristate "Rockchip EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ && OF_NET && REGULATOR
+       depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA
        ---help---
          Support for Rockchip RK3066/RK3188 EMAC ethernet controllers.
          This selects Rockchip SoC glue layer support for the
index 74df16aef7933871fb87c29a511ddc5b19182a01..88a6271de5bc96fde0993f6d9096a7904aa78c7b 100644 (file)
@@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
 #define     TWSI_CTRL_LD_SLV_ADDR_SHIFT     8
 #define     TWSI_CTRL_SW_LDSTART            0x800
 #define     TWSI_CTRL_HW_LDSTART            0x1000
-#define     TWSI_CTRL_SMB_SLV_ADDR_MASK     0x0x7F
+#define     TWSI_CTRL_SMB_SLV_ADDR_MASK     0x7F
 #define     TWSI_CTRL_SMB_SLV_ADDR_SHIFT    15
 #define     TWSI_CTRL_LD_EXIST              0x400000
 #define     TWSI_CTRL_READ_FREQ_SEL_MASK    0x3
index 7e3d87a88c76a81e2c36b65559d8b2b0bcf34217..e2c043eabbf39d165644312aba5bbecb4f07fcf8 100644 (file)
@@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters {
        u32     jbr;            /* RO # of xmited jabber count*/
        u32     bytes;          /* RO # of xmited byte count */
        u32     pok;            /* RO # of xmited good pkt */
-       u32     uc;             /* RO (0x0x4f0)# of xmited unitcast pkt */
+       u32     uc;             /* RO (0x4f0) # of xmited unicast pkt */
 };
 
 struct bcm_sysport_mib {
index de77d3a74abc82f0c8b77dff3200799e70634674..21e3c38c7c752dd75674a62aeb8211bc78477808 100644 (file)
@@ -1260,7 +1260,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
 
        /* Poll again if more events arrived in the meantime */
        if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
-               return handled;
+               return weight;
 
        if (handled < weight) {
                napi_complete(napi);
index 355d5fea5be9c3847597371fe86092956929bf02..a3b0f7a0c61e0d6ffeefcd88ae81ab751554e085 100644 (file)
@@ -521,6 +521,7 @@ struct bnx2x_fp_txdata {
 };
 
 enum bnx2x_tpa_mode_t {
+       TPA_MODE_DISABLED,
        TPA_MODE_LRO,
        TPA_MODE_GRO
 };
@@ -589,7 +590,6 @@ struct bnx2x_fastpath {
 
        /* TPA related */
        struct bnx2x_agg_info   *tpa_info;
-       u8                      disable_tpa;
 #ifdef BNX2X_STOP_ON_ERROR
        u64                     tpa_queue_used;
 #endif
@@ -1545,9 +1545,7 @@ struct bnx2x {
 #define USING_MSIX_FLAG                        (1 << 5)
 #define USING_MSI_FLAG                 (1 << 6)
 #define DISABLE_MSI_FLAG               (1 << 7)
-#define TPA_ENABLE_FLAG                        (1 << 8)
 #define NO_MCP_FLAG                    (1 << 9)
-#define GRO_ENABLE_FLAG                        (1 << 10)
 #define MF_FUNC_DIS                    (1 << 11)
 #define OWN_CNIC_IRQ                   (1 << 12)
 #define NO_ISCSI_OOO_FLAG              (1 << 13)
index 2f63467bce465ff9e8a5f50bf04be86b5150d77d..a8bb8f664d3d7f9a031158d5cac82820fbed7d6d 100644 (file)
@@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                        u16 frag_size, pages;
 #ifdef BNX2X_STOP_ON_ERROR
                        /* sanity check */
-                       if (fp->disable_tpa &&
+                       if (fp->mode == TPA_MODE_DISABLED &&
                            (CQE_TYPE_START(cqe_fp_type) ||
                             CQE_TYPE_STOP(cqe_fp_type)))
-                               BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
+                               BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
                                          CQE_TYPE(cqe_fp_type));
 #endif
 
@@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                DP(NETIF_MSG_IFUP,
                   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 
-               if (!fp->disable_tpa) {
+               if (fp->mode != TPA_MODE_DISABLED) {
                        /* Fill the per-aggregation pool */
                        for (i = 0; i < MAX_AGG_QS(bp); i++) {
                                struct bnx2x_agg_info *tpa_info =
@@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                                        BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
                                                  j);
                                        bnx2x_free_tpa_pool(bp, fp, i);
-                                       fp->disable_tpa = 1;
+                                       fp->mode = TPA_MODE_DISABLED;
                                        break;
                                }
                                dma_unmap_addr_set(first_buf, mapping, 0);
@@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                                                                ring_prod);
                                        bnx2x_free_tpa_pool(bp, fp,
                                                            MAX_AGG_QS(bp));
-                                       fp->disable_tpa = 1;
+                                       fp->mode = TPA_MODE_DISABLED;
                                        ring_prod = 0;
                                        break;
                                }
@@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
                bnx2x_free_rx_bds(fp);
 
-               if (!fp->disable_tpa)
+               if (fp->mode != TPA_MODE_DISABLED)
                        bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
        }
 }
@@ -2477,17 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
        /* set the tpa flag for each queue. The tpa flag determines the queue
         * minimal size so it must be set prior to queue memory allocation
         */
-       fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
-                                 (bp->flags & GRO_ENABLE_FLAG &&
-                                  bnx2x_mtu_allows_gro(bp->dev->mtu)));
-       if (bp->flags & TPA_ENABLE_FLAG)
+       if (bp->dev->features & NETIF_F_LRO)
                fp->mode = TPA_MODE_LRO;
-       else if (bp->flags & GRO_ENABLE_FLAG)
+       else if (bp->dev->features & NETIF_F_GRO &&
+                bnx2x_mtu_allows_gro(bp->dev->mtu))
                fp->mode = TPA_MODE_GRO;
+       else
+               fp->mode = TPA_MODE_DISABLED;
 
-       /* We don't want TPA on an FCoE L2 ring */
-       if (IS_FCOE_FP(fp))
-               fp->disable_tpa = 1;
+       /* We don't want TPA if it's disabled in bp
+        * or if this is an FCoE L2 ring.
+        */
+       if (bp->disable_tpa || IS_FCOE_FP(fp))
+               fp->mode = TPA_MODE_DISABLED;
 }
 
 int bnx2x_load_cnic(struct bnx2x *bp)
@@ -2608,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        /*
         * Zero fastpath structures preserving invariants like napi, which are
         * allocated only once, fp index, max_cos, bp pointer.
-        * Also set fp->disable_tpa and txdata_ptr.
+        * Also set fp->mode and txdata_ptr.
         */
        DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
        for_each_queue(bp, i)
@@ -3247,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi)
 
        if ((bp->state == BNX2X_STATE_CLOSED) ||
            (bp->state == BNX2X_STATE_ERROR) ||
-           (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
+           (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
                return LL_FLUSH_FAILED;
 
        if (!bnx2x_fp_lock_poll(fp))
@@ -4543,7 +4545,7 @@ alloc_mem_err:
         * In these cases we disable the queue
         * Min size is different for OOO, TPA and non-TPA queues
         */
-       if (ring_size < (fp->disable_tpa ?
+       if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
                                MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
                        /* release memory allocated for this queue */
                        bnx2x_free_fp_mem_at(bp, index);
@@ -4809,66 +4811,71 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
 {
        struct bnx2x *bp = netdev_priv(dev);
 
+       if (pci_num_vf(bp->pdev)) {
+               netdev_features_t changed = dev->features ^ features;
+
+               /* Revert the requested changes in features if they
+                * would require internal reload of PF in bnx2x_set_features().
+                */
+               if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
+                       features &= ~NETIF_F_RXCSUM;
+                       features |= dev->features & NETIF_F_RXCSUM;
+               }
+
+               if (changed & NETIF_F_LOOPBACK) {
+                       features &= ~NETIF_F_LOOPBACK;
+                       features |= dev->features & NETIF_F_LOOPBACK;
+               }
+       }
+
        /* TPA requires Rx CSUM offloading */
        if (!(features & NETIF_F_RXCSUM)) {
                features &= ~NETIF_F_LRO;
                features &= ~NETIF_F_GRO;
        }
 
-       /* Note: do not disable SW GRO in kernel when HW GRO is off */
-       if (bp->disable_tpa)
-               features &= ~NETIF_F_LRO;
-
        return features;
 }
 
 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       u32 flags = bp->flags;
-       u32 changes;
+       netdev_features_t changes = features ^ dev->features;
        bool bnx2x_reload = false;
+       int rc;
 
-       if (features & NETIF_F_LRO)
-               flags |= TPA_ENABLE_FLAG;
-       else
-               flags &= ~TPA_ENABLE_FLAG;
-
-       if (features & NETIF_F_GRO)
-               flags |= GRO_ENABLE_FLAG;
-       else
-               flags &= ~GRO_ENABLE_FLAG;
-
-       if (features & NETIF_F_LOOPBACK) {
-               if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
-                       bp->link_params.loopback_mode = LOOPBACK_BMAC;
-                       bnx2x_reload = true;
-               }
-       } else {
-               if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
-                       bp->link_params.loopback_mode = LOOPBACK_NONE;
-                       bnx2x_reload = true;
+       /* VFs or non SRIOV PFs should be able to change loopback feature */
+       if (!pci_num_vf(bp->pdev)) {
+               if (features & NETIF_F_LOOPBACK) {
+                       if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
+                               bp->link_params.loopback_mode = LOOPBACK_BMAC;
+                               bnx2x_reload = true;
+                       }
+               } else {
+                       if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
+                               bp->link_params.loopback_mode = LOOPBACK_NONE;
+                               bnx2x_reload = true;
+                       }
                }
        }
 
-       changes = flags ^ bp->flags;
-
        /* if GRO is changed while LRO is enabled, don't force a reload */
-       if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
-               changes &= ~GRO_ENABLE_FLAG;
+       if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
+               changes &= ~NETIF_F_GRO;
 
        /* if GRO is changed while HW TPA is off, don't force a reload */
-       if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
-               changes &= ~GRO_ENABLE_FLAG;
+       if ((changes & NETIF_F_GRO) && bp->disable_tpa)
+               changes &= ~NETIF_F_GRO;
 
        if (changes)
                bnx2x_reload = true;
 
-       bp->flags = flags;
-
        if (bnx2x_reload) {
-               if (bp->recovery_state == BNX2X_RECOVERY_DONE)
-                       return bnx2x_reload_if_running(dev);
+               if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
+                       dev->features = features;
+                       rc = bnx2x_reload_if_running(dev);
+                       return rc ? rc : 1;
+               }
                /* else: bnx2x_nic_load() will be called at end of recovery */
        }
 
@@ -4931,6 +4938,11 @@ int bnx2x_resume(struct pci_dev *pdev)
        }
        bp = netdev_priv(dev);
 
+       if (pci_num_vf(bp->pdev)) {
+               DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
+               return -EPERM;
+       }
+
        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                BNX2X_ERR("Handling parity error recovery. Try again later\n");
                return -EAGAIN;
index adcacda7af7b10e70b053821acc1e2dfe722732a..d7a71758e87615de36fe06664a914291bdb3cfa3 100644 (file)
@@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 {
        int i;
 
-       if (fp->disable_tpa)
+       if (fp->mode == TPA_MODE_DISABLED)
                return;
 
        for (i = 0; i < last; i++)
index e3d853cab7c9644c241cd42ba1a2844b82e55176..48ed005ba73fd3a9d9aa550871b647fdd0b59350 100644 (file)
@@ -1843,6 +1843,12 @@ static int bnx2x_set_ringparam(struct net_device *dev,
           "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
           ering->rx_pending, ering->tx_pending);
 
+       if (pci_num_vf(bp->pdev)) {
+               DP(BNX2X_MSG_IOV,
+                  "VFs are enabled, can not change ring parameters\n");
+               return -EPERM;
+       }
+
        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                DP(BNX2X_MSG_ETHTOOL,
                   "Handling parity error recovery. Try again later\n");
@@ -2899,6 +2905,12 @@ static void bnx2x_self_test(struct net_device *dev,
        u8 is_serdes, link_up;
        int rc, cnt = 0;
 
+       if (pci_num_vf(bp->pdev)) {
+               DP(BNX2X_MSG_IOV,
+                  "VFs are enabled, can not perform self test\n");
+               return;
+       }
+
        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                netdev_err(bp->dev,
                           "Handling parity error recovery. Try again later\n");
@@ -3468,6 +3480,11 @@ static int bnx2x_set_channels(struct net_device *dev,
           channels->rx_count, channels->tx_count, channels->other_count,
           channels->combined_count);
 
+       if (pci_num_vf(bp->pdev)) {
+               DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n");
+               return -EPERM;
+       }
+
        /* We don't support separate rx / tx channels.
         * We don't allow setting 'other' channels.
         */
index b9f85fccb419be528ae328efc3af4303f0498103..556dcc162a6252a36fe550110b4abbb3d1bbcf60 100644 (file)
@@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
                __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
        }
 
-       if (!fp->disable_tpa) {
+       if (fp->mode != TPA_MODE_DISABLED) {
                __set_bit(BNX2X_Q_FLG_TPA, &flags);
                __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
                if (fp->mode == TPA_MODE_GRO)
@@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
        u16 sge_sz = 0;
        u16 tpa_agg_size = 0;
 
-       if (!fp->disable_tpa) {
+       if (fp->mode != TPA_MODE_DISABLED) {
                pause->sge_th_lo = SGE_TH_LO(bp);
                pause->sge_th_hi = SGE_TH_HI(bp);
 
@@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
        /* This flag is relevant for E1x only.
         * E2 doesn't have a TPA configuration in a function level.
         */
-       flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+       flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
 
        func_init.func_flgs = flags;
        func_init.pf_id = BP_FUNC(bp);
@@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        /* Set TPA flags */
        if (bp->disable_tpa) {
-               bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
+               bp->dev->hw_features &= ~NETIF_F_LRO;
                bp->dev->features &= ~NETIF_F_LRO;
-       } else {
-               bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
-               bp->dev->features |= NETIF_F_LRO;
        }
 
        if (CHIP_IS_E1(bp))
@@ -13371,6 +13368,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        bool is_vf;
        int cnic_cnt;
 
+       /* Management FW 'remembers' living interfaces. Allow it some time
+        * to forget previously living interfaces, allowing a proper re-load.
+        */
+       if (is_kdump_kernel())
+               msleep(5000);
+
        /* An estimated maximum supported CoS number according to the chip
         * version.
         * We will try to roughly estimate the maximum number of CoSes this chip
index 15b2d164756058c6c5fb154bdc128f52aa3148c3..06b8c0d8fd3b12ab4e864c8c0971cc52380c007c 100644 (file)
@@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
 
        /* select tpa mode to request */
-       if (!fp->disable_tpa) {
+       if (fp->mode != TPA_MODE_DISABLED) {
                flags |= VFPF_QUEUE_FLG_TPA;
                flags |= VFPF_QUEUE_FLG_TPA_IPV6;
                if (fp->mode == TPA_MODE_GRO)
index 1270b189a9a2ffd7776985f8e0a96fada80f8de8..069952fa5d644b62b7d1a04fdb8b615a4ed3d69a 100644 (file)
@@ -18129,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
-       tp->pcierr_recovery = true;
+       /* We needn't recover from permanent error */
+       if (state == pci_channel_io_frozen)
+               tp->pcierr_recovery = true;
 
        /* We probably don't have netdev yet */
        if (!netdev || !netif_running(netdev))
index 9f5387249f242374437581e6c2df7c037917f83a..4104d49f005d4a825eb14b99efeb5f158ab456d5 100644 (file)
@@ -707,6 +707,9 @@ static void gem_rx_refill(struct macb *bp)
 
                        /* properly align Ethernet header */
                        skb_reserve(skb, NET_IP_ALIGN);
+               } else {
+                       bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
+                       bp->rx_ring[entry].ctrl = 0;
                }
        }
 
@@ -1473,9 +1476,9 @@ static void macb_init_rings(struct macb *bp)
        for (i = 0; i < TX_RING_SIZE; i++) {
                bp->queues[0].tx_ring[i].addr = 0;
                bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
-               bp->queues[0].tx_head = 0;
-               bp->queues[0].tx_tail = 0;
        }
+       bp->queues[0].tx_head = 0;
+       bp->queues[0].tx_tail = 0;
        bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
 
        bp->rx_tail = 0;
index 5959e3ae72da213e11587e8cd27a2bc9759755d0..e8578a742f2a29b14a2eaec01216a8e47a68e12a 100644 (file)
@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
                memoffset = (mtype * (edc_size * 1024 * 1024));
        else {
                mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
-                                                     MA_EXT_MEMORY1_BAR_A));
+                                                     MA_EXT_MEMORY0_BAR_A));
                memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
        }
 
index fb0bc3c3620e9cf87983b1c425e0f24d431bffc9..a6dcbf850c1fd4e09462d40f5f0e7cc08cfb2088 100644 (file)
@@ -4846,7 +4846,8 @@ err:
 }
 
 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                struct net_device *dev, u32 filter_mask)
+                                struct net_device *dev, u32 filter_mask,
+                                int nlflags)
 {
        struct be_adapter *adapter = netdev_priv(dev);
        int status = 0;
@@ -4868,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
                                       hsw_mode == PORT_FWD_TYPE_VEPA ?
                                       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
-                                      0, 0);
+                                      0, 0, nlflags);
 }
 
 #ifdef CONFIG_BE2NET_VXLAN
index f6a3a7abd468e1f25fd4c33e874a38f0c85bc4dd..66d47e448e4d175aeefecddacc53f8f858f0085b 100644 (file)
@@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev)
                rcntl |= 0x40000000 | 0x00000020;
 
                /* RGMII, RMII or MII */
-               if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
+               if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
+                   fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+                   fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+                   fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
                        rcntl |= (1 << 6);
                else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
                        rcntl |= (1 << 8);
index 291c87036e173c792a1b26234e2b99e8cd67bf65..2a0dc127df3f4e099e273a77715ad87358385aff 100644 (file)
@@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void)
 {
        int ret = 0;
 
-       if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+       if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
                return 0;
 
        ret = ehea_create_busmap();
@@ -3381,12 +3381,14 @@ out3:
 out2:
        unregister_reboot_notifier(&ehea_reboot_nb);
 out:
+       atomic_dec(&ehea_memory_hooks_registered);
        return ret;
 }
 
 static void ehea_unregister_memory_hooks(void)
 {
-       if (atomic_read(&ehea_memory_hooks_registered))
+       /* Only remove the hooks if we've registered them */
+       if (atomic_read(&ehea_memory_hooks_registered) == 0)
                return;
 
        unregister_reboot_notifier(&ehea_reboot_nb);
index cd7675ac5bf9ed8b8658996d2d27190c6b20245f..18134766a11409c6c976f00ac0431de748c03073 100644 (file)
@@ -1238,7 +1238,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
                return -EINVAL;
 
        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
-               if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
+               if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
                        break;
 
        if (i == IBMVETH_NUM_BUFF_POOLS)
@@ -1257,7 +1257,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                adapter->rx_buff_pool[i].active = 1;
 
-               if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+               if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
                        dev->mtu = new_mtu;
                        vio_cmo_set_dev_desired(viodev,
                                                ibmveth_get_desired_dma
index 24481cd7e59ac94e3e4ec14528938338aeaf4000..a54c14491e3b6a4dbc168980dd44d399b6766487 100644 (file)
@@ -8053,10 +8053,10 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
 #ifdef HAVE_BRIDGE_FILTER
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                   struct net_device *dev,
-                                  u32 __always_unused filter_mask)
+                                  u32 __always_unused filter_mask, int nlflags)
 #else
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                  struct net_device *dev)
+                                  struct net_device *dev, int nlflags)
 #endif /* HAVE_BRIDGE_FILTER */
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -8078,7 +8078,8 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        if (!veb)
                return 0;
 
-       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+                                      nlflags);
 }
 #endif /* HAVE_BRIDGE_ATTRIBS */
 
index d3f4b0ceb3f781216599408248b351cd4854bc92..5be12a00e1f447744f2497131cea1a70e313fd1f 100644 (file)
@@ -8044,7 +8044,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
 
 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                    struct net_device *dev,
-                                   u32 filter_mask)
+                                   u32 filter_mask, int nlflags)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
 
@@ -8052,7 +8052,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                return 0;
 
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
-                                      adapter->bridge_mode, 0, 0);
+                                      adapter->bridge_mode, 0, 0, nlflags);
 }
 
 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
index af829c57840039e54e853bd5c4230ab0c6f17699..7ace07dad6a31d4b18ab5aa1e5335c0727cff596 100644 (file)
@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
                np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
                if (!np) {
                        dev_err(&pdev->dev, "missing phy-handle\n");
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto err_netdev;
                }
                of_property_read_u32(np, "reg", &pep->phy_addr);
                pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
        pep->smi_bus = mdiobus_alloc();
        if (pep->smi_bus == NULL) {
                err = -ENOMEM;
-               goto err_base;
+               goto err_netdev;
        }
        pep->smi_bus->priv = pep;
        pep->smi_bus->name = "pxa168_eth smi";
@@ -1551,13 +1552,10 @@ err_mdiobus:
        mdiobus_unregister(pep->smi_bus);
 err_free_mdio:
        mdiobus_free(pep->smi_bus);
-err_base:
-       iounmap(pep->base);
 err_netdev:
        free_netdev(dev);
 err_clk:
-       clk_disable(clk);
-       clk_put(clk);
+       clk_disable_unprepare(clk);
        return err;
 }
 
@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
        if (pep->phy)
                phy_disconnect(pep->phy);
        if (pep->clk) {
-               clk_disable(pep->clk);
-               clk_put(pep->clk);
-               pep->clk = NULL;
+               clk_disable_unprepare(pep->clk);
        }
 
-       iounmap(pep->base);
-       pep->base = NULL;
        mdiobus_unregister(pep->smi_bus);
        mdiobus_free(pep->smi_bus);
        unregister_netdev(dev);
index 3f44e2bbb9824caad9068e7ce6f03e1a2df382f2..a2ddf3d75ff8ff8956763b924bc2fda8b156c222 100644 (file)
@@ -1102,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
        /* check if requested function is supported by the device */
-       if ((hfunc == ETH_RSS_HASH_TOP &&
-            !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
-           (hfunc == ETH_RSS_HASH_XOR &&
-            !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
-               return -EINVAL;
+       if (hfunc == ETH_RSS_HASH_TOP) {
+               if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
+                       return -EINVAL;
+               if (!(dev->features & NETIF_F_RXHASH))
+                       en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+               return 0;
+       } else if (hfunc == ETH_RSS_HASH_XOR) {
+               if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
+                       return -EINVAL;
+               if (dev->features & NETIF_F_RXHASH)
+                       en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+               return 0;
+       }
 
-       priv->rss_hash_fn = hfunc;
-       if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
-               en_warn(priv,
-                       "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
-       if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
-               en_warn(priv,
-                       "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
-       return 0;
+       return -EINVAL;
 }
 
 static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
@@ -1189,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
                priv->prof->rss_rings = rss_rings;
        if (key)
                memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
+       if (hfunc !=  ETH_RSS_HASH_NO_CHANGE)
+               priv->rss_hash_fn = hfunc;
 
        if (port_up) {
                err = mlx4_en_start_port(dev);
index 0f1afc085d580b34e0eda1eaa4b1cdc1737c71be..32f5ec7374723d1315f4234f77b12ffbe5adcfe0 100644 (file)
@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
                if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
                        mlx4_en_ptp_overflow_check(mdev);
 
+               mlx4_en_recover_from_oom(priv);
                queue_delayed_work(mdev->workqueue, &priv->service_task,
                                   SERVICE_TASK_DELAY);
        }
@@ -1721,7 +1722,7 @@ mac_err:
 cq_err:
        while (rx_index--) {
                mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
-               mlx4_en_free_affinity_hint(priv, i);
+               mlx4_en_free_affinity_hint(priv, rx_index);
        }
        for (i = 0; i < priv->rx_ring_num; i++)
                mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
index 4fdd3c37e47bf7c7862b9edf569be6f7f38e8dae..2a77a6b191216b19059c89fa8ad386252684806c 100644 (file)
@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
        return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
 }
 
+static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
+{
+       BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
+       return ring->prod == ring->cons;
+}
+
 static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
 {
        *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
               ring->cons, ring->prod);
 
        /* Unmap and free Rx buffers */
-       BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
-       while (ring->cons != ring->prod) {
+       while (!mlx4_en_is_ring_empty(ring)) {
                index = ring->cons & ring->size_mask;
                en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
                mlx4_en_free_rx_desc(priv, ring, index);
@@ -491,6 +496,23 @@ err_allocator:
        return err;
 }
 
+/* We recover from out of memory by scheduling our napi poll
+ * function (mlx4_en_process_cq), which tries to allocate
+ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
+ */
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
+{
+       int ring;
+
+       if (!priv->port_up)
+               return;
+
+       for (ring = 0; ring < priv->rx_ring_num; ring++) {
+               if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+                       napi_reschedule(&priv->rx_cq[ring]->napi);
+       }
+}
+
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_rx_ring **pring,
                             u32 size, u16 stride)
index 1783705273d89773c0a462cb28684f2969e55ac4..f7bf312fb44311b1c436c4eb3706341db92c1db0 100644 (file)
@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
        ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
        ring->queue_index = queue_index;
 
-       if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
-               cpumask_set_cpu(queue_index, &ring->affinity_mask);
+       if (queue_index < priv->num_tx_rings_p_up)
+               cpumask_set_cpu_local_first(queue_index,
+                                           priv->mdev->dev->numa_node,
+                                           &ring->affinity_mask);
 
        *pring = ring;
        return 0;
@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
                               &ring->qp, &ring->qp_state);
-       if (!user_prio && cpu_online(ring->queue_index))
+       if (!cpumask_empty(&ring->affinity_mask))
                netif_set_xps_queue(priv->dev, &ring->affinity_mask,
                                    ring->queue_index);
 
index a4079811b176f1afeba6c16e84bbcc29e8d3e463..e30bf57ad7a18ff559eb4bba122252eaf0308964 100644 (file)
@@ -56,11 +56,13 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
 #define MLX4_GET(dest, source, offset)                               \
        do {                                                          \
                void *__p = (char *) (source) + (offset);             \
+               u64 val;                                              \
                switch (sizeof (dest)) {                              \
                case 1: (dest) = *(u8 *) __p;       break;            \
                case 2: (dest) = be16_to_cpup(__p); break;            \
                case 4: (dest) = be32_to_cpup(__p); break;            \
-               case 8: (dest) = be64_to_cpup(__p); break;            \
+               case 8: val = get_unaligned((u64 *)__p);              \
+                       (dest) = be64_to_cpu(val);  break;            \
                default: __buggy_use_of_MLX4_GET();                   \
                }                                                     \
        } while (0)
@@ -1605,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id)
                 * swaps each 4-byte word before passing it back to
                 * us.  Therefore we need to swab it before printing.
                 */
-               for (i = 0; i < 4; ++i)
-                       ((u32 *) board_id)[i] =
-                               swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
+               u32 *bid_u32 = (u32 *)board_id;
+
+               for (i = 0; i < 4; ++i) {
+                       u32 *addr;
+                       u32 val;
+
+                       addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
+                       val = get_unaligned(addr);
+                       val = swab32(val);
+                       put_unaligned(val, &bid_u32[i]);
+               }
        }
 }
 
index 9de30216b146bb09188a6867307b5bbcf7aa9dd0..d021f079f181b06bb6ec73250ea8493ad87d1cee 100644 (file)
@@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring);
 void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_rx_ring **pring,
                           u32 size, u16 stride, int node);
index 1412f5af05ecf521e41ff109dc1a24e7621090ce..2bae50292dcd814a2b8cb338da1bb0a6beac82f0 100644 (file)
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <asm/byteorder.h>
-#include <asm/io.h>
 #include <asm/processor.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
 #include <net/busy_poll.h>
 
 #include "myri10ge_mcp.h"
@@ -242,8 +238,7 @@ struct myri10ge_priv {
        unsigned int rdma_tags_available;
        int intr_coal_delay;
        __be32 __iomem *intr_coal_delay_ptr;
-       int mtrr;
-       int wc_enabled;
+       int wc_cookie;
        int down_cnt;
        wait_queue_head_t down_wq;
        struct work_struct watchdog_work;
@@ -1905,7 +1900,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
        "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
        "tx_heartbeat_errors", "tx_window_errors",
        /* device-specific stats */
-       "tx_boundary", "WC", "irq", "MSI", "MSIX",
+       "tx_boundary", "irq", "MSI", "MSIX",
        "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
        "serial_number", "watchdog_resets",
 #ifdef CONFIG_MYRI10GE_DCA
@@ -1984,7 +1979,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
                data[i] = ((u64 *)&link_stats)[i];
 
        data[i++] = (unsigned int)mgp->tx_boundary;
-       data[i++] = (unsigned int)mgp->wc_enabled;
        data[i++] = (unsigned int)mgp->pdev->irq;
        data[i++] = (unsigned int)mgp->msi_enabled;
        data[i++] = (unsigned int)mgp->msix_enabled;
@@ -4040,14 +4034,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        mgp->board_span = pci_resource_len(pdev, 0);
        mgp->iomem_base = pci_resource_start(pdev, 0);
-       mgp->mtrr = -1;
-       mgp->wc_enabled = 0;
-#ifdef CONFIG_MTRR
-       mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
-                            MTRR_TYPE_WRCOMB, 1);
-       if (mgp->mtrr >= 0)
-               mgp->wc_enabled = 1;
-#endif
+       mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
        mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
        if (mgp->sram == NULL) {
                dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
@@ -4146,14 +4133,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto abort_with_state;
        }
        if (mgp->msix_enabled)
-               dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
+               dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
                         mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
-                        (mgp->wc_enabled ? "Enabled" : "Disabled"));
+                        (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
        else
-               dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
+               dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
                         mgp->msi_enabled ? "MSI" : "xPIC",
                         pdev->irq, mgp->tx_boundary, mgp->fw_name,
-                        (mgp->wc_enabled ? "Enabled" : "Disabled"));
+                        (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
 
        board_number++;
        return 0;
@@ -4175,10 +4162,7 @@ abort_with_ioremap:
        iounmap(mgp->sram);
 
 abort_with_mtrr:
-#ifdef CONFIG_MTRR
-       if (mgp->mtrr >= 0)
-               mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
-#endif
+       arch_phys_wc_del(mgp->wc_cookie);
        dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
                          mgp->cmd, mgp->cmd_bus);
 
@@ -4220,11 +4204,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
        pci_restore_state(pdev);
 
        iounmap(mgp->sram);
-
-#ifdef CONFIG_MTRR
-       if (mgp->mtrr >= 0)
-               mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
-#endif
+       arch_phys_wc_del(mgp->wc_cookie);
        myri10ge_free_slices(mgp);
        kfree(mgp->msix_vectors);
        dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
index 5c4068353f664e8924f832c3d681ec41f53465c6..8da7c3faf8178c05576c74833fc828f197104f52 100644 (file)
@@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
        int i, j;
        struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
 
-       spin_lock(&adapter->tx_clean_lock);
+       spin_lock_bh(&adapter->tx_clean_lock);
        cmd_buf = tx_ring->cmd_buf_arr;
        for (i = 0; i < tx_ring->num_desc; i++) {
                buffrag = cmd_buf->frag_array;
@@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
                }
                cmd_buf++;
        }
-       spin_unlock(&adapter->tx_clean_lock);
+       spin_unlock_bh(&adapter->tx_clean_lock);
 }
 
 void netxen_free_sw_resources(struct netxen_adapter *adapter)
index a570a60533be5531c5881ace3683e69e9d80ece5..ec251531bd9f8ecd1e64295b4f1f5c35fe475b79 100644 (file)
@@ -4176,14 +4176,15 @@ static int rocker_port_bridge_setlink(struct net_device *dev,
 
 static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                      struct net_device *dev,
-                                     u32 filter_mask)
+                                     u32 filter_mask, int nlflags)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
        u16 mode = BRIDGE_MODE_UNDEF;
        u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
 
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
-                                      rocker_port->brport_flags, mask);
+                                      rocker_port->brport_flags, mask,
+                                      nlflags);
 }
 
 static int rocker_port_get_phys_port_name(struct net_device *dev,
index 2bef655279f32a4ffb6097b295362e2da22867c0..9b7e0a34c98b10aca5eed610c47f33b2eedbbd00 100644 (file)
@@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
                                     ALE_PORT_STATE,
                                     ALE_PORT_STATE_FORWARD);
 
-               if (ndev && slave->open)
+               if (ndev && slave->open &&
+                   slave->link_interface != SGMII_LINK_MAC_PHY &&
+                   slave->link_interface != XGMII_LINK_MAC_PHY)
                        netif_carrier_on(ndev);
        } else {
                writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
@@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
                cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
                                     ALE_PORT_STATE,
                                     ALE_PORT_STATE_DISABLE);
-               if (ndev)
+               if (ndev &&
+                   slave->link_interface != SGMII_LINK_MAC_PHY &&
+                   slave->link_interface != XGMII_LINK_MAC_PHY)
                        netif_carrier_off(ndev);
        }
 
index a10b31664709f51215435d94a9439c65d311221b..41071d32bc8e0e1259726aa647bc8a77324ffdd9 100644 (file)
@@ -128,7 +128,6 @@ struct ndis_tcp_ip_checksum_info;
 struct hv_netvsc_packet {
        /* Bookkeeping stuff */
        u32 status;
-       bool part_of_skb;
 
        bool is_data_pkt;
        bool xmit_more; /* from skb */
@@ -612,6 +611,15 @@ struct multi_send_data {
        u32 count; /* counter of batched packets */
 };
 
+/* The context of the netvsc device  */
+struct net_device_context {
+       /* point back to our device context */
+       struct hv_device *device_ctx;
+       struct delayed_work dwork;
+       struct work_struct work;
+       u32 msg_enable; /* debug level */
+};
+
 /* Per netvsc device */
 struct netvsc_device {
        struct hv_device *dev;
@@ -667,6 +675,9 @@ struct netvsc_device {
        struct multi_send_data msd[NR_CPUS];
        u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
        u32 pkt_align; /* alignment bytes, e.g. 8 */
+
+       /* The net device context */
+       struct net_device_context *nd_ctx;
 };
 
 /* NdisInitialize message */
index 2e8ad0636b466668e8939e4eabe442160e6c2402..2d9ef533cc4837c5bd7b46c96958f9f1b9eda323 100644 (file)
@@ -889,11 +889,6 @@ int netvsc_send(struct hv_device *device,
                } else {
                        packet->page_buf_cnt = 0;
                        packet->total_data_buflen += msd_len;
-                       if (!packet->part_of_skb) {
-                               skb = (struct sk_buff *)(unsigned long)packet->
-                                      send_completion_tid;
-                               packet->send_completion_tid = 0;
-                       }
                }
 
                if (msdp->pkt)
@@ -1197,6 +1192,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
         */
        ndev = net_device->ndev;
 
+       /* Add netvsc_device context to netvsc_device */
+       net_device->nd_ctx = netdev_priv(ndev);
+
        /* Initialize the NetVSC channel extension */
        init_completion(&net_device->channel_init_wait);
 
index a3a9d3898a6e8a80ddb21cb11864c09006e47554..5993c7e2d723a7e42d6022c90cb8e495420a49ad 100644 (file)
 
 #include "hyperv_net.h"
 
-struct net_device_context {
-       /* point back to our device context */
-       struct hv_device *device_ctx;
-       struct delayed_work dwork;
-       struct work_struct work;
-};
 
 #define RING_SIZE_MIN 64
 static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+                               NETIF_MSG_LINK | NETIF_MSG_IFUP |
+                               NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
+                               NETIF_MSG_TX_ERR;
+
+static int debug = -1;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 static void do_set_multicast(struct work_struct *w)
 {
        struct net_device_context *ndevctx =
@@ -235,9 +238,6 @@ void netvsc_xmit_completion(void *context)
        struct sk_buff *skb = (struct sk_buff *)
                (unsigned long)packet->send_completion_tid;
 
-       if (!packet->part_of_skb)
-               kfree(packet);
-
        if (skb)
                dev_kfree_skb_any(skb);
 }
@@ -389,7 +389,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        u32 net_trans_info;
        u32 hash;
        u32 skb_length;
-       u32 head_room;
        u32 pkt_sz;
        struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 
@@ -402,7 +401,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
 check_size:
        skb_length = skb->len;
-       head_room = skb_headroom(skb);
        num_data_pgs = netvsc_get_slots(skb) + 2;
        if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
                net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
@@ -421,20 +419,14 @@ check_size:
 
        pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;
 
-       if (head_room < pkt_sz) {
-               packet = kmalloc(pkt_sz, GFP_ATOMIC);
-               if (!packet) {
-                       /* out of memory, drop packet */
-                       netdev_err(net, "unable to alloc hv_netvsc_packet\n");
-                       ret = -ENOMEM;
-                       goto drop;
-               }
-               packet->part_of_skb = false;
-       } else {
-               /* Use the headroom for building up the packet */
-               packet = (struct hv_netvsc_packet *)skb->head;
-               packet->part_of_skb = true;
+       ret = skb_cow_head(skb, pkt_sz);
+       if (ret) {
+               netdev_err(net, "unable to alloc hv_netvsc_packet\n");
+               ret = -ENOMEM;
+               goto drop;
        }
+       /* Use the headroom for building up the packet */
+       packet = (struct hv_netvsc_packet *)skb->head;
 
        packet->status = 0;
        packet->xmit_more = skb->xmit_more;
@@ -591,8 +583,6 @@ drop:
                net->stats.tx_bytes += skb_length;
                net->stats.tx_packets++;
        } else {
-               if (packet && !packet->part_of_skb)
-                       kfree(packet);
                if (ret != -EAGAIN) {
                        dev_kfree_skb_any(skb);
                        net->stats.tx_dropped++;
@@ -888,6 +878,11 @@ static int netvsc_probe(struct hv_device *dev,
 
        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = dev;
+       net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
+       if (netif_msg_probe(net_device_ctx))
+               netdev_dbg(net, "netvsc msg_enable: %d\n",
+                          net_device_ctx->msg_enable);
+
        hv_set_drvdata(dev, net);
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);
index 0d92efefd796c9b631beeca1b35ad9979cb36d2f..9118cea918821cb6bbe83a2f97a71134a58fd5dd 100644 (file)
@@ -429,7 +429,8 @@ int rndis_filter_receive(struct hv_device *dev,
 
        rndis_msg = pkt->data;
 
-       dump_rndis_message(dev, rndis_msg);
+       if (netif_msg_rx_err(net_dev->nd_ctx))
+               dump_rndis_message(dev, rndis_msg);
 
        switch (rndis_msg->ndis_msg_type) {
        case RNDIS_MSG_PACKET:
index 49ce7ece5af30c04c3e632a1719ba45d9b47f3b5..c9cb486c753d053c8b6da529a0c078d89f9c2a2f 100644 (file)
@@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
                 * assume the pin serves as pull-up. If direction is
                 * output, the default value is high.
                 */
-               gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low);
+               gpio_set_value_cansleep(bitbang->mdo,
+                                       1 ^ bitbang->mdo_active_low);
                return;
        }
 
@@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
        struct mdio_gpio_info *bitbang =
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-       return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low;
+       return gpio_get_value_cansleep(bitbang->mdio) ^
+               bitbang->mdio_active_low;
 }
 
 static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
        if (bitbang->mdo)
-               gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low);
+               gpio_set_value_cansleep(bitbang->mdo,
+                                       what ^ bitbang->mdo_active_low);
        else
-               gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low);
+               gpio_set_value_cansleep(bitbang->mdio,
+                                       what ^ bitbang->mdio_active_low);
 }
 
 static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
        struct mdio_gpio_info *bitbang =
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-       gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low);
+       gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low);
 }
 
 static struct mdiobb_ops mdio_gpio_ops = {
index 1a87a585e74df9abac74d5a60c8715917b179ff9..66edd99bc302ddc5c5bdd0d0757acd7923adf8f4 100644 (file)
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/mdio-mux.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 
 #define DRV_VERSION "1.1"
 #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
 
-#define MDIO_MUX_GPIO_MAX_BITS 8
-
 struct mdio_mux_gpio_state {
-       struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS];
-       unsigned int num_gpios;
+       struct gpio_descs *gpios;
        void *mux_handle;
 };
 
 static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
                                   void *data)
 {
-       int values[MDIO_MUX_GPIO_MAX_BITS];
-       unsigned int n;
        struct mdio_mux_gpio_state *s = data;
+       int values[s->gpios->ndescs];
+       unsigned int n;
 
        if (current_child == desired_child)
                return 0;
 
-       for (n = 0; n < s->num_gpios; n++) {
+       for (n = 0; n < s->gpios->ndescs; n++)
                values[n] = (desired_child >> n) & 1;
-       }
-       gpiod_set_array_cansleep(s->num_gpios, s->gpio, values);
+
+       gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values);
 
        return 0;
 }
@@ -46,56 +43,33 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
 static int mdio_mux_gpio_probe(struct platform_device *pdev)
 {
        struct mdio_mux_gpio_state *s;
-       int num_gpios;
-       unsigned int n;
        int r;
 
-       if (!pdev->dev.of_node)
-               return -ENODEV;
-
-       num_gpios = of_gpio_count(pdev->dev.of_node);
-       if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
-               return -ENODEV;
-
        s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;
 
-       s->num_gpios = num_gpios;
-
-       for (n = 0; n < num_gpios; ) {
-               struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n,
-                                                        GPIOD_OUT_LOW);
-               if (IS_ERR(gpio)) {
-                       r = PTR_ERR(gpio);
-                       goto err;
-               }
-               s->gpio[n] = gpio;
-               n++;
-       }
+       s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
+       if (IS_ERR(s->gpios))
+               return PTR_ERR(s->gpios);
 
        r = mdio_mux_init(&pdev->dev,
                          mdio_mux_gpio_switch_fn, &s->mux_handle, s);
 
-       if (r == 0) {
-               pdev->dev.platform_data = s;
-               return 0;
-       }
-err:
-       while (n) {
-               n--;
-               gpiod_put(s->gpio[n]);
+       if (r != 0) {
+               gpiod_put_array(s->gpios);
+               return r;
        }
-       return r;
+
+       pdev->dev.platform_data = s;
+       return 0;
 }
 
 static int mdio_mux_gpio_remove(struct platform_device *pdev)
 {
-       unsigned int n;
        struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
        mdio_mux_uninit(s->mux_handle);
-       for (n = 0; n < s->num_gpios; n++)
-               gpiod_put(s->gpio[n]);
+       gpiod_put_array(s->gpios);
        return 0;
 }
 
index 911b21602ff271885c2dbfb4c5c04f69fb028e7e..05005c660d4d954527d278130824f232383c2d49 100644 (file)
@@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
        struct blkcipher_desc desc = { .tfm = state->arc4 };
        unsigned ccount;
        int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
-       int sanity = 0;
        struct scatterlist sg_in[1], sg_out[1];
 
        if (isize <= PPP_HDRLEN + MPPE_OVHD) {
@@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
                       "mppe_decompress[%d]: ENCRYPTED bit not set!\n",
                       state->unit);
                state->sanity_errors += 100;
-               sanity = 1;
+               goto sanity_error;
        }
        if (!state->stateful && !flushed) {
                printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
                       "stateless mode!\n", state->unit);
                state->sanity_errors += 100;
-               sanity = 1;
+               goto sanity_error;
        }
        if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
                printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
                       "flag packet!\n", state->unit);
                state->sanity_errors += 100;
-               sanity = 1;
-       }
-
-       if (sanity) {
-               if (state->sanity_errors < SANITY_MAX)
-                       return DECOMP_ERROR;
-               else
-                       /*
-                        * Take LCP down if the peer is sending too many bogons.
-                        * We don't want to do this for a single or just a few
-                        * instances since it could just be due to packet corruption.
-                        */
-                       return DECOMP_FATALERROR;
+               goto sanity_error;
        }
 
        /*
@@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
         */
 
        if (!state->stateful) {
+               /* Discard late packet */
+               if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE
+                                               > MPPE_CCOUNT_SPACE / 2) {
+                       state->sanity_errors++;
+                       goto sanity_error;
+               }
+
                /* RFC 3078, sec 8.1.  Rekey for every packet. */
                while (state->ccount != ccount) {
                        mppe_rekey(state, 0);
@@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
        state->sanity_errors >>= 1;
 
        return osize;
+
+sanity_error:
+       if (state->sanity_errors < SANITY_MAX)
+               return DECOMP_ERROR;
+       else
+               /* Take LCP down if the peer is sending too many bogons.
+                * We don't want to do this for a single or just a few
+                * instances since it could just be due to packet corruption.
+                */
+               return DECOMP_FATALERROR;
 }
 
 /*
index 154116aafd0d8c5cb6caab9056a2245cbc3c783b..27a5f954f8e999cc809dc5ed0bad08d2e06e0f18 100644 (file)
@@ -730,12 +730,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                        /* Only change unicasts */
                        if (!(is_multicast_ether_addr(f->eth_addr) ||
                             is_zero_ether_addr(f->eth_addr))) {
-                               int rc = vxlan_fdb_replace(f, ip, port, vni,
+                               notify |= vxlan_fdb_replace(f, ip, port, vni,
                                                           ifindex);
-
-                               if (rc < 0)
-                                       return rc;
-                               notify |= rc;
                        } else
                                return -EOPNOTSUPP;
                }
index c43aca69fb30dffed727c210b725b46a365440dd..0fc3fe5fd5b810c77fd24bc99e269414151e3405 100644 (file)
@@ -667,6 +667,8 @@ static struct raw3215_info *raw3215_alloc_info(void)
        info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
        info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
        if (!info->buffer || !info->inbuf) {
+               kfree(info->inbuf);
+               kfree(info->buffer);
                kfree(info);
                return NULL;
        }
index cd4c293f0dd0d375be6d6d815108aeb588304fec..fe8875f0d7be1155883655150ec57c3115356bbe 100644 (file)
@@ -80,9 +80,10 @@ static int __init sh_pm_runtime_init(void)
        if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
                if (!of_machine_is_compatible("renesas,emev2") &&
                    !of_machine_is_compatible("renesas,r7s72100") &&
-                   !of_machine_is_compatible("renesas,r8a73a4") &&
 #ifndef CONFIG_PM_GENERIC_DOMAINS_OF
+                   !of_machine_is_compatible("renesas,r8a73a4") &&
                    !of_machine_is_compatible("renesas,r8a7740") &&
+                   !of_machine_is_compatible("renesas,sh73a0") &&
 #endif
                    !of_machine_is_compatible("renesas,r8a7778") &&
                    !of_machine_is_compatible("renesas,r8a7779") &&
@@ -90,9 +91,7 @@ static int __init sh_pm_runtime_init(void)
                    !of_machine_is_compatible("renesas,r8a7791") &&
                    !of_machine_is_compatible("renesas,r8a7792") &&
                    !of_machine_is_compatible("renesas,r8a7793") &&
-                   !of_machine_is_compatible("renesas,r8a7794") &&
-                   !of_machine_is_compatible("renesas,sh7372") &&
-                   !of_machine_is_compatible("renesas,sh73a0"))
+                   !of_machine_is_compatible("renesas,r8a7794"))
                        return 0;
        }
 
index 08da4d3e21621ab03ed62a305562bf8f21a5f092..46bcebba54b2ff44fc32de373dfc8b09fc58687a 100644 (file)
@@ -1998,6 +1998,8 @@ pci_wch_ch38x_setup(struct serial_private *priv,
 #define PCIE_DEVICE_ID_WCH_CH382_2S1P  0x3250
 #define PCIE_DEVICE_ID_WCH_CH384_4S    0x3470
 
+#define PCI_DEVICE_ID_EXAR_XR17V8358   0x8358
+
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584        0x1584
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588        0x1588
@@ -2520,6 +2522,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = pci_xr17v35x_setup,
        },
+       {
+               .vendor = PCI_VENDOR_ID_EXAR,
+               .device = PCI_DEVICE_ID_EXAR_XR17V8358,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_xr17v35x_setup,
+       },
        /*
         * Xircom cards
         */
@@ -2999,6 +3008,7 @@ enum pci_board_num_t {
        pbn_exar_XR17V352,
        pbn_exar_XR17V354,
        pbn_exar_XR17V358,
+       pbn_exar_XR17V8358,
        pbn_exar_ibm_saturn,
        pbn_pasemi_1682M,
        pbn_ni8430_2,
@@ -3685,6 +3695,14 @@ static struct pciserial_board pci_boards[] = {
                .reg_shift      = 0,
                .first_offset   = 0,
        },
+       [pbn_exar_XR17V8358] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 16,
+               .base_baud      = 7812500,
+               .uart_offset    = 0x400,
+               .reg_shift      = 0,
+               .first_offset   = 0,
+       },
        [pbn_exar_ibm_saturn] = {
                .flags          = FL_BASE0,
                .num_ports      = 1,
@@ -5080,7 +5098,7 @@ static struct pci_device_id serial_pci_tbl[] = {
                0,
                0, pbn_exar_XR17C158 },
        /*
-        * Exar Corp. XR17V35[248] Dual/Quad/Octal PCIe UARTs
+        * Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs
         */
        {       PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352,
                PCI_ANY_ID, PCI_ANY_ID,
@@ -5094,7 +5112,10 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0,
                0, pbn_exar_XR17V358 },
-
+       {       PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358,
+               PCI_ANY_ID, PCI_ANY_ID,
+               0,
+               0, pbn_exar_XR17V8358 },
        /*
         * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
         */
index d58fe4763d9e1bdacf07577e3043414e22f8ecbc..27dade29646b7c8d962494cf37daee398c774514 100644 (file)
@@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
        config.direction = DMA_MEM_TO_DEV;
        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        config.dst_addr = port->mapbase + ATMEL_US_THR;
+       config.dst_maxburst = 1;
 
        ret = dmaengine_slave_config(atmel_port->chan_tx,
                                     &config);
@@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
        config.direction = DMA_DEV_TO_MEM;
        config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        config.src_addr = port->mapbase + ATMEL_US_RHR;
+       config.src_maxburst = 1;
 
        ret = dmaengine_slave_config(atmel_port->chan_rx,
                                     &config);
index 5b73afb9f9f34343371477be0c48d535f600dc86..137381e649e5bec1c2ac5bf616bc9da6e813772a 100644 (file)
@@ -346,7 +346,6 @@ static const struct of_device_id of_platform_serial_table[] = {
        { .compatible = "ibm,qpace-nwp-serial",
                .data = (void *)PORT_NWPSERIAL, },
 #endif
-       { .type = "serial",         .data = (void *)PORT_UNKNOWN, },
        { /* end of list */ },
 };
 
index cf08876922f1446e55a2d8ca79bf87e0d4e24fed..a0ae942d9562d818c3e35a30c71d1bd1cb12e426 100644 (file)
@@ -1068,8 +1068,9 @@ static int s3c64xx_serial_startup(struct uart_port *port)
        spin_lock_irqsave(&port->lock, flags);
 
        ufcon = rd_regl(port, S3C2410_UFCON);
-       ufcon |= S3C2410_UFCON_RESETRX | S3C2410_UFCON_RESETTX |
-                       S5PV210_UFCON_RXTRIG8;
+       ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
+       if (!uart_console(port))
+               ufcon |= S3C2410_UFCON_RESETTX;
        wr_regl(port, S3C2410_UFCON, ufcon);
 
        enable_rx_pio(ourport);
index eb5b03be9dfdf5ff1dddbf5ce7bd271596975967..0b7bb12dfc68bc7edc45f7274a4e75acb9238a59 100644 (file)
@@ -1770,7 +1770,7 @@ static const struct file_operations uart_proc_fops = {
  *     @port: the port to write the message
  *     @s: array of characters
  *     @count: number of characters in string to write
- *     @write: function to write character to port
+ *     @putchar: function to write character to port
  */
 void uart_console_write(struct uart_port *port, const char *s,
                        unsigned int count,
index 708eead850b032bdc74c17031c66b0d58abb192a..b1c6bd3d483fa87a0372928ebd59cbdf9173d17d 100644 (file)
@@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
 
 static int ulite_probe(struct platform_device *pdev)
 {
-       struct resource *res, *res2;
+       struct resource *res;
+       int irq;
        int id = pdev->id;
 #ifdef CONFIG_OF
        const __be32 *prop;
@@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev)
        if (!res)
                return -ENODEV;
 
-       res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res2)
-               return -ENODEV;
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0)
+               return -ENXIO;
 
-       return ulite_assign(&pdev->dev, id, res->start, res2->start);
+       return ulite_assign(&pdev->dev, id, res->start, irq);
 }
 
 static int ulite_remove(struct platform_device *pdev)
index f218ec658f5d200e415d6bdf7503f7bc83f7a825..3ddbac767db3c43e3f5e3afd1e5dd482af86f88a 100644 (file)
@@ -1331,9 +1331,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
  */
 static int cdns_uart_probe(struct platform_device *pdev)
 {
-       int rc, id;
+       int rc, id, irq;
        struct uart_port *port;
-       struct resource *res, *res2;
+       struct resource *res;
        struct cdns_uart *cdns_uart_data;
 
        cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
@@ -1380,9 +1380,9 @@ static int cdns_uart_probe(struct platform_device *pdev)
                goto err_out_clk_disable;
        }
 
-       res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res2) {
-               rc = -ENODEV;
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               rc = -ENXIO;
                goto err_out_clk_disable;
        }
 
@@ -1411,7 +1411,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
                 * and triggers invocation of the config_port() entry point.
                 */
                port->mapbase = res->start;
-               port->irq = res2->start;
+               port->irq = irq;
                port->dev = &pdev->dev;
                port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
                port->private_data = cdns_uart_data;
index 632fc815206169281af9aa13f6ca345eb846eabe..8e53fe4696647e6bbfd8951e3a96a0d070ce28af 100644 (file)
@@ -536,7 +536,7 @@ EXPORT_SYMBOL(tty_termios_hw_change);
  *     Locking: termios_rwsem
  */
 
-static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
+int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
 {
        struct ktermios old_termios;
        struct tty_ldisc *ld;
@@ -569,6 +569,7 @@ static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
        up_write(&tty->termios_rwsem);
        return 0;
 }
+EXPORT_SYMBOL_GPL(tty_set_termios);
 
 /**
  *     set_termios             -       set termios values for a tty
index 083acf45ad5aba6cc9e0fe6c9c7d94b3e0fcd85c..19d655a743b55eb8e17f4308bc722a40bfe4e505 100644 (file)
@@ -520,7 +520,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
 {
        struct ci_hdrc  *ci = container_of(fsm, struct ci_hdrc, fsm);
 
-       mutex_unlock(&fsm->lock);
        if (on) {
                ci_role_stop(ci);
                ci_role_start(ci, CI_ROLE_HOST);
@@ -529,7 +528,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
                hw_device_reset(ci);
                ci_role_start(ci, CI_ROLE_GADGET);
        }
-       mutex_lock(&fsm->lock);
        return 0;
 }
 
@@ -537,12 +535,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on)
 {
        struct ci_hdrc  *ci = container_of(fsm, struct ci_hdrc, fsm);
 
-       mutex_unlock(&fsm->lock);
        if (on)
                usb_gadget_vbus_connect(&ci->gadget);
        else
                usb_gadget_vbus_disconnect(&ci->gadget);
-       mutex_lock(&fsm->lock);
 
        return 0;
 }
index 3e15add665e236f2ef15bd1a9858dd9eabcb0c96..5c8f58114677daa1c65d5bd46255b71e8fd7fe72 100644 (file)
@@ -1142,11 +1142,16 @@ static int acm_probe(struct usb_interface *intf,
        }
 
        while (buflen > 0) {
+               elength = buffer[0];
+               if (!elength) {
+                       dev_err(&intf->dev, "skipping garbage byte\n");
+                       elength = 1;
+                       goto next_desc;
+               }
                if (buffer[1] != USB_DT_CS_INTERFACE) {
                        dev_err(&intf->dev, "skipping garbage\n");
                        goto next_desc;
                }
-               elength = buffer[0];
 
                switch (buffer[2]) {
                case USB_CDC_UNION_TYPE: /* we've found it */
index 9db74ca7e5b98224dd9889a3a09e3a8e4941668c..275c92e53a5972615166310742b63af0a8d61cf5 100644 (file)
@@ -88,13 +88,20 @@ static int ehci_msm_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hcd->regs = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(hcd->regs)) {
-               ret = PTR_ERR(hcd->regs);
+       if (!res) {
+               dev_err(&pdev->dev, "Unable to get memory resource\n");
+               ret = -ENODEV;
                goto put_hcd;
        }
+
        hcd->rsrc_start = res->start;
        hcd->rsrc_len = resource_size(res);
+       hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
+       if (!hcd->regs) {
+               dev_err(&pdev->dev, "ioremap failed\n");
+               ret = -ENOMEM;
+               goto put_hcd;
+       }
 
        /*
         * OTG driver takes care of PHY initialization, clock management,
index 9893d696fc973e9e4183b57b56b3ceb22570942f..f58caa9e6a27e6e1a7161a66e5e9a97698cc1e1a 100644 (file)
@@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt,
 }
 
 static int uas_use_uas_driver(struct usb_interface *intf,
-                             const struct usb_device_id *id)
+                             const struct usb_device_id *id,
+                             unsigned long *flags_ret)
 {
        struct usb_host_endpoint *eps[4] = { };
        struct usb_device *udev = interface_to_usbdev(intf);
@@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf,
         * this writing the following versions exist:
         * ASM1051 - no uas support version
         * ASM1051 - with broken (*) uas support
-        * ASM1053 - with working uas support
+        * ASM1053 - with working uas support, but problems with large xfers
         * ASM1153 - with working uas support
         *
         * Devices with these chips re-use a number of device-ids over the
@@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf,
                } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
                        /* Possibly an ASM1051, disable uas */
                        flags |= US_FL_IGNORE_UAS;
+               } else {
+                       /* ASM1053, these have issues with large transfers */
+                       flags |= US_FL_MAX_SECTORS_240;
                }
        }
 
@@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf,
                return 0;
        }
 
+       if (flags_ret)
+               *flags_ret = flags;
+
        return 1;
 }
index 6cdabdc119a73bc172d5b2e52f024113bde09d3b..6d3122afeed33e9cfe1b571c3acaf19dac967da4 100644 (file)
@@ -759,7 +759,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
 
 static int uas_slave_alloc(struct scsi_device *sdev)
 {
-       sdev->hostdata = (void *)sdev->host->hostdata;
+       struct uas_dev_info *devinfo =
+               (struct uas_dev_info *)sdev->host->hostdata;
+
+       sdev->hostdata = devinfo;
 
        /* USB has unusual DMA-alignment requirements: Although the
         * starting address of each scatter-gather element doesn't matter,
@@ -778,6 +781,11 @@ static int uas_slave_alloc(struct scsi_device *sdev)
         */
        blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
 
+       if (devinfo->flags & US_FL_MAX_SECTORS_64)
+               blk_queue_max_hw_sectors(sdev->request_queue, 64);
+       else if (devinfo->flags & US_FL_MAX_SECTORS_240)
+               blk_queue_max_hw_sectors(sdev->request_queue, 240);
+
        return 0;
 }
 
@@ -887,8 +895,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
        struct Scsi_Host *shost = NULL;
        struct uas_dev_info *devinfo;
        struct usb_device *udev = interface_to_usbdev(intf);
+       unsigned long dev_flags;
 
-       if (!uas_use_uas_driver(intf, id))
+       if (!uas_use_uas_driver(intf, id, &dev_flags))
                return -ENODEV;
 
        if (uas_switch_interface(udev, intf))
@@ -910,8 +919,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
        devinfo->udev = udev;
        devinfo->resetting = 0;
        devinfo->shutdown = 0;
-       devinfo->flags = id->driver_info;
-       usb_stor_adjust_quirks(udev, &devinfo->flags);
+       devinfo->flags = dev_flags;
        init_usb_anchor(&devinfo->cmd_urbs);
        init_usb_anchor(&devinfo->sense_urbs);
        init_usb_anchor(&devinfo->data_urbs);
index 5600c33fcadb219e52e8f985bf692c6ff3a1025e..6c10c888f35fb976b94b1ea3fddd61c4bcabe520 100644 (file)
@@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
                        US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
                        US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
                        US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
-                       US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES);
+                       US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
+                       US_FL_MAX_SECTORS_240);
 
        p = quirks;
        while (*p) {
@@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
                case 'f':
                        f |= US_FL_NO_REPORT_OPCODES;
                        break;
+               case 'g':
+                       f |= US_FL_MAX_SECTORS_240;
+                       break;
                case 'h':
                        f |= US_FL_CAPACITY_HEURISTICS;
                        break;
@@ -1080,7 +1084,7 @@ static int storage_probe(struct usb_interface *intf,
 
        /* If uas is enabled and this device can do uas then ignore it. */
 #if IS_ENABLED(CONFIG_USB_UAS)
-       if (uas_use_uas_driver(intf, id))
+       if (uas_use_uas_driver(intf, id, NULL))
                return -ENXIO;
 #endif
 
index cde698a07d210f446ba4562c7a09647515610176..a2ae42720a6afe92701de402b837d56dd66d03b4 100644 (file)
@@ -1802,6 +1802,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
+        BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
+
        inode->i_version = btrfs_stack_inode_sequence(inode_item);
        inode->i_rdev = 0;
        *rdev = btrfs_stack_inode_rdev(inode_item);
index 1eef4ee01d1a3c7fd78e4546dd1bd2570f9c7783..0ec8e228b89f42505cc0c9c8ffef96e1a2f3f9a4 100644 (file)
@@ -3178,8 +3178,8 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
        btrfs_mark_buffer_dirty(leaf);
-       btrfs_release_path(path);
 fail:
+       btrfs_release_path(path);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        return ret;
@@ -3305,8 +3305,7 @@ again:
 
        spin_lock(&block_group->lock);
        if (block_group->cached != BTRFS_CACHE_FINISHED ||
-           !btrfs_test_opt(root, SPACE_CACHE) ||
-           block_group->delalloc_bytes) {
+           !btrfs_test_opt(root, SPACE_CACHE)) {
                /*
                 * don't bother trying to write stuff out _if_
                 * a) we're not cached,
@@ -3408,17 +3407,14 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
        int loops = 0;
 
        spin_lock(&cur_trans->dirty_bgs_lock);
-       if (!list_empty(&cur_trans->dirty_bgs)) {
-               list_splice_init(&cur_trans->dirty_bgs, &dirty);
+       if (list_empty(&cur_trans->dirty_bgs)) {
+               spin_unlock(&cur_trans->dirty_bgs_lock);
+               return 0;
        }
+       list_splice_init(&cur_trans->dirty_bgs, &dirty);
        spin_unlock(&cur_trans->dirty_bgs_lock);
 
 again:
-       if (list_empty(&dirty)) {
-               btrfs_free_path(path);
-               return 0;
-       }
-
        /*
         * make sure all the block groups on our dirty list actually
         * exist
@@ -3431,18 +3427,16 @@ again:
                        return -ENOMEM;
        }
 
+       /*
+        * cache_write_mutex is here only to save us from balance or automatic
+        * removal of empty block groups deleting this block group while we are
+        * writing out the cache
+        */
+       mutex_lock(&trans->transaction->cache_write_mutex);
        while (!list_empty(&dirty)) {
                cache = list_first_entry(&dirty,
                                         struct btrfs_block_group_cache,
                                         dirty_list);
-
-               /*
-                * cache_write_mutex is here only to save us from balance
-                * deleting this block group while we are writing out the
-                * cache
-                */
-               mutex_lock(&trans->transaction->cache_write_mutex);
-
                /*
                 * this can happen if something re-dirties a block
                 * group that is already under IO.  Just wait for it to
@@ -3495,7 +3489,6 @@ again:
                }
                if (!ret)
                        ret = write_one_cache_group(trans, root, path, cache);
-               mutex_unlock(&trans->transaction->cache_write_mutex);
 
                /* if its not on the io list, we need to put the block group */
                if (should_put)
@@ -3503,7 +3496,16 @@ again:
 
                if (ret)
                        break;
+
+               /*
+                * Avoid blocking other tasks for too long. It might even save
+                * us from writing caches for block groups that are going to be
+                * removed.
+                */
+               mutex_unlock(&trans->transaction->cache_write_mutex);
+               mutex_lock(&trans->transaction->cache_write_mutex);
        }
+       mutex_unlock(&trans->transaction->cache_write_mutex);
 
        /*
         * go through delayed refs for all the stuff we've just kicked off
@@ -3514,8 +3516,15 @@ again:
                loops++;
                spin_lock(&cur_trans->dirty_bgs_lock);
                list_splice_init(&cur_trans->dirty_bgs, &dirty);
+               /*
+                * dirty_bgs_lock protects us from concurrent block group
+                * deletes too (not just cache_write_mutex).
+                */
+               if (!list_empty(&dirty)) {
+                       spin_unlock(&cur_trans->dirty_bgs_lock);
+                       goto again;
+               }
                spin_unlock(&cur_trans->dirty_bgs_lock);
-               goto again;
        }
 
        btrfs_free_path(path);
@@ -7537,7 +7546,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
  * returns the key for the extent through ins, and a tree buffer for
  * the first block of the extent through buf.
  *
- * returns the tree buffer or NULL.
+ * returns the tree buffer or an ERR_PTR on error.
  */
 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
@@ -7548,6 +7557,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
        struct btrfs_key ins;
        struct btrfs_block_rsv *block_rsv;
        struct extent_buffer *buf;
+       struct btrfs_delayed_extent_op *extent_op;
        u64 flags = 0;
        int ret;
        u32 blocksize = root->nodesize;
@@ -7568,13 +7578,14 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 
        ret = btrfs_reserve_extent(root, blocksize, blocksize,
                                   empty_size, hint, &ins, 0, 0);
-       if (ret) {
-               unuse_block_rsv(root->fs_info, block_rsv, blocksize);
-               return ERR_PTR(ret);
-       }
+       if (ret)
+               goto out_unuse;
 
        buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
-       BUG_ON(IS_ERR(buf)); /* -ENOMEM */
+       if (IS_ERR(buf)) {
+               ret = PTR_ERR(buf);
+               goto out_free_reserved;
+       }
 
        if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
                if (parent == 0)
@@ -7584,9 +7595,11 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                BUG_ON(parent > 0);
 
        if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
-               struct btrfs_delayed_extent_op *extent_op;
                extent_op = btrfs_alloc_delayed_extent_op();
-               BUG_ON(!extent_op); /* -ENOMEM */
+               if (!extent_op) {
+                       ret = -ENOMEM;
+                       goto out_free_buf;
+               }
                if (key)
                        memcpy(&extent_op->key, key, sizeof(extent_op->key));
                else
@@ -7601,13 +7614,24 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                extent_op->level = level;
 
                ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
-                                       ins.objectid,
-                                       ins.offset, parent, root_objectid,
-                                       level, BTRFS_ADD_DELAYED_EXTENT,
-                                       extent_op, 0);
-               BUG_ON(ret); /* -ENOMEM */
+                                                ins.objectid, ins.offset,
+                                                parent, root_objectid, level,
+                                                BTRFS_ADD_DELAYED_EXTENT,
+                                                extent_op, 0);
+               if (ret)
+                       goto out_free_delayed;
        }
        return buf;
+
+out_free_delayed:
+       btrfs_free_delayed_extent_op(extent_op);
+out_free_buf:
+       free_extent_buffer(buf);
+out_free_reserved:
+       btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
+out_unuse:
+       unuse_block_rsv(root->fs_info, block_rsv, blocksize);
+       return ERR_PTR(ret);
 }
 
 struct walk_control {
index 782f3bc4651d319529394b1ce29e20d58d01e62c..43af5a61ad25b4dbb3f4c0f2cb89160c120d1ac7 100644 (file)
@@ -4560,36 +4560,37 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
        do {
                index--;
                page = eb->pages[index];
-               if (page && mapped) {
+               if (!page)
+                       continue;
+               if (mapped)
                        spin_lock(&page->mapping->private_lock);
+               /*
+                * We do this since we'll remove the pages after we've
+                * removed the eb from the radix tree, so we could race
+                * and have this page now attached to the new eb.  So
+                * only clear page_private if it's still connected to
+                * this eb.
+                */
+               if (PagePrivate(page) &&
+                   page->private == (unsigned long)eb) {
+                       BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+                       BUG_ON(PageDirty(page));
+                       BUG_ON(PageWriteback(page));
                        /*
-                        * We do this since we'll remove the pages after we've
-                        * removed the eb from the radix tree, so we could race
-                        * and have this page now attached to the new eb.  So
-                        * only clear page_private if it's still connected to
-                        * this eb.
+                        * We need to make sure we haven't be attached
+                        * to a new eb.
                         */
-                       if (PagePrivate(page) &&
-                           page->private == (unsigned long)eb) {
-                               BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
-                               BUG_ON(PageDirty(page));
-                               BUG_ON(PageWriteback(page));
-                               /*
-                                * We need to make sure we haven't be attached
-                                * to a new eb.
-                                */
-                               ClearPagePrivate(page);
-                               set_page_private(page, 0);
-                               /* One for the page private */
-                               page_cache_release(page);
-                       }
-                       spin_unlock(&page->mapping->private_lock);
-
-               }
-               if (page) {
-                       /* One for when we alloced the page */
+                       ClearPagePrivate(page);
+                       set_page_private(page, 0);
+                       /* One for the page private */
                        page_cache_release(page);
                }
+
+               if (mapped)
+                       spin_unlock(&page->mapping->private_lock);
+
+               /* One for when we alloced the page */
+               page_cache_release(page);
        } while (index != 0);
 }
 
@@ -4870,6 +4871,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                                mark_extent_buffer_accessed(exists, p);
                                goto free_eb;
                        }
+                       exists = NULL;
 
                        /*
                         * Do this so attach doesn't complain and we need to
@@ -4933,12 +4935,12 @@ again:
        return eb;
 
 free_eb:
+       WARN_ON(!atomic_dec_and_test(&eb->refs));
        for (i = 0; i < num_pages; i++) {
                if (eb->pages[i])
                        unlock_page(eb->pages[i]);
        }
 
-       WARN_ON(!atomic_dec_and_test(&eb->refs));
        btrfs_release_extent_buffer(eb);
        return exists;
 }
index 81fa75a8e1f38e644be2e8bc2e02a4d15f5707c5..41c510b7cc110891bbc3818638137e3e516d5800 100644 (file)
@@ -1218,7 +1218,7 @@ out:
  *
  * This function writes out a free space cache struct to disk for quick recovery
  * on mount.  This will return 0 if it was successfull in writing the cache out,
- * and -1 if it was not.
+ * or an errno if it was not.
  */
 static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                                   struct btrfs_free_space_ctl *ctl,
@@ -1235,12 +1235,12 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        int must_iput = 0;
 
        if (!i_size_read(inode))
-               return -1;
+               return -EIO;
 
        WARN_ON(io_ctl->pages);
        ret = io_ctl_init(io_ctl, inode, root, 1);
        if (ret)
-               return -1;
+               return ret;
 
        if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
                down_write(&block_group->data_rwsem);
@@ -1258,7 +1258,9 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        }
 
        /* Lock all pages first so we can lock the extent safely. */
-       io_ctl_prepare_pages(io_ctl, inode, 0);
+       ret = io_ctl_prepare_pages(io_ctl, inode, 0);
+       if (ret)
+               goto out;
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         0, &cached_state);
index ada4d24ed11b71c3f20a90b9a8524d4c69bf2d20..8bb013672aee061e81eb03fcba6d51db9cd169af 100644 (file)
@@ -3632,25 +3632,28 @@ static void btrfs_read_locked_inode(struct inode *inode)
        BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
        BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
 
+       inode->i_version = btrfs_inode_sequence(leaf, inode_item);
+       inode->i_generation = BTRFS_I(inode)->generation;
+       inode->i_rdev = 0;
+       rdev = btrfs_inode_rdev(leaf, inode_item);
+
+       BTRFS_I(inode)->index_cnt = (u64)-1;
+       BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
+
+cache_index:
        /*
         * If we were modified in the current generation and evicted from memory
         * and then re-read we need to do a full sync since we don't have any
         * idea about which extents were modified before we were evicted from
         * cache.
+        *
+        * This is required for both inode re-read from disk and delayed inode
+        * in delayed_nodes_tree.
         */
        if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
                set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                        &BTRFS_I(inode)->runtime_flags);
 
-       inode->i_version = btrfs_inode_sequence(leaf, inode_item);
-       inode->i_generation = BTRFS_I(inode)->generation;
-       inode->i_rdev = 0;
-       rdev = btrfs_inode_rdev(leaf, inode_item);
-
-       BTRFS_I(inode)->index_cnt = (u64)-1;
-       BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
-
-cache_index:
        path->slots[0]++;
        if (inode->i_nlink != 1 ||
            path->slots[0] >= btrfs_header_nritems(leaf))
index b05653f182c231639080abe76688a473f263284e..1c22c65185045c61b170db3f1db8f2c3627bbd84 100644 (file)
@@ -2410,7 +2410,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                        "Attempt to delete subvolume %llu during send",
                        dest->root_key.objectid);
                err = -EPERM;
-               goto out_dput;
+               goto out_unlock_inode;
        }
 
        d_invalidate(dentry);
@@ -2505,6 +2505,7 @@ out_up_write:
                                root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
                spin_unlock(&dest->root_item_lock);
        }
+out_unlock_inode:
        mutex_unlock(&inode->i_mutex);
        if (!err) {
                shrink_dcache_sb(root->fs_info->sb);
index 8bcd2a00751785e0a6d41df74a3475e1acd8c455..96aebf3bcd5b37d35604c76ec584d29686ad34e2 100644 (file)
@@ -1058,6 +1058,7 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans,
        struct extent_map *em;
        struct list_head *search_list = &trans->transaction->pending_chunks;
        int ret = 0;
+       u64 physical_start = *start;
 
 again:
        list_for_each_entry(em, search_list, list) {
@@ -1068,9 +1069,9 @@ again:
                for (i = 0; i < map->num_stripes; i++) {
                        if (map->stripes[i].dev != device)
                                continue;
-                       if (map->stripes[i].physical >= *start + len ||
+                       if (map->stripes[i].physical >= physical_start + len ||
                            map->stripes[i].physical + em->orig_block_len <=
-                           *start)
+                           physical_start)
                                continue;
                        *start = map->stripes[i].physical +
                                em->orig_block_len;
@@ -1193,8 +1194,14 @@ again:
                         */
                        if (contains_pending_extent(trans, device,
                                                    &search_start,
-                                                   hole_size))
-                               hole_size = 0;
+                                                   hole_size)) {
+                               if (key.offset >= search_start) {
+                                       hole_size = key.offset - search_start;
+                               } else {
+                                       WARN_ON_ONCE(1);
+                                       hole_size = 0;
+                               }
+                       }
 
                        if (hole_size > max_hole_size) {
                                max_hole_start = search_start;
index f5ca0e989bba417b275c781a0de86b247c1d8619..1c3002e1db20c8f91a31f17e8d6d323ed76ea404 100644 (file)
 
 #ifndef ACPI_USE_SYSTEM_INTTYPES
 
-typedef unsigned char u8;
 typedef unsigned char u8;
 typedef unsigned short u16;
 typedef short s16;
index e60a745ac1982cd0ba11c29bad97d3175c681002..e804306ef5e88d7b02b8b7399b4d525c16b69cd3 100644 (file)
 #error KEXEC_CONTROL_MEMORY_LIMIT not defined
 #endif
 
+#ifndef KEXEC_CONTROL_MEMORY_GFP
+#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
+#endif
+
 #ifndef KEXEC_CONTROL_PAGE_SIZE
 #error KEXEC_CONTROL_PAGE_SIZE not defined
 #endif
index bcbde799ec690157d47b64a3c7e0728865b54bfa..1899c74a712791ba33f7e17f2d139da21da70c42 100644 (file)
@@ -60,6 +60,7 @@ struct phy_device;
 struct wireless_dev;
 /* 802.15.4 specific */
 struct wpan_dev;
+struct mpls_dev;
 
 void netdev_set_default_ethtool_ops(struct net_device *dev,
                                    const struct ethtool_ops *ops);
@@ -976,7 +977,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
  *                          u16 flags)
  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- *                          struct net_device *dev, u32 filter_mask)
+ *                          struct net_device *dev, u32 filter_mask,
+ *                          int nlflags)
  * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
  *                          u16 flags);
  *
@@ -1172,7 +1174,8 @@ struct net_device_ops {
        int                     (*ndo_bridge_getlink)(struct sk_buff *skb,
                                                      u32 pid, u32 seq,
                                                      struct net_device *dev,
-                                                     u32 filter_mask);
+                                                     u32 filter_mask,
+                                                     int nlflags);
        int                     (*ndo_bridge_dellink)(struct net_device *dev,
                                                      struct nlmsghdr *nlh,
                                                      u16 flags);
@@ -1627,6 +1630,9 @@ struct net_device {
        void                    *ax25_ptr;
        struct wireless_dev     *ieee80211_ptr;
        struct wpan_dev         *ieee802154_ptr;
+#if IS_ENABLED(CONFIG_MPLS_ROUTING)
+       struct mpls_dev __rcu   *mpls_ptr;
+#endif
 
 /*
  * Cache lines mostly used on receive path (including eth_type_trans())
@@ -2021,10 +2027,10 @@ struct pcpu_sw_netstats {
 ({                                                             \
        typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
        if (pcpu_stats) {                                       \
-               int i;                                          \
-               for_each_possible_cpu(i) {                      \
+               int __cpu;                                      \
+               for_each_possible_cpu(__cpu) {                  \
                        typeof(type) *stat;                     \
-                       stat = per_cpu_ptr(pcpu_stats, i);      \
+                       stat = per_cpu_ptr(pcpu_stats, __cpu);  \
                        u64_stats_init(&stat->syncp);           \
                }                                               \
        }                                                       \
index ab8f76dba6680f485943545407daeb117620b489..f2fdb5a520709551462ca8b95cf9793b2f182af6 100644 (file)
@@ -39,12 +39,24 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
 
 static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
 {
-       return skb->nf_bridge ? skb->nf_bridge->physindev->ifindex : 0;
+       struct nf_bridge_info *nf_bridge;
+
+       if (skb->nf_bridge == NULL)
+               return 0;
+
+       nf_bridge = skb->nf_bridge;
+       return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
 }
 
 static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
 {
-       return skb->nf_bridge ? skb->nf_bridge->physoutdev->ifindex : 0;
+       struct nf_bridge_info *nf_bridge;
+
+       if (skb->nf_bridge == NULL)
+               return 0;
+
+       nf_bridge = skb->nf_bridge;
+       return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
 }
 
 static inline struct net_device *
index e23d242d1230ff899f37478bcb3a8b92769129d2..dbcbcc59aa92e77de6c1c6065b403c0fa7d0dd15 100644 (file)
@@ -282,7 +282,8 @@ static inline bool rht_shrink_below_30(const struct rhashtable *ht,
 static inline bool rht_grow_above_100(const struct rhashtable *ht,
                                      const struct bucket_table *tbl)
 {
-       return atomic_read(&ht->nelems) > tbl->size;
+       return atomic_read(&ht->nelems) > tbl->size &&
+               (!ht->p.max_size || tbl->size < ht->p.max_size);
 }
 
 /* The bucket lock is selected based on the hash and protects mutations
index 2da5d1081ad990b57a07715ea0751ae10a80ac54..7b8e260c4a27df00b8eca70d5cd4f52864ba6d87 100644 (file)
@@ -122,5 +122,5 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
 
 extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                   struct net_device *dev, u16 mode,
-                                  u32 flags, u32 mask);
+                                  u32 flags, u32 mask, int nlflags);
 #endif /* __LINUX_RTNETLINK_H */
index 8222ae40ecb0167d55f3b59027277d2a9e98d80d..26a2e6122734f8237ac44d47fb6bf4e96cca124b 100644 (file)
@@ -175,14 +175,6 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 extern void calc_global_load(unsigned long ticks);
 extern void update_cpu_load_nohz(void);
 
-/* Notifier for when a task gets migrated to a new CPU */
-struct task_migration_notifier {
-       struct task_struct *task;
-       int from_cpu;
-       int to_cpu;
-};
-extern void register_task_migration_notifier(struct notifier_block *n);
-
 extern unsigned long get_parent_ip(unsigned long addr);
 
 extern void dump_cpu_task(int cpu);
index 06793b598f44828c5e36804c530aa5fd2400e8e2..66e374d62f64347025ed0a0bc7dade0744e16818 100644 (file)
@@ -773,6 +773,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
                            int node);
+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
 struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
index 358a337af598564f16b03bfa68a696ff85d4513b..fe5623c9af715fda985da02bcecb1c9451b3789c 100644 (file)
@@ -491,6 +491,7 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
 
 extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
 extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
+extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
 
 extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
 extern void tty_ldisc_deref(struct tty_ldisc *);
index a7f2604c5f25928938f2a55051f31f22ba469a49..7f5f78bd15ad448414fa8c06fb61fa25fb152b5f 100644 (file)
@@ -77,6 +77,8 @@
                /* Cannot handle ATA_12 or ATA_16 CDBs */       \
        US_FLAG(NO_REPORT_OPCODES,      0x04000000)             \
                /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
+       US_FLAG(MAX_SECTORS_240,        0x08000000)             \
+               /* Sets max_sectors to 240 */                   \
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
index fda6feeb6c1f3fc3ae7c4929935901e31982166b..78ed135e9dea6a9971d15e3b786a64de170b310c 100644 (file)
 #include <net/bond_alb.h>
 #include <net/bond_options.h>
 
-#define DRV_VERSION    "3.7.1"
-#define DRV_RELDATE    "April 27, 2011"
-#define DRV_NAME       "bonding"
-#define DRV_DESCRIPTION        "Ethernet Channel Bonding Driver"
-
-#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
-
 #define BOND_MAX_ARP_TARGETS   16
 
 #define BOND_DEFAULT_MIIMON    100
index 7b5887cd11723441418daa5ec306d8c8b4d7c1f1..48a8158235874b1625c65651b0bd92eedd999fe5 100644 (file)
@@ -279,12 +279,6 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout);
 
-static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
-                                               struct request_sock *req)
-{
-       reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
-}
-
 static inline void inet_csk_reqsk_queue_added(struct sock *sk,
                                              const unsigned long timeout)
 {
@@ -306,19 +300,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
        return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
-                                              struct request_sock *req)
-{
-       reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req);
-}
-
-static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
-                                            struct request_sock *req)
-{
-       inet_csk_reqsk_queue_unlink(sk, req);
-       inet_csk_reqsk_queue_removed(sk, req);
-       reqsk_put(req);
-}
+void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
 
 void inet_csk_destroy_sock(struct sock *sk);
 void inet_csk_prepare_forced_close(struct sock *sk);
index fe41f3ceb008d767d594de6a042393ba463b509b..9f4265ce88927b0fe1d7418dbeff5f306855ae15 100644 (file)
@@ -212,24 +212,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue)
        return queue->rskq_accept_head == NULL;
 }
 
-static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
-                                     struct request_sock *req)
-{
-       struct listen_sock *lopt = queue->listen_opt;
-       struct request_sock **prev;
-
-       spin_lock(&queue->syn_wait_lock);
-
-       prev = &lopt->syn_table[req->rsk_hash];
-       while (*prev != req)
-               prev = &(*prev)->dl_next;
-       *prev = req->dl_next;
-
-       spin_unlock(&queue->syn_wait_lock);
-       if (del_timer(&req->rsk_timer))
-               reqsk_put(req);
-}
-
 static inline void reqsk_queue_add(struct request_sock_queue *queue,
                                   struct request_sock *req,
                                   struct sock *parent,
index 26f406e0f673437498406acbdf84841a87862763..3a8fca9409a7a0c1fad472155854b26f0a122487 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+ * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 0de95ccb92cf58ed0536cca855d1255b0276b4fb..5bd134651f5eef50d7c0faee3c66457b66b1307f 100644 (file)
@@ -41,7 +41,8 @@
 
 #define EMUPAGESIZE     4096
 #define MAXREQVOICES    8
-#define MAXPAGES        8192
+#define MAXPAGES0       4096   /* 32 bit mode */
+#define MAXPAGES1       8192   /* 31 bit mode */
 #define RESERVED        0
 #define NUM_MIDI        16
 #define NUM_G           64              /* use all channels */
@@ -50,8 +51,7 @@
 
 /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
 #define EMU10K1_DMA_MASK       0x7fffffffUL    /* 31bit */
-#define AUDIGY_DMA_MASK                0x7fffffffUL    /* 31bit FIXME - 32 should work? */
-                                               /* See ALSA bug #1276 - rlrevell */
+#define AUDIGY_DMA_MASK                0xffffffffUL    /* 32bit mode */
 
 #define TMEMSIZE        256*1024
 #define TMEMSIZEREG     4
 
 #define MAPB                   0x0d            /* Cache map B                                          */
 
-#define MAP_PTE_MASK           0xffffe000      /* The 19 MSBs of the PTE indexed by the PTI            */
-#define MAP_PTI_MASK           0x00001fff      /* The 13 bit index to one of the 8192 PTE dwords       */
+#define MAP_PTE_MASK0          0xfffff000      /* The 20 MSBs of the PTE indexed by the PTI            */
+#define MAP_PTI_MASK0          0x00000fff      /* The 12 bit index to one of the 4096 PTE dwords       */
+
+#define MAP_PTE_MASK1          0xffffe000      /* The 19 MSBs of the PTE indexed by the PTI            */
+#define MAP_PTI_MASK1          0x00001fff      /* The 13 bit index to one of the 8192 PTE dwords       */
 
 /* 0x0e, 0x0f: Not used */
 
@@ -1704,6 +1707,7 @@ struct snd_emu10k1 {
        unsigned short model;                   /* subsystem id */
        unsigned int card_type;                 /* EMU10K1_CARD_* */
        unsigned int ecard_ctrl;                /* ecard control bits */
+       unsigned int address_mode;              /* address mode */
        unsigned long dma_mask;                 /* PCI DMA mask */
        unsigned int delay_pcm_irq;             /* in samples */
        int max_cache_pages;                    /* max memory size / PAGE_SIZE */
index 0bc83647d3fa39e05fcd153467eec20aa4c74556..1065095c6973555f2ba8f835b5321df647b5bc21 100644 (file)
@@ -287,7 +287,7 @@ struct device;
        .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
        .tlv.p = (tlv_array), \
        .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
-       .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+       .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
 #define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
        SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
 #define SOC_DAPM_ENUM(xname, xenum) \
index fcb312b3f25809e781098e2bd2514ae89f59329f..f6226914acfee0d2486d8e57095dbaead1b4a8cc 100644 (file)
@@ -387,8 +387,20 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
 int snd_soc_register_card(struct snd_soc_card *card);
 int snd_soc_unregister_card(struct snd_soc_card *card);
 int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
+#ifdef CONFIG_PM_SLEEP
 int snd_soc_suspend(struct device *dev);
 int snd_soc_resume(struct device *dev);
+#else
+static inline int snd_soc_suspend(struct device *dev)
+{
+       return 0;
+}
+
+static inline int snd_soc_resume(struct device *dev)
+{
+       return 0;
+}
+#endif
 int snd_soc_poweroff(struct device *dev);
 int snd_soc_register_platform(struct device *dev,
                const struct snd_soc_platform_driver *platform_drv);
index 65aca51fe255e487f4a3b36e5708b5159b089c03..e290de4e7e82d3d8779be63e983ec234aef16bfe 100644 (file)
@@ -1,7 +1,7 @@
 /*
 * linux/spear_dma.h
 *
-* Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+* Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
index a3318f31e8e7fd05f317c595e8c4ce3bc3595fac..915980ac68dfa8cc1dc973b8a7e659fc56383c22 100644 (file)
@@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
 }
 
 /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
-/* Assuming a given event_idx value from the other size, if
+/* Assuming a given event_idx value from the other side, if
  * we have just incremented index from old to new_idx,
  * should we trigger an event? */
 static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
index 0f8f8b0bc1bf660b9c8c4d7a51f2d698e1534a17..60c302cfb4d3cbb976f0b2d69f28a96899b1d4ce 100644 (file)
@@ -197,9 +197,9 @@ x509.genkey:
        @echo >>x509.genkey "x509_extensions = myexts"
        @echo >>x509.genkey
        @echo >>x509.genkey "[ req_distinguished_name ]"
-       @echo >>x509.genkey "O = Magrathea"
-       @echo >>x509.genkey "CN = Glacier signing key"
-       @echo >>x509.genkey "emailAddress = slartibartfast@magrathea.h2g2"
+       @echo >>x509.genkey "#O = Unspecified company"
+       @echo >>x509.genkey "CN = Build time autogenerated kernel key"
+       @echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company"
        @echo >>x509.genkey
        @echo >>x509.genkey "[ myexts ]"
        @echo >>x509.genkey "basicConstraints=critical,CA:FALSE"
index 4139a0f8b558e4e7db4f79adbd353c97c7fda553..54f0e7fcd0e288b4506fab80091dcc39069ae422 100644 (file)
@@ -357,8 +357,8 @@ select_insn:
        ALU64_MOD_X:
                if (unlikely(SRC == 0))
                        return 0;
-               tmp = DST;
-               DST = do_div(tmp, SRC);
+               div64_u64_rem(DST, SRC, &tmp);
+               DST = tmp;
                CONT;
        ALU_MOD_X:
                if (unlikely(SRC == 0))
@@ -367,8 +367,8 @@ select_insn:
                DST = do_div(tmp, (u32) SRC);
                CONT;
        ALU64_MOD_K:
-               tmp = DST;
-               DST = do_div(tmp, IMM);
+               div64_u64_rem(DST, IMM, &tmp);
+               DST = tmp;
                CONT;
        ALU_MOD_K:
                tmp = (u32) DST;
@@ -377,7 +377,7 @@ select_insn:
        ALU64_DIV_X:
                if (unlikely(SRC == 0))
                        return 0;
-               do_div(DST, SRC);
+               DST = div64_u64(DST, SRC);
                CONT;
        ALU_DIV_X:
                if (unlikely(SRC == 0))
@@ -387,7 +387,7 @@ select_insn:
                DST = (u32) tmp;
                CONT;
        ALU64_DIV_K:
-               do_div(DST, IMM);
+               DST = div64_u64(DST, IMM);
                CONT;
        ALU_DIV_K:
                tmp = (u32) DST;
index 38c25b1f2fd5c7e4922f36cf884519c1bd485a85..7a36fdcca5bfb064a6709021782c98bd2a6de179 100644 (file)
@@ -707,7 +707,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
        do {
                unsigned long pfn, epfn, addr, eaddr;
 
-               pages = kimage_alloc_pages(GFP_KERNEL, order);
+               pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
                if (!pages)
                        break;
                pfn   = page_to_pfn(pages);
index f9123a82cbb614eb26cab55c0f58540a5a3eb24b..fe22f7510bceab3fad0f008379a91f34db074d37 100644 (file)
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                rq_clock_skip_update(rq, true);
 }
 
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
-       atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
        trace_sched_migrate_task(p, new_cpu);
 
        if (task_cpu(p) != new_cpu) {
-               struct task_migration_notifier tmn;
-
                if (p->sched_class->migrate_task_rq)
                        p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
                perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-
-               tmn.task = p;
-               tmn.from_cpu = task_cpu(p);
-               tmn.to_cpu = new_cpu;
-
-               atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
        }
 
        __set_task_cpu(p, new_cpu);
index deef1caa94c6779ea13e48690ffc2d9de2e7dddc..fefcb1fa5160139a41e9dd1fee74b20ef39105e0 100644 (file)
@@ -81,7 +81,6 @@ static void cpuidle_idle_call(void)
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;
-       unsigned int broadcast;
        bool reflect;
 
        /*
@@ -150,17 +149,6 @@ static void cpuidle_idle_call(void)
                goto exit_idle;
        }
 
-       broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;
-
-       /*
-        * Tell the time framework to switch to a broadcast timer
-        * because our local timer will be shutdown. If a local timer
-        * is used from another cpu as a broadcast timer, this call may
-        * fail if it is not available
-        */
-       if (broadcast && tick_broadcast_enter())
-               goto use_default;
-
        /* Take note of the planned idle state. */
        idle_set_state(this_rq(), &drv->states[next_state]);
 
@@ -174,8 +162,8 @@ static void cpuidle_idle_call(void)
        /* The cpu is no longer idle or about to enter idle. */
        idle_set_state(this_rq(), NULL);
 
-       if (broadcast)
-               tick_broadcast_exit();
+       if (entered_state == -EBUSY)
+               goto use_default;
 
        /*
         * Give the governor an opportunity to reflect on the outcome
index 4898442b837fbd715f8d5079a1060e43c4b7a367..b28df4019adedfe182d5c719da62b6bf05c031fe 100644 (file)
@@ -405,13 +405,18 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
 
        if (rht_grow_above_75(ht, tbl))
                size *= 2;
-       /* More than two rehashes (not resizes) detected. */
-       else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
+       /* Do not schedule more than one rehash */
+       else if (old_tbl != tbl)
                return -EBUSY;
 
        new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
-       if (new_tbl == NULL)
+       if (new_tbl == NULL) {
+               /* Schedule async resize/rehash to try allocation
+                * non-atomic context.
+                */
+               schedule_work(&ht->run_work);
                return -ENOMEM;
+       }
 
        err = rhashtable_rehash_attach(ht, tbl, new_tbl);
        if (err) {
index 409608960899630b5349bbd310200a95dc2867a2..e29ad70b3000be4f0e7486b172746ea62b4f216f 100644 (file)
@@ -170,7 +170,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
        struct br_port_msg *bpm;
        struct nlattr *nest, *nest2;
 
-       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
        if (!nlh)
                return -EMSGSIZE;
 
index 0e4ddb81610d90ff51a45835424cef547bba73bf..4b5c236998ff1010831711a17b773e16c7d8ba58 100644 (file)
@@ -394,7 +394,7 @@ errout:
  * Dump information about all ports, in response to GETLINK
  */
 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-              struct net_device *dev, u32 filter_mask)
+              struct net_device *dev, u32 filter_mask, int nlflags)
 {
        struct net_bridge_port *port = br_port_get_rtnl(dev);
 
@@ -402,7 +402,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
            !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
                return 0;
 
-       return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
+       return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
                              filter_mask, dev);
 }
 
index 6ca0251cb478bf3147501b325a381a4081dd5149..3362c29400f182c90db6e67a04fcc5018906fa41 100644 (file)
@@ -828,7 +828,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port);
 int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
 int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
-              u32 filter_mask);
+              u32 filter_mask, int nlflags);
 
 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
index 1796cef55ab5af93718047604c554475b4d3275b..c7ba0388f1be8e37e780f27e0b3bd95f45dd0ade 100644 (file)
@@ -3079,7 +3079,7 @@ static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
            struct rps_dev_flow *rflow, u16 next_cpu)
 {
-       if (next_cpu != RPS_NO_CPU) {
+       if (next_cpu < nr_cpu_ids) {
 #ifdef CONFIG_RFS_ACCEL
                struct netdev_rx_queue *rxqueue;
                struct rps_dev_flow_table *flow_table;
@@ -3184,7 +3184,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                 * If the desired CPU (where last recvmsg was done) is
                 * different from current CPU (one in the rx-queue flow
                 * table entry), switch if one of the following holds:
-                *   - Current CPU is unset (equal to RPS_NO_CPU).
+                *   - Current CPU is unset (>= nr_cpu_ids).
                 *   - Current CPU is offline.
                 *   - The current CPU's queue tail has advanced beyond the
                 *     last packet that was enqueued using this table entry.
@@ -3192,14 +3192,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                 *     have been dequeued, thus preserving in order delivery.
                 */
                if (unlikely(tcpu != next_cpu) &&
-                   (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+                   (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
                      rflow->last_qtail)) >= 0)) {
                        tcpu = next_cpu;
                        rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
                }
 
-               if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+               if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
                        *rflowp = rflow;
                        cpu = tcpu;
                        goto done;
@@ -3240,14 +3240,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
        struct rps_dev_flow_table *flow_table;
        struct rps_dev_flow *rflow;
        bool expire = true;
-       int cpu;
+       unsigned int cpu;
 
        rcu_read_lock();
        flow_table = rcu_dereference(rxqueue->rps_flow_table);
        if (flow_table && flow_id <= flow_table->mask) {
                rflow = &flow_table->flows[flow_id];
                cpu = ACCESS_ONCE(rflow->cpu);
-               if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+               if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
                    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
                           rflow->last_qtail) <
                     (int)(10 * flow_table->mask)))
index 358d52a38533b90d8df212de6636ca8e2525730b..666e0928ba404b85cf210749e505a2c205e5ee3f 100644 (file)
@@ -2854,7 +2854,7 @@ static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
 
 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                            struct net_device *dev, u16 mode,
-                           u32 flags, u32 mask)
+                           u32 flags, u32 mask, int nlflags)
 {
        struct nlmsghdr *nlh;
        struct ifinfomsg *ifm;
@@ -2863,7 +2863,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
        struct net_device *br_dev = netdev_master_upper_dev_get(dev);
 
-       nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
+       nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2969,7 +2969,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
                if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
                        if (idx >= cb->args[0] &&
                            br_dev->netdev_ops->ndo_bridge_getlink(
-                                   skb, portid, seq, dev, filter_mask) < 0)
+                                   skb, portid, seq, dev, filter_mask,
+                                   NLM_F_MULTI) < 0)
                                break;
                        idx++;
                }
@@ -2977,7 +2978,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
                if (ops->ndo_bridge_getlink) {
                        if (idx >= cb->args[0] &&
                            ops->ndo_bridge_getlink(skb, portid, seq, dev,
-                                                   filter_mask) < 0)
+                                                   filter_mask,
+                                                   NLM_F_MULTI) < 0)
                                break;
                        idx++;
                }
@@ -3018,7 +3020,7 @@ static int rtnl_bridge_notify(struct net_device *dev)
                goto errout;
        }
 
-       err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
+       err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
        if (err < 0)
                goto errout;
 
index d1967dab9cc697a4dad45838e1a94c2b5f9cf147..3cfff2a3d651fb7d7cd2baaa3698c123eb7fc00f 100644 (file)
@@ -280,13 +280,14 @@ nodata:
 EXPORT_SYMBOL(__alloc_skb);
 
 /**
- * build_skb - build a network buffer
+ * __build_skb - build a network buffer
  * @data: data buffer provided by caller
- * @frag_size: size of fragment, or 0 if head was kmalloced
+ * @frag_size: size of data, or 0 if head was kmalloced
  *
  * Allocate a new &sk_buff. Caller provides space holding head and
  * skb_shared_info. @data must have been allocated by kmalloc() only if
- * @frag_size is 0, otherwise data should come from the page allocator.
+ * @frag_size is 0, otherwise data should come from the page allocator
+ *  or vmalloc()
  * The return is the new skb buffer.
  * On a failure the return is %NULL, and @data is not freed.
  * Notes :
@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
  *  before giving packet to stack.
  *  RX rings only contains data buffers, not full skbs.
  */
-struct sk_buff *build_skb(void *data, unsigned int frag_size)
+struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 {
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = SKB_TRUESIZE(size);
-       skb->head_frag = frag_size != 0;
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 
        return skb;
 }
+
+/* build_skb() is wrapper over __build_skb(), that specifically
+ * takes care of skb->head and skb->pfmemalloc
+ * This means that if @frag_size is not zero, then @data must be backed
+ * by a page fragment, not kmalloc() or vmalloc()
+ */
+struct sk_buff *build_skb(void *data, unsigned int frag_size)
+{
+       struct sk_buff *skb = __build_skb(data, frag_size);
+
+       if (skb && frag_size) {
+               skb->head_frag = 1;
+               if (virt_to_head_page(data)->pfmemalloc)
+                       skb->pfmemalloc = 1;
+       }
+       return skb;
+}
 EXPORT_SYMBOL(build_skb);
 
 struct netdev_alloc_cache {
@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
        gfp_t gfp = gfp_mask;
 
        if (order) {
-               gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+               gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+                           __GFP_NOMEMALLOC;
                page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
                nc->frag.size = PAGE_SIZE << (page ? order : 0);
        }
index 2b4f21d34df6819c134b590d8ddeecffe668aaf6..ccf4c5629b3c8672f348a3ca29b0b6bd46cc23e8 100644 (file)
@@ -453,7 +453,8 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
                                                       iph->saddr, iph->daddr);
        if (req) {
                nsk = dccp_check_req(sk, skb, req);
-               reqsk_put(req);
+               if (!nsk)
+                       reqsk_put(req);
                return nsk;
        }
        nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
index 9d0551092c6cd73f3cfa30c89130bac69d693118..5165571f397aa6d40da19d36287ca8339852ab54 100644 (file)
@@ -301,7 +301,8 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
                                   &iph->daddr, inet6_iif(skb));
        if (req) {
                nsk = dccp_check_req(sk, skb, req);
-               reqsk_put(req);
+               if (!nsk)
+                       reqsk_put(req);
                return nsk;
        }
        nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
index 5f566663e47f3faa8025a998b2d8ae976db58860..30addee2dd037f9686c5585243e503632dcdd21a 100644 (file)
@@ -186,8 +186,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
        if (child == NULL)
                goto listen_overflow;
 
-       inet_csk_reqsk_queue_unlink(sk, req);
-       inet_csk_reqsk_queue_removed(sk, req);
+       inet_csk_reqsk_queue_drop(sk, req);
        inet_csk_reqsk_queue_add(sk, req, child);
 out:
        return child;
index 079a224471e7e20641079e9f54f82743c0172165..e6f6cc3a1bcf45ee6fa49d6cfe840f58660e3511 100644 (file)
@@ -633,7 +633,7 @@ static int dsa_of_probe(struct device *dev)
                if (cd->sw_addr > PHY_MAX_ADDR)
                        continue;
 
-               if (!of_property_read_u32(np, "eeprom-length", &eeprom_len))
+               if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
                        cd->eeprom_len = eeprom_len;
 
                for_each_available_child_of_node(child, port) {
index 5c3dd6267ed3557f2f139f83002fd7b1feaab237..8976ca423a074447f0d857973ab9ea3bc6bbca7c 100644 (file)
@@ -564,6 +564,40 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
 }
 EXPORT_SYMBOL(inet_rtx_syn_ack);
 
+/* return true if req was found in the syn_table[] */
+static bool reqsk_queue_unlink(struct request_sock_queue *queue,
+                              struct request_sock *req)
+{
+       struct listen_sock *lopt = queue->listen_opt;
+       struct request_sock **prev;
+       bool found = false;
+
+       spin_lock(&queue->syn_wait_lock);
+
+       for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
+            prev = &(*prev)->dl_next) {
+               if (*prev == req) {
+                       *prev = req->dl_next;
+                       found = true;
+                       break;
+               }
+       }
+
+       spin_unlock(&queue->syn_wait_lock);
+       if (del_timer(&req->rsk_timer))
+               reqsk_put(req);
+       return found;
+}
+
+void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+{
+       if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
+               reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+               reqsk_put(req);
+       }
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
+
 static void reqsk_timer_handler(unsigned long data)
 {
        struct request_sock *req = (struct request_sock *)data;
index a93f260cf24ca0a9d60346dc085eb51afdb43927..05ff44b758dfee1e02996a3726ac63854a96ad16 100644 (file)
@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
        if (sk_hashed(sk)) {
                write_lock_bh(&ping_table.lock);
                hlist_nulls_del(&sk->sk_nulls_node);
+               sk_nulls_node_init(&sk->sk_nulls_node);
                sock_put(sk);
                isk->inet_num = 0;
                isk->inet_sport = 0;
index a78540f28276771e4c8f35024d3ee133c31317ab..bff62fc87b8e266dbee59d43359fea8a77e29d60 100644 (file)
@@ -962,10 +962,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        if (dst_metric_locked(dst, RTAX_MTU))
                return;
 
-       if (dst->dev->mtu < mtu)
-               return;
-
-       if (rt->rt_pmtu && rt->rt_pmtu < mtu)
+       if (ipv4_mtu(dst) < mtu)
                return;
 
        if (mtu < ip_rt_min_pmtu)
index 3571f2be4470749b9a7c8178fa43a6bf2bac33d8..fc1c658ec6c18cb1daa1cc06039a9f19df67bb5e 100644 (file)
@@ -1348,7 +1348,8 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
        if (req) {
                nsk = tcp_check_req(sk, skb, req, false);
-               reqsk_put(req);
+               if (!nsk)
+                       reqsk_put(req);
                return nsk;
        }
 
index 63d6311b5365944fae0bdaa0f7abfbc51f55a429..e5d7649136fcb31ca70b097dcbd9873df07e7417 100644 (file)
@@ -755,10 +755,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        if (!child)
                goto listen_overflow;
 
-       inet_csk_reqsk_queue_unlink(sk, req);
-       inet_csk_reqsk_queue_removed(sk, req);
-
+       inet_csk_reqsk_queue_drop(sk, req);
        inet_csk_reqsk_queue_add(sk, req, child);
+       /* Warning: caller must not call reqsk_put(req);
+        * child stole last reference on it.
+        */
        return child;
 
 listen_overflow:
index 8c8d7e06b72fc1e5c4a50ca55136757f0501f8c0..a369e8a70b2c775bfee94d7f329ee892c2cdc895 100644 (file)
@@ -2812,39 +2812,65 @@ begin_fwd:
        }
 }
 
-/* Send a fin.  The caller locks the socket for us.  This cannot be
- * allowed to fail queueing a FIN frame under any circumstances.
+/* We allow to exceed memory limits for FIN packets to expedite
+ * connection tear down and (memory) recovery.
+ * Otherwise tcp_send_fin() could be tempted to either delay FIN
+ * or even be forced to close flow without any FIN.
+ */
+static void sk_forced_wmem_schedule(struct sock *sk, int size)
+{
+       int amt, status;
+
+       if (size <= sk->sk_forward_alloc)
+               return;
+       amt = sk_mem_pages(size);
+       sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+       sk_memory_allocated_add(sk, amt, &status);
+}
+
+/* Send a FIN. The caller locks the socket for us.
+ * We should try to send a FIN packet really hard, but eventually give up.
  */
 void tcp_send_fin(struct sock *sk)
 {
+       struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb = tcp_write_queue_tail(sk);
-       int mss_now;
 
-       /* Optimization, tack on the FIN if we have a queue of
-        * unsent frames.  But be careful about outgoing SACKS
-        * and IP options.
+       /* Optimization, tack on the FIN if we have one skb in write queue and
+        * this skb was not yet sent, or we are under memory pressure.
+        * Note: in the latter case, FIN packet will be sent after a timeout,
+        * as TCP stack thinks it has already been transmitted.
         */
-       mss_now = tcp_current_mss(sk);
-
-       if (tcp_send_head(sk)) {
-               TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
-               TCP_SKB_CB(skb)->end_seq++;
+       if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+coalesce:
+               TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
+               TCP_SKB_CB(tskb)->end_seq++;
                tp->write_seq++;
+               if (!tcp_send_head(sk)) {
+                       /* This means tskb was already sent.
+                        * Pretend we included the FIN on previous transmit.
+                        * We need to set tp->snd_nxt to the value it would have
+                        * if FIN had been sent. This is because retransmit path
+                        * does not change tp->snd_nxt.
+                        */
+                       tp->snd_nxt++;
+                       return;
+               }
        } else {
-               /* Socket is locked, keep trying until memory is available. */
-               for (;;) {
-                       skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
-                       if (skb)
-                               break;
-                       yield();
+               skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
+               if (unlikely(!skb)) {
+                       if (tskb)
+                               goto coalesce;
+                       return;
                }
+               skb_reserve(skb, MAX_TCP_HEADER);
+               sk_forced_wmem_schedule(sk, skb->truesize);
                /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
                tcp_init_nondata_skb(skb, tp->write_seq,
                                     TCPHDR_ACK | TCPHDR_FIN);
                tcp_queue_skb(sk, skb);
        }
-       __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
+       __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
 }
 
 /* We get here when a process closes a file descriptor (either due to
index b5e6cc1d4a7302f3288ba00ee9c75bb327410960..a38d3ac0f18f6e631e3a17904bf617f7a0dfe28a 100644 (file)
@@ -1246,7 +1246,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
 static int ip6gre_tunnel_init(struct net_device *dev)
 {
        struct ip6_tnl *tunnel;
-       int i;
 
        tunnel = netdev_priv(dev);
 
@@ -1260,16 +1259,10 @@ static int ip6gre_tunnel_init(struct net_device *dev)
        if (ipv6_addr_any(&tunnel->parms.raddr))
                dev->header_ops = &ip6gre_header_ops;
 
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *ip6gre_tunnel_stats;
-               ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
-               u64_stats_init(&ip6gre_tunnel_stats->syncp);
-       }
-
        return 0;
 }
 
index ad51df85aa00dda56b86dca24ed5fe9a3b5c791e..b6575d6655681e8e84993a5db929c7309d47d4d3 100644 (file)
@@ -946,7 +946,8 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
                                   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
        if (req) {
                nsk = tcp_check_req(sk, skb, req, false);
-               reqsk_put(req);
+               if (!nsk)
+                       reqsk_put(req);
                return nsk;
        }
        nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
index db8a2ea6d4de5bb2b4b92785d2e6a05a7613b0f3..954810c76a8650d87ec4bddc62fa5618e48c1341 100644 (file)
@@ -53,6 +53,11 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
        return rt;
 }
 
+static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
+{
+       return rcu_dereference_rtnl(dev->mpls_ptr);
+}
+
 static bool mpls_output_possible(const struct net_device *dev)
 {
        return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
@@ -136,6 +141,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        struct mpls_route *rt;
        struct mpls_entry_decoded dec;
        struct net_device *out_dev;
+       struct mpls_dev *mdev;
        unsigned int hh_len;
        unsigned int new_header_size;
        unsigned int mtu;
@@ -143,6 +149,10 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
 
        /* Careful this entire function runs inside of an rcu critical section */
 
+       mdev = mpls_dev_get(dev);
+       if (!mdev || !mdev->input_enabled)
+               goto drop;
+
        if (skb->pkt_type != PACKET_HOST)
                goto drop;
 
@@ -352,9 +362,9 @@ static int mpls_route_add(struct mpls_route_config *cfg)
        if (!dev)
                goto errout;
 
-       /* For now just support ethernet devices */
+       /* Ensure this is a supported device */
        err = -EINVAL;
-       if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
+       if (!mpls_dev_get(dev))
                goto errout;
 
        err = -EINVAL;
@@ -428,10 +438,89 @@ errout:
        return err;
 }
 
+#define MPLS_PERDEV_SYSCTL_OFFSET(field)       \
+       (&((struct mpls_dev *)0)->field)
+
+static const struct ctl_table mpls_dev_table[] = {
+       {
+               .procname       = "input",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+               .data           = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
+       },
+       { }
+};
+
+static int mpls_dev_sysctl_register(struct net_device *dev,
+                                   struct mpls_dev *mdev)
+{
+       char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
+       struct ctl_table *table;
+       int i;
+
+       table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
+       if (!table)
+               goto out;
+
+       /* Table data contains only offsets relative to the base of
+        * the mdev at this point, so make them absolute.
+        */
+       for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++)
+               table[i].data = (char *)mdev + (uintptr_t)table[i].data;
+
+       snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
+
+       mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
+       if (!mdev->sysctl)
+               goto free;
+
+       return 0;
+
+free:
+       kfree(table);
+out:
+       return -ENOBUFS;
+}
+
+static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
+{
+       struct ctl_table *table;
+
+       table = mdev->sysctl->ctl_table_arg;
+       unregister_net_sysctl_table(mdev->sysctl);
+       kfree(table);
+}
+
+static struct mpls_dev *mpls_add_dev(struct net_device *dev)
+{
+       struct mpls_dev *mdev;
+       int err = -ENOMEM;
+
+       ASSERT_RTNL();
+
+       mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+       if (!mdev)
+               return ERR_PTR(err);
+
+       err = mpls_dev_sysctl_register(dev, mdev);
+       if (err)
+               goto free;
+
+       rcu_assign_pointer(dev->mpls_ptr, mdev);
+
+       return mdev;
+
+free:
+       kfree(mdev);
+       return ERR_PTR(err);
+}
+
 static void mpls_ifdown(struct net_device *dev)
 {
        struct mpls_route __rcu **platform_label;
        struct net *net = dev_net(dev);
+       struct mpls_dev *mdev;
        unsigned index;
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -443,14 +532,35 @@ static void mpls_ifdown(struct net_device *dev)
                        continue;
                rt->rt_dev = NULL;
        }
+
+       mdev = mpls_dev_get(dev);
+       if (!mdev)
+               return;
+
+       mpls_dev_sysctl_unregister(mdev);
+
+       RCU_INIT_POINTER(dev->mpls_ptr, NULL);
+
+       kfree(mdev);
 }
 
 static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                           void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct mpls_dev *mdev;
 
        switch(event) {
+       case NETDEV_REGISTER:
+               /* For now just support ethernet devices */
+               if ((dev->type == ARPHRD_ETHER) ||
+                   (dev->type == ARPHRD_LOOPBACK)) {
+                       mdev = mpls_add_dev(dev);
+                       if (IS_ERR(mdev))
+                               return notifier_from_errno(PTR_ERR(mdev));
+               }
+               break;
+
        case NETDEV_UNREGISTER:
                mpls_ifdown(dev);
                break;
@@ -536,6 +646,15 @@ int nla_get_labels(const struct nlattr *nla,
                if ((dec.bos != bos) || dec.ttl || dec.tc)
                        return -EINVAL;
 
+               switch (dec.label) {
+               case LABEL_IMPLICIT_NULL:
+                       /* RFC3032: This is a label that an LSR may
+                        * assign and distribute, but which never
+                        * actually appears in the encapsulation.
+                        */
+                       return -EINVAL;
+               }
+
                label[i] = dec.label;
        }
        *labels = nla_labels;
@@ -912,7 +1031,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
        return ret;
 }
 
-static struct ctl_table mpls_table[] = {
+static const struct ctl_table mpls_table[] = {
        {
                .procname       = "platform_labels",
                .data           = NULL,
index fb6de92052c4bc87db6c83ba5288f2f921e13c62..693877d69606a1ecdab02dd78dc74eefdaf26f3f 100644 (file)
@@ -22,6 +22,12 @@ struct mpls_entry_decoded {
        u8 bos;
 };
 
+struct mpls_dev {
+       int                     input_enabled;
+
+       struct ctl_table_header *sysctl;
+};
+
 struct sk_buff;
 
 static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
index 78af83bc9c8e8dbfcead6e151247a127602ac107..ad9d11fb29fd208cd44d67e786b7977afc9e51e7 100644 (file)
@@ -4340,7 +4340,6 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
        case NFT_CONTINUE:
        case NFT_BREAK:
        case NFT_RETURN:
-               desc->len = sizeof(data->verdict);
                break;
        case NFT_JUMP:
        case NFT_GOTO:
@@ -4355,10 +4354,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 
                chain->use++;
                data->verdict.chain = chain;
-               desc->len = sizeof(data);
                break;
        }
 
+       desc->len = sizeof(data->verdict);
        desc->type = NFT_DATA_VERDICT;
        return 0;
 }
index 57d3e1af56305e90eade03e1fca1b38524091be6..0522fc9bfb0a88db513c480f45dff35a6f233000 100644 (file)
@@ -63,6 +63,8 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
                if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
                        goto nla_put_failure;
                break;
+       default:
+               break;
        }
 
        return 0;
index 62cabee42fbecc93587a0b008b19913a5a8470f9..635dbba93d013244038cbdbb48bbe9549f622304 100644 (file)
@@ -108,6 +108,8 @@ static int nft_reject_inet_dump(struct sk_buff *skb,
                if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
                        goto nla_put_failure;
                break;
+       default:
+               break;
        }
 
        return 0;
index 19909d0786a2e60574ba623d343193ca212ed2be..ec4adbdcb9b4a9926b4d992906f51787c57dc6a2 100644 (file)
@@ -1629,13 +1629,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
        if (data == NULL)
                return NULL;
 
-       skb = build_skb(data, size);
+       skb = __build_skb(data, size);
        if (skb == NULL)
                vfree(data);
-       else {
-               skb->head_frag = 0;
+       else
                skb->destructor = netlink_skb_destructor;
-       }
 
        return skb;
 }
index 8e472518f9f6856dcd5802c16373b6579f7284eb..295d14bd6c678c31b56219371df83d4ebe3b0a2c 100644 (file)
@@ -63,7 +63,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
                skb->mark = c->mark;
                /* using overlimits stats to count how many packets marked */
                ca->tcf_qstats.overlimits++;
-               nf_ct_put(c);
                goto out;
        }
 
@@ -82,7 +81,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
        nf_ct_put(c);
 
 out:
-       skb->nfct = NULL;
        spin_unlock(&ca->tcf_lock);
        return ca->tcf_action;
 }
index 3613e72e858e2e259bd91455127e4ec0af5c903b..70e3dacbf84ab7a899298fca1fdcb2e77fd0c2d5 100644 (file)
@@ -591,14 +591,14 @@ void tipc_bearer_stop(struct net *net)
 
 /* Caller should hold rtnl_lock to protect the bearer */
 static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
-                               struct tipc_bearer *bearer)
+                               struct tipc_bearer *bearer, int nlflags)
 {
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *prop;
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
-                         NLM_F_MULTI, TIPC_NL_BEARER_GET);
+                         nlflags, TIPC_NL_BEARER_GET);
        if (!hdr)
                return -EMSGSIZE;
 
@@ -657,7 +657,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (!bearer)
                        continue;
 
-               err = __tipc_nl_add_bearer(&msg, bearer);
+               err = __tipc_nl_add_bearer(&msg, bearer, NLM_F_MULTI);
                if (err)
                        break;
        }
@@ -705,7 +705,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
                goto err_out;
        }
 
-       err = __tipc_nl_add_bearer(&msg, bearer);
+       err = __tipc_nl_add_bearer(&msg, bearer, 0);
        if (err)
                goto err_out;
        rtnl_unlock();
@@ -857,14 +857,14 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
 }
 
 static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
-                              struct tipc_media *media)
+                              struct tipc_media *media, int nlflags)
 {
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *prop;
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
-                         NLM_F_MULTI, TIPC_NL_MEDIA_GET);
+                         nlflags, TIPC_NL_MEDIA_GET);
        if (!hdr)
                return -EMSGSIZE;
 
@@ -916,7 +916,8 @@ int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        rtnl_lock();
        for (; media_info_array[i] != NULL; i++) {
-               err = __tipc_nl_add_media(&msg, media_info_array[i]);
+               err = __tipc_nl_add_media(&msg, media_info_array[i],
+                                         NLM_F_MULTI);
                if (err)
                        break;
        }
@@ -963,7 +964,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
                goto err_out;
        }
 
-       err = __tipc_nl_add_media(&msg, media);
+       err = __tipc_nl_add_media(&msg, media, 0);
        if (err)
                goto err_out;
        rtnl_unlock();
index a6b30df6ec02ec22f1b4b44930bd1ceb168258f3..43a515dc97b0d4a2c7fca5bc9dc2951175201fff 100644 (file)
@@ -1145,11 +1145,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
                }
                /* Synchronize with parallel link if applicable */
                if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
-                       link_handle_out_of_seq_msg(l_ptr, skb);
-                       if (link_synch(l_ptr))
-                               link_retrieve_defq(l_ptr, &head);
-                       skb = NULL;
-                       goto unlock;
+                       if (!link_synch(l_ptr))
+                               goto unlock;
                }
                l_ptr->next_in_no++;
                if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
@@ -2013,7 +2010,7 @@ msg_full:
 
 /* Caller should hold appropriate locks to protect the link */
 static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
-                             struct tipc_link *link)
+                             struct tipc_link *link, int nlflags)
 {
        int err;
        void *hdr;
@@ -2022,7 +2019,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
        struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
-                         NLM_F_MULTI, TIPC_NL_LINK_GET);
+                         nlflags, TIPC_NL_LINK_GET);
        if (!hdr)
                return -EMSGSIZE;
 
@@ -2095,7 +2092,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
                if (!node->links[i])
                        continue;
 
-               err = __tipc_nl_add_link(net, msg, node->links[i]);
+               err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
                if (err)
                        return err;
        }
@@ -2143,7 +2140,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        err = __tipc_nl_add_node_links(net, &msg, node,
                                                       &prev_link);
                        tipc_node_unlock(node);
-                       tipc_node_put(node);
                        if (err)
                                goto out;
 
@@ -2210,7 +2206,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
                goto err_out;
        }
 
-       err = __tipc_nl_add_link(net, &msg, link);
+       err = __tipc_nl_add_link(net, &msg, link, 0);
        if (err)
                goto err_out;
 
index ab6183cdb12113565e3575e746470d5564f2ce22..77ff03ed1e18d13224f086c2315d7123cc931123 100644 (file)
@@ -102,7 +102,7 @@ static void tipc_conn_kref_release(struct kref *kref)
                }
                saddr->scope = -TIPC_NODE_SCOPE;
                kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
-               sk_release_kernel(sk);
+               sock_release(sock);
                con->sock = NULL;
        }
 
@@ -321,12 +321,9 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
        struct socket *sock = NULL;
        int ret;
 
-       ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock);
+       ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1);
        if (ret < 0)
                return NULL;
-
-       sk_change_net(sock->sk, s->net);
-
        ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
                                (char *)&s->imp, sizeof(s->imp));
        if (ret < 0)
@@ -376,7 +373,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
 
 create_err:
        kernel_sock_shutdown(sock, SHUT_RDWR);
-       sk_release_kernel(sock->sk);
+       sock_release(sock);
        return NULL;
 }
 
index ee90d74d7516e93af0587f5898d81e41f870e267..9074b5cede38b8edd75890b684a706d96b9f71ba 100644 (file)
@@ -1764,13 +1764,14 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
        u32 dnode, dport = 0;
-       int err = -TIPC_ERR_NO_PORT;
+       int err;
        struct sk_buff *skb;
        struct tipc_sock *tsk;
        struct tipc_net *tn;
        struct sock *sk;
 
        while (skb_queue_len(inputq)) {
+               err = -TIPC_ERR_NO_PORT;
                skb = NULL;
                dport = tipc_skb_peek_port(inputq, dport);
                tsk = tipc_sk_lookup(net, dport);
index 99f7012b23b9f2101bc8b3a35ed27871a7835b1d..a73a226f2d33f07261f606fa84ef4db8b91ed6fc 100644 (file)
@@ -95,39 +95,36 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
 
 unsigned int unix_tot_inflight;
 
-
 struct sock *unix_get_socket(struct file *filp)
 {
        struct sock *u_sock = NULL;
        struct inode *inode = file_inode(filp);
 
-       /*
-        *      Socket ?
-        */
+       /* Socket ? */
        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                struct sock *s = sock->sk;
 
-               /*
-                *      PF_UNIX ?
-                */
+               /* PF_UNIX ? */
                if (s && sock->ops && sock->ops->family == PF_UNIX)
                        u_sock = s;
        }
        return u_sock;
 }
 
-/*
- *     Keep the number of times in flight count for the file
- *     descriptor if it is for an AF_UNIX socket.
+/* Keep the number of times in flight count for the file
+ * descriptor if it is for an AF_UNIX socket.
  */
 
 void unix_inflight(struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
+
        if (s) {
                struct unix_sock *u = unix_sk(s);
+
                spin_lock(&unix_gc_lock);
+
                if (atomic_long_inc_return(&u->inflight) == 1) {
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
@@ -142,10 +139,13 @@ void unix_inflight(struct file *fp)
 void unix_notinflight(struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
+
        if (s) {
                struct unix_sock *u = unix_sk(s);
+
                spin_lock(&unix_gc_lock);
                BUG_ON(list_empty(&u->link));
+
                if (atomic_long_dec_and_test(&u->inflight))
                        list_del_init(&u->link);
                unix_tot_inflight--;
@@ -161,32 +161,27 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
 
        spin_lock(&x->sk_receive_queue.lock);
        skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
-               /*
-                *      Do we have file descriptors ?
-                */
+               /* Do we have file descriptors ? */
                if (UNIXCB(skb).fp) {
                        bool hit = false;
-                       /*
-                        *      Process the descriptors of this socket
-                        */
+                       /* Process the descriptors of this socket */
                        int nfd = UNIXCB(skb).fp->count;
                        struct file **fp = UNIXCB(skb).fp->fp;
+
                        while (nfd--) {
-                               /*
-                                *      Get the socket the fd matches
-                                *      if it indeed does so
-                                */
+                               /* Get the socket the fd matches if it indeed does so */
                                struct sock *sk = unix_get_socket(*fp++);
+
                                if (sk) {
                                        struct unix_sock *u = unix_sk(sk);
 
-                                       /*
-                                        * Ignore non-candidates, they could
+                                       /* Ignore non-candidates, they could
                                         * have been added to the queues after
                                         * starting the garbage collection
                                         */
                                        if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
                                                hit = true;
+
                                                func(u);
                                        }
                                }
@@ -203,24 +198,22 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
 static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
 {
-       if (x->sk_state != TCP_LISTEN)
+       if (x->sk_state != TCP_LISTEN) {
                scan_inflight(x, func, hitlist);
-       else {
+       } else {
                struct sk_buff *skb;
                struct sk_buff *next;
                struct unix_sock *u;
                LIST_HEAD(embryos);
 
-               /*
-                * For a listening socket collect the queued embryos
+               /* For a listening socket collect the queued embryos
                 * and perform a scan on them as well.
                 */
                spin_lock(&x->sk_receive_queue.lock);
                skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                        u = unix_sk(skb->sk);
 
-                       /*
-                        * An embryo cannot be in-flight, so it's safe
+                       /* An embryo cannot be in-flight, so it's safe
                         * to use the list link.
                         */
                        BUG_ON(!list_empty(&u->link));
@@ -249,8 +242,7 @@ static void inc_inflight(struct unix_sock *usk)
 static void inc_inflight_move_tail(struct unix_sock *u)
 {
        atomic_long_inc(&u->inflight);
-       /*
-        * If this still might be part of a cycle, move it to the end
+       /* If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over
         */
@@ -263,8 +255,7 @@ static bool gc_in_progress;
 
 void wait_for_unix_gc(void)
 {
-       /*
-        * If number of inflight sockets is insane,
+       /* If number of inflight sockets is insane,
         * force a garbage collect right now.
         */
        if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
@@ -288,8 +279,7 @@ void unix_gc(void)
                goto out;
 
        gc_in_progress = true;
-       /*
-        * First, select candidates for garbage collection.  Only
+       /* First, select candidates for garbage collection.  Only
         * in-flight sockets are considered, and from those only ones
         * which don't have any external reference.
         *
@@ -320,15 +310,13 @@ void unix_gc(void)
                }
        }
 
-       /*
-        * Now remove all internal in-flight reference to children of
+       /* Now remove all internal in-flight reference to children of
         * the candidates.
         */
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);
 
-       /*
-        * Restore the references for children of all candidates,
+       /* Restore the references for children of all candidates,
         * which have remaining references.  Do this recursively, so
         * only those remain, which form cyclic references.
         *
@@ -350,8 +338,7 @@ void unix_gc(void)
        }
        list_del(&cursor);
 
-       /*
-        * not_cycle_list contains those sockets which do not make up a
+       /* not_cycle_list contains those sockets which do not make up a
         * cycle.  Restore these to the inflight list.
         */
        while (!list_empty(&not_cycle_list)) {
@@ -360,8 +347,7 @@ void unix_gc(void)
                list_move_tail(&u->link, &gc_inflight_list);
        }
 
-       /*
-        * Now gc_candidates contains only garbage.  Restore original
+       /* Now gc_candidates contains only garbage.  Restore original
         * inflight counters for these as well, and remove the skbuffs
         * which are creating the cycle(s).
         */
index 37d0220a094cb55451cff620c5e455f83d8a894d..db7a2e5e4a14ee6d7d110206aa94d7d71002066a 100644 (file)
@@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
        }
 #endif
  
-       strcpy(card->driver, emu->card_capabilities->driver);
-       strcpy(card->shortname, emu->card_capabilities->name);
+       strlcpy(card->driver, emu->card_capabilities->driver,
+               sizeof(card->driver));
+       strlcpy(card->shortname, emu->card_capabilities->name,
+               sizeof(card->shortname));
        snprintf(card->longname, sizeof(card->longname),
                 "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
                 card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
index 874cd76c7b7fb09b5c6c3cc722173914280b45ce..d2c7ea3a7610861a15730bb3a8b0c8764205fd5a 100644 (file)
@@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
        snd_emu10k1_ptr_write(hw, Z2, ch, 0);
 
        /* invalidate maps */
-       temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
+       temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
        snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
        snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
 #if 0
@@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
                snd_emu10k1_ptr_write(hw, CDF, ch, sample);
 
                /* invalidate maps */
-               temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
+               temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
                snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
                snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
                
index 54079f5d5673951ad739c81e2e679d3864f0ea79..a4548147c6215e788bc6cd9ead8357cb72658f8a 100644 (file)
@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
        snd_emu10k1_ptr_write(emu, TCB, 0, 0);  /* taken from original driver */
        snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
 
-       silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
+       silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
        for (ch = 0; ch < NUM_G; ch++) {
                snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
                snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
                outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
        }
 
+       if (emu->address_mode == 0) {
+               /* use 16M in 4G */
+               outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
+       }
+
        return 0;
 }
 
@@ -1446,7 +1451,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
         *
         */
        {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
-        .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
+        .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
         .id = "Audigy2",
         .emu10k2_chip = 1,
         .ca0108_chip = 1,
@@ -1596,7 +1601,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
         .adc_1361t = 1,  /* 24 bit capture instead of 16bit */
         .ac97_chip = 1} ,
        {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
-        .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
+        .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
         .id = "Audigy2",
         .emu10k2_chip = 1,
         .ca0102_chip = 1,
@@ -1902,8 +1907,10 @@ int snd_emu10k1_create(struct snd_card *card,
 
        is_audigy = emu->audigy = c->emu10k2_chip;
 
+       /* set addressing mode */
+       emu->address_mode = is_audigy ? 0 : 1;
        /* set the DMA transfer mask */
-       emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
+       emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
        if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
            pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
                dev_err(card->dev,
@@ -1928,7 +1935,7 @@ int snd_emu10k1_create(struct snd_card *card,
 
        emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
        if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
-                               32 * 1024, &emu->ptb_pages) < 0) {
+                               (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
                err = -ENOMEM;
                goto error;
        }
@@ -2027,8 +2034,8 @@ int snd_emu10k1_create(struct snd_card *card,
 
        /* Clear silent pages and set up pointers */
        memset(emu->silent_page.area, 0, PAGE_SIZE);
-       silent_page = emu->silent_page.addr << 1;
-       for (idx = 0; idx < MAXPAGES; idx++)
+       silent_page = emu->silent_page.addr << emu->address_mode;
+       for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
                ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
 
        /* set up voice indices */
index 0dc07385af0ebaee5ea66126bf8d758e17f71054..14a305bd8a98577cf50ecad28090996714b2034b 100644 (file)
@@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
        snd_emu10k1_ptr_write(emu, Z1, voice, 0);
        snd_emu10k1_ptr_write(emu, Z2, voice, 0);
        /* invalidate maps */
-       silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
+       silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
        snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
        snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
        /* modulation envelope */
index c68e6dd2fa6772fc88cae359b1b7947f1c4e9796..4f1f69be18651b7c692f9feb812c239e8f911386 100644 (file)
  * aligned pages in others
  */
 #define __set_ptb_entry(emu,page,addr) \
-       (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
+       (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
 
 #define UNIT_PAGES             (PAGE_SIZE / EMUPAGESIZE)
-#define MAX_ALIGN_PAGES                (MAXPAGES / UNIT_PAGES)
+#define MAX_ALIGN_PAGES0               (MAXPAGES0 / UNIT_PAGES)
+#define MAX_ALIGN_PAGES1               (MAXPAGES1 / UNIT_PAGES)
 /* get aligned page from offset address */
 #define get_aligned_page(offset)       ((offset) >> PAGE_SHIFT)
 /* get offset address from aligned page */
@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
                }
                page = blk->mapped_page + blk->pages;
        }
-       size = MAX_ALIGN_PAGES - page;
+       size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
-               end_page = MAX_ALIGN_PAGES;
+               end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
 
        /* remove links */
        list_del(&blk->mapped_link);
@@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
        if (snd_BUG_ON(!emu))
                return NULL;
        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
-                      runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
+                      runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
                return NULL;
        hdr = emu->memhdr;
        if (snd_BUG_ON(!hdr))
index 873ed1bce12b694b60be0c4737a16e0feee5abf0..b49feff0a31982e7c22071c08e8d088e91a97727 100644 (file)
@@ -873,14 +873,15 @@ struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec,
        struct hda_pcm *pcm;
        va_list args;
 
-       va_start(args, fmt);
        pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
        if (!pcm)
                return NULL;
 
        pcm->codec = codec;
        kref_init(&pcm->kref);
+       va_start(args, fmt);
        pcm->name = kvasprintf(GFP_KERNEL, fmt, args);
+       va_end(args);
        if (!pcm->name) {
                kfree(pcm);
                return NULL;
@@ -2082,6 +2083,16 @@ static struct snd_kcontrol_new vmaster_mute_mode = {
        .put = vmaster_mute_mode_put,
 };
 
+/* meta hook to call each driver's vmaster hook */
+static void vmaster_hook(void *private_data, int enabled)
+{
+       struct hda_vmaster_mute_hook *hook = private_data;
+
+       if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
+               enabled = hook->mute_mode;
+       hook->hook(hook->codec, enabled);
+}
+
 /**
  * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED
  * @codec: the HDA codec
@@ -2100,9 +2111,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
 
        if (!hook->hook || !hook->sw_kctl)
                return 0;
-       snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
        hook->codec = codec;
        hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
+       snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
        if (!expose_enum_ctl)
                return 0;
        kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
@@ -2128,14 +2139,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
         */
        if (hook->codec->bus->shutdown)
                return;
-       switch (hook->mute_mode) {
-       case HDA_VMUTE_FOLLOW_MASTER:
-               snd_ctl_sync_vmaster_hook(hook->sw_kctl);
-               break;
-       default:
-               hook->hook(hook->codec, hook->mute_mode);
-               break;
-       }
+       snd_ctl_sync_vmaster_hook(hook->sw_kctl);
 }
 EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook);
 
index 3d2597b7037bbc9c5a74faeeae035f5c5bd70f76..788f969b1a680e50f61940e31fe1a4609ccce402 100644 (file)
@@ -3259,7 +3259,8 @@ static int create_input_ctls(struct hda_codec *codec)
                val = PIN_IN;
                if (cfg->inputs[i].type == AUTO_PIN_MIC)
                        val |= snd_hda_get_default_vref(codec, pin);
-               if (pin != spec->hp_mic_pin)
+               if (pin != spec->hp_mic_pin &&
+                   !snd_hda_codec_get_pin_target(codec, pin))
                        set_pin_target(codec, pin, val, false);
 
                if (mixer) {
index 06199e4e930f77111c701603ba98216c5572faec..e2afd53cc14c7356b84d80e3c2aaed30d3c8ab49 100644 (file)
@@ -4190,11 +4190,18 @@ static void alc_shutup_dell_xps13(struct hda_codec *codec)
 static void alc_fixup_dell_xps13(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
-       if (action == HDA_FIXUP_ACT_PROBE) {
-               struct alc_spec *spec = codec->spec;
-               struct hda_input_mux *imux = &spec->gen.input_mux;
-               int i;
+       struct alc_spec *spec = codec->spec;
+       struct hda_input_mux *imux = &spec->gen.input_mux;
+       int i;
 
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               /* mic pin 0x19 must be initialized with Vref Hi-Z, otherwise
+                * it causes a click noise at start up
+                */
+               snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+               break;
+       case HDA_FIXUP_ACT_PROBE:
                spec->shutup = alc_shutup_dell_xps13;
 
                /* Make the internal mic the default input source. */
@@ -4204,6 +4211,7 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec,
                                break;
                        }
                }
+               break;
        }
 }
 
index 0a4ad5feb82e7817f7036f86c973d02b1dfecb9b..d51703e305238700bce9da8184971ffa516247ca 100644 (file)
@@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
                if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
                        old_vmaster_hook = spec->vmaster_mute.hook;
                        spec->vmaster_mute.hook = update_tpacpi_mute_led;
+                       spec->vmaster_mute_enum = 1;
                        removefunc = false;
                }
                if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
index 69528ae5410c991125c2e16f3810d51ae7026b88..be4d741c45baa3164c8698782055aa9599d1df45 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
+#include <linux/acpi.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -2656,6 +2657,15 @@ static const struct i2c_device_id rt5645_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id);
 
+#ifdef CONFIG_ACPI
+static struct acpi_device_id rt5645_acpi_match[] = {
+       { "10EC5645", 0 },
+       { "10EC5650", 0 },
+       {},
+};
+MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match);
+#endif
+
 static int rt5645_i2c_probe(struct i2c_client *i2c,
                    const struct i2c_device_id *id)
 {
@@ -2770,7 +2780,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
 
                case RT5645_DMIC_DATA_GPIO12:
                        regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
-                               RT5645_DMIC_1_DP_MASK, RT5645_DMIC_2_DP_GPIO12);
+                               RT5645_DMIC_2_DP_MASK, RT5645_DMIC_2_DP_GPIO12);
                        regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
                                RT5645_GP12_PIN_MASK,
                                RT5645_GP12_PIN_DMIC2_SDA);
@@ -2872,6 +2882,7 @@ static struct i2c_driver rt5645_i2c_driver = {
        .driver = {
                .name = "rt5645",
                .owner = THIS_MODULE,
+               .acpi_match_table = ACPI_PTR(rt5645_acpi_match),
        },
        .probe = rt5645_i2c_probe,
        .remove   = rt5645_i2c_remove,
index af182586712d42f9bbac6d5b03338e3e5219cf23..169aa471ffbd447e2ec3f009d775524eb77aa5ae 100644 (file)
@@ -62,6 +62,9 @@ static const struct reg_default init_list[] = {
        {RT5677_PR_BASE + 0x1e, 0x0000},
        {RT5677_PR_BASE + 0x12, 0x0eaa},
        {RT5677_PR_BASE + 0x14, 0x018a},
+       {RT5677_PR_BASE + 0x15, 0x0490},
+       {RT5677_PR_BASE + 0x38, 0x0f71},
+       {RT5677_PR_BASE + 0x39, 0x0f71},
 };
 #define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list)
 
@@ -914,7 +917,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
 {
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
        struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
-       int idx = rl6231_calc_dmic_clk(rt5677->sysclk);
+       int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
 
        if (idx < 0)
                dev_err(codec->dev, "Failed to set DMIC clock\n");
index 16f1b71edb554aac82cb74093cbf6a80b84f5d0b..aab0af681e8cb3cd1b2c0ea0b5a22e32c1f08c2b 100644 (file)
@@ -280,8 +280,8 @@ static int tfa9879_i2c_probe(struct i2c_client *i2c,
        int i;
 
        tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL);
-       if (IS_ERR(tfa9879))
-               return PTR_ERR(tfa9879);
+       if (!tfa9879)
+               return -ENOMEM;
 
        i2c_set_clientdata(i2c, tfa9879);
 
index e8bb8eef1d16bee3d9d8ca5e4b7e4d5a7a026267..0d48804218b1bdbc31198fbb6b0ce16bc09a4fc3 100644 (file)
@@ -1357,7 +1357,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        }
 
        ssi_private->irq = platform_get_irq(pdev, 0);
-       if (!ssi_private->irq) {
+       if (ssi_private->irq < 0) {
                dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
                return ssi_private->irq;
        }
index cd9aee9871a36a4400646441f1105d949fe5a2af..3853ec2ddbc758d2c558dfcbe093d2cee59af8a1 100644 (file)
@@ -4,7 +4,7 @@ obj-$(CONFIG_SND_SOC_INTEL_SST) += common/
 # Platform Support
 obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
 obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
-obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += atom/
+obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
 
 # Machine support
 obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/
index 1efb33b36303ea8b5c3c725c4f35b65055744689..a839dbfa5218e832c487512ca3d9f77f0b7db629 100644 (file)
@@ -759,7 +759,6 @@ fw_err:
 dsp_new_err:
        sst_ipc_fini(ipc);
 ipc_init_err:
-       kfree(byt);
 
        return err;
 }
index 344a1e9bbce5794311ec95a24143581ebf715907..324eceb07b255b06e71c07cd1267a55364ce403e 100644 (file)
@@ -2201,7 +2201,6 @@ dma_err:
 dsp_new_err:
        sst_ipc_fini(ipc);
 ipc_init_err:
-       kfree(hsw);
        return ret;
 }
 EXPORT_SYMBOL_GPL(sst_hsw_dsp_init);
index 6698d058de29600a464be79490bec641736819c3..dc790abaa3318e4760107c726ac02ea7e2c1a2cb 100644 (file)
@@ -194,7 +194,7 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
                int cmd, struct snd_soc_dai *dai)
 {
        struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
-       int ret;
+       int ret = -EINVAL;
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
index 326d3c3804e34bccf1069e61df0bb12d5a8bf042..5bf723689692fc52ffc5edce20f82e8d5cb4fc21 100644 (file)
@@ -461,8 +461,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
                return -ENOENT;
        }
        s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
-       if (s3c24xx_i2s.regs == NULL)
-               return -ENXIO;
+       if (IS_ERR(s3c24xx_i2s.regs))
+               return PTR_ERR(s3c24xx_i2s.regs);
 
        s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO;
        s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO;
index ac3756f6af603e9a21fe79988b00d13477da6fab..144308f15fb336cd1a27e99e57ab9b40b255499b 100644 (file)
@@ -156,6 +156,7 @@ static int rsnd_dmaen_init(struct rsnd_priv *priv, struct rsnd_dma *dma, int id,
                                                  (void *)id);
        }
        if (IS_ERR_OR_NULL(dmaen->chan)) {
+               dmaen->chan = NULL;
                dev_err(dev, "can't get dma channel\n");
                goto rsnd_dma_channel_err;
        }
index ab37add269aecd6ae00909776d954735e587f9a2..82e350e9501ccc0d5ebc82962f60c034466448eb 100644 (file)
@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
        if (snd_BUG_ON(!arg || !emu))
                return -ENXIO;
 
-       mutex_lock(&emu->register_mutex);
-
-       if (!snd_emux_inc_count(emu)) {
-               mutex_unlock(&emu->register_mutex);
+       if (!snd_emux_inc_count(emu))
                return -EFAULT;
-       }
 
        memset(&callback, 0, sizeof(callback));
        callback.owner = THIS_MODULE;
@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
        if (p == NULL) {
                snd_printk(KERN_ERR "can't create port\n");
                snd_emux_dec_count(emu);
-               mutex_unlock(&emu->register_mutex);
                return -ENOMEM;
        }
 
@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
        reset_port_mode(p, arg->seq_mode);
 
        snd_emux_reset_port(p);
-
-       mutex_unlock(&emu->register_mutex);
        return 0;
 }
 
@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
        if (snd_BUG_ON(!emu))
                return -ENXIO;
 
-       mutex_lock(&emu->register_mutex);
        snd_emux_sounds_off_all(p);
        snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
        snd_seq_event_port_detach(p->chset.client, p->chset.port);
        snd_emux_dec_count(emu);
 
-       mutex_unlock(&emu->register_mutex);
        return 0;
 }
 
index 7778b8e19782e24a3ca56a5aa5dff9e00bf33e4d..a0209204ae4892a490877c30eaf056c45cd8043a 100644 (file)
@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
        if (emu->voices)
                snd_emux_terminate_all(emu);
                
-       mutex_lock(&emu->register_mutex);
        if (emu->client >= 0) {
                snd_seq_delete_kernel_client(emu->client);
                emu->client = -1;
        }
-       mutex_unlock(&emu->register_mutex);
 }
 
 
@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
 /*
  * increment usage count
  */
-int
-snd_emux_inc_count(struct snd_emux *emu)
+static int
+__snd_emux_inc_count(struct snd_emux *emu)
 {
        emu->used++;
        if (!try_module_get(emu->ops.owner))
@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
        return 1;
 }
 
+int snd_emux_inc_count(struct snd_emux *emu)
+{
+       int ret;
+
+       mutex_lock(&emu->register_mutex);
+       ret = __snd_emux_inc_count(emu);
+       mutex_unlock(&emu->register_mutex);
+       return ret;
+}
 
 /*
  * decrease usage count
  */
-void
-snd_emux_dec_count(struct snd_emux *emu)
+static void
+__snd_emux_dec_count(struct snd_emux *emu)
 {
        module_put(emu->card->module);
        emu->used--;
@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
        module_put(emu->ops.owner);
 }
 
+void snd_emux_dec_count(struct snd_emux *emu)
+{
+       mutex_lock(&emu->register_mutex);
+       __snd_emux_dec_count(emu);
+       mutex_unlock(&emu->register_mutex);
+}
 
 /*
  * Routine that is called upon a first use of a particular port
@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
 
        mutex_lock(&emu->register_mutex);
        snd_emux_init_port(p);
-       snd_emux_inc_count(emu);
+       __snd_emux_inc_count(emu);
        mutex_unlock(&emu->register_mutex);
        return 0;
 }
@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
 
        mutex_lock(&emu->register_mutex);
        snd_emux_sounds_off_all(p);
-       snd_emux_dec_count(emu);
+       __snd_emux_dec_count(emu);
        mutex_unlock(&emu->register_mutex);
        return 0;
 }
index 5a161175bbd4197e907dbeb68f7c5f7003ecdf76..a9099d9f8f39ea8a38a6345b15e2d24ab2e00eb7 100644 (file)
@@ -26,7 +26,7 @@ override define EMIT_TESTS
        $(MAKE) -s -C ebb emit_tests
 endef
 
-DEFAULT_INSTALL := $(INSTALL_RULE)
+DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
 override define INSTALL_RULE
        $(DEFAULT_INSTALL_RULE)
        $(MAKE) -C ebb install
index 1b616fa79e93947987fa0093e8f73d1428205fd2..6bff955e1d55ac6cccb526f5a00355ac4e904973 100644 (file)
@@ -1,4 +1,4 @@
-TEST_PROGS := tm-resched-dscr tm-syscall
+TEST_PROGS := tm-resched-dscr
 
 all: $(TEST_PROGS)