Merge remote-tracking branches 'spi/topic/bcm2835', 'spi/topic/bcm63xx', 'spi/topic...
author Mark Brown <broonie@linaro.org>
Thu, 23 Jan 2014 13:07:05 +0000 (13:07 +0000)
committer Mark Brown <broonie@linaro.org>
Thu, 23 Jan 2014 13:07:05 +0000 (13:07 +0000)
290 files changed:
Documentation/devicetree/bindings/clock/exynos5250-clock.txt
Documentation/spi/spi-summary
MAINTAINERS
Makefile
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/crypto/aesbs-core.S_shipped
arch/arm/crypto/bsaes-armv7.pl
arch/arm/include/asm/io.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/xen/page.h
arch/arm/kernel/traps.c
arch/arm/mach-footbridge/dc21285-timer.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-kzm9g.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mm/flush.c
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/page.h
arch/parisc/kernel/cache.c
arch/powerpc/boot/dts/mpc5125twr.dts
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/unaligned.h
arch/powerpc/kernel/head_64.S
arch/powerpc/lib/copyuser_64.S
arch/powerpc/platforms/powernv/eeh-ioda.c
arch/powerpc/platforms/powernv/pci.h
arch/s390/Kconfig
arch/s390/include/asm/smp.h
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/pci/pci_event.c
arch/sh/kernel/sh_ksyms_32.c
arch/sparc/include/asm/uaccess_64.h
arch/sparc/kernel/iommu.c
arch/sparc/kernel/ioport.c
arch/sparc/kernel/kgdb_64.c
arch/sparc/kernel/smp_64.c
arch/x86/kvm/lapic.c
arch/x86/kvm/vmx.c
drivers/acpi/ac.c
drivers/acpi/battery.c
drivers/acpi/bus.c
drivers/ata/ahci.c
drivers/ata/sata_sis.c
drivers/block/null_blk.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/char/tpm/tpm_ppi.c
drivers/clk/clk-divider.c
drivers/clk/samsung/clk-exynos-audss.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos5250.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpuidle/cpuidle-calxeda.c
drivers/crypto/ixp4xx_crypto.c
drivers/dma/ioat/dma.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/nouveau/core/core/subdev.c
drivers/gpu/drm/nouveau/core/engine/device/base.c
drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
drivers/gpu/drm/nouveau/core/include/subdev/fb.h
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/si.c
drivers/idle/intel_idle.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
drivers/input/input.c
drivers/input/touchscreen/zforce_ts.c
drivers/isdn/hisax/hfc_pci.c
drivers/isdn/hisax/telespci.c
drivers/leds/leds-lp5521.c
drivers/leds/leds-lp5523.c
drivers/mfd/rtsx_pcr.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/l2t.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/e1000e/80003es2lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/hamradio/yam.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/phy/phy.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/Kconfig
drivers/net/usb/dm9601.c
drivers/net/usb/hso.c
drivers/net/usb/mcs7830.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath9k/ar9002_mac.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/of/Kconfig
drivers/of/address.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pci-acpi.c
drivers/power/Kconfig
drivers/power/power_supply_core.c
drivers/s390/char/tty3270.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-altera.c
drivers/spi/spi-ath79.c
drivers/spi/spi-atmel.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bcm63xx-hsspi.c [new file with mode: 0644]
drivers/spi/spi-bcm63xx.c
drivers/spi/spi-bitbang-txrx.h
drivers/spi/spi-clps711x.c
drivers/spi/spi-coldfire-qspi.c
drivers/spi/spi-davinci.c
drivers/spi/spi-falcon.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-imx.c
drivers/spi/spi-mxs.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sc18is602.c
drivers/spi/spi-sh-hspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-sh.c
drivers/spi/spi-sirf.c
drivers/spi/spi-ti-qspi.c
drivers/spi/spi-topcliff-pch.c
drivers/spi/spi.c
drivers/staging/bcm/Bcmnet.c
drivers/staging/netlogic/xlr_net.c
drivers/staging/rtl8188eu/os_dep/os_intfs.c
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/dir.c
fs/cifs/inode.c
fs/cifs/link.c
fs/eventpoll.c
fs/ext4/extents.c
fs/gfs2/aops.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/log.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/xfs/xfs_attr_remote.c
fs/xfs/xfs_bmap_util.c
include/acpi/acpi_bus.h
include/drm/drm_pciids.h
include/linux/netdevice.h
include/linux/rtnetlink.h
include/linux/skbuff.h
include/linux/spi/spi.h
include/net/llc_pdu.h
include/net/sctp/structs.h
include/uapi/drm/radeon_drm.h
include/uapi/linux/input.h
mm/fremap.c
mm/huge_memory.c
mm/memcontrol.c
mm/memory-failure.c
mm/mlock.c
net/8021q/vlan_dev.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/icmp_socket.c
net/batman-adv/main.c
net/batman-adv/network-coding.c
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/bluetooth/hci_sock.c
net/bridge/br_multicast.c
net/core/dev.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/netpoll.c
net/dccp/probe.c
net/ieee802154/6lowpan.c
net/ipv4/gre_offload.c
net/ipv4/inet_diag.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/route.c
net/ipv6/sit.c
net/llc/af_llc.c
net/mac80211/iface.c
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_nfct.c
net/netfilter/nf_conntrack_seqadj.c
net/netfilter/nf_conntrack_timestamp.c
net/netfilter/nf_nat_irc.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nft_exthdr.c
net/nfc/core.c
net/rds/ib.c
net/rose/af_rose.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ipt.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/sch_generic.c
net/sctp/outqueue.c
net/tipc/link.c
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/wireless/radiotap.c
net/wireless/sme.c
security/selinux/hooks.c
security/selinux/include/objsec.h

index 46f5c791ea0df6e94e2ea6bd8ef26436231f8ebe..0f2f920e87348515995cb553ece45cf0949213b7 100644 (file)
--- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
@@ -159,6 +159,8 @@ clock which they consume.
   mixer                        343
   hdmi                 344
   g2d                  345
+  mdma0                        346
+  smmu_mdma0           347
 
 
    [Clock Muxes]
index f21edb9834137ac91a9d1e6789c612ef07deb8c1..f72e0d1e0da852ac3e89e97f8bd0c22055d66608 100644 (file)
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -34,7 +34,7 @@ SPI slave functions are usually not interoperable between vendors
   - It may also be used to stream data in either direction (half duplex),
     or both of them at the same time (full duplex).
 
-  - Some devices may use eight bit words.  Others may different word
+  - Some devices may use eight bit words.  Others may use different word
     lengths, such as streams of 12-bit or 20-bit digital samples.
 
   - Words are usually sent with their most significant bit (MSB) first,
@@ -121,7 +121,7 @@ active.  So the master must set the clock to inactive before selecting
 a slave, and the slave can tell the chosen polarity by sampling the
 clock level when its select line goes active.  That's why many devices
 support for example both modes 0 and 3:  they don't care about polarity,
-and alway clock data in/out on rising clock edges.
+and always clock data in/out on rising clock edges.
 
 
 How do these driver programming interfaces work?
@@ -139,7 +139,7 @@ a command and then reading its response.
 
 There are two types of SPI driver, here called:
 
-  Controller drivers ... controllers may be built in to System-On-Chip
+  Controller drivers ... controllers may be built into System-On-Chip
        processors, and often support both Master and Slave roles.
        These drivers touch hardware registers and may use DMA.
        Or they can be PIO bitbangers, needing just GPIO pins.
@@ -548,7 +548,7 @@ SPI MASTER METHODS
     DEPRECATED METHODS
 
     master->transfer(struct spi_device *spi, struct spi_message *message)
-       This must not sleep. Its responsibility is arrange that the
+       This must not sleep. Its responsibility is to arrange that the
        transfer happens and its complete() callback is issued. The two
        will normally happen later, after other transfers complete, and
        if the controller is idle it will need to be kickstarted. This
index d5e4ff328cc7146827ac3d02cd8df189b881abc4..31a046213274e06e63e26da0514ede7ebc7c4443 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -783,7 +783,7 @@ F:  arch/arm/boot/dts/sama*.dts
 F:     arch/arm/boot/dts/sama*.dtsi
 
 ARM/CALXEDA HIGHBANK ARCHITECTURE
-M:     Rob Herring <rob.herring@calxeda.com>
+M:     Rob Herring <robh@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-highbank/
@@ -1368,6 +1368,9 @@ T:        git git://git.xilinx.com/linux-xlnx.git
 S:     Supported
 F:     arch/arm/mach-zynq/
 F:     drivers/cpuidle/cpuidle-zynq.c
+N:     zynq
+N:     xilinx
+F:     drivers/clocksource/cadence_ttc_timer.c
 
 ARM SMMU DRIVER
 M:     Will Deacon <will.deacon@arm.com>
@@ -2825,8 +2828,10 @@ F:       include/uapi/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M:     Daniel Vetter <daniel.vetter@ffwll.ch>
+M:     Jani Nikula <jani.nikula@linux.intel.com>
 L:     intel-gfx@lists.freedesktop.org
 L:     dri-devel@lists.freedesktop.org
+Q:     http://patchwork.freedesktop.org/project/intel-gfx/
 T:     git git://people.freedesktop.org/~danvet/drm-intel
 S:     Supported
 F:     drivers/gpu/drm/i915/
@@ -6256,7 +6261,7 @@ F:        drivers/i2c/busses/i2c-ocores.c
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:     Grant Likely <grant.likely@linaro.org>
-M:     Rob Herring <rob.herring@calxeda.com>
+M:     Rob Herring <robh+dt@kernel.org>
 L:     devicetree@vger.kernel.org
 W:     http://fdt.secretlab.ca
 T:     git git://git.secretlab.ca/git/linux-2.6.git
@@ -6268,7 +6273,7 @@ K:        of_get_property
 K:     of_match_table
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
-M:     Rob Herring <rob.herring@calxeda.com>
+M:     Rob Herring <robh+dt@kernel.org>
 M:     Pawel Moll <pawel.moll@arm.com>
 M:     Mark Rutland <mark.rutland@arm.com>
 M:     Ian Campbell <ijc+devicetree@hellion.org.uk>
index ab80be7a38bca0261b2adc49e878f00fea113dcd..eeec740776f3548d2ffa871b52d34a881e2f77e4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
index 9db5047812f3d6c05a36643abbea0e1fb33e8195..177becde7a268bdcee91be7b049567d17daf9809 100644 (file)
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x10800000 0x1000>;
                        interrupts = <0 33 0>;
-                       clocks = <&clock 271>;
+                       clocks = <&clock 346>;
                        clock-names = "apb_pclk";
                        #dma-cells = <1>;
                        #dma-channels = <8>;
index 64205d453260d12ec1e2d43c424fab1c3251788a..71e5fc7cfb18f489f3adadb20c6f8049421fb079 100644 (file)
--- a/arch/arm/crypto/aesbs-core.S_shipped
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -58,7 +58,7 @@
 # define VFP_ABI_FRAME 0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__  7
 #endif
 
 #ifdef __thumb__
index f3d96d9325737fba399dc5e6613d223e03fcb7a1..be068db960ee0006ac1aa25a4e69b95265404844 100644 (file)
--- a/arch/arm/crypto/bsaes-armv7.pl
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -701,7 +701,7 @@ $code.=<<___;
 # define VFP_ABI_FRAME 0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__  7
 #endif
 
 #ifdef __thumb__
index 3c597c222ef278a8eb170a4169b5e54dcc158ac0..fbeb39c869e9fdcedace1d61688247c14141cab3 100644 (file)
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  */
 #define ioremap(cookie,size)           __arm_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_nocache(cookie,size)   __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size)    __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_cache(cookie,size)     __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
 #define ioremap_wc(cookie,size)                __arm_ioremap((cookie), (size), MT_DEVICE_WC)
 #define iounmap                                __arm_iounmap
 
index 6976b03e521369bddedfe6968cfa221c6705c552..8756e4bcdba0609ff789a4f1efaa551faa39ed98 100644 (file)
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -347,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define ARCH_PFN_OFFSET                PHYS_PFN_OFFSET
 
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+                                       && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
 
 #endif
 
index 75579a9d6f76cba3cc84ad61d01ed103269378bd..3759cacdd7f8601322b98ffed6598d97ee8bd016 100644 (file)
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
        return __set_phys_to_machine(pfn, mfn);
 }
 
-#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size));
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
index 7940241f0576b0db1cc0a749e449c7faf492518a..6eda3bf85c52123dc75fb04fe46f0eb197c78d62 100644 (file)
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
 #include <asm/system_misc.h>
 #include <asm/opcodes.h>
 
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+static const char *handler[]= {
+       "prefetch abort",
+       "data abort",
+       "address exception",
+       "interrupt",
+       "undefined instruction",
+};
 
 void *vectors_page;
 
index 9ee78f7b4990751386cf7f29337ec075297349dd..782f6c71fa0a6c761269cfc4b74fc6cdd0839e75 100644 (file)
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
 void __init footbridge_timer_init(void)
 {
        struct clock_event_device *ce = &ckevt_dc21285;
+       unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
 
-       clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
+       clocksource_register_hz(&cksrc_dc21285, rate);
 
        setup_irq(ce->irq, &footbridge_timer_irq);
 
        ce->cpumask = cpumask_of(smp_processor_id());
-       clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
+       clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
 }
index c186891230233fe617161531b5f637246b4e0da7..8ea87bd45c330abd37df9eba0b30b8f4efc8dc47 100644 (file)
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = {
        .id             = 0,
        .dev    = {
                .platform_data  = &lcdc0_info,
-               .coherent_dma_mask = ~0,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = {
        .id             = 1,
        .dev    = {
                .platform_data  = &hdmi_lcdc_info,
-               .coherent_dma_mask = ~0,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
index fe689b7fdc9e715ecd724edcc746913010627fb4..bc40b853ffd3cff2204bdfdc283863a9577f51bc 100644 (file)
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -334,7 +334,7 @@ static struct platform_device lcdc_device = {
        .resource       = lcdc_resources,
        .dev    = {
                .platform_data  = &lcdc_info,
-               .coherent_dma_mask = ~0,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
index af06753eb8092500de41707ac68216bd3ed244a0..e721d2ccceaef8c1d57bd0d671fa097a2c0f539a 100644 (file)
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -409,7 +409,7 @@ static struct platform_device lcdc_device = {
        .resource       = lcdc_resources,
        .dev    = {
                .platform_data  = &lcdc_info,
-               .coherent_dma_mask = ~0,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = {
        .id             = 1,
        .dev    = {
                .platform_data  = &hdmi_lcdc_info,
-               .coherent_dma_mask = ~0,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
index 6d5ba9afb16a4409d50dbdbc03d08c83642df9b4..3387e60e4ea381c579725774ebab8d587eeded0c 100644 (file)
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
                        for (i = 0; i < (1 << compound_order(page)); i++) {
-                               void *addr = kmap_atomic(page);
+                               void *addr = kmap_atomic(page + i);
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                kunmap_atomic(addr);
                        }
                } else {
                        for (i = 0; i < (1 << compound_order(page)); i++) {
-                               void *addr = kmap_high_get(page);
+                               void *addr = kmap_high_get(page + i);
                                if (addr) {
                                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                                       kunmap_high(page);
+                                       kunmap_high(page + i);
                                }
                        }
                }
index f0e2784e7ccacd6c6d0856ce87d1f1d26e81c083..2f9b751878ba86bf6cc32cfdadd9b7847ad158d5 100644 (file)
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
 void mark_rodata_ro(void);
 #endif
 
-#ifdef CONFIG_PA8X00
-/* Only pa8800, pa8900 needs this */
-
 #include <asm/kmap_types.h>
 
 #define ARCH_HAS_KMAP
 
-void kunmap_parisc(void *addr);
-
 static inline void *kmap(struct page *page)
 {
        might_sleep();
+       flush_dcache_page(page);
        return page_address(page);
 }
 
 static inline void kunmap(struct page *page)
 {
-       kunmap_parisc(page_address(page));
+       flush_kernel_dcache_page_addr(page_address(page));
 }
 
 static inline void *kmap_atomic(struct page *page)
 {
        pagefault_disable();
+       flush_dcache_page(page);
        return page_address(page);
 }
 
 static inline void __kunmap_atomic(void *addr)
 {
-       kunmap_parisc(addr);
+       flush_kernel_dcache_page_addr(addr);
        pagefault_enable();
 }
 
 #define kmap_atomic_prot(page, prot)   kmap_atomic(page)
 #define kmap_atomic_pfn(pfn)   kmap_atomic(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)       virt_to_page(ptr)
-#endif
 
 #endif /* _PARISC_CACHEFLUSH_H */
 
index b7adb2ac049c0e6b72817dee3df92d124c612e42..c53fc63149e8312437fe4109e8070495ba39d90a 100644 (file)
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -28,9 +28,8 @@ struct page;
 
 void clear_page_asm(void *page);
 void copy_page_asm(void *to, void *from);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-                          struct page *pg);
+#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
 
 /* #define CONFIG_PARISC_TMPALIAS */
 
index c035673209f732f0850aaa4dc98d2d49eee3b74c..a72545554a3154c254eace0648a769d4df645b25 100644 (file)
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
-void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
-{
-       clear_page_asm(vto);
-       if (!parisc_requires_coherency())
-               flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(clear_user_page);
-
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-       struct page *pg)
-{
-       /* Copy using kernel mapping.  No coherency is needed
-          (all in kmap/kunmap) on machines that don't support
-          non-equivalent aliasing.  However, the `from' page
-          needs to be flushed before it can be accessed through
-          the kernel mapping. */
-       preempt_disable();
-       flush_dcache_page_asm(__pa(vfrom), vaddr);
-       preempt_enable();
-       copy_page_asm(vto, vfrom);
-       if (!parisc_requires_coherency())
-               flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(copy_user_page);
-
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
-{
-       if (parisc_requires_coherency())
-               flush_kernel_dcache_page_addr(addr);
-}
-EXPORT_SYMBOL(kunmap_parisc);
-#endif
-
 void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 {
        unsigned long flags;
index 4177b62240c2440ff19f6089cb84cd5b9e0d27a0..a618dfc13e4c8f88a5ff70b88325fd39675c9e8a 100644 (file)
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -58,7 +58,6 @@
                compatible = "fsl,mpc5121-immr";
                #address-cells = <1>;
                #size-cells = <1>;
-               #interrupt-cells = <2>;
                ranges = <0x0 0x80000000 0x400000>;
                reg = <0x80000000 0x400000>;
                bus-frequency = <66000000>;     // 66 MHz ips bus
                        reg = <0xA000 0x1000>;
                };
 
+               // disable USB1 port
+               // TODO:
+               // correct pinmux config and fix USB3320 ulpi dependency
+               // before re-enabling it
                usb@3000 {
                        compatible = "fsl,mpc5121-usb2-dr";
                        reg = <0x3000 0x400>;
                        interrupts = <43 0x8>;
                        dr_mode = "host";
                        phy_type = "ulpi";
+                       status = "disabled";
                };
 
                // 5125 PSCs are not 52xx or 5121 PSC compatible
index 894662a5d4d5c5aa25f43e30653609d80d901774..243ce69ad685ffd335041537e6f0b33b0aafe3df 100644 (file)
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -284,7 +284,7 @@ do_kvm_##n:                                                         \
        subi    r1,r1,INT_FRAME_SIZE;   /* alloc frame on kernel stack  */ \
        beq-    1f;                                                        \
        ld      r1,PACAKSAVE(r13);      /* kernel stack to use          */ \
-1:     cmpdi   cr1,r1,0;               /* check if r1 is in userspace  */ \
+1:     cmpdi   cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace  */ \
        blt+    cr1,3f;                 /* abort if it is               */ \
        li      r1,(n);                 /* will be reloaded later       */ \
        sth     r1,PACA_TRAP_SAVE(r13);                                    \
index 5f1b1e3c21374d5a1833ed00c9af9d5c06a35123..8296381ae43294e679976f6a82d3bb936d64921f 100644 (file)
--- a/arch/powerpc/include/asm/unaligned.h
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -4,13 +4,18 @@
 #ifdef __KERNEL__
 
 /*
- * The PowerPC can do unaligned accesses itself in big endian mode.
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
  */
 #include <linux/unaligned/access_ok.h>
 #include <linux/unaligned/generic.h>
 
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned  __get_unaligned_le
+#define put_unaligned  __put_unaligned_le
+#else
 #define get_unaligned  __get_unaligned_be
 #define put_unaligned  __put_unaligned_be
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_UNALIGNED_H */
index 2ae41aba40530f7facf916f6ce101ad3196a7363..4f0946de2d5c917540f6f51a50c13640011fea8c 100644 (file)
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
         * of the function that the cpu should jump to to continue
         * initialization.
         */
+       .balign 8
        .globl  __secondary_hold_spinloop
 __secondary_hold_spinloop:
        .llong  0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
        mtctr   r8
        bctr
 
+.balign 8
 p_end: .llong  _end - _stext
 
 4:     /* Now copy the rest of the kernel up to _end */
index d73a5901490018486fed19115fce46d88084f701..596a285c07554d65dfd561367a52ffed47aba55d 100644 (file)
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -9,6 +9,14 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 
+#ifdef __BIG_ENDIAN__
+#define sLd sld                /* Shift towards low-numbered address. */
+#define sHd srd                /* Shift towards high-numbered address. */
+#else
+#define sLd srd                /* Shift towards low-numbered address. */
+#define sHd sld                /* Shift towards high-numbered address. */
+#endif
+
        .align  7
 _GLOBAL(__copy_tofrom_user)
 BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 24:    ld      r9,0(r4)        /* 3+2n loads, 2+2n stores */
 25:    ld      r0,8(r4)
-       sld     r6,r9,r10
+       sLd     r6,r9,r10
 26:    ldu     r9,16(r4)
-       srd     r7,r0,r11
-       sld     r8,r0,r10
+       sHd     r7,r0,r11
+       sLd     r8,r0,r10
        or      r7,r7,r6
        blt     cr6,79f
 27:    ld      r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 
 28:    ld      r0,0(r4)        /* 4+2n loads, 3+2n stores */
 29:    ldu     r9,8(r4)
-       sld     r8,r0,r10
+       sLd     r8,r0,r10
        addi    r3,r3,-8
        blt     cr6,5f
 30:    ld      r0,8(r4)
-       srd     r12,r9,r11
-       sld     r6,r9,r10
+       sHd     r12,r9,r11
+       sLd     r6,r9,r10
 31:    ldu     r9,16(r4)
        or      r12,r8,r12
-       srd     r7,r0,r11
-       sld     r8,r0,r10
+       sHd     r7,r0,r11
+       sLd     r8,r0,r10
        addi    r3,r3,16
        beq     cr6,78f
 
 1:     or      r7,r7,r6
 32:    ld      r0,8(r4)
 76:    std     r12,8(r3)
-2:     srd     r12,r9,r11
-       sld     r6,r9,r10
+2:     sHd     r12,r9,r11
+       sLd     r6,r9,r10
 33:    ldu     r9,16(r4)
        or      r12,r8,r12
 77:    stdu    r7,16(r3)
-       srd     r7,r0,r11
-       sld     r8,r0,r10
+       sHd     r7,r0,r11
+       sLd     r8,r0,r10
        bdnz    1b
 
 78:    std     r12,8(r3)
        or      r7,r7,r6
 79:    std     r7,16(r3)
-5:     srd     r12,r9,r11
+5:     sHd     r12,r9,r11
        or      r12,r8,r12
 80:    std     r12,24(r3)
        bne     6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
        blr
 6:     cmpwi   cr1,r5,8
        addi    r3,r3,32
-       sld     r9,r9,r10
+       sLd     r9,r9,r10
        ble     cr1,7f
 34:    ld      r0,8(r4)
-       srd     r7,r0,r11
+       sHd     r7,r0,r11
        or      r9,r7,r9
 7:
        bf      cr7*4+1,1f
+#ifdef __BIG_ENDIAN__
        rotldi  r9,r9,32
+#endif
 94:    stw     r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+       rotrdi  r9,r9,32
+#endif
        addi    r3,r3,4
 1:     bf      cr7*4+2,2f
+#ifdef __BIG_ENDIAN__
        rotldi  r9,r9,16
+#endif
 95:    sth     r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+       rotrdi  r9,r9,16
+#endif
        addi    r3,r3,2
 2:     bf      cr7*4+3,3f
+#ifdef __BIG_ENDIAN__
        rotldi  r9,r9,8
+#endif
 96:    stb     r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+       rotrdi  r9,r9,8
+#endif
 3:     li      r3,0
        blr
 
index 02245cee78183852d52f3a907023dcd9a63529bf..d7ddcee7feb8bc8084d18be32f348263c8de6e0e 100644 (file)
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -36,7 +36,6 @@
 #include "powernv.h"
 #include "pci.h"
 
-static char *hub_diag = NULL;
 static int ioda_eeh_nb_init = 0;
 
 static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
                ioda_eeh_nb_init = 1;
        }
 
-       /* We needn't HUB diag-data on PHB3 */
-       if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
-               hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-               if (!hub_diag) {
-                       pr_err("%s: Out of memory !\n", __func__);
-                       return -ENOMEM;
-               }
-       }
-
 #ifdef CONFIG_DEBUG_FS
        if (phb->dbgfs) {
                debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
 static void ioda_eeh_hub_diag(struct pci_controller *hose)
 {
        struct pnv_phb *phb = hose->private_data;
-       struct OpalIoP7IOCErrorData *data;
+       struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
        long rc;
 
-       data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
-       rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
+       rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
                           __func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
        struct OpalIoPhbErrorCommon *common;
        long rc;
 
-       common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
-       rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
+       rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+                                        PNV_PCI_DIAG_BUF_SIZE);
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
                            __func__, hose->global_number, rc);
                return;
        }
 
+       common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
        switch (common->ioType) {
        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
                ioda_eeh_p7ioc_phb_diag(hose, common);
index 911c24ef033e0ae207036e5d5cab8bceaf07e14a..1ed8d5f40f5ad1423f78c9ae7fdac42abac4376d 100644 (file)
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -172,11 +172,13 @@ struct pnv_phb {
                } ioda;
        };
 
-       /* PHB status structure */
+       /* PHB and hub status structure */
        union {
                unsigned char                   blob[PNV_PCI_DIAG_BUF_SIZE];
                struct OpalIoP7IOCPhbErrorData  p7ioc;
+               struct OpalIoP7IOCErrorData     hub_diag;
        } diag;
+
 };
 
 extern struct pci_ops pnv_pci_ops;
index 1e1a03d2d19fbbb35804d4fc28d8d9eac355a9c4..e9f3125325266ff8b93f13c009003d3d4aa335de 100644 (file)
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -135,7 +135,6 @@ config S390
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16 if 32BIT
        select HAVE_VIRT_CPU_ACCOUNTING
-       select INIT_ALL_POSSIBLE
        select KTIME_SCALAR if 32BIT
        select MODULES_USE_ELF_RELA
        select OLD_SIGACTION
index ac9bed8e103fa741f85b3aecc64fcc9a18511c82..16077939409622fc946b943f4556d91cc272211c 100644 (file)
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -31,6 +31,7 @@ extern void smp_yield(void);
 extern void smp_stop_cpu(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
+extern void smp_fill_possible_mask(void);
 
 #else /* CONFIG_SMP */
 
@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_yield(void) { }
 static inline void smp_stop_cpu(void) { }
+static inline void smp_fill_possible_mask(void) { }
 
 #endif /* CONFIG_SMP */
 
index 4444875266ee028bdcc4d43604a4dea591a1a47c..0f3d44ecbfc6d5cd66541a0fe4c651acdf6a8336 100644 (file)
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p)
        setup_vmcoreinfo();
        setup_lowcore();
 
+       smp_fill_possible_mask();
         cpu_init();
        s390_init_cpu_topology();
 
index dc4a534650604a972967ab5dedd2c1b7155c29ad..958704798f4a8e91ff898149b8d6379a39cede13 100644 (file)
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        return 0;
 }
 
-static int __init setup_possible_cpus(char *s)
-{
-       int max, cpu;
+static unsigned int setup_possible_cpus __initdata;
 
-       if (kstrtoint(s, 0, &max) < 0)
-               return 0;
-       init_cpu_possible(cpumask_of(0));
-       for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
-               set_cpu_possible(cpu, true);
+static int __init _setup_possible_cpus(char *s)
+{
+       get_option(&s, &setup_possible_cpus);
        return 0;
 }
-early_param("possible_cpus", setup_possible_cpus);
+early_param("possible_cpus", _setup_possible_cpus);
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -775,6 +771,17 @@ void __noreturn cpu_die(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
+void __init smp_fill_possible_mask(void)
+{
+       unsigned int possible, cpu;
+
+       possible = setup_possible_cpus;
+       if (!possible)
+               possible = MACHINE_IS_VM ? 64 : nr_cpu_ids;
+       for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
+               set_cpu_possible(cpu, true);
+}
+
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        /* request the 0x1201 emergency signal external interrupt */
index 800f064b0da7c9a32c3910ecefdfb3e6979d7f9b..069607209a3075c6f4ff2c62fe91b7eb1d99c8aa 100644 (file)
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -75,6 +75,7 @@ void zpci_event_availability(void *data)
                if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED)
                        break;
                zdev->state = ZPCI_FN_STATE_CONFIGURED;
+               zdev->fh = ccdf->fh;
                ret = zpci_enable_device(zdev);
                if (ret)
                        break;
@@ -101,6 +102,7 @@ void zpci_event_availability(void *data)
                if (pdev)
                        pci_stop_and_remove_bus_device(pdev);
 
+               zdev->fh = ccdf->fh;
                zpci_disable_device(zdev);
                zdev->state = ZPCI_FN_STATE_STANDBY;
                break;
index 2a0a596ebf67d948663df3009d1ca14c8b09dabc..d77f2f6c7ff0761de1b51f09eb8926923c817681 100644 (file)
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_FLATMEM
+/* need in pfn_valid macro */
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+#endif
 
 #define DECLARE_EXPORT(name)           \
        extern void name(void);EXPORT_SYMBOL(name)
index e562d3caee57457a3b188f13b2959d47a7f835b4..ad7e178337f12f2f753f66ec2a9c26dae65c9f68 100644 (file)
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
-#define __copy_to_user_inatomic ___copy_to_user
-#define __copy_from_user_inatomic ___copy_from_user
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
 
 struct pt_regs;
 extern unsigned long compute_effective_address(struct pt_regs *,
index 070ed141aac79728da15f406289219a9c385fe27..76663b019eb5207f389655dac5133afce6e553aa 100644 (file)
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)
                return 1;
 
 #ifdef CONFIG_PCI
-       if (dev->bus == &pci_bus_type)
+       if (dev_is_pci(dev))
                return pci64_dma_supported(to_pci_dev(dev), device_mask);
 #endif
 
index 2096468de9b27f7dd2bb54f17e3b308f8d359836..e7e215dfa86668750c9490331d94b288ab78857c 100644 (file)
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-#ifdef CONFIG_PCI
-       if (dev->bus == &pci_bus_type)
+       if (dev_is_pci(dev))
                return 1;
-#endif
+
        return 0;
 }
 EXPORT_SYMBOL(dma_supported);
index 60b19f50c80a8f9d7f31b4b4105ed4fcded6a5ca..b45fe3fb4d2cfbc7999d34a1b25baf9bc0278c2e 100644 (file)
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -6,6 +6,7 @@
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
 #include <linux/ftrace.h>
+#include <linux/context_tracking.h>
 
 #include <asm/cacheflush.h>
 #include <asm/kdebug.h>
index b66a5338231e965252fef5f23234141bb2fc8f85..b085311dcd0ea9f81840e1fc2550c4f870e984eb 100644 (file)
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -123,11 +123,12 @@ void smp_callin(void)
                rmb();
 
        set_cpu_online(cpuid, true);
-       local_irq_enable();
 
        /* idle thread is expected to have preempt disabled */
        preempt_disable();
 
+       local_irq_enable();
+
        cpu_startup_entry(CPUHP_ONLINE);
 }
 
index dec48bfaddb8ff79ee7f7734cebfca7f36844461..1673940cf9c35169d808a71ef31c031d5c8a390d 100644 (file)
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1350,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
                return;
        }
 
+       if (!kvm_vcpu_is_bsp(apic->vcpu))
+               value &= ~MSR_IA32_APICBASE_BSP;
+       vcpu->arch.apic_base = value;
+
        /* update jump label if enable bit changes */
        if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
                if (value & MSR_IA32_APICBASE_ENABLE)
@@ -1359,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
                recalculate_apic_map(vcpu->kvm);
        }
 
-       if (!kvm_vcpu_is_bsp(apic->vcpu))
-               value &= ~MSR_IA32_APICBASE_BSP;
-
-       vcpu->arch.apic_base = value;
        if ((old_value ^ value) & X2APIC_ENABLE) {
                if (value & X2APIC_ENABLE) {
                        u32 id = kvm_apic_id(apic);
index b2fe1c252f35f92e3b6707406998738a9275c8d2..da7837e1349da8ac4ac06468fb2f05e798e136f1 100644 (file)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
        kvm_set_cr4(vcpu, vmcs12->host_cr4);
 
-       if (nested_cpu_has_ept(vmcs12))
-               nested_ept_uninit_mmu_context(vcpu);
+       nested_ept_uninit_mmu_context(vcpu);
 
        kvm_set_cr3(vcpu, vmcs12->host_cr3);
        kvm_mmu_reset_context(vcpu);
index 8711e3797165fa73fd0c401cedbb54c61eb68e4a..3c2e4aa529c479e92090c8862deaadcf0a8645e5 100644 (file)
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev)
                goto end;
 
        result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
-                       ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+                       ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
        if (result) {
                power_supply_unregister(&ac->charger);
                goto end;
@@ -255,7 +255,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
                return -EINVAL;
 
        acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
-                       ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
+                       ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
 
        ac = platform_get_drvdata(pdev);
        if (ac->charger.dev)
index fbf1aceda8b8ab915a7d2476d78db2b6e2b94e68..5876a49dfd386a4653325d99fcc69aa8452b12e1 100644 (file)
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
 MODULE_DESCRIPTION("ACPI Battery Driver");
 MODULE_LICENSE("GPL");
 
+static int battery_bix_broken_package;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -416,7 +417,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
                return -ENODEV;
        }
-       if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
+
+       if (battery_bix_broken_package)
+               result = extract_package(battery, buffer.pointer,
+                               extended_info_offsets + 1,
+                               ARRAY_SIZE(extended_info_offsets) - 1);
+       else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
                result = extract_package(battery, buffer.pointer,
                                extended_info_offsets,
                                ARRAY_SIZE(extended_info_offsets));
@@ -754,6 +760,17 @@ static int battery_notify(struct notifier_block *nb,
        return 0;
 }
 
+static struct dmi_system_id bat_dmi_table[] = {
+       {
+               .ident = "NEC LZ750/LS",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
+               },
+       },
+       {},
+};
+
 static int acpi_battery_add(struct acpi_device *device)
 {
        int result = 0;
@@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 {
        if (acpi_disabled)
                return;
+
+       if (dmi_check_system(bat_dmi_table))
+               battery_bix_broken_package = 1;
        acpi_bus_register_driver(&acpi_battery_driver);
 }
 
index bba9b72e25f8235e6d12593bdd16bb378c7af25a..0710004055c809f3059cde2337373503666fd96f 100644 (file)
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
 }
 EXPORT_SYMBOL(acpi_bus_get_private_data);
 
+void acpi_bus_no_hotplug(acpi_handle handle)
+{
+       struct acpi_device *adev = NULL;
+
+       acpi_bus_get_device(handle, &adev);
+       if (adev)
+               adev->flags.no_hotplug = true;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
+
 static void acpi_print_osc_error(acpi_handle handle,
        struct acpi_osc_context *context, char *error)
 {
index c0ed4f273cf22b50a486b08f08c811e563781b07..e3a92a6da39ae258cf9a4094f4293799e378271b 100644 (file)
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
          .driver_data = board_ahci_yes_fbs },                  /* 88se9128 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9125 */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
+                        PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9170 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
index fe3ca0989b14cac2e712ef2778f2a1b7e528c85b..1ad2f62d34b98fd0b41be9a239827e1634327c54 100644 (file)
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = {
        .id_table               = sis_pci_tbl,
        .probe                  = sis_init_one,
        .remove                 = ata_pci_remove_one,
+#ifdef CONFIG_PM
+       .suspend                = ata_pci_device_suspend,
+       .resume                 = ata_pci_device_resume,
+#endif
 };
 
 static struct scsi_host_template sis_sht = {
index a2e69d26266d9b7faef4244aef695180d4c6eaef..83a598ebb65a4ab7699d1dcebe1b44b42ea8ae5a 100644 (file)
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -425,10 +425,7 @@ static void null_del_dev(struct nullb *nullb)
        list_del_init(&nullb->list);
 
        del_gendisk(nullb->disk);
-       if (queue_mode == NULL_Q_MQ)
-               blk_mq_free_queue(nullb->q);
-       else
-               blk_cleanup_queue(nullb->q);
+       blk_cleanup_queue(nullb->q);
        put_disk(nullb->disk);
        kfree(nullb);
 }
@@ -578,10 +575,7 @@ static int null_add_dev(void)
        disk = nullb->disk = alloc_disk_node(1, home_node);
        if (!disk) {
 queue_fail:
-               if (queue_mode == NULL_Q_MQ)
-                       blk_mq_free_queue(nullb->q);
-               else
-                       blk_cleanup_queue(nullb->q);
+               blk_cleanup_queue(nullb->q);
                cleanup_queues(nullb);
 err:
                kfree(nullb);
index 6bfc1bb318f6399397ca8f169cc07fd98b46256d..dceb85f8d9a8251ed115e06af348692898f6a400 100644 (file)
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x0CF3, 0xE004) },
        { USB_DEVICE(0x0CF3, 0xE005) },
        { USB_DEVICE(0x0930, 0x0219) },
+       { USB_DEVICE(0x0930, 0x0220) },
        { USB_DEVICE(0x0489, 0xe057) },
        { USB_DEVICE(0x13d3, 0x3393) },
        { USB_DEVICE(0x0489, 0xe04e) },
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
index c0ff34f2d2df577efffe902562f9578690ea4e39..3980fd18f6eaeeb129fe543728f19231df80bd59 100644 (file)
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
index 8e562dc656016cd9c4dbeaaa3dac0a96a02d0f26..e1f3337a0cf9f7d7fa223a86e84282765d915108 100644 (file)
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM";
 static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
                                void **return_value)
 {
-       acpi_status status;
+       acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-       if (strstr(buffer.pointer, context) != NULL) {
-               *return_value = handle;
+
+       if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
+               if (strstr(buffer.pointer, context) != NULL) {
+                       *return_value = handle;
+                       status = AE_CTRL_TERMINATE;
+               }
                kfree(buffer.pointer);
-               return AE_CTRL_TERMINATE;
        }
-       return AE_OK;
+
+       return status;
 }
 
 static inline void ppi_assign_params(union acpi_object params[4],
index 8d3009e44fba40d4fba82825bd59d98febf53984..5543b7df8e16c736c210407a5d837ffb32ab3beb 100644 (file)
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
        return 0;
 }
 
-static unsigned int _get_val(struct clk_divider *divider, u8 div)
+static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
 {
        if (divider->flags & CLK_DIVIDER_ONE_BASED)
                return div;
index 39b40aaede2b36a3bd92546ff344420b480fb569..68e515d093d864ca9cc704d6f90a441695e67a6d 100644 (file)
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data;
 #define ASS_CLK_DIV 0x4
 #define ASS_CLK_GATE 0x8
 
+/* list of all parent clock list */
+static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
+static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
+
+#ifdef CONFIG_PM_SLEEP
 static unsigned long reg_save[][2] = {
        {ASS_CLK_SRC,  0},
        {ASS_CLK_DIV,  0},
        {ASS_CLK_GATE, 0},
 };
 
-/* list of all parent clock list */
-static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
-static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
-
-#ifdef CONFIG_PM_SLEEP
 static int exynos_audss_clk_suspend(void)
 {
        int i;
index ad5ff50c5f281a5e1c31c498c78c5a717aa464de..1a7c1b929c690b03b2b24a1423da55a97b66aa00 100644 (file)
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -39,7 +39,7 @@
 #define SRC_TOP1               0xc214
 #define SRC_CAM                        0xc220
 #define SRC_TV                 0xc224
-#define SRC_MFC                        0xcc28
+#define SRC_MFC                        0xc228
 #define SRC_G3D                        0xc22c
 #define E4210_SRC_IMAGE                0xc230
 #define SRC_LCD0               0xc234
index adf32343c9f9c408f5b8bc7c78a6ce3bc66c27a7..e52359cf9b6fe76db63b787717f003454d89e12e 100644 (file)
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -25,6 +25,7 @@
 #define MPLL_LOCK              0x4000
 #define MPLL_CON0              0x4100
 #define SRC_CORE1              0x4204
+#define GATE_IP_ACP            0x8800
 #define CPLL_LOCK              0x10020
 #define EPLL_LOCK              0x10030
 #define VPLL_LOCK              0x10040
@@ -75,7 +76,6 @@
 #define SRC_CDREX              0x20200
 #define PLL_DIV2_SEL           0x20a24
 #define GATE_IP_DISP1          0x10928
-#define GATE_IP_ACP            0x10000
 
 /* list of PLLs to be registered */
 enum exynos5250_plls {
@@ -120,7 +120,8 @@ enum exynos5250_clks {
        spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
        hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
        tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
-       wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
+       wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
+       smmu_mdma0,
 
        /* mux clocks */
        mout_hdmi = 1024,
@@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
        GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
        GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
        GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
-       GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
-       GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
+       GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
+       GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
        GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
        GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
        GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
@@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
        GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
        GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
        GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
-       GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
+       GATE(sysreg, "sysreg", "aclk66",
+                       GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
        GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
        GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
        GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
@@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
        GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
        GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
        GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
+       GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
+       GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
 };
 
 static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
index 16d7b4ac94be21210779cd7c87d9440a78666162..8d19f7c06010c364ff6ee758c15096c252f9f4bd 100644 (file)
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -839,9 +839,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 
        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
-       policy->user_policy.policy = policy->policy;
-       policy->user_policy.governor = policy->governor;
-
        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
@@ -1016,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-       if (frozen)
-               /* Restore the saved policy when doing light-weight init */
-               policy = cpufreq_policy_restore(cpu);
-       else
+       /*
+        * Restore the saved policy when doing light-weight init and fall back
+        * to the full init if that fails.
+        */
+       policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+       if (!policy) {
+               frozen = false;
                policy = cpufreq_policy_alloc();
-
-       if (!policy)
-               goto nomem_out;
-
+               if (!policy)
+                       goto nomem_out;
+       }
 
        /*
         * In the resume path, since we restore a saved policy, the assignment
@@ -1069,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-       policy->user_policy.min = policy->min;
-       policy->user_policy.max = policy->max;
+       if (!frozen) {
+               policy->user_policy.min = policy->min;
+               policy->user_policy.max = policy->max;
+       }
 
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);
@@ -1101,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
        cpufreq_init_policy(policy);
 
+       if (!frozen) {
+               policy->user_policy.policy = policy->policy;
+               policy->user_policy.governor = policy->governor;
+       }
+
        kobject_uevent(&policy->kobj, KOBJ_ADD);
        up_read(&cpufreq_rwsem);
 
@@ -1118,8 +1124,11 @@ err_get_freq:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-       if (frozen)
+       if (frozen) {
+               /* Do not leave stale fallback data behind. */
+               per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
                cpufreq_policy_put_kobj(policy);
+       }
        cpufreq_policy_free(policy);
 
 nomem_out:
index 5f1cbae369611062c18ee50982ffa5c6f724d30f..d51f17ed691e023196bbd7e56a3341d4025ce092 100644 (file)
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -581,7 +581,8 @@ static void intel_pstate_timer_func(unsigned long __data)
 }
 
 #define ICPU(model, policy) \
-       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
+       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
+                       (unsigned long)&policy }
 
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, core_params),
@@ -614,6 +615,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu = all_cpu_data[cpunum];
 
        intel_pstate_get_cpu_pstates(cpu);
+       if (!cpu->pstate.current_pstate) {
+               all_cpu_data[cpunum] = NULL;
+               kfree(cpu);
+               return -ENODATA;
+       }
 
        cpu->cpu = cpunum;
 
index 36795639df0da2d828c784b61c82a728037f6e1c..6e51114057d0963605ef1dfc80b231946e1b88f7 100644 (file)
@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = {
        .state_count = 2,
 };
 
-static int __init calxeda_cpuidle_probe(struct platform_device *pdev)
+static int calxeda_cpuidle_probe(struct platform_device *pdev)
 {
        return cpuidle_register(&calxeda_idle_driver, NULL);
 }
index 9dd6e01eac33050b8304c5f8758440e7286606f2..f757a0f428bde807a8e5402af8404e98314c2bf8 100644 (file)
@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
 static int __init ixp_module_init(void)
 {
        int num = ARRAY_SIZE(ixp4xx_algos);
-       int i, err ;
+       int i, err;
 
        pdev = platform_device_register_full(&ixp_dev_info);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);
 
-       dev = &pdev->dev;
-
        spin_lock_init(&desc_lock);
        spin_lock_init(&emerg_lock);
 
index 1a49c777607c50d313482f3ead21c19572a1cf8d..87529181efccb9851467cc04be04bd91fecb15b3 100644 (file)
@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device)
        }
 
        dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, dma_src)) {
+               dev_err(dev, "mapping src buffer failed\n");
+               goto free_resources;
+       }
        dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+       if (dma_mapping_error(dev, dma_dest)) {
+               dev_err(dev, "mapping dest buffer failed\n");
+               goto unmap_src;
+       }
        flags = DMA_PREP_INTERRUPT;
        tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
                                                   IOAT_TEST_SIZE, flags);
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
        }
 
 unmap_dma:
-       dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
        dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+unmap_src:
+       dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
 free_resources:
        dma->device_free_chan_resources(dma_chan);
 out:
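
The two dma_mapping_error() checks added above give the self-test a proper unwind order: a failed destination mapping releases only the already-mapped source, and the unmap labels now run in reverse map order. A minimal sketch of that pattern, with illustrative names:

	static int map_test_buffers(struct device *dev, void *src, void *dst,
				    size_t len, dma_addr_t *s, dma_addr_t *d)
	{
		*s = dma_map_single(dev, src, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *s))
			return -ENOMEM;		/* nothing mapped yet */

		*d = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *d)) {
			/* undo only the source mapping */
			dma_unmap_single(dev, *s, len, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		return 0;
	}
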
index c79dd2b1f70ecc2af6d0fb67a3c3289672eba7f9..d3c3b5b15824ee8bb927a7b1a382582309b31376 100644 (file)
@@ -906,14 +906,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                WARN_ON(readq(&gtt_entries[i-1])
                        != gen8_pte_encode(addr, level, true));
 
-#if 0 /* TODO: Still needed on GEN8? */
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
-#endif
 }
 
 /*
index 54e82a80cf507fe90366ad1e0fb33f082073ca83..769b864465a989201339870db2eb122770886c72 100644 (file)
@@ -10541,11 +10541,20 @@ static struct intel_quirk intel_quirks[] = {
        /* Sony Vaio Y cannot use SSC on LVDS */
        { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 
-       /*
-        * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
-        * seem to use inverted backlight PWM.
-        */
-       { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
+       /* Acer Aspire 5734Z must invert backlight brightness */
+       { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+       /* Acer/eMachines G725 */
+       { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+       /* Acer/eMachines e725 */
+       { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+       /* Acer/Packard Bell NCL20 */
+       { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+       /* Acer Aspire 4736Z */
+       { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
 
        /* Dell XPS13 HD Sandy Bridge */
        { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
index 48f06378d3f9d5f9e09e03291a11d6e26527f674..2ea5568b6cf59be811fa6777612eca8b8180736b 100644 (file)
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
 
        if (parent) {
                struct nouveau_device *device = nv_device(parent);
-               int subidx = nv_hclass(subdev) & 0xff;
-
                subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
                subdev->mmio  = nv_subdev(device)->mmio;
-               device->subdev[subidx] = *pobject;
        }
 
        return 0;
index 9135b25a29d09a4e82e57e1c9a1a27608eee6a99..dd01c6c435d6e2e01ecb92ccbd57cf09e7346660 100644 (file)
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
                if (ret)
                        return ret;
 
+               device->subdev[i] = devobj->subdev[i];
+
                /* note: can't init *any* subdevs until devinit has been run
                 * due to not knowing exactly what the vbios init tables will
                 * mess with.  devinit also can't be run until all of its
index 8d06eef2b9ee02f7b3d895c8d7a37bfd6e022924..dbc5e33de94f8b6caebbab7319d3bfb7117c57f2 100644 (file)
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-               device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+               device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
index 434bb4b0fa2e650a130a98cc940f5ca7727cfeac..5c8a63dc506aafcfbbe5088ede75ed635d0ba8a4 100644 (file)
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
        while ((mthd = &mthds[i++]) && (init = mthd->init)) {
                u32  addr = 0x80000000 | mthd->oclass;
                for (data = 0; init->count; init++) {
-                       if (data != init->data) {
+                       if (init == mthd->init || data != init->data) {
                                nv_wr32(priv, 0x40448c, init->data);
                                data = init->data;
                        }
index 8541aa382ff224136d8c1cedff00db0595a753c2..d89dbdf39b0db501159a873eeb5ada0612a3ba35 100644 (file)
@@ -75,6 +75,11 @@ struct nouveau_fb {
 static inline struct nouveau_fb *
 nouveau_fb(void *obj)
 {
+       /* fbram uses this before device subdev pointer is valid */
+       if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+           nv_subidx(obj) == NVDEV_SUBDEV_FB)
+               return obj;
+
        return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
 }
 
index 420908cb82b613bbc97fbccb9b123f3449926100..df1b1b42309337cb14c79b503497e49bf80ee49b 100644 (file)
@@ -365,13 +365,13 @@ static u16
 init_script(struct nouveau_bios *bios, int index)
 {
        struct nvbios_init init = { .bios = bios };
-       u16 data;
+       u16 bmp_ver = bmp_version(bios), data;
 
-       if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
-               if (index > 1)
+       if (bmp_ver && bmp_ver < 0x0510) {
+               if (index > 1 || bmp_ver < 0x0100)
                        return 0x0000;
 
-               data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
+               data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
                return nv_ro16(bios, data + (index * 2));
        }
 
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
        u16 offset = nv_ro16(bios, init->offset + 1);
 
        trace("JUMP\t0x%04x\n", offset);
-       init->offset = offset;
+
+       if (init_exec(init))
+               init->offset = offset;
+       else
+               init->offset += 3;
 }
 
 /**
index 6828d81ed7b99daea875ffd06c4dd399d4aa05dd..900fae01793e7884ee8033a11d6c247cbeb36678 100644 (file)
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        if (ret)
                goto done;
 
+       info->offset = ntfy->node->offset;
+
 done:
        if (ret)
                nouveau_abi16_ntfy_fini(chan, ntfy);
index 95c740454049ad1b4a4cf23c2bc63d7038c7a362..ba0183fb84f3b871a4815552021174e870f780e8 100644 (file)
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
        bool dsm_detected;
        bool optimus_detected;
        acpi_handle dhandle;
+       acpi_handle other_handle;
        acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
        if (!dhandle)
                return false;
 
-       if (!acpi_has_method(dhandle, "_DSM"))
+       if (!acpi_has_method(dhandle, "_DSM")) {
+               nouveau_dsm_priv.other_handle = dhandle;
                return false;
-
+       }
        if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
                retval |= NOUVEAU_DSM_HAS_MUX;
 
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
                printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
                        acpi_method_name);
                nouveau_dsm_priv.dsm_detected = true;
+               /*
+                * On some systems hotplug events are generated for the device
+                * being switched off when _DSM is executed.  They cause ACPI
+                * hotplug to trigger and attempt to remove the device from
+                * the system, which causes it to break down.  Prevent that from
+                * happening by setting the no_hotplug flag for the involved
+                * ACPI device objects.
+                */
+               acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
+               acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
                ret = true;
        }
 
index 29c3efdfc7dd714e00ae03525d0b9528fce841cd..25ea82f8def3cb883a06aef6e9da59ea4cbffaf7 100644 (file)
@@ -610,7 +610,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        ret = nouveau_fence_sync(fence, chan);
        nouveau_fence_unref(&fence);
        if (ret)
-               return ret;
+               goto fail_free;
 
        if (new_bo != old_bo) {
                ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
index b1970596a782a437547a1fe32475253a69557f97..0b9621c9aeea3b25da6bad8b121bfc3c3236ddcc 100644 (file)
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        }
 
        if (tiling_flags & RADEON_TILING_MACRO) {
-               if (rdev->family >= CHIP_BONAIRE)
-                       tmp = rdev->config.cik.tile_config;
-               else if (rdev->family >= CHIP_TAHITI)
-                       tmp = rdev->config.si.tile_config;
-               else if (rdev->family >= CHIP_CAYMAN)
-                       tmp = rdev->config.cayman.tile_config;
-               else
-                       tmp = rdev->config.evergreen.tile_config;
+               evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
 
-               switch ((tmp & 0xf0) >> 4) {
-               case 0: /* 4 banks */
-                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
-                       break;
-               case 1: /* 8 banks */
-               default:
-                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
-                       break;
-               case 2: /* 16 banks */
-                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
-                       break;
+               /* Set NUM_BANKS. */
+               if (rdev->family >= CHIP_BONAIRE) {
+                       unsigned tileb, index, num_banks, tile_split_bytes;
+
+                       /* Calculate the macrotile mode index. */
+                       tile_split_bytes = 64 << tile_split;
+                       tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+                       tileb = min(tile_split_bytes, tileb);
+
+                       for (index = 0; tileb > 64; index++) {
+                               tileb >>= 1;
+                       }
+
+                       if (index >= 16) {
+                               DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+                                         target_fb->bits_per_pixel, tile_split);
+                               return -EINVAL;
+                       }
+
+                       num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+               } else {
+                       /* SI and older. */
+                       if (rdev->family >= CHIP_TAHITI)
+                               tmp = rdev->config.si.tile_config;
+                       else if (rdev->family >= CHIP_CAYMAN)
+                               tmp = rdev->config.cayman.tile_config;
+                       else
+                               tmp = rdev->config.evergreen.tile_config;
+
+                       switch ((tmp & 0xf0) >> 4) {
+                       case 0: /* 4 banks */
+                               fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+                               break;
+                       case 1: /* 8 banks */
+                       default:
+                               fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+                               break;
+                       case 2: /* 16 banks */
+                               fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+                               break;
+                       }
                }
 
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-
-               evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
                fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
                fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
                fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
@@ -1180,19 +1202,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
        if (rdev->family >= CHIP_BONAIRE) {
-               u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
-               u32 num_rb = rdev->config.cik.max_backends_per_se;
-               if (num_pipe_configs > 8)
-                       num_pipe_configs = 8;
-               if (num_pipe_configs == 8)
-                       fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
-               else if (num_pipe_configs == 4) {
-                       if (num_rb == 4)
-                               fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
-                       else if (num_rb < 4)
-                               fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
-               } else if (num_pipe_configs == 2)
-                       fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
+               /* Read the pipe config from the 2D TILED SCANOUT mode.
+                * It should be the same for the other modes too, but not all
+                * modes set the pipe config field. */
+               u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
+
+               fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
        } else if ((rdev->family == CHIP_TAHITI) ||
                   (rdev->family == CHIP_PITCAIRN))
                fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
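
As a worked example of the macrotile index computed above (values chosen purely for illustration): with target_fb->bits_per_pixel = 32 and tile_split = 2 (a 256-byte tile split), tileb = 8 * 8 * 32 / 8 = 256 and min(256, 256) = 256; the loop then halves 256 -> 128 -> 64, giving index = 2, and NUM_BANKS is taken from bits 7:6 of macrotile_mode_array[2].
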
index b43a3a3c90671911a4eaa0a7c5a260c42118df41..e950fabd7f5e474ab3e60371f669e31805077785 100644 (file)
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
  * Returns the disabled RB bitmask.
  */
 static u32 cik_get_rb_disabled(struct radeon_device *rdev,
-                             u32 max_rb_num, u32 se_num,
+                             u32 max_rb_num_per_se,
                              u32 sh_per_se)
 {
        u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
 
        data >>= BACKEND_DISABLE_SHIFT;
 
-       mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
+       mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
 
        return data & mask;
 }
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
  */
 static void cik_setup_rb(struct radeon_device *rdev,
                         u32 se_num, u32 sh_per_se,
-                        u32 max_rb_num)
+                        u32 max_rb_num_per_se)
 {
        int i, j;
        u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
        for (i = 0; i < se_num; i++) {
                for (j = 0; j < sh_per_se; j++) {
                        cik_select_se_sh(rdev, i, j);
-                       data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+                       data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
                        if (rdev->family == CHIP_HAWAII)
                                disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
                        else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
        cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
        mask = 1;
-       for (i = 0; i < max_rb_num; i++) {
+       for (i = 0; i < max_rb_num_per_se * se_num; i++) {
                if (!(disabled_rbs & mask))
                        enabled_rbs |= mask;
                mask <<= 1;
        }
 
+       rdev->config.cik.backend_enable_mask = enabled_rbs;
+
        for (i = 0; i < se_num; i++) {
                cik_select_se_sh(rdev, i, 0xffffffff);
                data = 0;
index b1f990d0eaa101d1bce56fabfc4b286ff01de7e9..45e1f447bc794c677a8e83830fd318585c2d7f7e 100644 (file)
@@ -1940,7 +1940,7 @@ struct si_asic {
        unsigned sc_earlyz_tile_fifo_size;
 
        unsigned num_tile_pipes;
-       unsigned num_backends_per_se;
+       unsigned backend_enable_mask;
        unsigned backend_disable_mask_per_asic;
        unsigned backend_map;
        unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
        unsigned sc_earlyz_tile_fifo_size;
 
        unsigned num_tile_pipes;
-       unsigned num_backends_per_se;
+       unsigned backend_enable_mask;
        unsigned backend_disable_mask_per_asic;
        unsigned backend_map;
        unsigned num_texture_channel_caches;
index 9d302eaeea1587b6fdb5439119b6f45c0de961db..485848f889f55c9f4b86bf61bd8c019e2ad96a27 100644 (file)
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
        bool atpx_detected;
        /* handle for device - and atpx */
        acpi_handle dhandle;
+       acpi_handle other_handle;
        struct radeon_atpx atpx;
 } radeon_atpx_priv;
 
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
                return false;
 
        status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
-       if (ACPI_FAILURE(status))
+       if (ACPI_FAILURE(status)) {
+               radeon_atpx_priv.other_handle = dhandle;
                return false;
-
+       }
        radeon_atpx_priv.dhandle = dhandle;
        radeon_atpx_priv.atpx.handle = atpx_handle;
        return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
                printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
                       acpi_method_name);
                radeon_atpx_priv.atpx_detected = true;
+               /*
+                * On some systems hotplug events are generated for the device
+                * being switched off when ATPX is executed.  They cause ACPI
+                * hotplug to trigger and attempt to remove the device from
+                * the system, which causes it to break down.  Prevent that from
+                * happening by setting the no_hotplug flag for the involved
+                * ACPI device objects.
+                */
+               acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
+               acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
                return true;
        }
        return false;
index 1958b36ad0e5cdddf623b8a54a16df0c501ca668..db39ea36bf22fc396da63e5a157adf42e752e712 100644 (file)
  *   2.33.0 - Add SI tiling mode array query
  *   2.34.0 - Add CIK tiling mode array query
  *   2.35.0 - Add CIK macrotile mode array query
+ *   2.36.0 - Fix CIK DCE tiling setup
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       35
+#define KMS_DRIVER_MINOR       36
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 55d0b474bd371ae83f1cea0ec08d30504b371816..21d593c0ecaf4e7e0ec85ab9b7bf53fafd0f2a87 100644 (file)
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        case RADEON_INFO_SI_CP_DMA_COMPUTE:
                *value = 1;
                break;
+       case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
+               if (rdev->family >= CHIP_BONAIRE) {
+                       *value = rdev->config.cik.backend_enable_mask;
+               } else if (rdev->family >= CHIP_TAHITI) {
+                       *value = rdev->config.si.backend_enable_mask;
+               } else {
+                       DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+               }
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
index 373d088bac66db910291045424e075921d39d202..b9c0529b4a2e1e9d8f69e51f6742d023492a0040 100644 (file)
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                return -EINVAL;
        }
 
-       if ((start >> 28) != (end >> 28)) {
+       if ((start >> 28) != ((end - 1) >> 28)) {
                DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
                          start, end);
                return -EINVAL;
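
A quick example of why the upper bound is now end - 1 (addresses chosen for illustration): a reloc covering 0x0fff0000 up to but not including 0x10000000 ends exactly on a 256MB boundary, so start >> 28 = 0 while end >> 28 = 1 and the old test rejected it, even though the last byte actually used, end - 1 = 0x0fffffff, still satisfies (end - 1) >> 28 = 0.
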
index a36736dab5e0694cd5a7b78d6dbb013b484cb8f6..85e1edfaa3bed0814e262378ae0a7558834936d3 100644 (file)
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
 }
 
 static u32 si_get_rb_disabled(struct radeon_device *rdev,
-                             u32 max_rb_num, u32 se_num,
+                             u32 max_rb_num_per_se,
                              u32 sh_per_se)
 {
        u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
 
        data >>= BACKEND_DISABLE_SHIFT;
 
-       mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+       mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
 
        return data & mask;
 }
 
 static void si_setup_rb(struct radeon_device *rdev,
                        u32 se_num, u32 sh_per_se,
-                       u32 max_rb_num)
+                       u32 max_rb_num_per_se)
 {
        int i, j;
        u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
        for (i = 0; i < se_num; i++) {
                for (j = 0; j < sh_per_se; j++) {
                        si_select_se_sh(rdev, i, j);
-                       data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+                       data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
                        disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
                }
        }
        si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
        mask = 1;
-       for (i = 0; i < max_rb_num; i++) {
+       for (i = 0; i < max_rb_num_per_se * se_num; i++) {
                if (!(disabled_rbs & mask))
                        enabled_rbs |= mask;
                mask <<= 1;
        }
 
+       rdev->config.si.backend_enable_mask = enabled_rbs;
+
        for (i = 0; i < se_num; i++) {
                si_select_se_sh(rdev, i, 0xffffffff);
                data = 0;
index f80b700f821ca4ece2a17ef1db08c099738f74fe..797ed29a36ea7969765e90641d15f3a66e982db3 100644 (file)
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
  * which is also the index into the MWAIT hint array.
  * Thus C0 is a dummy.
  */
-static struct cpuidle_state nehalem_cstates[] __initdata = {
+static struct cpuidle_state nehalem_cstates[] = {
        {
                .name = "C1-NHM",
                .desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
                .enter = NULL }
 };
 
-static struct cpuidle_state snb_cstates[] __initdata = {
+static struct cpuidle_state snb_cstates[] = {
        {
                .name = "C1-SNB",
                .desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
                .enter = NULL }
 };
 
-static struct cpuidle_state ivb_cstates[] __initdata = {
+static struct cpuidle_state ivb_cstates[] = {
        {
                .name = "C1-IVB",
                .desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
                .enter = NULL }
 };
 
-static struct cpuidle_state hsw_cstates[] __initdata = {
+static struct cpuidle_state hsw_cstates[] = {
        {
                .name = "C1-HSW",
                .desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
                .enter = NULL }
 };
 
-static struct cpuidle_state atom_cstates[] __initdata = {
+static struct cpuidle_state atom_cstates[] = {
        {
                .name = "C1E-ATM",
                .desc = "MWAIT 0x00",
@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
        {
                .enter = NULL }
 };
-static struct cpuidle_state avn_cstates[] __initdata = {
+static struct cpuidle_state avn_cstates[] = {
        {
                .name = "C1-AVN",
                .desc = "MWAIT 0x00",
@@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = {
                .exit_latency = 15,
                .target_residency = 45,
                .enter = &intel_idle },
+       {
+               .enter = NULL }
 };
 
 /**
index 12fef76c791c524454bd9a0c48d5b4a967ac19af..45126879ad28a2149351232a1f9c4a2551f06c09 100644 (file)
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
-       (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
-       (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
-                                 struct l2t_entry *l2t)
-{
-       unsigned int ntuple = 0;
-       u32 viid;
-
-       switch (dev->rdev.lldi.filt_mode) {
-
-       /* default filter mode */
-       case HW_TPL_FR_MT_PR_IV_P_FC:
-               if (l2t->vlan == VLAN_NONE)
-                       ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-               else {
-                       ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-                       ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
-               }
-               ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-                         FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-               break;
-       case HW_TPL_FR_MT_PR_OV_P_FC: {
-               viid = cxgb4_port_viid(l2t->neigh->dev);
-
-               ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
-               ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-               ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-               ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-                         FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-               break;
-       }
-       default:
-               break;
-       }
-       return ntuple;
-}
-
 static int send_connect(struct c4iw_ep *ep)
 {
        struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
                        req->local_ip = la->sin_addr.s_addr;
                        req->peer_ip = ra->sin_addr.s_addr;
                        req->opt0 = cpu_to_be64(opt0);
-                       req->params = cpu_to_be32(select_ntuple(ep->com.dev,
-                                               ep->dst, ep->l2t));
+                       req->params = cpu_to_be32(cxgb4_select_ntuple(
+                                               ep->com.dev->rdev.lldi.ports[0],
+                                               ep->l2t));
                        req->opt2 = cpu_to_be32(opt2);
                } else {
                        req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
                        req6->peer_ip_lo = *((__be64 *)
                                                (ra6->sin6_addr.s6_addr + 8));
                        req6->opt0 = cpu_to_be64(opt0);
-                       req6->params = cpu_to_be32(
-                                       select_ntuple(ep->com.dev, ep->dst,
-                                                     ep->l2t));
+                       req6->params = cpu_to_be32(cxgb4_select_ntuple(
+                                               ep->com.dev->rdev.lldi.ports[0],
+                                               ep->l2t));
                        req6->opt2 = cpu_to_be32(opt2);
                }
        } else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
                        t5_req->peer_ip = ra->sin_addr.s_addr;
                        t5_req->opt0 = cpu_to_be64(opt0);
                        t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
-                                               select_ntuple(ep->com.dev,
-                                               ep->dst, ep->l2t)));
+                                                    cxgb4_select_ntuple(
+                                            ep->com.dev->rdev.lldi.ports[0],
+                                            ep->l2t)));
                        t5_req->opt2 = cpu_to_be32(opt2);
                } else {
                        t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
                                                (ra6->sin6_addr.s6_addr + 8));
                        t5_req6->opt0 = cpu_to_be64(opt0);
                        t5_req6->params = (__force __be64)cpu_to_be32(
-                               select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+                                                       cxgb4_select_ntuple(
+                                               ep->com.dev->rdev.lldi.ports[0],
+                                               ep->l2t));
                        t5_req6->opt2 = cpu_to_be32(opt2);
                }
        }
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        memset(req, 0, sizeof(*req));
        req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
        req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
-       req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+       req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+                                    ep->com.dev->rdev.lldi.ports[0],
                                     ep->l2t));
        sin = (struct sockaddr_in *)&ep->com.local_addr;
        req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        /*
         * Allocate a server TID.
         */
-       if (dev->rdev.lldi.enable_fw_ofld_conn)
+       if (dev->rdev.lldi.enable_fw_ofld_conn &&
+           ep->com.local_addr.ss_family == AF_INET)
                ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
                                             cm_id->local_addr.ss_family, ep);
        else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        /*
         * Calculate the server tid from filter hit index from cpl_rx_pkt.
         */
-       stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
-                                         - dev->rdev.lldi.tids->sftid_base
-                                         + dev->rdev.lldi.tids->nstids;
+       stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
 
        lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
        if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        window = (__force u16) htons((__force u16)tcph->window);
 
        /* Calcuate filter portion for LE region. */
-       filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+       filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+                                                   dev->rdev.lldi.ports[0],
+                                                   e));
 
        /*
         * Synthesize the cpl_pass_accept_req. We have everything except the
index c29b5c8388331458bb2a2caf85580d76031f1dcb..cdc7df4fdb8aedea8c9026de82d774e5f5fe502b 100644 (file)
@@ -31,6 +31,7 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/if_arp.h>      /* For ARPHRD_xxx */
 #include <linux/module.h>
 #include <net/rtnetlink.h>
 #include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
                return -EINVAL;
 
        pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
-       if (!pdev)
+       if (!pdev || pdev->type != ARPHRD_INFINIBAND)
                return -ENODEV;
 
        ppriv = netdev_priv(pdev);
index 846ccdd905b19b66872762fd05db29ea0552a8cb..d2965e4b32243a90f40db55a7722c3229d4edb3a 100644 (file)
@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
                break;
 
        case EV_ABS:
+               input_alloc_absinfo(dev);
+               if (!dev->absinfo)
+                       return;
+
                __set_bit(code, dev->absbit);
                break;
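	/*
	 * With the allocation added above, declaring an absolute axis via
	 * input_set_capability() no longer leaves dev->absinfo NULL; a
	 * minimal usage sketch (hypothetical driver code):
	 *
	 *	input_set_capability(idev, EV_ABS, ABS_X);
	 *	input_set_abs_params(idev, ABS_X, 0, 1023, 0, 0);
	 */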
 
index 75762d6ff3ba70934190bb2fe70ef6d6135d49c5..aa127ba392a45cbabe9f95180876543dd6441865 100644 (file)
@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
        }
 }
 
-static irqreturn_t zforce_interrupt(int irq, void *dev_id)
+static irqreturn_t zforce_irq(int irq, void *dev_id)
+{
+       struct zforce_ts *ts = dev_id;
+       struct i2c_client *client = ts->client;
+
+       if (ts->suspended && device_may_wakeup(&client->dev))
+               pm_wakeup_event(&client->dev, 500);
+
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
 {
        struct zforce_ts *ts = dev_id;
        struct i2c_client *client = ts->client;
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
        u8 *payload;
 
        /*
-        * When suspended, emit a wakeup signal if necessary and return.
+        * When still suspended, return.
         * Due to the level-interrupt we will get re-triggered later.
         */
        if (ts->suspended) {
-               if (device_may_wakeup(&client->dev))
-                       pm_wakeup_event(&client->dev, 500);
                msleep(20);
                return IRQ_HANDLED;
        }
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client,
         * Therefore we can trigger the interrupt anytime it is low and do
         * not need to limit it to the interrupt edge.
         */
-       ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
-                                       zforce_interrupt,
+       ret = devm_request_threaded_irq(&client->dev, client->irq,
+                                       zforce_irq, zforce_irq_thread,
                                        IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                        input_dev->name, ts);
        if (ret) {
index 497bd026c2378eec6e781df68d007c135be4c1f7..4a48255281887e8c03f9dbacccf404f4f9c52476 100644 (file)
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
        int i;
        struct pci_dev *tmp_hfcpci = NULL;
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
        strcpy(tmp, hfcpci_revision);
        printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
 
index f6ab63aa699590278531ba9ae6ceabe75ba90813..33eeb4602c7e7131078d78d97b24f09971b8396e 100644 (file)
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
        struct IsdnCardState *cs = card->cs;
        char tmp[64];
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
        strcpy(tmp, telespci_revision);
        printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
        if (cs->typ != ISDN_CTYPE_TELESPCI)
index 05188351711d2d80f5ac8cd0a006159b732b2c90..a97263e902ffc6b927db6b935cb290902909f21e 100644 (file)
@@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
        if (i % 2)
                goto err;
 
-       mutex_lock(&chip->lock);
-
        for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
                ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
-               if (ret) {
-                       mutex_unlock(&chip->lock);
+               if (ret)
                        return -EINVAL;
-               }
        }
 
-       mutex_unlock(&chip->lock);
-
        return size;
 
 err:
@@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev,
 {
        struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
        struct lp55xx_chip *chip = led->chip;
+       int ret;
 
        mutex_lock(&chip->lock);
 
        chip->engine_idx = nr;
        lp5521_load_engine(chip);
+       ret = lp5521_update_program_memory(chip, buf, len);
 
        mutex_unlock(&chip->lock);
 
-       return lp5521_update_program_memory(chip, buf, len);
+       return ret;
 }
 store_load(1)
 store_load(2)
index 6b553d9f4266d570eec78a2180eee9b56194d62c..fd9ab5f61441c50716620138bf55b175c2ac1045 100644 (file)
@@ -337,18 +337,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
        if (i % 2)
                goto err;
 
-       mutex_lock(&chip->lock);
-
        for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
                ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
-               if (ret) {
-                       mutex_unlock(&chip->lock);
+               if (ret)
                        return -EINVAL;
-               }
        }
 
-       mutex_unlock(&chip->lock);
-
        return size;
 
 err:
@@ -548,15 +542,17 @@ static ssize_t store_engine_load(struct device *dev,
 {
        struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
        struct lp55xx_chip *chip = led->chip;
+       int ret;
 
        mutex_lock(&chip->lock);
 
        chip->engine_idx = nr;
        lp5523_load_engine_and_select_page(chip);
+       ret = lp5523_update_program_memory(chip, buf, len);
 
        mutex_unlock(&chip->lock);
 
-       return lp5523_update_program_memory(chip, buf, len);
+       return ret;
 }
 store_load(1)
 store_load(2)
index 11e20afbdcacb7d31e49798db6d2dfbf0504afd8..705698fd2c7ed0f8f5e4fcada7b060627352be05 100644 (file)
@@ -1228,8 +1228,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
 
        pcr->remove_pci = true;
 
-       cancel_delayed_work(&pcr->carddet_work);
-       cancel_delayed_work(&pcr->idle_work);
+       /* Disable interrupts at the pcr level */
+       spin_lock_irq(&pcr->lock);
+       rtsx_pci_writel(pcr, RTSX_BIER, 0);
+       pcr->bier = 0;
+       spin_unlock_irq(&pcr->lock);
+
+       cancel_delayed_work_sync(&pcr->carddet_work);
+       cancel_delayed_work_sync(&pcr->idle_work);
 
        mfd_remove_devices(&pcidev->dev);
 
index d210d131fef255da97e7d277574ac80f01a66341..0f55589a56b815af4c00bc521c504db468774fa5 100644 (file)
@@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        info->map.cached =
-               ioremap_cached(info->map.phys, info->map.size);
+               ioremap_cache(info->map.phys, info->map.size);
        if (!info->map.cached)
                printk(KERN_WARNING "Failed to ioremap cached %s\n",
                       info->map.name);
index 187b1b7772ef1b873303fc46998a591137bec7b7..4ced59436558e65a723df9975e8302fcec2cbb62 100644 (file)
@@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
        port = &(SLAVE_AD_INFO(slave).port);
 
-       // if slave is null, the whole port is not initialized
+       /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
                pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
                           slave->bond->dev->name, slave->dev->name);
                return;
        }
 
+       __get_state_machine_lock(port);
+
        port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
        port->actor_oper_port_key = port->actor_admin_port_key |=
                (__get_link_speed(port) << 1);
        pr_debug("Port %d changed speed\n", port->actor_port_number);
-       // there is no need to reselect a new aggregator, just signal the
-       // state machines to reinitialize
+       /* there is no need to reselect a new aggregator, just signal the
+        * state machines to reinitialize
+        */
        port->sm_vars |= AD_PORT_BEGIN;
+
+       __release_state_machine_lock(port);
 }
 
 /**
@@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
        port = &(SLAVE_AD_INFO(slave).port);
 
-       // if slave is null, the whole port is not initialized
+       /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
                pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
                           slave->bond->dev->name, slave->dev->name);
                return;
        }
 
+       __get_state_machine_lock(port);
+
        port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
        port->actor_oper_port_key = port->actor_admin_port_key |=
                __get_duplex(port);
        pr_debug("Port %d changed duplex\n", port->actor_port_number);
-       // there is no need to reselect a new aggregator, just signal the
-       // state machines to reinitialize
+       /* there is no need to reselect a new aggregator, just signal the
+        * state machines to reinitialize
+        */
        port->sm_vars |= AD_PORT_BEGIN;
+
+       __release_state_machine_lock(port);
 }
 
 /**
@@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
        port = &(SLAVE_AD_INFO(slave).port);
 
-       // if slave is null, the whole port is not initialized
+       /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
                pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
                           slave->bond->dev->name, slave->dev->name);
                return;
        }
 
-       // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
-       // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
+       __get_state_machine_lock(port);
+       /* on link down we are zeroing duplex and speed since
+        * some of the adaptors (ce1000.lan) report full duplex/speed
+        * instead of N/A (duplex) / 0 (speed).
+        *
+        * on link up we are forcing a recheck of the duplex and speed since
+        * some of the adaptors (ce1000.lan) misreport them.
+        */
        if (link == BOND_LINK_UP) {
                port->is_enabled = true;
                port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
                port->actor_oper_port_key = (port->actor_admin_port_key &=
                                             ~AD_SPEED_KEY_BITS);
        }
-       //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
-       // there is no need to reselect a new aggregator, just signal the
-       // state machines to reinitialize
+       pr_debug("Port %d changed link status to %s",
+               port->actor_port_number,
+               (link == BOND_LINK_UP) ? "UP" : "DOWN");
+       /* there is no need to reselect a new aggregator, just signal the
+        * state machines to reinitialize
+        */
        port->sm_vars |= AD_PORT_BEGIN;
+
+       __release_state_machine_lock(port);
 }
 
 /*
index 398e299ee1bded33a57d7eb91318b00b24513658..4b8c58b0ec243575bbc17a547f7370b8d0b26d54 100644 (file)
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond,
 }
 
 
-static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv)
 {
        /*
         * This helper function exists to help dev_pick_tx get the correct
index b2ffad1304d221ef54e60cd1a82138a4d8344975..248baf6273fb76a2b179c50fe068aeddff51a42e 100644 (file)
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
        /* Make sure pointer to data buffer is set */
        wmb();
 
+       skb_tx_timestamp(skb);
+
        *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
 
        /* Increment index to point to the next BD */
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
        arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
-       skb_tx_timestamp(skb);
-
        return NETDEV_TX_OK;
 }
 
index a36a760ada28af64272a5132a60648c543963bed..29801750f239b247389c7040f6616f9b79d14983 100644 (file)
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
         * Mask some pcie error bits
         */
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
-       pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
-       data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
-       pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+       if (pos) {
+               pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+               data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+               pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+       }
        /* clear error status */
        pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
                        PCI_EXP_DEVSTA_NFED |
index a1f66e2c9a8694c9d83471dd63f6659d431840eb..ec6119089b82b8445cd573d961378c70b28632e5 100644 (file)
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_IDLE                  0
 #define BNX2X_FP_STATE_NAPI            (1 << 0)    /* NAPI owns this FP */
 #define BNX2X_FP_STATE_POLL            (1 << 1)    /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD      (1 << 2)    /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD      (1 << 3)    /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED                (1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD      (1 << 3)    /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD      (1 << 4)    /* poll yielded this FP */
+#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
 #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED        (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED        (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
 #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
        /* protect state */
        spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
        bool rc = true;
 
-       spin_lock(&fp->lock);
+       spin_lock_bh(&fp->lock);
        if (fp->state & BNX2X_FP_LOCKED) {
                WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
                fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
                /* we don't care if someone yielded */
                fp->state = BNX2X_FP_STATE_NAPI;
        }
-       spin_unlock(&fp->lock);
+       spin_unlock_bh(&fp->lock);
        return rc;
 }
 
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
        bool rc = false;
 
-       spin_lock(&fp->lock);
+       spin_lock_bh(&fp->lock);
        WARN_ON(fp->state &
                (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
 
        if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
                rc = true;
-       fp->state = BNX2X_FP_STATE_IDLE;
-       spin_unlock(&fp->lock);
+
+       /* state ==> idle, unless currently disabled */
+       fp->state &= BNX2X_FP_STATE_DISABLED;
+       spin_unlock_bh(&fp->lock);
        return rc;
 }
 
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 
        if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
                rc = true;
-       fp->state = BNX2X_FP_STATE_IDLE;
+
+       /* state ==> idle, unless currently disabled */
+       fp->state &= BNX2X_FP_STATE_DISABLED;
        spin_unlock_bh(&fp->lock);
        return rc;
 }
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-       WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+       WARN_ON(!(fp->state & BNX2X_FP_OWNED));
        return fp->state & BNX2X_FP_USER_PEND;
 }
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+       int rc = true;
+
+       spin_lock_bh(&fp->lock);
+       if (fp->state & BNX2X_FP_OWNED)
+               rc = false;
+       fp->state |= BNX2X_FP_STATE_DISABLED;
+       spin_unlock_bh(&fp->lock);
+
+       return rc;
+}
 #else
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
 {
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
        return false;
 }
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+       return true;
+}
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /* Use 2500 as a mini-jumbo MTU for FCoE */
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath {
         * Therefore, if they would have been defined in the same union,
         * data can get corrupted.
         */
-       struct afex_vif_list_ramrod_data func_afex_rdata;
+       union {
+               struct afex_vif_list_ramrod_data        viflist_data;
+               struct function_update_data             func_update;
+       } func_afex_rdata;
 
        /* used by dmae command executer */
        struct dmae_command             dmae[MAX_DMAE_C];
@@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
 #define MCPR_SCRATCH_BASE(bp) \
        (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
 #endif /* bnx2x.h */
index ec96130533cc54630c3f26f6253e58b5a0f5a7cf..bf811565ee245a0472cffc0ea5f70b30da053beb 100644 (file)
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;
+       u16 split_bd_len = 0;
 
        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
           txdata->txq_index, idx, tx_buf, skb);
 
-       /* unmap first bd */
        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
-       dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-                        BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
-       /* ...and the TSO split header bd since they have no mapping */
+       /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+               tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+               split_bd_len = BD_UNMAP_LEN(tx_data_bd);
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }
 
+       /* unmap first bd */
+       dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+                        BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+                        DMA_TO_DEVICE);
+
        /* now free frags */
        while (nbd > 0) {
 
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
 {
        int i;
 
-       local_bh_disable();
        for_each_rx_queue_cnic(bp, i) {
                napi_disable(&bnx2x_fp(bp, i, napi));
-               while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-                       mdelay(1);
+               while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+                       usleep_range(1000, 2000);
        }
-       local_bh_enable();
 }
 
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
        int i;
 
-       local_bh_disable();
        for_each_eth_queue(bp, i) {
                napi_disable(&bnx2x_fp(bp, i, napi));
-               while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-                       mdelay(1);
+               while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+                       usleep_range(1000, 2000);
        }
-       local_bh_enable();
 }
 
 void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
                bnx2x_napi_disable_cnic(bp);
 }
 
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
index da8fcaa74495475cb2c2c3984f3aa3cf85fc83ba..41f3ca5ad972b396498cbe5b7d7ad72ea9bdb1a7 100644 (file)
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
 int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
 
 /* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
index 20dcc02431cac441a8bf3076a7fe6db1abfb04e9..11fc79585491f484f7ffd56d7a2e8e876ebb8e79 100644 (file)
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
                bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
        } else {
+               /* Enable Auto-Detect to support 1G over CL37 as well */
+               bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+
+               /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+                * parallel-detect loop when CL73 and CL37 are enabled.
+                */
+               CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+                                 MDIO_AER_BLOCK_AER_REG, 0);
+               bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
+               bnx2x_set_aer_mmd(params, phy);
+
                bnx2x_disable_kr2(params, vars, phy);
        }
 
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                                *edc_mode = EDC_MODE_ACTIVE_DAC;
                        else
                                check_limiting_mode = 1;
-               } else if (copper_module_type &
-                       SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+               } else {
+                       *edc_mode = EDC_MODE_PASSIVE_DAC;
+                       /* Even in case PASSIVE_DAC indication is not set,
+                        * treat it as a passive DAC cable, since some cables
+                        * don't have this indication.
+                        */
+                       if (copper_module_type &
+                           SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
                                DP(NETIF_MSG_LINK,
                                   "Passive Copper cable detected\n");
-                               *edc_mode =
-                                     EDC_MODE_PASSIVE_DAC;
-               } else {
-                       DP(NETIF_MSG_LINK,
-                          "Unknown copper-cable-type 0x%x !!!\n",
-                          copper_module_type);
-                       return -EINVAL;
+                       } else {
+                               DP(NETIF_MSG_LINK,
+                                  "Unknown copper-cable-type\n");
+                       }
                }
                break;
        }
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
                           (1<<11));
 
        if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-                       (phy->speed_cap_mask &
-                       PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
-                       (phy->req_line_speed == SPEED_1000)) {
+            (phy->speed_cap_mask &
+             PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+           (phy->req_line_speed == SPEED_1000)) {
                an_1000_val |= (1<<8);
                autoneg_val |= (1<<9 | 1<<12);
                if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
                        0x09,
                        &an_1000_val);
 
-       /* Set 100 speed advertisement */
-       if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-                       (phy->speed_cap_mask &
-                       (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-                       PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
-               an_10_100_val |= (1<<7);
-               /* Enable autoneg and restart autoneg for legacy speeds */
-               autoneg_val |= (1<<9 | 1<<12);
-
-               if (phy->req_duplex == DUPLEX_FULL)
-                       an_10_100_val |= (1<<8);
-               DP(NETIF_MSG_LINK, "Advertising 100M\n");
-       }
-
-       /* Set 10 speed advertisement */
-       if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-                       (phy->speed_cap_mask &
-                       (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
-                       PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
-               an_10_100_val |= (1<<5);
-               autoneg_val |= (1<<9 | 1<<12);
-               if (phy->req_duplex == DUPLEX_FULL)
+       /* Advertise 10/100 link speed */
+       if (phy->req_line_speed == SPEED_AUTO_NEG) {
+               if (phy->speed_cap_mask &
+                   PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+                       an_10_100_val |= (1<<5);
+                       autoneg_val |= (1<<9 | 1<<12);
+                       DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+               }
+               if (phy->speed_cap_mask &
+                   PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
                        an_10_100_val |= (1<<6);
-               DP(NETIF_MSG_LINK, "Advertising 10M\n");
+                       autoneg_val |= (1<<9 | 1<<12);
+                       DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+               }
+               if (phy->speed_cap_mask &
+                   PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+                       an_10_100_val |= (1<<7);
+                       autoneg_val |= (1<<9 | 1<<12);
+                       DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+               }
+               if (phy->speed_cap_mask &
+                   PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+                       an_10_100_val |= (1<<8);
+                       autoneg_val |= (1<<9 | 1<<12);
+                       DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+               }
        }
 
        /* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
        DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
           old_status, status);
 
+       /* Do not touch the link in case physical link down */
+       if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+               return 1;
+
        /* a. Update shmem->link_status accordingly
         * b. Update link_vars->link_up
         */
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
         */
        not_kr2_device = (((base_page & 0x8000) == 0) ||
                          (((base_page & 0x8000) &&
-                           ((next_page & 0xe0) == 0x2))));
+                           ((next_page & 0xe0) == 0x20))));
 
        /* In case KR2 is already disabled, check if we need to re-enable it */
        if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
index 814d0eca9b334ea86c862bc617a46f137f04d475..8b3107b2fcc13dd619ae08290f4467c47e8e67b7 100644 (file)
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
                }
        }
 
-       /* adjust igu_sb_cnt to MF for E1x */
-       if (CHIP_IS_E1x(bp) && IS_MF(bp))
-               bp->igu_sb_cnt /= E1HVN_MAX;
+       /* adjust igu_sb_cnt to MF for E1H */
+       if (CHIP_IS_E1H(bp) && IS_MF(bp))
+               bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
 
        /* port info */
        bnx2x_get_port_hwinfo(bp);
index 3efbb35267c853d576cc3a4d3104ec4ba1a18d1d..14ffb6e56e593d6a371e1036edf02ff7c193eb08 100644 (file)
@@ -7179,6 +7179,7 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_RX1_PCI_CTRL                       0x80ca
 #define MDIO_WC_REG_RX2_PCI_CTRL                       0x80da
 #define MDIO_WC_REG_RX3_PCI_CTRL                       0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI             0x80fa
 #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G          0x8104
 #define MDIO_WC_REG_XGXS_STATUS3                       0x8129
 #define MDIO_WC_REG_PAR_DET_10G_STATUS                 0x8130
index 32c92abf50949fa1494812ab9d3734a5a2f3db1f..18438a504d573082239fe53532035d5516ff2881 100644 (file)
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
        struct bnx2x_vlan_mac_ramrod_params p;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+       unsigned long flags;
        int read_lock;
        int rc = 0;
 
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
        spin_lock_bh(&exeq->lock);
 
        list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
-               if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
-                   *vlan_mac_flags) {
+               flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+               if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+                   BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
                        rc = exeq->remove(bp, exeq->owner, exeq_pos);
                        if (rc) {
                                BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
                return read_lock;
 
        list_for_each_entry(pos, &o->head, link) {
-               if (pos->vlan_mac_flags == *vlan_mac_flags) {
+               flags = pos->vlan_mac_flags;
+               if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+                   BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
                        p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
                        memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
                        rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
        struct bnx2x_raw_obj *r = &o->raw;
 
        /* Do nothing if only driver cleanup was requested */
-       if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+       if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+               DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+                  p->ramrod_flags);
                return 0;
+       }
 
        r->set_pending(r);
 
index 658f4e33abf9281d76e083873cb963c9b47b05f1..6a53c15c85a338c8efbb45ecf3348913272e038a 100644 (file)
@@ -266,6 +266,13 @@ enum {
        BNX2X_DONT_CONSUME_CAM_CREDIT,
        BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
 };
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK        (1 << BNX2X_UC_LIST_MAC | \
+                                1 << BNX2X_ETH_MAC | \
+                                1 << BNX2X_ISCSI_ETH_MAC | \
+                                1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+       ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
 
 struct bnx2x_vlan_mac_ramrod_params {
        /* Object to run the command from */
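
The BNX2X_VLAN_MAC_CMP_FLAGS() helper added above lets filter lookups compare only the classification bits (UC-list/ETH/iSCSI/NETQ MAC) while ignoring bookkeeping flags such as the CAM-credit ones. A minimal standalone sketch of that masked comparison, with made-up flag bits rather than the driver's real enum values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative flag bits only -- not the driver's actual bit positions. */
    #define ETH_MAC_FLAG            (1UL << 0)
    #define UC_LIST_MAC_FLAG        (1UL << 1)
    #define DONT_CONSUME_CREDIT     (1UL << 4)   /* bookkeeping, not a match key */

    #define CMP_MASK        (ETH_MAC_FLAG | UC_LIST_MAC_FLAG)
    #define CMP_FLAGS(f)    ((f) & CMP_MASK)     /* drop the uninteresting bits */

    static bool rules_match(unsigned long stored, unsigned long requested)
    {
            return CMP_FLAGS(stored) == CMP_FLAGS(requested);
    }

    int main(void)
    {
            unsigned long stored    = ETH_MAC_FLAG | DONT_CONSUME_CREDIT;
            unsigned long requested = ETH_MAC_FLAG;

            /* A raw == misses the match; the masked compare finds it. */
            printf("raw=%d masked=%d\n", stored == requested,
                   rules_match(stored, requested));
            return 0;
    }
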
index 2e46c28fc6019a892f7792c6017effe3f7db7063..e7845e5be1c76fb452d8ecb94943326844ab5eaa 100644 (file)
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
                /* next state */
                vfop->state = BNX2X_VFOP_RXMODE_DONE;
 
+               /* record the accept flags in vfdb so hypervisor can modify them
+                * if necessary
+                */
+               bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+                       ramrod->rx_accept_flags;
                vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 op_err:
@@ -1224,39 +1229,43 @@ op_pending:
        return;
 }
 
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+                                 struct bnx2x_rx_mode_ramrod_params *ramrod,
+                                 struct bnx2x_virtf *vf,
+                                 unsigned long accept_flags)
+{
+       struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+       memset(ramrod, 0, sizeof(*ramrod));
+       ramrod->cid = vfq->cid;
+       ramrod->cl_id = vfq_cl_id(vf, vfq);
+       ramrod->rx_mode_obj = &bp->rx_mode_obj;
+       ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+       ramrod->rx_accept_flags = accept_flags;
+       ramrod->tx_accept_flags = accept_flags;
+       ramrod->pstate = &vf->filter_state;
+       ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+       set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+       set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+       set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+       ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+       ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
                          struct bnx2x_virtf *vf,
                          struct bnx2x_vfop_cmd *cmd,
                          int qid, unsigned long accept_flags)
 {
-       struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
        if (vfop) {
                struct bnx2x_rx_mode_ramrod_params *ramrod =
                        &vf->op_params.rx_mode;
 
-               memset(ramrod, 0, sizeof(*ramrod));
-
-               /* Prepare ramrod parameters */
-               ramrod->cid = vfq->cid;
-               ramrod->cl_id = vfq_cl_id(vf, vfq);
-               ramrod->rx_mode_obj = &bp->rx_mode_obj;
-               ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
-               ramrod->rx_accept_flags = accept_flags;
-               ramrod->tx_accept_flags = accept_flags;
-               ramrod->pstate = &vf->filter_state;
-               ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
-
-               set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
-               set_bit(RAMROD_RX, &ramrod->ramrod_flags);
-               set_bit(RAMROD_TX, &ramrod->ramrod_flags);
-
-               ramrod->rdata =
-                       bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
-               ramrod->rdata_mapping =
-                       bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+               bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
 
                bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
                                 bnx2x_vfop_rxmode, cmd->done);
@@ -3202,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
                bnx2x_iov_static_resc(bp, vf);
        }
 
-       /* prepare msix vectors in VF configuration space */
+       /* prepare msix vectors in VF configuration space - the value in the
+        * PCI configuration space should be the index of the last entry,
+        * namely one less than the actual size of the table
+        */
        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
                bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
                REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
-                      num_vf_queues);
+                      num_vf_queues - 1);
                DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
-                  vf_idx, num_vf_queues);
+                  vf_idx, num_vf_queues - 1);
        }
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 
@@ -3436,10 +3448,18 @@ out:
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
+       struct bnx2x_queue_state_params q_params = {NULL};
+       struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+       struct bnx2x_queue_update_params *update_params;
+       struct pf_vf_bulletin_content *bulletin = NULL;
+       struct bnx2x_rx_mode_ramrod_params rx_ramrod;
        struct bnx2x *bp = netdev_priv(dev);
-       int rc, q_logical_state;
+       struct bnx2x_vlan_mac_obj *vlan_obj;
+       unsigned long vlan_mac_flags = 0;
+       unsigned long ramrod_flags = 0;
        struct bnx2x_virtf *vf = NULL;
-       struct pf_vf_bulletin_content *bulletin = NULL;
+       unsigned long accept_flags;
+       int rc;
 
        /* sanity and init */
        rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3457,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
        /* update PF's copy of the VF's bulletin. No point in posting the vlan
         * to the VF since it doesn't have anything to do with it. But it is useful
         * to store it here in case the VF is not up yet and we can only
-        * configure the vlan later when it does.
+        * configure the vlan later when it does. Treat vlan id 0 as removing
+        * the Host tag.
         */
-       bulletin->valid_bitmap |= 1 << VLAN_VALID;
+       if (vlan > 0)
+               bulletin->valid_bitmap |= 1 << VLAN_VALID;
+       else
+               bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
        bulletin->vlan = vlan;
 
        /* is vf initialized and queue set up? */
-       q_logical_state =
-               bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
-       if (vf->state == VF_ENABLED &&
-           q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
-               /* configure the vlan in device on this vf's queue */
-               unsigned long ramrod_flags = 0;
-               unsigned long vlan_mac_flags = 0;
-               struct bnx2x_vlan_mac_obj *vlan_obj =
-                       &bnx2x_leading_vfq(vf, vlan_obj);
-               struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-               struct bnx2x_queue_state_params q_params = {NULL};
-               struct bnx2x_queue_update_params *update_params;
+       if (vf->state != VF_ENABLED ||
+           bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+           BNX2X_Q_LOGICAL_STATE_ACTIVE)
+               return rc;
 
-               rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-               if (rc)
-                       return rc;
-               memset(&ramrod_param, 0, sizeof(ramrod_param));
+       /* configure the vlan in device on this vf's queue */
+       vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+       rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+       if (rc)
+               return rc;
 
-               /* must lock vfpf channel to protect against vf flows */
-               bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+       /* must lock vfpf channel to protect against vf flows */
+       bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
-               /* remove existing vlans */
-               __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-               rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
-                                         &ramrod_flags);
-               if (rc) {
-                       BNX2X_ERR("failed to delete vlans\n");
-                       rc = -EINVAL;
-                       goto out;
-               }
+       /* remove existing vlans */
+       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+       rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+                                 &ramrod_flags);
+       if (rc) {
+               BNX2X_ERR("failed to delete vlans\n");
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* need to remove/add the VF's accept_any_vlan bit */
+       accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+       if (vlan)
+               clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+       else
+               set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+       bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+                             accept_flags);
+       bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+       bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+       /* configure the new vlan to device */
+       memset(&ramrod_param, 0, sizeof(ramrod_param));
+       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+       ramrod_param.vlan_mac_obj = vlan_obj;
+       ramrod_param.ramrod_flags = ramrod_flags;
+       set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+               &ramrod_param.user_req.vlan_mac_flags);
+       ramrod_param.user_req.u.vlan.vlan = vlan;
+       ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+       rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+       if (rc) {
+               BNX2X_ERR("failed to configure vlan\n");
+               rc =  -EINVAL;
+               goto out;
+       }
 
-               /* send queue update ramrod to configure default vlan and silent
-                * vlan removal
+       /* send queue update ramrod to configure default vlan and silent
+        * vlan removal
+        */
+       __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+       q_params.cmd = BNX2X_Q_CMD_UPDATE;
+       q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+       update_params = &q_params.params.update;
+       __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+                 &update_params->update_flags);
+       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+                 &update_params->update_flags);
+       if (vlan == 0) {
+               /* if vlan is 0 then we want to leave the VF traffic
+                * untagged, and leave the incoming traffic untouched
+                * (i.e. do not remove any vlan tags).
                 */
-               __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-               q_params.cmd = BNX2X_Q_CMD_UPDATE;
-               q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-               update_params = &q_params.params.update;
-               __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+               __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+                           &update_params->update_flags);
+               __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+                           &update_params->update_flags);
+       } else {
+               /* configure default vlan to vf queue and set silent
+                * vlan removal (the vf remains unaware of this vlan).
+                */
+               __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
                          &update_params->update_flags);
-               __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+               __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
                          &update_params->update_flags);
+               update_params->def_vlan = vlan;
+               update_params->silent_removal_value =
+                       vlan & VLAN_VID_MASK;
+               update_params->silent_removal_mask = VLAN_VID_MASK;
+       }
 
-               if (vlan == 0) {
-                       /* if vlan is 0 then we want to leave the VF traffic
-                        * untagged, and leave the incoming traffic untouched
-                        * (i.e. do not remove any vlan tags).
-                        */
-                       __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-                                   &update_params->update_flags);
-                       __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-                                   &update_params->update_flags);
-               } else {
-                       /* configure the new vlan to device */
-                       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-                       ramrod_param.vlan_mac_obj = vlan_obj;
-                       ramrod_param.ramrod_flags = ramrod_flags;
-                       ramrod_param.user_req.u.vlan.vlan = vlan;
-                       ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-                       rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-                       if (rc) {
-                               BNX2X_ERR("failed to configure vlan\n");
-                               rc =  -EINVAL;
-                               goto out;
-                       }
-
-                       /* configure default vlan to vf queue and set silent
-                        * vlan removal (the vf remains unaware of this vlan).
-                        */
-                       update_params = &q_params.params.update;
-                       __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-                                 &update_params->update_flags);
-                       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-                                 &update_params->update_flags);
-                       update_params->def_vlan = vlan;
-               }
+       /* Update the Queue state */
+       rc = bnx2x_queue_state_change(bp, &q_params);
+       if (rc) {
+               BNX2X_ERR("Failed to configure default VLAN\n");
+               goto out;
+       }
 
-               /* Update the Queue state */
-               rc = bnx2x_queue_state_change(bp, &q_params);
-               if (rc) {
-                       BNX2X_ERR("Failed to configure default VLAN\n");
-                       goto out;
-               }
 
-               /* clear the flag indicating that this VF needs its vlan
-                * (will only be set if the HV configured the Vlan before vf was
-                * up and we were called because the VF came up later
-                */
+       /* clear the flag indicating that this VF needs its vlan
+        * (will only be set if the HV configured the Vlan before vf was
+        * up and we were called because the VF came up later
+        */
 out:
-               vf->cfg_flags &= ~VF_CFG_VLAN;
-               bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
-       }
+       vf->cfg_flags &= ~VF_CFG_VLAN;
+       bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
        return rc;
 }
 
index 1ff6a9366629ed88fe79a079391c92e95d1e9baf..8c213fa52174f918d4f445dae554b593b54845db 100644 (file)
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
        /* VLANs object */
        struct bnx2x_vlan_mac_obj       vlan_obj;
        atomic_t vlan_count;            /* 0 means vlan-0 is set  ~ untagged */
+       unsigned long accept_flags;     /* last accept flags configured */
 
        /* Queue Slow-path State object */
        struct bnx2x_queue_sp_obj       sp_obj;
index efa8a151d78907d4b17d5f5dd07eaed9b1f2c02c..0756d7dabdd59ae0e58139e07081d9ba02ca5a10 100644 (file)
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
                return -EINVAL;
        }
 
-       BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+       DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
 
        *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
 
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
                if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
                        unsigned long accept = 0;
+                       struct pf_vf_bulletin_content *bulletin =
+                               BP_VF_BULLETIN(bp, vf->index);
 
                        /* convert VF-PF if mask to bnx2x accept flags */
                        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
                                __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
 
                        /* A packet arriving the vf's mac should be accepted
-                        * with any vlan
+                        * with any vlan, unless a vlan has already been
+                        * configured.
                         */
-                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+                       if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+                               __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
 
                        /* set rx-mode */
                        rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
                        goto response;
                }
        }
+       /* if vlan was set by hypervisor we don't allow guest to config vlan */
+       if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+               int i;
+
+               /* search for vlan filters */
+               for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+                       if (filters->filters[i].flags &
+                           VFPF_Q_FILTER_VLAN_TAG_VALID) {
+                               BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+                                         vf->abs_vfid);
+                               vf->op_rc = -EPERM;
+                               goto response;
+                       }
+               }
+       }
 
        /* verify vf_qid */
        if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
        vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
 
        /* flags handled individually for backward/forward compatibility */
+       vf_op_params->rss_flags = 0;
+       vf_op_params->ramrod_flags = 0;
+
        if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
                __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
index f3dd93b4aeaac1bc8cdff71c9113d6668e085fd2..15a66e4b1f57a19d2d1e7feb31b557e3809fe6b0 100644 (file)
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 {
        u32 base = (u32) mapping & 0xffffffff;
 
-       return (base > 0xffffdcc0) && (base + len + 8 < base);
+       return base + len + 8 < base;
 }
 
 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
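
The rewritten tg3_4g_overflow_test() above drops the 0xffffdcc0 pre-check and relies on 32-bit wraparound alone: when base + len + 8 computed in u32 arithmetic comes out smaller than base, the buffer runs past a 4 GB boundary. A small standalone sketch of that wrap test, with illustrative values rather than driver data:

    #include <stdint.h>
    #include <stdio.h>

    /* Nonzero when [base, base + len + 8) crosses a 4 GB boundary: the sum is
     * done in 32 bits, so crossing the boundary wraps it below the start.
     */
    static int crosses_4g(uint32_t base, uint32_t len)
    {
            return (uint32_t)(base + len + 8) < base;
    }

    int main(void)
    {
            printf("%d\n", crosses_4g(0xfffff000u, 0x2000));  /* 1: wraps past 4 GB  */
            printf("%d\n", crosses_4g(0x10000000u, 0x2000));  /* 0: stays well below */
            return 0;
    }
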
index 6c9308850453bf1ecd21173afa1d1cf11681c8c7..56e0415f8cdff396aca4d6462f2432278e258018 100644 (file)
@@ -228,6 +228,25 @@ struct tp_params {
 
        uint32_t dack_re;            /* DACK timer resolution */
        unsigned short tx_modq[NCHAN];  /* channel to modulation queue map */
+
+       u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
+       u32 ingress_config;             /* cached TP_INGRESS_CONFIG */
+
+       /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
+        * subset of the set of fields which may be present in the Compressed
+        * Filter Tuple portion of filters and TCP TCB connections.  The
+        * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+        * Since a variable number of fields may or may not be present, their
+        * shifted field positions within the Compressed Filter Tuple may
+        * vary, or not even be present if the field isn't selected in
+        * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
+        * places we store their offsets here, or a -1 if the field isn't
+        * present.
+        */
+       int vlan_shift;
+       int vnic_shift;
+       int port_shift;
+       int protocol_shift;
 };
 
 struct vpd_params {
@@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
               const u8 *fw_data, unsigned int fw_size,
               struct fw_hdr *card_fw, enum dev_state state, int *reset);
 int t4_prep_adapter(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
index d6b12e035a7d9f27d84d397dc2921713c7fed46f..fff02ed1295e5ef7bab408cbe19c46dbd5eedd52 100644 (file)
@@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
                stid += t->stid_base;
-               t->stids_in_use++;
+               /* IPv6 requires max of 520 bits or 16 cells in TCAM
+                * This is equivalent to 4 TIDs. With CLIP enabled it
+                * needs 2 TIDs.
+                */
+               if (family == PF_INET)
+                       t->stids_in_use++;
+               else
+                       t->stids_in_use += 4;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
@@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
        }
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
-               stid += t->stid_base;
+               stid -= t->nstids;
+               stid += t->sftid_base;
                t->stids_in_use++;
        }
        spin_unlock_bh(&t->stid_lock);
@@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
  */
 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
 {
-       stid -= t->stid_base;
+       /* Is it a server filter TID? */
+       if (t->nsftids && (stid >= t->sftid_base)) {
+               stid -= t->sftid_base;
+               stid += t->nstids;
+       } else {
+               stid -= t->stid_base;
+       }
+
        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET)
                __clear_bit(stid, t->stid_bmap);
        else
                bitmap_release_region(t->stid_bmap, stid, 2);
        t->stid_tab[stid].data = NULL;
-       t->stids_in_use--;
+       if (family == PF_INET)
+               t->stids_in_use--;
+       else
+               t->stids_in_use -= 4;
        spin_unlock_bh(&t->stid_lock);
 }
 EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
        size_t size;
        unsigned int stid_bmap_size;
        unsigned int natids = t->natids;
+       struct adapter *adap = container_of(t, struct adapter, tids);
 
        stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
        size = t->ntids * sizeof(*t->tid_tab) +
@@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
                t->afree = t->atid_tab;
        }
        bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+       /* Reserve stid 0 for T4/T5 adapters */
+       if (!t->stid_base &&
+           (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+               __set_bit(0, t->stid_bmap);
+
        return 0;
 }
 
@@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
                        t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
                        (adap->fn * 4));
-       lli.filt_mode = adap->filter_mode;
+       lli.filt_mode = adap->params.tp.vlan_pri_map;
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
                lli.tx_modq[i] = i;
@@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
        adap = netdev2adap(dev);
 
        /* Adjust stid to correct filter index */
-       stid -= adap->tids.nstids;
+       stid -= adap->tids.sftid_base;
        stid += adap->tids.nftids;
 
        /* Check to make sure the filter requested is writable ...
@@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                        f->fs.val.lip[i] = val[i];
                        f->fs.mask.lip[i] = ~0;
                }
-               if (adap->filter_mode & F_PORT) {
+               if (adap->params.tp.vlan_pri_map & F_PORT) {
                        f->fs.val.iport = port;
                        f->fs.mask.iport = mask;
                }
        }
 
+       if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+               f->fs.val.proto = IPPROTO_TCP;
+               f->fs.mask.proto = ~0;
+       }
+
        f->fs.dirsteer = 1;
        f->fs.iq = queue;
        /* Mark filter as locked */
@@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
        adap = netdev2adap(dev);
 
        /* Adjust stid to correct filter index */
-       stid -= adap->tids.nstids;
+       stid -= adap->tids.sftid_base;
        stid += adap->tids.nftids;
 
        f = &adap->tids.ftid_tab[stid];
@@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
        enum dev_state state;
        u32 params[7], val[7];
        struct fw_caps_config_cmd caps_cmd;
-       int reset = 1, j;
+       int reset = 1;
 
        /*
         * Contact FW, advertising Master capability (and potentially forcing
@@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
        /*
         * These are finalized by FW initialization, load their values now.
         */
-       v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
-       adap->params.tp.tre = TIMERRESOLUTION_GET(v);
-       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                     adap->params.b_wnd);
 
-       /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
-       for (j = 0; j < NCHAN; j++)
-               adap->params.tp.tx_modq[j] = j;
-
-       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-                        &adap->filter_mode, 1,
-                        TP_VLAN_PRI_MAP);
-
+       t4_init_tp_params(adap);
        adap->flags |= FW_OK;
        return 0;
 
index 6f21f2451c3052a24ecd2e8d8d16ee9bdf9996fe..4dd0a82533e442f8b330c5ede554359ea450a087 100644 (file)
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
 
 static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
 {
-       stid -= t->stid_base;
+       /* Is it a server filter TID? */
+       if (t->nsftids && (stid >= t->sftid_base)) {
+               stid -= t->sftid_base;
+               stid += t->nstids;
+       } else {
+               stid -= t->stid_base;
+       }
+
        return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
 }
 
index 29878098101eb07771626399a1c5050a9544d055..cb05be905defdceb0e2d0b76386dcd224169b5b0 100644 (file)
@@ -45,6 +45,7 @@
 #include "l2t.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
+#include "t4_regs.h"
 
 #define VLAN_NONE 0xfff
 
@@ -411,6 +412,40 @@ done:
 }
 EXPORT_SYMBOL(cxgb4_l2t_get);
 
+u64 cxgb4_select_ntuple(struct net_device *dev,
+                       const struct l2t_entry *l2t)
+{
+       struct adapter *adap = netdev2adap(dev);
+       struct tp_params *tp = &adap->params.tp;
+       u64 ntuple = 0;
+
+       /* Initialize each of the fields which we care about which are present
+        * in the Compressed Filter Tuple.
+        */
+       if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
+               ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+
+       if (tp->port_shift >= 0)
+               ntuple |= (u64)l2t->lport << tp->port_shift;
+
+       if (tp->protocol_shift >= 0)
+               ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
+
+       if (tp->vnic_shift >= 0) {
+               u32 viid = cxgb4_port_viid(dev);
+               u32 vf = FW_VIID_VIN_GET(viid);
+               u32 pf = FW_VIID_PFN_GET(viid);
+               u32 vld = FW_VIID_VIVLD_GET(viid);
+
+               ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
+                               V_FT_VNID_ID_PF(pf) |
+                               V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+       }
+
+       return ntuple;
+}
+EXPORT_SYMBOL(cxgb4_select_ntuple);
+
 /*
  * Called when address resolution fails for an L2T entry to handle packets
  * on the arpq head.  If a packet specifies a failure handler it is invoked,
index 108c0f1fce1c4e5776b687538d741cd6f771a909..85eb5c71358d80ad91e21df648289047165dbf39 100644 (file)
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
 struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
                                const struct net_device *physdev,
                                unsigned int priority);
-
+u64 cxgb4_select_ntuple(struct net_device *dev,
+                       const struct l2t_entry *l2t);
 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
 int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
index cc380c36e1a8687cd1c7f3b4c3dbf2d0b8c1bf47..cc3511a5cd0c0f9ea54e0d186878821f3dad8096 100644 (file)
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap)
        #undef READ_FL_BUF
 
        if (fl_small_pg != PAGE_SIZE ||
-           (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+           (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
                                  (fl_large_pg & (fl_large_pg-1)) != 0))) {
                dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
                        fl_small_pg, fl_large_pg);
index 74a6fce5a15a6914faf74bbf903dcc76ec717775..e1413eacdbd20ab2233fd3217222bb76e6b779d1 100644 (file)
@@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
        return 0;
 }
 
+/**
+ *      t4_init_tp_params - initialize adap->params.tp
+ *      @adap: the adapter
+ *
+ *      Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+       int chan;
+       u32 v;
+
+       v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+       adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+
+       /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+       for (chan = 0; chan < NCHAN; chan++)
+               adap->params.tp.tx_modq[chan] = chan;
+
+       /* Cache the adapter's Compressed Filter Mode and global Ingress
+        * Configuration.
+        */
+       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+                        &adap->params.tp.vlan_pri_map, 1,
+                        TP_VLAN_PRI_MAP);
+       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+                        &adap->params.tp.ingress_config, 1,
+                        TP_INGRESS_CONFIG);
+
+       /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+        * shift positions of several elements of the Compressed Filter Tuple
+        * for this adapter which we need frequently ...
+        */
+       adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+       adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+       adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+       adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+                                                              F_PROTOCOL);
+
+       /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+        * represents the presence of an Outer VLAN instead of a VNIC ID.
+        */
+       if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+               adap->params.tp.vnic_shift = -1;
+
+       return 0;
+}
+
+/**
+ *      t4_filter_field_shift - calculate filter field shift
+ *      @adap: the adapter
+ *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ *      Return the shift position of a filter field within the Compressed
+ *      Filter Tuple.  The filter field is specified via its selection bit
+ *      within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+       unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+       unsigned int sel;
+       int field_shift;
+
+       if ((filter_mode & filter_sel) == 0)
+               return -1;
+
+       for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+               switch (filter_mode & sel) {
+               case F_FCOE:
+                       field_shift += W_FT_FCOE;
+                       break;
+               case F_PORT:
+                       field_shift += W_FT_PORT;
+                       break;
+               case F_VNIC_ID:
+                       field_shift += W_FT_VNIC_ID;
+                       break;
+               case F_VLAN:
+                       field_shift += W_FT_VLAN;
+                       break;
+               case F_TOS:
+                       field_shift += W_FT_TOS;
+                       break;
+               case F_PROTOCOL:
+                       field_shift += W_FT_PROTOCOL;
+                       break;
+               case F_ETHERTYPE:
+                       field_shift += W_FT_ETHERTYPE;
+                       break;
+               case F_MACMATCH:
+                       field_shift += W_FT_MACMATCH;
+                       break;
+               case F_MPSHITTYPE:
+                       field_shift += W_FT_MPSHITTYPE;
+                       break;
+               case F_FRAGMENTATION:
+                       field_shift += W_FT_FRAGMENTATION;
+                       break;
+               }
+       }
+       return field_shift;
+}
+
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
 {
        u8 addr[6];
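
The t4_filter_field_shift() helper added above computes a field's offset by accumulation: it walks the TP_VLAN_PRI_MAP selection bits below the requested one and sums the widths of every field that is actually enabled, since only present fields occupy space in the Compressed Filter Tuple. A minimal standalone sketch of that accumulation, with made-up selection bits and widths standing in for the real F_*/W_FT_* values:

    #include <stdio.h>

    /* Illustrative selection bits and widths -- not the hardware's real values. */
    enum { SEL_FCOE = 1 << 0, SEL_PORT = 1 << 1, SEL_VLAN = 1 << 2, SEL_PROTO = 1 << 3 };
    static const int width[] = { 1 /* FCOE */, 3 /* PORT */, 17 /* VLAN */, 8 /* PROTO */ };

    /* Bit offset of 'field_sel' inside the packed tuple, or -1 if the filter
     * mode doesn't include that field.  Lower-order enabled fields sit below
     * it, so their widths add up to its shift.
     */
    static int field_shift(unsigned int mode, unsigned int field_sel)
    {
            unsigned int sel;
            int i, shift = 0;

            if (!(mode & field_sel))
                    return -1;

            for (sel = 1, i = 0; sel < field_sel; sel <<= 1, i++)
                    if (mode & sel)
                            shift += width[i];
            return shift;
    }

    int main(void)
    {
            unsigned int mode = SEL_PORT | SEL_VLAN | SEL_PROTO;

            printf("port  -> %d\n", field_shift(mode, SEL_PORT));   /* 0  */
            printf("vlan  -> %d\n", field_shift(mode, SEL_VLAN));   /* 3  */
            printf("proto -> %d\n", field_shift(mode, SEL_PROTO));  /* 20 */
            printf("fcoe  -> %d\n", field_shift(mode, SEL_FCOE));   /* -1 */
            return 0;
    }
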
index 0a8205d69d2c290aae4dbec30245c9dc3e18bd39..4082522d81408bf0a23cff8eaf9700bef14533a9 100644 (file)
 
 #define A_TP_TX_SCHED_PCMD 0x25
 
+#define S_VNIC    11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC    V_VNIC(1U)
+
+#define S_FRAGMENTATION    9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION    V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE    8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE    V_MPSHITTYPE(1U)
+
+#define S_MACMATCH    7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH    V_MACMATCH(1U)
+
+#define S_ETHERTYPE    6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE    V_ETHERTYPE(1U)
+
+#define S_PROTOCOL    5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL    V_PROTOCOL(1U)
+
+#define S_TOS    4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS    V_TOS(1U)
+
+#define S_VLAN    3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN    V_VLAN(1U)
+
+#define S_VNIC_ID    2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID    V_VNIC_ID(1U)
+
 #define S_PORT    1
 #define V_PORT(x) ((x) << S_PORT)
 #define F_PORT    V_PORT(1U)
 
+#define S_FCOE    0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE    V_FCOE(1U)
+
 #define NUM_MPS_CLS_SRAM_L_INSTANCES 336
 #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
 
 #define V_CHIPID(x) ((x) << S_CHIPID)
 #define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
 
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters.  Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present.  These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE                       1
+#define W_FT_PORT                       3
+#define W_FT_VNIC_ID                    17
+#define W_FT_VLAN                       17
+#define W_FT_TOS                        8
+#define W_FT_PROTOCOL                   8
+#define W_FT_ETHERTYPE                  16
+#define W_FT_MACMATCH                   9
+#define W_FT_MPSHITTYPE                 3
+#define W_FT_FRAGMENTATION              1
+
+/* Some of the Compressed Filter Tuple fields have internal structure.  These
+ * bit shifts/masks describe those structures.  All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define S_FT_VLAN_VLD                   16
+#define V_FT_VLAN_VLD(x)                ((x) << S_FT_VLAN_VLD)
+#define F_FT_VLAN_VLD                   V_FT_VLAN_VLD(1U)
+
+#define S_FT_VNID_ID_VF                 0
+#define V_FT_VNID_ID_VF(x)              ((x) << S_FT_VNID_ID_VF)
+
+#define S_FT_VNID_ID_PF                 7
+#define V_FT_VNID_ID_PF(x)              ((x) << S_FT_VNID_ID_PF)
+
+#define S_FT_VNID_ID_VLD                16
+#define V_FT_VNID_ID_VLD(x)             ((x) << S_FT_VNID_ID_VLD)
+
 #endif /* __T4_REGS_H */
index 5878df619b531ad0b3ea96167f53e8909ff9fd21..4ccaf9af6fc90cde0d9d13490fa5753e480c543e 100644 (file)
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define BE3_MAX_RSS_QS         16
 #define BE3_MAX_TX_QS          16
 #define BE3_MAX_EVT_QS         16
+#define BE3_SRIOV_MAX_EVT_QS   8
 
 #define MAX_RX_QS              32
 #define MAX_EVT_QS             32
@@ -480,7 +481,7 @@ struct be_adapter {
        struct list_head entry;
 
        u32 flash_status;
-       struct completion flash_compl;
+       struct completion et_cmd_compl;
 
        struct be_resources res;        /* resources available for the func */
        u16 num_vfs;                    /* Number of VFs provisioned by PF */
index e0e8bc1ef14c47e93336df2c784d1ff14e6b4143..94c35c8d799d9f1e4f4b9fd78121600ad600505e 100644 (file)
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
                subsystem = resp_hdr->subsystem;
        }
 
+       if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
+           subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+               complete(&adapter->et_cmd_compl);
+               return 0;
+       }
+
        if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
             (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
            (subsystem == CMD_SUBSYSTEM_COMMON)) {
                adapter->flash_status = compl_status;
-               complete(&adapter->flash_compl);
+               complete(&adapter->et_cmd_compl);
        }
 
        if (compl_status == MCC_STATUS_SUCCESS) {
@@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
                        0x3ea83c02, 0x4a110304};
        int status;
 
+       if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+               return 0;
+
        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;
 
@@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);
 
-       if (!wait_for_completion_timeout(&adapter->flash_compl,
+       if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
                                         msecs_to_jiffies(60000)))
                status = -1;
        else
@@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);
 
-       if (!wait_for_completion_timeout(&adapter->flash_compl,
-                       msecs_to_jiffies(40000)))
+       if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+                                        msecs_to_jiffies(40000)))
                status = -1;
        else
                status = adapter->flash_status;
@@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_loopback_test *req;
+       struct be_cmd_resp_loopback_test *resp;
        int status;
 
        spin_lock_bh(&adapter->mcc_lock);
@@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
-       req->hdr.timeout = cpu_to_le32(4);
 
+       req->hdr.timeout = cpu_to_le32(15);
        req->pattern = cpu_to_le64(pattern);
        req->src_port = cpu_to_le32(port_num);
        req->dest_port = cpu_to_le32(port_num);
@@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
        req->num_pkts = cpu_to_le32(num_pkts);
        req->loopback_type = cpu_to_le32(loopback_type);
 
-       status = be_mcc_notify_wait(adapter);
-       if (!status) {
-               struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
-               status = le32_to_cpu(resp->status);
-       }
+       be_mcc_notify(adapter);
+
+       spin_unlock_bh(&adapter->mcc_lock);
 
+       wait_for_completion(&adapter->et_cmd_compl);
+       resp = embedded_payload(wrb);
+       status = le32_to_cpu(resp->status);
+
+       return status;
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
index 0fde69d5cb6afd610db5f2a57325e5d2e9f5ed94..bf40fdaecfa3e89b96fb90336cb26f909642de36 100644 (file)
@@ -2744,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
                if (!BEx_chip(adapter))
                        adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
                                                RSS_ENABLE_UDP_IPV6;
+       } else {
+               /* Disable RSS, if only default RX Q is created */
+               adapter->rss_flags = RSS_ENABLE_NONE;
+       }
 
-               rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
-                                      128);
-               if (rc) {
-                       adapter->rss_flags = 0;
-                       return rc;
-               }
+       rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+                              128);
+       if (rc) {
+               adapter->rss_flags = RSS_ENABLE_NONE;
+               return rc;
        }
 
        /* First time posting */
@@ -3124,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
 {
        struct pci_dev *pdev = adapter->pdev;
        bool use_sriov = false;
+       int max_vfs;
 
-       if (BE3_chip(adapter) && sriov_want(adapter)) {
-               int max_vfs;
+       max_vfs = pci_sriov_get_totalvfs(pdev);
 
-               max_vfs = pci_sriov_get_totalvfs(pdev);
+       if (BE3_chip(adapter) && sriov_want(adapter)) {
                res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
                use_sriov = res->max_vfs;
        }
@@ -3159,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
                                           BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
        res->max_rx_qs = res->max_rss_qs + 1;
 
-       res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+       if (be_physfn(adapter))
+               res->max_evt_qs = (max_vfs > 0) ?
+                                       BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
+       else
+               res->max_evt_qs = 1;
 
        res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
        if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -4205,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);
 
-       init_completion(&adapter->flash_compl);
+       init_completion(&adapter->et_cmd_compl);
        pci_save_state(adapter->pdev);
        return 0;
 
index e7c8b749c5a53f969096e3f442195d88f82597d7..50bb71c663e20a010c9a4390c3d1c38726037c85 100644 (file)
@@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(bdp, fep);
 
+       skb_tx_timestamp(skb);
+
        fep->cur_tx = bdp;
 
        if (fep->cur_tx == fep->dirty_tx)
@@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        /* Trigger transmission start */
        writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
-       skb_tx_timestamp(skb);
-
        return NETDEV_TX_OK;
 }
 
index 895450e9bb3cfe81d22f19148b4a52d9fc14352a..ff2d806eaef71bc2a7a861a8971c228cd6856e2a 100644 (file)
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
        e1000_release_phy_80003es2lan(hw);
 
        /* Disable IBIST slave mode (far-end loopback) */
-       e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
-                                       &kum_reg_data);
+       ret_val =
+           e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+                                           &kum_reg_data);
+       if (ret_val)
+               return ret_val;
        kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
        e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
                                         kum_reg_data);
index 8d3945ab7334840684db42ea6eefa9bafc52c061..c30d41d6e4260d4830c82ccb1dd6b0bcec3bfa1d 100644 (file)
@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 static int e1000_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
 
        return __e1000_resume(pdev);
 }
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 static int e1000_runtime_suspend(struct device *dev)
index da2be59505c06226b4cd79f9e8cc120945ca1d17..20e71f4ca4261f99694b04eccd3647906326239d 100644 (file)
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
                 * it across the board.
                 */
                ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
-               if (ret_val)
+               if (ret_val) {
                        /* If the first read fails, another entity may have
                         * ownership of the resources, wait and try again to
                         * see if they have relinquished the resources yet.
                         */
-                       udelay(usec_interval);
+                       if (usec_interval >= 1000)
+                               msleep(usec_interval / 1000);
+                       else
+                               udelay(usec_interval);
+               }
                ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
                if (ret_val)
                        break;
                if (phy_status & BMSR_LSTATUS)
                        break;
                if (usec_interval >= 1000)
-                       mdelay(usec_interval / 1000);
+                       msleep(usec_interval / 1000);
                else
                        udelay(usec_interval);
        }
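/*
 * The e1000e hunk above makes the retry path after a failed MII_BMSR read
 * honour the caller's interval and, for intervals of a millisecond or more,
 * wait with msleep() (which sleeps) rather than udelay()/mdelay() (which
 * busy-wait). A userspace sketch of picking the wait primitive by interval
 * length; nanosleep() stands in for msleep() here, so this is an analogy to
 * the pattern, not the kernel API:
 */
#include <stdio.h>
#include <time.h>

static void wait_usecs(unsigned int usec_interval)
{
        if (usec_interval >= 1000) {
                /* long interval: sleep instead of spinning */
                struct timespec ts;

                ts.tv_sec  = usec_interval / 1000000;
                ts.tv_nsec = (usec_interval % 1000000) * 1000L;
                nanosleep(&ts, NULL);
        } else {
                /* sub-millisecond: a short busy poll is the usual trade-off */
                struct timespec start, now;

                clock_gettime(CLOCK_MONOTONIC, &start);
                do {
                        clock_gettime(CLOCK_MONOTONIC, &now);
                } while ((now.tv_sec - start.tv_sec) * 1000000L +
                         (now.tv_nsec - start.tv_nsec) / 1000L < usec_interval);
        }
}

int main(void)
{
        wait_usecs(100);        /* busy polls for ~100 us */
        wait_usecs(2000);       /* sleeps for ~2 ms */
        puts("done");
        return 0;
}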
index cc06854296a379a6f5d1fac21ecf9412d94d477b..5bcc870f8367f803c309f5c28115cded7284748c 100644 (file)
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
        return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
 
-#ifdef IXGBE_FCOE
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+                             void *accel_priv)
 {
+       struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
+#ifdef IXGBE_FCOE
        struct ixgbe_adapter *adapter;
        struct ixgbe_ring_feature *f;
        int txq;
+#endif
+
+       if (fwd_adapter)
+               return skb->queue_mapping + fwd_adapter->tx_base_queue;
+
+#ifdef IXGBE_FCOE
 
        /*
         * only execute the code below if protocol is FCoE
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
                txq -= f->indices;
 
        return txq + f->offset;
+#else
+       return __netdev_pick_tx(dev, skb);
+#endif
 }
 
-#endif
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                          struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring)
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
        kfree(fwd_adapter);
 }
 
-static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
-                                 struct net_device *dev,
-                                 void *priv)
-{
-       struct ixgbe_fwd_adapter *fwd_adapter = priv;
-       unsigned int queue;
-       struct ixgbe_ring *tx_ring;
-
-       queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
-       tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
-
-       return __ixgbe_xmit_frame(skb, dev, tx_ring);
-}
-
 static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbe_open,
        .ndo_stop               = ixgbe_close,
        .ndo_start_xmit         = ixgbe_xmit_frame,
-#ifdef IXGBE_FCOE
        .ndo_select_queue       = ixgbe_select_queue,
-#endif
        .ndo_set_rx_mode        = ixgbe_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgbe_set_mac,
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
        .ndo_dfwd_add_station   = ixgbe_fwd_add,
        .ndo_dfwd_del_station   = ixgbe_fwd_del,
-       .ndo_dfwd_start_xmit    = ixgbe_fwd_xmit,
 };
 
 /**
index d6f0c0d8cf11ddb395617e2749bbf25329cae42c..72084f70adbba84c5b3e4df8f890155c2643a13b 100644 (file)
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
 {
        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
        int err;
+#ifdef CONFIG_PCI_IOV
        u32 current_flags = adapter->flags;
+#endif
 
        err = ixgbe_disable_sriov(adapter);
 
index 6a6c1f76d8e04406b1c6de12820a48a7d27337fd..ec94a20d709952d1f678f69ac7778205f75e8ffe 100644 (file)
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
 }
 
 static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
+                     void *accel_priv)
 {
        /* we are currently only using the first queue */
        return 0;
index 7354960b583bc83b45a8aadd001c03c456912f81..c4eeb69a5beee6f4e4214746b9c958c614e64beb 100644 (file)
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
                        if (time_is_before_jiffies(end))
                                ++timedout;
                } else {
+                       /* wait_event_timeout does not guarantee a delay of at
+                        * least one whole jiffie, so timeout must be no less
+                        * than two.
+                        */
+                       if (timeout < 2)
+                               timeout = 2;
                        wait_event_timeout(dev->smi_busy_wait,
                                           orion_mdio_smi_is_done(dev),
                                           timeout);
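/*
 * The mvmdio hunk above clamps the wait_event_timeout() argument to at least
 * two jiffies: a one-jiffy timeout can expire almost immediately because the
 * current tick is already partly elapsed. A standalone sketch of the clamp,
 * using an assumed 100 Hz tick for the conversion (an illustration, not the
 * driver's own conversion code):
 */
#include <stdio.h>

#define ASSUMED_HZ 100

static unsigned long usecs_to_ticks_min2(unsigned long usecs)
{
        /* round the interval up to whole ticks */
        unsigned long ticks = (usecs * ASSUMED_HZ + 999999UL) / 1000000UL;

        /* one tick guarantees almost no delay, so never wait for fewer than two */
        return ticks < 2 ? 2 : ticks;
}

int main(void)
{
        printf("1 ms  -> %lu ticks\n", usecs_to_ticks_min2(1000));   /* 2 (clamped) */
        printf("50 ms -> %lu ticks\n", usecs_to_ticks_min2(50000));  /* 5 */
        return 0;
}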
index f54ebd5a1702457524bf5fc6918fbdf191a5d73f..a7fcd593b2dbb397bdf3d2a810ff42f71d0806b6 100644 (file)
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
        }
 }
 
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+                        void *accel_priv)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        u16 rings_p_up = priv->num_tx_rings_p_up;
index f3758de59c05f5adeb195661814ff4041b405c4a..d5758adceaa2f264b74d83e99f8091b7e1ead948 100644 (file)
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+                        void *accel_priv);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
index 7692dfd4f262db9045f285aadc194c1713fb724a..cc68657f05368d5642b62091bc6dbec1bc00bd1c 100644 (file)
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
        u32 seq_number;
        u8 vhdr_len = 0;
 
-       if (unlikely(ring > adapter->max_rds_rings))
+       if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;
 
        rds_ring = &recv_ctx->rds_rings[ring];
 
        index = netxen_get_lro_sts_refhandle(sts_data0);
-       if (unlikely(index > rds_ring->num_desc))
+       if (unlikely(index >= rds_ring->num_desc))
                return NULL;
 
        buffer = &rds_ring->rx_buf_arr[index];
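/*
 * The netxen hunk above tightens both bounds checks from '>' to '>=': with
 * max_rds_rings (or num_desc) entries, the valid indices run 0 .. N-1, so an
 * index equal to N must be rejected as well. A minimal userspace sketch of
 * the idiom, with hypothetical names rather than the driver's:
 */
#include <stdbool.h>
#include <stdio.h>

static bool index_in_bounds(unsigned int index, unsigned int num_entries)
{
        /* index == num_entries is already one past the last valid slot */
        return index < num_entries;
}

int main(void)
{
        unsigned int num_desc = 8;

        printf("index 7 valid: %d\n", index_in_bounds(7, num_desc));  /* 1 */
        printf("index 8 valid: %d\n", index_in_bounds(8, num_desc));  /* 0: caught by the '>=' check */
        return 0;
}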
index 631ea0ac1cd89c47bddb863c56f04c28469bd745..f2a7c7166e2408a747dd03ef0fc456ba6121486c 100644 (file)
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context {
        struct qlcnic_mailbox *mailbox;
        u8 extend_lb_time;
        u8 phys_port_id[ETH_ALEN];
+       u8 lb_mode;
 };
 
 struct qlcnic_adapter_stats {
@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring {
        dma_addr_t phys_addr;
        dma_addr_t hw_cons_phys_addr;
        struct netdev_queue *txq;
+       /* Lock to protect Tx descriptors cleanup */
+       spinlock_t tx_clean_lock;
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s {
 
 #define QLCNIC_ILB_MODE                0x1
 #define QLCNIC_ELB_MODE                0x2
+#define QLCNIC_LB_MODE_MASK    0x3
 
 #define QLCNIC_LINKEVENT       0x1
 #define QLCNIC_LB_RESPONSE     0x2
@@ -1093,7 +1097,6 @@ struct qlcnic_adapter {
        struct qlcnic_filter_hash rx_fhash;
        struct list_head vf_mc_list;
 
-       spinlock_t tx_clean_lock;
        spinlock_t mac_learn_lock;
        /* spinlock for catching rcv filters for eswitch traffic */
        spinlock_t rx_mac_learn_lock;
@@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
 void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
 void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
 void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);
 
 /* Adapter hardware abstraction */
 struct qlcnic_hardware_ops {
index 6055d397a29edf70317299e83966aba18b6ff5dc..f776f99f79155ea42d3371b7e9aadcc901421898 100644 (file)
@@ -1684,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
                }
        } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
 
-       /* Make sure carrier is off and queue is stopped during loopback */
-       if (netif_running(netdev)) {
-               netif_carrier_off(netdev);
-               netif_tx_stop_all_queues(netdev);
-       }
-
        ret = qlcnic_do_lb_test(adapter, mode);
 
        qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2121,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
        ahw->link_autoneg = MSB(MSW(data[3]));
        ahw->module_type = MSB(LSW(data[3]));
        ahw->has_link_events = 1;
+       ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
        qlcnic_advert_link_change(adapter, link_status);
 }
 
index e3be2760665cd9e0781dc967a813fd1976778c3e..6b08194aa0d4900f8e29a4c5eca433f1d1709d9c 100644 (file)
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define QLCNIC_TEST_LEN        ARRAY_SIZE(qlcnic_gstrings_test)
 
-static inline int qlcnic_82xx_statistics(void)
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
 {
-       return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
-              ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+       return ARRAY_SIZE(qlcnic_gstrings_stats) +
+              ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+              QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }
 
-static inline int qlcnic_83xx_statistics(void)
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
 {
-       return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+       return ARRAY_SIZE(qlcnic_gstrings_stats) +
+              ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
               ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
-              ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+              ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+              QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }
 
 static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
 {
-       if (qlcnic_82xx_check(adapter))
-               return qlcnic_82xx_statistics();
-       else if (qlcnic_83xx_check(adapter))
-               return qlcnic_83xx_statistics();
-       else
-               return -1;
+       int len = -1;
+
+       if (qlcnic_82xx_check(adapter)) {
+               len = qlcnic_82xx_statistics(adapter);
+               if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+                       len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+       } else if (qlcnic_83xx_check(adapter)) {
+               len = qlcnic_83xx_statistics(adapter);
+       }
+
+       return len;
 }
 
 #define        QLCNIC_TX_INTR_NOT_CONFIGURED   0X78563412
@@ -920,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev)
 
 static int qlcnic_get_sset_count(struct net_device *dev, int sset)
 {
-       int len;
 
        struct qlcnic_adapter *adapter = netdev_priv(dev);
        switch (sset) {
        case ETH_SS_TEST:
                return QLCNIC_TEST_LEN;
        case ETH_SS_STATS:
-               len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
-               if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
-                   qlcnic_83xx_check(adapter))
-                       return len;
-               return qlcnic_82xx_statistics();
+               return qlcnic_dev_statistics_len(adapter);
        default:
                return -EOPNOTSUPP;
        }
@@ -1267,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
        return data;
 }
 
-static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_host_tx_ring *tx_ring;
        int ring;
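/*
 * The qlcnic ethtool hunks above fold the base statistics and the per-Tx-ring
 * statistics into a single length helper, so get_sset_count() and the stat
 * fill path can no longer disagree about how many entries exist. A sketch of
 * the ARRAY_SIZE-plus-per-ring arithmetic, using hypothetical stat tables
 * rather than the driver's:
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *base_stats[]     = { "rx_packets", "tx_packets", "rx_errors" };
static const char *per_ring_stats[] = { "xmit_called", "xmit_finished" };

static unsigned int stats_len(unsigned int drv_tx_rings)
{
        /* fixed entries plus one block of ring stats per Tx ring */
        return ARRAY_SIZE(base_stats) +
               ARRAY_SIZE(per_ring_stats) * drv_tx_rings;
}

int main(void)
{
        printf("1 Tx ring:  %u stats\n", stats_len(1));   /* 5 */
        printf("4 Tx rings: %u stats\n", stats_len(4));   /* 11 */
        return 0;
}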
index e9c21e5d0ca95c7cc08df0c5f29cca2f7b473ba4..c4262c23ed7c77b009f6c5cebf233a54ebfd685b 100644 (file)
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
        struct qlcnic_skb_frag *buffrag;
        int i, j;
 
+       spin_lock(&tx_ring->tx_clean_lock);
+
        cmd_buf = tx_ring->cmd_buf_arr;
        for (i = 0; i < tx_ring->num_desc; i++) {
                buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
                }
                cmd_buf++;
        }
+
+       spin_unlock(&tx_ring->tx_clean_lock);
 }
 
 void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
index eda6c691d8970418ae14eb67db450a87314aa882..ad1531ae3aa8f8e3cda7f56c36bb12ec46694464 100644 (file)
@@ -689,6 +689,10 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
                adapter->ahw->linkup = 0;
                netif_carrier_off(netdev);
        } else if (!adapter->ahw->linkup && linkup) {
+               /* Do not advertise Link up if the port is in loopback mode */
+               if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+                       return;
+
                netdev_info(netdev, "NIC Link is up\n");
                adapter->ahw->linkup = 1;
                netif_carrier_on(netdev);
@@ -778,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
        struct net_device *netdev = adapter->netdev;
        struct qlcnic_skb_frag *frag;
 
-       if (!spin_trylock(&adapter->tx_clean_lock))
+       if (!spin_trylock(&tx_ring->tx_clean_lock))
                return 1;
 
        sw_consumer = tx_ring->sw_consumer;
@@ -807,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
                        break;
        }
 
+       tx_ring->sw_consumer = sw_consumer;
+
        if (count && netif_running(netdev)) {
-               tx_ring->sw_consumer = sw_consumer;
                smp_mb();
                if (netif_tx_queue_stopped(tx_ring->txq) &&
                    netif_carrier_ok(netdev)) {
@@ -834,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
         */
        hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
        done = (sw_consumer == hw_consumer);
-       spin_unlock(&adapter->tx_clean_lock);
+
+       spin_unlock(&tx_ring->tx_clean_lock);
 
        return done;
 }
index 2c8cac0c6a55a7e9a32541c4f88e7d0ddcd5a977..550791b8fbae98404f4be790520d0c4985871802 100644 (file)
@@ -1756,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
        if (qlcnic_sriov_vf_check(adapter))
                qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
        smp_mb();
-       spin_lock(&adapter->tx_clean_lock);
        netif_carrier_off(netdev);
        adapter->ahw->linkup = 0;
        netif_tx_disable(netdev);
@@ -1777,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
        for (ring = 0; ring < adapter->drv_tx_rings; ring++)
                qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
-       spin_unlock(&adapter->tx_clean_lock);
 }
 
 /* Usage: During suspend and firmware recovery module */
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
                }
                memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
                tx_ring->cmd_buf_arr = cmd_buf_arr;
+               spin_lock_init(&tx_ring->tx_clean_lock);
        }
 
        if (qlcnic_83xx_check(adapter) ||
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        rwlock_init(&adapter->ahw->crb_lock);
        mutex_init(&adapter->ahw->mem_lock);
 
-       spin_lock_init(&adapter->tx_clean_lock);
        INIT_LIST_HEAD(&adapter->mac_list);
 
        qlcnic_register_dcb(adapter);
@@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &netdev->stats;
 
+       if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               qlcnic_update_stats(adapter);
+
        stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
        stats->tx_packets = adapter->stats.xmitfinished;
        stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
index 686f460b15022b4b2b7759ad29493a64a1f84592..024f8161d2fe1cccd0e97cc2a1eccd67371b8ab4 100644 (file)
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
        num_vfs = sriov->num_vfs;
        max = num_vfs + 1;
        info->bit_offsets = 0xffff;
-       info->max_tx_ques = res->num_tx_queues / max;
        info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
        num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
 
@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
                info->max_tx_mac_filters = temp;
                info->min_tx_bw = 0;
                info->max_tx_bw = MAX_BW;
+               info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
        } else {
                id = qlcnic_sriov_func_to_index(adapter, func);
                if (id < 0)
@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
                info->max_tx_bw = vp->max_tx_bw;
                info->max_rx_ucast_mac_filters = num_vf_macs;
                info->max_tx_mac_filters = num_vf_macs;
+               info->max_tx_ques = QLCNIC_SINGLE_RING;
        }
 
        info->max_rx_ip_addr = res->num_destip / max;
index 8a7a23a84ac5c3b6a7b7d4979e8c5866097895a5..797b56a0efc4a4e7ab1ff5c9af1f0e50f29e9d17 100644 (file)
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;
 
-       if (netif_msg_hw(priv)) {
-               if (priv->dma_cap.time_stamp) {
-                       pr_debug("IEEE 1588-2002 Time Stamp supported\n");
-                       priv->adv_ts = 0;
-               }
-               if (priv->dma_cap.atime_stamp && priv->extend_desc) {
-                       pr_debug
-                           ("IEEE 1588-2008 Advanced Time Stamp supported\n");
-                       priv->adv_ts = 1;
-               }
-       }
+       priv->adv_ts = 0;
+       if (priv->dma_cap.atime_stamp && priv->extend_desc)
+               priv->adv_ts = 1;
+
+       if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
+               pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+
+       if (netif_msg_hw(priv) && priv->adv_ts)
+               pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
 
        priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
index b8b0eeed0f92bb68f34e1dac22e1f072faec77ce..7680581ebe12fe58a60de42b419467e3f2f065f7 100644 (file)
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
 
        priv->hw->ptp->config_addend(priv->ioaddr, addend);
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
        return 0;
 }
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
 
        priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
        return 0;
 }
index 5120d9ce1dd4cdbdd8608550f87392d0a5f6bbb9..5330fd298705e06181b7d1a2a1557e0cea27e85c 100644 (file)
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
                /* set speed_in input in case RMII mode is used in 100Mbps */
                if (phy->speed == 100)
                        mac_control |= BIT(15);
+               else if (phy->speed == 10)
+                       mac_control |= BIT(18); /* In Band mode */
 
                *link = true;
        } else {
@@ -2106,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev)
        while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
                for (i = res->start; i <= res->end; i++) {
                        if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
-                                            dev_name(priv->dev), priv)) {
+                                            dev_name(&pdev->dev), priv)) {
                                dev_err(priv->dev, "error attaching irq\n");
                                goto clean_ale_ret;
                        }
index 628b736e5ae776fcf00333bed8c355e4b518314e..0e9fb3301b1136e333cdd3d9bc62bcacc89dd4bb 100644 (file)
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 }
 
 /* Return subqueue id on this core (one per core). */
-static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                void *accel_priv)
 {
        return smp_processor_id();
 }
index 3169252613faae400904201fa5ad9f1a7decf1cf..5d78c1d08abd60fcc6bcee5dea5ab178a2a166da 100644 (file)
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case HDLCDRVCTL_CALIBRATE:
                if(!capable(CAP_SYS_RAWIO))
                        return -EPERM;
+               if (bi.data.calibrate > INT_MAX / s->par.bitrate)
+                       return -EINVAL;
                s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
                return 0;
 
index 1971411574db1c7ae2dd6549490ab6975e816cd1..61dd2447e1bb4eedb5d8e6b72abcf21e9f273afb 100644 (file)
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                break;
 
        case SIOCYAMGCFG:
+               memset(&yi, 0, sizeof(yi));
                yi.cfg.mask = 0xffffffff;
                yi.cfg.iobase = yp->iobase;
                yi.cfg.irq = yp->irq;
index f8135725bcf678c231cb833c3ab50487b1ec67e6..71baeb3ed905cfa9ed930ab9f7ac63c3edd206b7 100644 (file)
@@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        struct sk_buff *skb;
 
        net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
-       if (!net) {
-               netdev_err(net, "got receive callback but net device"
-                       " not initialized yet\n");
+       if (!net || net->reg_state != NETREG_REGISTERED) {
                packet->status = NVSP_STAT_FAIL;
                return 0;
        }
@@ -435,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev,
        SET_ETHTOOL_OPS(net, &ethtool_ops);
        SET_NETDEV_DEV(net, &dev->device);
 
-       ret = register_netdev(net);
-       if (ret != 0) {
-               pr_err("Unable to register netdev.\n");
-               free_netdev(net);
-               goto out;
-       }
-
        /* Notify the netvsc driver of the new device */
        device_info.ring_size = ring_size;
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-               unregister_netdev(net);
                free_netdev(net);
                hv_set_drvdata(dev, NULL);
                return ret;
@@ -456,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev,
 
        netif_carrier_on(net);
 
-out:
+       ret = register_netdev(net);
+       if (ret != 0) {
+               pr_err("Unable to register netdev.\n");
+               rndis_filter_device_remove(dev);
+               free_netdev(net);
+       }
+
        return ret;
 }
 
index acf93798dc675929394e82ffed96cd1616c3a31d..bc8faaec33f5afb0bbf7efdd88cd082122c3cc7f 100644 (file)
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 
        if (vlan->fwd_priv) {
                skb->dev = vlan->lowerdev;
-               ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+               ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
        } else {
                ret = macvlan_queue_xmit(skb, dev);
        }
@@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = {
        .cache_update   = eth_header_cache_update,
 };
 
+static struct rtnl_link_ops macvlan_link_ops;
+
 static int macvlan_open(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
@@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev)
                goto hash_add;
        }
 
-       if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+       if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
+           dev->rtnl_link_ops == &macvlan_link_ops) {
                vlan->fwd_priv =
                      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
 
@@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev)
                 */
                if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
                        vlan->fwd_priv = NULL;
-               } else {
-                       dev->features &= ~NETIF_F_LLTX;
+               } else
                        return 0;
-               }
        }
 
        err = -EBUSY;
@@ -690,8 +691,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
                                              netdev_features_t features)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
+       netdev_features_t mask;
+
+       features |= NETIF_F_ALL_FOR_ALL;
+       features &= (vlan->set_features | ~MACVLAN_FEATURES);
+       mask = features;
+
+       features = netdev_increment_features(vlan->lowerdev->features,
+                                            features,
+                                            mask);
+       features |= NETIF_F_LLTX;
 
-       return features & (vlan->set_features | ~MACVLAN_FEATURES);
+       return features;
 }
 
 static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused,
                break;
        case NETDEV_FEAT_CHANGE:
                list_for_each_entry(vlan, &port->vlans, list) {
-                       vlan->dev->features = dev->features & MACVLAN_FEATURES;
                        vlan->dev->gso_max_size = dev->gso_max_size;
-                       netdev_features_change(vlan->dev);
+                       netdev_update_features(vlan->dev);
                }
                break;
        case NETDEV_UNREGISTER:
index 36c6994436b7ce7edb9ff9a0d9725c1aa09a4407..98434b84f0415ef2bfd635ba9dc4bc2f7a9ea762 100644 (file)
@@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev)
        int err = 0;
 
        atomic_set(&phydev->irq_disable, 0);
-       if (request_irq(phydev->irq, phy_interrupt,
-                               IRQF_SHARED,
-                               "phy_interrupt",
-                               phydev) < 0) {
+       if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+                       phydev) < 0) {
                pr_warn("%s: Can't get IRQ %d (PHY)\n",
                        phydev->bus->name, phydev->irq);
                phydev->irq = PHY_POLL;
index 736050d6b4516b3de85eb2c214d0464ff690d99f..b75ae5bde6734c1c70b196c00a841d84f0f984dd 100644 (file)
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv)
 {
        /*
         * This helper function exists to help dev_pick_tx get the correct
index 7c8343a4f91823a579910cbcccc5827a2eb1d986..ecec8029c5e84c557374817429c29a505f699c0d 100644 (file)
@@ -348,7 +348,8 @@ unlock:
  * different rxq no. here. If we could not get rxhash, then we would
  * hope the rxq no. may help here.
  */
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+                           void *accel_priv)
 {
        struct tun_struct *tun = netdev_priv(dev);
        struct tun_flow_entry *e;
index 85e4a01670f06e207609299ecacd035df3e098f3..47b0f732b0b10d21a07d4567e778efcbe4a7fddb 100644 (file)
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
          module will be called cdc_mbim.
 
 config USB_NET_DM9601
-       tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
+       tristate "Davicom DM96xx based USB 10/100 ethernet devices"
        depends on USB_USBNET
        select CRC32
        help
-         This option adds support for Davicom DM9601 based USB 1.1
-         10/100 Ethernet adapters.
+         This option adds support for Davicom DM9601/DM9620/DM9621A
+         based USB 10/100 Ethernet adapters.
 
 config USB_NET_SR9700
        tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
index c6867f926cffc18a981c7682e5493ae36924d988..14aa48fa8d7e5610087aee24b5eb0e493593cd83 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices
+ * Davicom DM96xx USB 10/100Mbps ethernet devices
  *
  * Peter Korsgaard <jacmet@sunsite.dk>
  *
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->ethtool_ops = &dm9601_ethtool_ops;
        dev->net->hard_header_len += DM_TX_OVERHEAD;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-       dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
+
+       /* dm9620/21a require room for 4 byte padding, even in dm9601
+        * mode, so we need +1 to be able to receive full size
+        * ethernet frames.
+        */
+       dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
 
        dev->mii.dev = dev->net;
        dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                                       gfp_t flags)
 {
-       int len;
+       int len, pad;
 
        /* format:
           b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
           b3..n: packet data
        */
 
-       len = skb->len;
+       len = skb->len + DM_TX_OVERHEAD;
+
+       /* workaround for dm962x errata with tx fifo getting out of
+        * sync if a USB bulk transfer retry happens right after a
+        * packet with odd / maxpacket length by adding up to 3 bytes
+        * padding.
+        */
+       while ((len & 1) || !(len % dev->maxpacket))
+               len++;
 
-       if (skb_headroom(skb) < DM_TX_OVERHEAD) {
+       len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
+       pad = len - skb->len;
+
+       if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
                struct sk_buff *skb2;
 
-               skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
+               skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
                dev_kfree_skb_any(skb);
                skb = skb2;
                if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 
        __skb_push(skb, DM_TX_OVERHEAD);
 
-       /* usbnet adds padding if length is a multiple of packet size
-          if so, adjust length value in header */
-       if ((skb->len % dev->maxpacket) == 0)
-               len++;
+       if (pad) {
+               memset(skb->data + skb->len, 0, pad);
+               __skb_put(skb, pad);
+       }
 
        skb->data[0] = len;
        skb->data[1] = len >> 8;
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
 }
 
 static const struct driver_info dm9601_info = {
-       .description    = "Davicom DM9601 USB Ethernet",
+       .description    = "Davicom DM96xx USB 10/100 Ethernet",
        .flags          = FLAG_ETHER | FLAG_LINK_INTR,
        .bind           = dm9601_bind,
        .rx_fixup       = dm9601_rx_fixup,
@@ -594,6 +610,10 @@ static const struct usb_device_id products[] = {
         USB_DEVICE(0x0a46, 0x9620),    /* DM9620 USB to Fast Ethernet Adapter */
         .driver_info = (unsigned long)&dm9601_info,
         },
+       {
+        USB_DEVICE(0x0a46, 0x9621),    /* DM9621A USB to Fast Ethernet Adapter */
+        .driver_info = (unsigned long)&dm9601_info,
+       },
        {},                     // END
 };
 
@@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = {
 module_usb_driver(dm9601_driver);
 
 MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
-MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
+MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
 MODULE_LICENSE("GPL");
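/*
 * The dm9601_tx_fixup() hunk above keeps bumping the on-wire length until it
 * is both even and not an exact multiple of the bulk endpoint's maxpacket
 * size, then appends that many zero bytes of padding to dodge the dm962x
 * Tx FIFO errata. A standalone sketch of the length calculation, assuming a
 * 2-byte length header for DM_TX_OVERHEAD and a 64-byte maxpacket (both
 * assumed for illustration, not taken from this diff):
 */
#include <stdio.h>

#define TX_OVERHEAD 2   /* assumed: 2-byte length header */

static unsigned int tx_pad_bytes(unsigned int skb_len, unsigned int maxpacket)
{
        unsigned int len = skb_len + TX_OVERHEAD;

        /* grow until the total is even and not a whole multiple of maxpacket */
        while ((len & 1) || !(len % maxpacket))
                len++;

        /* bytes of zero padding appended after the packet data */
        return len - TX_OVERHEAD - skb_len;
}

int main(void)
{
        printf("pad for 60-byte frame:   %u\n", tx_pad_bytes(60, 64));    /* 0 */
        printf("pad for 62-byte frame:   %u\n", tx_pad_bytes(62, 64));    /* 2: 62 + 2 hits a 64-byte boundary */
        printf("pad for 1514-byte frame: %u\n", tx_pad_bytes(1514, 64));  /* 0 */
        return 0;
}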
index 86292e6aaf4955c4412ead6579f74e6848bcd089..1a482344b3f507e97486059d56280dfd3f23d37c 100644 (file)
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
 #define BM_REQUEST_TYPE (0xa1)
 #define B_NOTIFICATION  (0x20)
 #define W_VALUE         (0x0)
-#define W_INDEX         (0x2)
 #define W_LENGTH        (0x2)
 
 #define B_OVERRUN       (0x1<<6)
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
        struct uart_icount *icount;
        struct hso_serial_state_notification *serial_state_notification;
        struct usb_device *usb;
+       int if_num;
 
        /* Sanity checks */
        if (!serial)
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
                handle_usb_error(status, __func__, serial->parent);
                return;
        }
+
+       /* tiocmget is only supported on HSO_PORT_MODEM */
        tiocmget = serial->tiocmget;
        if (!tiocmget)
                return;
+       BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
+
        usb = serial->parent->usb;
+       if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+
+       /* wIndex should be the USB interface number of the port to which the
+        * notification applies, which should always be the Modem port.
+        */
        serial_state_notification = &tiocmget->serial_state_notification;
        if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
            serial_state_notification->bNotification != B_NOTIFICATION ||
            le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
-           le16_to_cpu(serial_state_notification->wIndex) != W_INDEX ||
+           le16_to_cpu(serial_state_notification->wIndex) != if_num ||
            le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
                dev_warn(&usb->dev,
                         "hso received invalid serial state notification\n");
index 03832d3780aa6134059dc4768bb7614d71075b82..f54637828574f6b4af868d978288d4920aa3061a 100644 (file)
@@ -117,7 +117,6 @@ enum {
 struct mcs7830_data {
        u8 multi_filter[8];
        u8 config;
-       u8 link_counter;
 };
 
 static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
 {
        u8 *buf = urb->transfer_buffer;
        bool link, link_changed;
-       struct mcs7830_data *data = mcs7830_get_data(dev);
 
        if (urb->actual_length < 16)
                return;
 
-       link = !(buf[1] & 0x20);
+       link = !(buf[1] == 0x20);
        link_changed = netif_carrier_ok(dev->net) != link;
        if (link_changed) {
-               data->link_counter++;
-               /*
-                  track link state 20 times to guard against erroneous
-                  link state changes reported sometimes by the chip
-                */
-               if (data->link_counter > 20) {
-                       data->link_counter = 0;
-                       usbnet_link_change(dev, link, 0);
-                       netdev_dbg(dev->net, "Link Status is: %d\n", link);
-               }
-       } else
-               data->link_counter = 0;
+               usbnet_link_change(dev, link, 0);
+               netdev_dbg(dev->net, "Link Status is: %d\n", link);
+       }
 }
 
 static const struct driver_info moschip_info = {
index d208f860498106013913211183baa39a37f33982..5d776447d9c33e2da56b622a2537536cf950ca0c 100644 (file)
@@ -1797,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev)
        if (err)
                return err;
 
-       if (netif_running(vi->dev))
+       if (netif_running(vi->dev)) {
+               for (i = 0; i < vi->curr_queue_pairs; i++)
+                       if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                               schedule_delayed_work(&vi->refill, 0);
+
                for (i = 0; i < vi->max_queue_pairs; i++)
                        virtnet_napi_enable(&vi->rq[i]);
+       }
 
        netif_device_attach(vi->dev);
 
-       for (i = 0; i < vi->curr_queue_pairs; i++)
-               if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
-                       schedule_delayed_work(&vi->refill, 0);
-
        mutex_lock(&vi->config_lock);
        vi->config_enable = true;
        mutex_unlock(&vi->config_lock);
index 249e01c5600c9010a19ca07c175867ec9e5fb91d..ed384fee76ac9ffa12eff0563864424f95ab2d01 100644 (file)
@@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
                /* update header length based on lower device */
                dev->hard_header_len = lowerdev->hard_header_len +
                                       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
-       }
+       } else if (use_ipv6)
+               vxlan->flags |= VXLAN_F_IPV6;
 
        if (data[IFLA_VXLAN_TOS])
                vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
index 8d78253c26cee6f459121e7e8ef5c3e0e44b8b2a..a366d6b4626f5fdd0bcf04b1d6b9c6063a70afbd 100644 (file)
@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
                                mask2 |= ATH9K_INT_CST;
                        if (isr2 & AR_ISR_S2_TSFOOR)
                                mask2 |= ATH9K_INT_TSFOOR;
+
+                       if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+                               REG_WRITE(ah, AR_ISR_S2, isr2);
+                               isr &= ~AR_ISR_BCNMISC;
+                       }
                }
 
-               isr = REG_READ(ah, AR_ISR_RAC);
+               if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+                       isr = REG_READ(ah, AR_ISR_RAC);
+
                if (isr == 0xffffffff) {
                        *masked = 0;
                        return false;
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 
                        *masked |= ATH9K_INT_TX;
 
-                       s0_s = REG_READ(ah, AR_ISR_S0_S);
+                       if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+                               s0_s = REG_READ(ah, AR_ISR_S0_S);
+                               s1_s = REG_READ(ah, AR_ISR_S1_S);
+                       } else {
+                               s0_s = REG_READ(ah, AR_ISR_S0);
+                               REG_WRITE(ah, AR_ISR_S0, s0_s);
+                               s1_s = REG_READ(ah, AR_ISR_S1);
+                               REG_WRITE(ah, AR_ISR_S1, s1_s);
+
+                               isr &= ~(AR_ISR_TXOK |
+                                        AR_ISR_TXDESC |
+                                        AR_ISR_TXERR |
+                                        AR_ISR_TXEOL);
+                       }
+
                        ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
                        ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
-                       s1_s = REG_READ(ah, AR_ISR_S1_S);
                        ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
                        ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
                }
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
                *masked |= mask2;
        }
 
-       if (AR_SREV_9100(ah))
-               return true;
-
-       if (isr & AR_ISR_GENTMR) {
+       if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
                u32 s5_s;
 
-               s5_s = REG_READ(ah, AR_ISR_S5_S);
+               if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+                       s5_s = REG_READ(ah, AR_ISR_S5_S);
+               } else {
+                       s5_s = REG_READ(ah, AR_ISR_S5);
+               }
+
                ah->intr_gen_timer_trigger =
                                MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
 
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
                if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
                    !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
                        *masked |= ATH9K_INT_TIM_TIMER;
+
+               if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+                       REG_WRITE(ah, AR_ISR_S5, s5_s);
+                       isr &= ~AR_ISR_GENTMR;
+               }
        }
 
+       if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+               REG_WRITE(ah, AR_ISR, isr);
+               REG_READ(ah, AR_ISR);
+       }
+
+       if (AR_SREV_9100(ah))
+               return true;
+
        if (sync_cause) {
                ath9k_debug_sync_cause(common, sync_cause);
                fatal_int =
index 9a2657fdd9ccd4ec62f96f8a639182e2ded29fec..608d739d13782233db4271f9248c3472601b1a5f 100644 (file)
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
        struct ath9k_vif_iter_data *iter_data = data;
        int i;
 
-       for (i = 0; i < ETH_ALEN; i++)
-               iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+       if (iter_data->hw_macaddr != NULL) {
+               for (i = 0; i < ETH_ALEN; i++)
+                       iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+       } else {
+               iter_data->hw_macaddr = mac;
+       }
 }
 
-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
                                     struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
        struct ath9k_vif_iter_data iter_data;
 
        /*
-        * Use the hardware MAC address as reference, the hardware uses it
-        * together with the BSSID mask when matching addresses.
+        * Pick the MAC address of the first interface as the new hardware
+        * MAC address. The hardware will use it together with the BSSID mask
+        * when matching addresses.
         */
-       iter_data.hw_macaddr = common->macaddr;
+       iter_data.hw_macaddr = NULL;
        memset(&iter_data.mask, 0xff, ETH_ALEN);
 
        if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
                ath9k_htc_bssid_iter, &iter_data);
 
        memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+
+       if (iter_data.hw_macaddr)
+               memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
+
        ath_hw_setbssidmask(common);
 }
 
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
                goto out;
        }
 
-       ath9k_htc_set_bssid_mask(priv, vif);
+       ath9k_htc_set_mac_bssid_mask(priv, vif);
 
        priv->vif_slot |= (1 << avp->index);
        priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
 
        ath9k_htc_set_opmode(priv);
 
-       ath9k_htc_set_bssid_mask(priv, vif);
+       ath9k_htc_set_mac_bssid_mask(priv, vif);
 
        /*
         * Stop ANI only if there are no associated station interfaces.
index 74f452c7b1667c47a65506a077042f2b0668c3a8..21aa09e0e825df1161a4fcca3753ca08e9632558 100644 (file)
@@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
        struct ath_common *common = ath9k_hw_common(ah);
 
        /*
-        * Use the hardware MAC address as reference, the hardware uses it
-        * together with the BSSID mask when matching addresses.
+        * Pick the MAC address of the first interface as the new hardware
+        * MAC address. The hardware will use it together with the BSSID mask
+        * when matching addresses.
         */
        memset(iter_data, 0, sizeof(*iter_data));
        memset(&iter_data->mask, 0xff, ETH_ALEN);
index 86605027c41d6b4187c15ba874ef8fe25056676e..e6272546395a982d6d3e6e23239eaddc1bc3bf60 100644 (file)
@@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
index c72438bb2fafd24b8e59f416d4e4311752dce941..a1b32ee9594a6b9b899caa03cea53f3d9a0ddf90 100644 (file)
@@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
           (hwsim_flags & HWSIM_TX_STAT_ACK)) {
                if (skb->len >= 16) {
                        hdr = (struct ieee80211_hdr *) skb->data;
-                       mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+                       mac80211_hwsim_monitor_ack(data2->channel,
                                                   hdr->addr2);
                }
                txi->flags |= IEEE80211_TX_STAT_ACK;
index 78e8a6666cc6edad81bd87c98dcf0353af371866..8bb8988c435cf04b0280aef69d4e9d01acc24fee 100644 (file)
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 }
 
 static u16
-mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+                               void *accel_priv)
 {
        skb->priority = cfg80211_classify8021d(skb);
        return mwifiex_1d_to_wmm_queue[skb->priority];
index 0f494444bcd1d90b457b927d704bf77abe0592ca..5a53195d016b61da57f3198ad1f616343e1ebd3d 100644 (file)
@@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
        };
        int index = rtlpci->rx_ring[rx_queue_idx].idx;
 
+       if (rtlpci->driver_is_goingto_unload)
+               return;
        /*RX NORMAL PKT */
        while (count--) {
                /*rx descriptor */
@@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
         */
        set_hal_stop(rtlhal);
 
+       rtlpci->driver_is_goingto_unload = true;
        rtlpriv->cfg->ops->disable_interrupt(hw);
        cancel_work_sync(&rtlpriv->works.lps_change_work);
 
@@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
        ppsc->rfchange_inprogress = true;
        spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
 
-       rtlpci->driver_is_goingto_unload = true;
        rtlpriv->cfg->ops->hw_disable(hw);
        /* some things are not needed if firmware not available */
        if (!rtlpriv->max_fw_size)
index 08ae01b41c832d7144bdeceb70b976adaf281d4c..c47794b9d42f9bfd71d2761a48f8429a8b4633c5 100644 (file)
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
 struct xenvif {
        /* Unique identifier for this interface. */
        domid_t          domid;
@@ -143,13 +150,13 @@ struct xenvif {
         */
        RING_IDX rx_req_cons_peek;
 
-       /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
-        * head/fragment page uses 2 copy operations because it
-        * straddles two buffers in the frontend.
-        */
-       struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
-       struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+       /* This array is allocated separately as it is large */
+       struct gnttab_copy *grant_copy_op;
 
+       /* We create one meta structure per ring request we consume, so
+        * the maximum number is the same as the ring size.
+        */
+       struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
 
        u8               fe_dev_addr[6];
 
index 870f1fa583702ee4bc61d9acab3e4925a4d984e0..fff8cddfed816d3479fac57a6f91b4d827272033 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
 
 #include <xen/events.h>
 #include <asm/xen/hypercall.h>
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        SET_NETDEV_DEV(dev, parent);
 
        vif = netdev_priv(dev);
+
+       vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+                                    MAX_GRANT_COPY_OPS);
+       if (vif->grant_copy_op == NULL) {
+               pr_warn("Could not allocate grant copy space for %s\n", name);
+               free_netdev(dev);
+               return ERR_PTR(-ENOMEM);
+       }
+
        vif->domid  = domid;
        vif->handle = handle;
        vif->can_sg = 1;
@@ -487,6 +497,7 @@ void xenvif_free(struct xenvif *vif)
 
        unregister_netdev(vif->dev);
 
+       vfree(vif->grant_copy_op);
        free_netdev(vif->dev);
 
        module_put(THIS_MODULE);
index 27bbe58dcbe7bf424fdfd54a461af847836836eb..78425554a537579b01378b99295913c1706f0b0a 100644 (file)
@@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif)
        if (!npo.copy_prod)
                return;
 
-       BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+       BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
        gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -1209,8 +1209,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct tcphdr, check)))
+                                         offsetof(struct tcphdr, check))) {
+                       err = -EPROTO;
                        goto out;
+               }
 
                if (recalculate_partial_csum)
                        tcp_hdr(skb)->check =
@@ -1227,8 +1229,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct udphdr, check)))
+                                         offsetof(struct udphdr, check))) {
+                       err = -EPROTO;
                        goto out;
+               }
 
                if (recalculate_partial_csum)
                        udp_hdr(skb)->check =
@@ -1350,8 +1354,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct tcphdr, check)))
+                                         offsetof(struct tcphdr, check))) {
+                       err = -EPROTO;
                        goto out;
+               }
 
                if (recalculate_partial_csum)
                        tcp_hdr(skb)->check =
@@ -1368,8 +1374,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct udphdr, check)))
+                                         offsetof(struct udphdr, check))) {
+                       err = -EPROTO;
                        goto out;
+               }
 
                if (recalculate_partial_csum)
                        udp_hdr(skb)->check =
index de6f8990246fefc8aaef7c0e88e0371143ae9859..c6973f101a3e4ae9e00c604cd9e1708d1bd88485 100644 (file)
@@ -20,7 +20,7 @@ config OF_SELFTEST
        depends on OF_IRQ
        help
          This option builds in test cases for the device tree infrastructure
-         that are executed one at boot time, and the results dumped to the
+         that are executed once at boot time, and the results dumped to the
          console.
 
          If unsure, say N here, but this option is safe to enable.
index 4b9317bdb81ce85209573e6c28f86492e643962b..d3dd41c840f1cd8d6784e4a61382cb3e4987ad1a 100644 (file)
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
                 (unsigned long long)cp, (unsigned long long)s,
                 (unsigned long long)da);
 
-       /*
-        * If the number of address cells is larger than 2 we assume the
-        * mapping doesn't specify a physical address. Rather, the address
-        * specifies an identifier that must match exactly.
-        */
-       if (na > 2 && memcmp(range, addr, na * 4) != 0)
-               return OF_BAD_ADDR;
-
        if (da < cp || da >= (cp + s))
                return OF_BAD_ADDR;
        return da - cp;
index 2fa024b97c4350c680e384c645dc17d0fc5dc177..758b4f8b30b7d237c92b6a541f01db535d37101b 100644 (file)
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
  */
 void __init unflatten_and_copy_device_tree(void)
 {
-       int size = __be32_to_cpu(initial_boot_params->totalsize);
-       void *dt = early_init_dt_alloc_memory_arch(size,
+       int size;
+       void *dt;
+
+       if (!initial_boot_params) {
+               pr_warn("No valid device tree found, continuing without\n");
+               return;
+       }
+
+       size = __be32_to_cpu(initial_boot_params->totalsize);
+       dt = early_init_dt_alloc_memory_arch(size,
                __alignof__(struct boot_param_header));
 
        if (dt) {
index 786b0b47fae46802503ecbecee5719887830c44c..27212402c53247819cf4edf09e576d2cc69f3136 100644 (file)
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
                if (of_get_property(ipar, "interrupt-controller", NULL) !=
                                NULL) {
                        pr_debug(" -> got it !\n");
-                       of_node_put(old);
                        return 0;
                }
 
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
                 * Successfully parsed an interrupt-map translation; copy new
                 * interrupt specifier into the out_irq structure
                 */
-               of_node_put(out_irq->np);
-               out_irq->np = of_node_get(newpar);
+               out_irq->np = newpar;
 
                match_array = imap - newaddrsize - newintsize;
                for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
        }
  fail:
        of_node_put(ipar);
-       of_node_put(out_irq->np);
        of_node_put(newpar);
 
        return -EINVAL;
index 1cf605f6767357947e9a097981caa7d7869ca2f0..e86439283a5d1911933eed185e22a50146093968 100644 (file)
@@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
 
        status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
        if (ACPI_FAILURE(status)) {
-               acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
+               if (status != AE_NOT_FOUND)
+                       acpi_handle_warn(handle,
+                               "can't evaluate _ADR (%#x)\n", status);
                return AE_OK;
        }
 
@@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
        slot->flags &= (~SLOT_ENABLED);
 }
 
+static bool acpiphp_no_hotplug(acpi_handle handle)
+{
+       struct acpi_device *adev = NULL;
+
+       acpi_bus_get_device(handle, &adev);
+       return adev && adev->flags.no_hotplug;
+}
+
+static bool slot_no_hotplug(struct acpiphp_slot *slot)
+{
+       struct acpiphp_func *func;
+
+       list_for_each_entry(func, &slot->funcs, sibling)
+               if (acpiphp_no_hotplug(func_to_handle(func)))
+                       return true;
+
+       return false;
+}
 
 /**
  * get_slot_status - get ACPI slot status
@@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
                unsigned long long sta;
 
                status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-               alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
+               alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
+                       || acpiphp_no_hotplug(handle);
        }
        if (!alive) {
                u32 v;
@@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                struct pci_dev *dev, *tmp;
 
                mutex_lock(&slot->crit_sect);
-               /* wake up all functions */
-               if (get_slot_status(slot) == ACPI_STA_ALL) {
+               if (slot_no_hotplug(slot)) {
+                       ; /* do nothing */
+               } else if (get_slot_status(slot) == ACPI_STA_ALL) {
                        /* remove stale devices if any */
                        list_for_each_entry_safe(dev, tmp, &bus->devices,
                                                 bus_list)
index 577074efbe62f93f39ac00c6f5c4963d53bd2b0d..f7ebdba14bde9bc1c7e3dd702bc6e47506067ff5 100644 (file)
@@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
 static void pci_acpi_setup(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
-       acpi_handle handle = ACPI_HANDLE(dev);
-       struct acpi_device *adev;
+       struct acpi_device *adev = ACPI_COMPANION(dev);
 
-       if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
+       if (!adev)
+               return;
+
+       pci_acpi_add_pm_notifier(adev, pci_dev);
+       if (!adev->wakeup.flags.valid)
                return;
 
        device_set_wakeup_capable(dev, true);
        acpi_pci_sleep_wake(pci_dev, false);
-
-       pci_acpi_add_pm_notifier(adev, pci_dev);
        if (adev->wakeup.flags.run_wake)
                device_set_run_wake(dev, true);
 }
 
 static void pci_acpi_cleanup(struct device *dev)
 {
-       acpi_handle handle = ACPI_HANDLE(dev);
-       struct acpi_device *adev;
+       struct acpi_device *adev = ACPI_COMPANION(dev);
+
+       if (!adev)
+               return;
 
-       if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) {
+       pci_acpi_remove_pm_notifier(adev);
+       if (adev->wakeup.flags.valid) {
                device_set_wakeup_capable(dev, false);
                device_set_run_wake(dev, false);
-               pci_acpi_remove_pm_notifier(adev);
        }
 }
 
index 5e2054afe840e9d6412a28c5250f34d1017aa7b2..85ad58c6da17233a9503650500b69579887c84bb 100644 (file)
@@ -196,6 +196,7 @@ config BATTERY_MAX17040
 config BATTERY_MAX17042
        tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
        depends on I2C
+       select REGMAP_I2C
        help
          MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
          in handheld and portable equipment. The MAX17042 is configured
index 00e6672963601754262ced9e842f31c27e07c69e..557af943b2f53d0a3ee2a280122e42e69db94c67 100644 (file)
@@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
        dev_set_drvdata(dev, psy);
        psy->dev = dev;
 
+       rc = dev_set_name(dev, "%s", psy->name);
+       if (rc)
+               goto dev_set_name_failed;
+
        INIT_WORK(&psy->changed_work, power_supply_changed_work);
 
        rc = power_supply_check_supplies(psy);
@@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
        if (rc)
                goto wakeup_init_failed;
 
-       rc = kobject_set_name(&dev->kobj, "%s", psy->name);
-       if (rc)
-               goto kobject_set_name_failed;
-
        rc = device_add(dev);
        if (rc)
                goto device_add_failed;
@@ -553,11 +553,11 @@ create_triggers_failed:
 register_cooler_failed:
        psy_unregister_thermal(psy);
 register_thermal_failed:
-wakeup_init_failed:
        device_del(dev);
-kobject_set_name_failed:
 device_add_failed:
+wakeup_init_failed:
 check_supplies_failed:
+dev_set_name_failed:
        put_device(dev);
 success:
        return rc;
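
Editor's note: the power_supply_register() hunk above moves the name assignment to dev_set_name() ahead of device_add() and reorders the error labels so that each label unwinds only the steps that actually completed, ending in a single put_device(). A minimal sketch of that unwind-in-reverse idiom, with hypothetical step_*/undo_* helpers standing in for the driver's calls:

    /* Hypothetical helpers; they only model "a step that can fail". */
    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; }
    static void undo_step_a(void) { }
    static void undo_step_b(void) { }

    static int register_thing(void)
    {
            int rc;

            rc = step_a();
            if (rc)
                    goto out;

            rc = step_b();
            if (rc)
                    goto undo_a;

            rc = step_c();
            if (rc)
                    goto undo_b;

            return 0;               /* every step succeeded */

    undo_b:
            undo_step_b();          /* step_c failed: undo step_b */
    undo_a:
            undo_step_a();          /* step_b or later failed: undo step_a */
    out:
            return rc;
    }

The key property is that jumping to a label never releases something that was not acquired, which is exactly what the relabelled dev_set_name_failed/wakeup_init_failed path restores.
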
index 3f4ca4e09a4ccc4d49fba39d64094fafe1f1fdc7..34629ea913d4eb3bd77fefee02bd301e946cf5a3 100644 (file)
@@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
                return rc;
        }
 
-       tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
+       tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
        if (IS_ERR(tp->screen)) {
                rc = PTR_ERR(tp->screen);
                raw3270_put_view(&tp->view);
index eb1f1ef5fa2eb69db729b6061b2d96c85aaf968b..94964af1428d37519acfb0b88f125f1b9c78c841 100644 (file)
@@ -118,6 +118,13 @@ config SPI_BCM63XX
        help
           Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
 
+config SPI_BCM63XX_HSSPI
+       tristate "Broadcom BCM63XX HS SPI controller driver"
+       depends on BCM63XX || COMPILE_TEST
+       help
+         This enables support for the High Speed SPI controller present on
+         newer Broadcom BCM63XX SoCs.
+
 config SPI_BITBANG
        tristate "Utilities for Bitbanging SPI masters"
        help
@@ -159,7 +166,6 @@ config SPI_DAVINCI
        tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
        depends on ARCH_DAVINCI || ARCH_KEYSTONE
        select SPI_BITBANG
-       select TI_EDMA
        help
          SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
 
@@ -370,7 +376,7 @@ config SPI_PXA2XX_PCI
 
 config SPI_RSPI
        tristate "Renesas RSPI controller"
-       depends on (SUPERH || ARCH_SHMOBILE) && SH_DMAE_BASE
+       depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
        help
          SPI driver for Renesas RSPI blocks.
 
index ab8d8644af0e9e97e66d082c472f2a83793dfbd7..95af48d2d360d694e25012c80fa0cf127222db16 100644 (file)
@@ -16,6 +16,7 @@ obj-$(CONFIG_SPI_ATH79)                       += spi-ath79.o
 obj-$(CONFIG_SPI_AU1550)               += spi-au1550.o
 obj-$(CONFIG_SPI_BCM2835)              += spi-bcm2835.o
 obj-$(CONFIG_SPI_BCM63XX)              += spi-bcm63xx.o
+obj-$(CONFIG_SPI_BCM63XX_HSSPI)                += spi-bcm63xx-hsspi.o
 obj-$(CONFIG_SPI_BFIN5XX)              += spi-bfin5xx.o
 obj-$(CONFIG_SPI_BFIN_V3)               += spi-bfin-v3.o
 obj-$(CONFIG_SPI_BFIN_SPORT)           += spi-bfin-sport.o
index 595b62cb545d9f804a6fe64bc07c20a6a892892d..5d7deaf628670b1cfad9c92b21d66c03a7c8d805 100644 (file)
@@ -220,8 +220,6 @@ static int altera_spi_probe(struct platform_device *pdev)
 
        /* setup the state for the bitbang driver */
        hw->bitbang.master = master;
-       if (!hw->bitbang.master)
-               return err;
        hw->bitbang.chipselect = altera_spi_chipsel;
        hw->bitbang.txrx_bufs = altera_spi_txrx;
 
index 821bf7ac218d965411027e3ce7efe16ed8e3e567..31534b51715aa95e29664cc80d44d71b5131d0ab 100644 (file)
@@ -243,21 +243,21 @@ static int ath79_spi_probe(struct platform_device *pdev)
                goto err_put_master;
        }
 
-       sp->base = ioremap(r->start, resource_size(r));
+       sp->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
        if (!sp->base) {
                ret = -ENXIO;
                goto err_put_master;
        }
 
-       sp->clk = clk_get(&pdev->dev, "ahb");
+       sp->clk = devm_clk_get(&pdev->dev, "ahb");
        if (IS_ERR(sp->clk)) {
                ret = PTR_ERR(sp->clk);
-               goto err_unmap;
+               goto err_put_master;
        }
 
        ret = clk_enable(sp->clk);
        if (ret)
-               goto err_clk_put;
+               goto err_put_master;
 
        rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
        if (!rate) {
@@ -280,10 +280,6 @@ err_disable:
        ath79_spi_disable(sp);
 err_clk_disable:
        clk_disable(sp->clk);
-err_clk_put:
-       clk_put(sp->clk);
-err_unmap:
-       iounmap(sp->base);
 err_put_master:
        spi_master_put(sp->bitbang.master);
 
@@ -297,8 +293,6 @@ static int ath79_spi_remove(struct platform_device *pdev)
        spi_bitbang_stop(&sp->bitbang);
        ath79_spi_disable(sp);
        clk_disable(sp->clk);
-       clk_put(sp->clk);
-       iounmap(sp->base);
        spi_master_put(sp->bitbang.master);
 
        return 0;
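
Editor's note: the two ath79 hunks above switch the driver to managed resources. devm_ioremap() and devm_clk_get() are released by the driver core when the device is unbound, which is why the err_unmap/err_clk_put labels and the explicit iounmap()/clk_put() calls in the remove path can go away. A minimal probe sketch built on the same managed helpers (foo_probe and its details are placeholders, not code from the driver):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *r;
            void __iomem *base;
            struct clk *clk;

            r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!r)
                    return -ENODEV;

            /* Unmapped automatically when the driver is detached. */
            base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
            if (!base)
                    return -ENOMEM;

            /* Clock reference dropped automatically on detach as well. */
            clk = devm_clk_get(&pdev->dev, "ahb");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* clk_enable() still needs a matching clk_disable() in remove(). */
            return clk_enable(clk);
    }

Only resources with a devm_* form are covered; anything enabled manually (like the clock here) still needs explicit teardown, which is why ath79_spi_remove() keeps its clk_disable() call.
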
index 273db0beb2b88ffeb588307ce08b2cb40120c790..b0842f75101647616ada3a3db4f545a57b519be0 100644 (file)
  */
 #define DMA_MIN_BYTES  16
 
+#define SPI_DMA_TIMEOUT                (msecs_to_jiffies(1000))
+
 struct atmel_spi_dma {
        struct dma_chan                 *chan_rx;
        struct dma_chan                 *chan_tx;
@@ -220,17 +222,13 @@ struct atmel_spi {
        int                     irq;
        struct clk              *clk;
        struct platform_device  *pdev;
-       struct spi_device       *stay;
 
-       u8                      stopping;
-       struct list_head        queue;
-       struct tasklet_struct   tasklet;
        struct spi_transfer     *current_transfer;
        unsigned long           current_remaining_bytes;
-       struct spi_transfer     *next_transfer;
-       unsigned long           next_remaining_bytes;
        int                     done_status;
 
+       struct completion       xfer_completion;
+
        /* scratch buffer */
        void                    *buffer;
        dma_addr_t              buffer_dma;
@@ -241,6 +239,9 @@ struct atmel_spi {
        bool                    use_pdc;
        /* dmaengine data */
        struct atmel_spi_dma    dma;
+
+       bool                    keep_cs;
+       bool                    cs_active;
 };
 
 /* Controller-specific per-slave state */
@@ -376,17 +377,6 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as,
        return as->use_dma && xfer->len >= DMA_MIN_BYTES;
 }
 
-static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
-                                       struct spi_transfer *xfer)
-{
-       return msg->transfers.prev == &xfer->transfer_list;
-}
-
-static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
-{
-       return xfer->delay_usecs == 0 && !xfer->cs_change;
-}
-
 static int atmel_spi_dma_slave_config(struct atmel_spi *as,
                                struct dma_slave_config *slave_config,
                                u8 bits_per_word)
@@ -513,23 +503,20 @@ static void dma_callback(void *data)
        struct spi_master       *master = data;
        struct atmel_spi        *as = spi_master_get_devdata(master);
 
-       /* trigger SPI tasklet */
-       tasklet_schedule(&as->tasklet);
+       complete(&as->xfer_completion);
 }
 
 /*
  * Next transfer using PIO.
- * lock is held, spi tasklet is blocked
  */
 static void atmel_spi_next_xfer_pio(struct spi_master *master,
                                struct spi_transfer *xfer)
 {
        struct atmel_spi        *as = spi_master_get_devdata(master);
+       unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
 
        dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
 
-       as->current_remaining_bytes = xfer->len;
-
        /* Make sure data is not remaining in RDR */
        spi_readl(as, RDR);
        while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
@@ -537,13 +524,14 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
                cpu_relax();
        }
 
-       if (xfer->tx_buf)
+       if (xfer->tx_buf) {
                if (xfer->bits_per_word > 8)
-                       spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
+                       spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
                else
-                       spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
-       else
+                       spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
+       } else {
                spi_writel(as, TDR, 0);
+       }
 
        dev_dbg(master->dev.parent,
                "  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
@@ -556,7 +544,6 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
 
 /*
  * Submit next transfer for DMA.
- * lock is held, spi tasklet is blocked
  */
 static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
                                struct spi_transfer *xfer,
@@ -694,74 +681,90 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
        *plen = len;
 }
 
+static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
+                                   struct spi_device *spi,
+                                   struct spi_transfer *xfer)
+{
+       u32                     scbr, csr;
+       unsigned long           bus_hz;
+
+       /* v1 chips start out at half the peripheral bus speed. */
+       bus_hz = clk_get_rate(as->clk);
+       if (!atmel_spi_is_v2(as))
+               bus_hz /= 2;
+
+       /*
+        * Calculate the lowest divider that satisfies the
+        * constraint, assuming div32/fdiv/mbz == 0.
+        */
+       if (xfer->speed_hz)
+               scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
+       else
+               /*
+                * This can happen if max_speed is null.
+                * In this case, we set the lowest possible speed
+                */
+               scbr = 0xff;
+
+       /*
+        * If the resulting divider doesn't fit into the
+        * register bitfield, we can't satisfy the constraint.
+        */
+       if (scbr >= (1 << SPI_SCBR_SIZE)) {
+               dev_err(&spi->dev,
+                       "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
+                       xfer->speed_hz, scbr, bus_hz/255);
+               return -EINVAL;
+       }
+       if (scbr == 0) {
+               dev_err(&spi->dev,
+                       "setup: %d Hz too high, scbr %u; max %ld Hz\n",
+                       xfer->speed_hz, scbr, bus_hz);
+               return -EINVAL;
+       }
+       csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
+       csr = SPI_BFINS(SCBR, scbr, csr);
+       spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
+
+       return 0;
+}
+
 /*
  * Submit next transfer for PDC.
  * lock is held, spi irq is blocked
  */
 static void atmel_spi_pdc_next_xfer(struct spi_master *master,
-                               struct spi_message *msg)
+                                       struct spi_message *msg,
+                                       struct spi_transfer *xfer)
 {
        struct atmel_spi        *as = spi_master_get_devdata(master);
-       struct spi_transfer     *xfer;
-       u32                     len, remaining;
-       u32                     ieval;
+       u32                     len;
        dma_addr_t              tx_dma, rx_dma;
 
-       if (!as->current_transfer)
-               xfer = list_entry(msg->transfers.next,
-                               struct spi_transfer, transfer_list);
-       else if (!as->next_transfer)
-               xfer = list_entry(as->current_transfer->transfer_list.next,
-                               struct spi_transfer, transfer_list);
-       else
-               xfer = NULL;
-
-       if (xfer) {
-               spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
-
-               len = xfer->len;
-               atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
-               remaining = xfer->len - len;
-
-               spi_writel(as, RPR, rx_dma);
-               spi_writel(as, TPR, tx_dma);
-
-               if (msg->spi->bits_per_word > 8)
-                       len >>= 1;
-               spi_writel(as, RCR, len);
-               spi_writel(as, TCR, len);
-
-               dev_dbg(&msg->spi->dev,
-                       "  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
-                       xfer, xfer->len, xfer->tx_buf,
-                       (unsigned long long)xfer->tx_dma, xfer->rx_buf,
-                       (unsigned long long)xfer->rx_dma);
-       } else {
-               xfer = as->next_transfer;
-               remaining = as->next_remaining_bytes;
-       }
+       spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
 
-       as->current_transfer = xfer;
-       as->current_remaining_bytes = remaining;
+       len = as->current_remaining_bytes;
+       atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+       as->current_remaining_bytes -= len;
 
-       if (remaining > 0)
-               len = remaining;
-       else if (!atmel_spi_xfer_is_last(msg, xfer)
-                       && atmel_spi_xfer_can_be_chained(xfer)) {
-               xfer = list_entry(xfer->transfer_list.next,
-                               struct spi_transfer, transfer_list);
-               len = xfer->len;
-       } else
-               xfer = NULL;
+       spi_writel(as, RPR, rx_dma);
+       spi_writel(as, TPR, tx_dma);
 
-       as->next_transfer = xfer;
+       if (msg->spi->bits_per_word > 8)
+               len >>= 1;
+       spi_writel(as, RCR, len);
+       spi_writel(as, TCR, len);
 
-       if (xfer) {
-               u32     total;
+       dev_dbg(&msg->spi->dev,
+               "  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+               xfer, xfer->len, xfer->tx_buf,
+               (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+               (unsigned long long)xfer->rx_dma);
 
-               total = len;
+       if (as->current_remaining_bytes) {
+               len = as->current_remaining_bytes;
                atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
-               as->next_remaining_bytes = total - len;
+               as->current_remaining_bytes -= len;
 
                spi_writel(as, RNPR, rx_dma);
                spi_writel(as, TNPR, tx_dma);
@@ -776,11 +779,6 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
                        xfer, xfer->len, xfer->tx_buf,
                        (unsigned long long)xfer->tx_dma, xfer->rx_buf,
                        (unsigned long long)xfer->rx_dma);
-               ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
-       } else {
-               spi_writel(as, RNCR, 0);
-               spi_writel(as, TNCR, 0);
-               ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
        }
 
        /* REVISIT: We're waiting for ENDRX before we start the next
@@ -793,82 +791,10 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
         *
         * It should be doable, though. Just not now...
         */
-       spi_writel(as, IER, ieval);
+       spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
        spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
 }
 
-/*
- * Choose way to submit next transfer and start it.
- * lock is held, spi tasklet is blocked
- */
-static void atmel_spi_dma_next_xfer(struct spi_master *master,
-                               struct spi_message *msg)
-{
-       struct atmel_spi        *as = spi_master_get_devdata(master);
-       struct spi_transfer     *xfer;
-       u32     remaining, len;
-
-       remaining = as->current_remaining_bytes;
-       if (remaining) {
-               xfer = as->current_transfer;
-               len = remaining;
-       } else {
-               if (!as->current_transfer)
-                       xfer = list_entry(msg->transfers.next,
-                               struct spi_transfer, transfer_list);
-               else
-                       xfer = list_entry(
-                               as->current_transfer->transfer_list.next,
-                                       struct spi_transfer, transfer_list);
-
-               as->current_transfer = xfer;
-               len = xfer->len;
-       }
-
-       if (atmel_spi_use_dma(as, xfer)) {
-               u32 total = len;
-               if (!atmel_spi_next_xfer_dma_submit(master, xfer, &len)) {
-                       as->current_remaining_bytes = total - len;
-                       return;
-               } else {
-                       dev_err(&msg->spi->dev, "unable to use DMA, fallback to PIO\n");
-               }
-       }
-
-       /* use PIO if error appened using DMA */
-       atmel_spi_next_xfer_pio(master, xfer);
-}
-
-static void atmel_spi_next_message(struct spi_master *master)
-{
-       struct atmel_spi        *as = spi_master_get_devdata(master);
-       struct spi_message      *msg;
-       struct spi_device       *spi;
-
-       BUG_ON(as->current_transfer);
-
-       msg = list_entry(as->queue.next, struct spi_message, queue);
-       spi = msg->spi;
-
-       dev_dbg(master->dev.parent, "start message %p for %s\n",
-                       msg, dev_name(&spi->dev));
-
-       /* select chip if it's not still active */
-       if (as->stay) {
-               if (as->stay != spi) {
-                       cs_deactivate(as, as->stay);
-                       cs_activate(as, spi);
-               }
-               as->stay = NULL;
-       } else
-               cs_activate(as, spi);
-
-       if (as->use_pdc)
-               atmel_spi_pdc_next_xfer(master, msg);
-       else
-               atmel_spi_dma_next_xfer(master, msg);
-}
-
 /*
  * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
  *  - The buffer is either valid for CPU access, else NULL
@@ -924,41 +850,7 @@ static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
        spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
 }
 
-static void
-atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
-               struct spi_message *msg, int stay)
-{
-       if (!stay || as->done_status < 0)
-               cs_deactivate(as, msg->spi);
-       else
-               as->stay = msg->spi;
-
-       list_del(&msg->queue);
-       msg->status = as->done_status;
-
-       dev_dbg(master->dev.parent,
-               "xfer complete: %u bytes transferred\n",
-               msg->actual_length);
-
-       atmel_spi_unlock(as);
-       msg->complete(msg->context);
-       atmel_spi_lock(as);
-
-       as->current_transfer = NULL;
-       as->next_transfer = NULL;
-       as->done_status = 0;
-
-       /* continue if needed */
-       if (list_empty(&as->queue) || as->stopping) {
-               if (as->use_pdc)
-                       atmel_spi_disable_pdc_transfer(as);
-       } else {
-               atmel_spi_next_message(master);
-       }
-}
-
 /* Called from IRQ
- * lock is held
  *
  * Must update "current_remaining_bytes" to keep track of data
  * to transfer.
@@ -966,9 +858,7 @@ atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
 static void
 atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
 {
-       u8              *txp;
        u8              *rxp;
-       u16             *txp16;
        u16             *rxp16;
        unsigned long   xfer_pos = xfer->len - as->current_remaining_bytes;
 
@@ -990,96 +880,12 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
        } else {
                as->current_remaining_bytes--;
        }
-
-       if (as->current_remaining_bytes) {
-               if (xfer->tx_buf) {
-                       if (xfer->bits_per_word > 8) {
-                               txp16 = (u16 *)(((u8 *)xfer->tx_buf)
-                                                       + xfer_pos + 2);
-                               spi_writel(as, TDR, *txp16);
-                       } else {
-                               txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
-                               spi_writel(as, TDR, *txp);
-                       }
-               } else {
-                       spi_writel(as, TDR, 0);
-               }
-       }
-}
-
-/* Tasklet
- * Called from DMA callback + pio transfer and overrun IRQ.
- */
-static void atmel_spi_tasklet_func(unsigned long data)
-{
-       struct spi_master       *master = (struct spi_master *)data;
-       struct atmel_spi        *as = spi_master_get_devdata(master);
-       struct spi_message      *msg;
-       struct spi_transfer     *xfer;
-
-       dev_vdbg(master->dev.parent, "atmel_spi_tasklet_func\n");
-
-       atmel_spi_lock(as);
-
-       xfer = as->current_transfer;
-
-       if (xfer == NULL)
-               /* already been there */
-               goto tasklet_out;
-
-       msg = list_entry(as->queue.next, struct spi_message, queue);
-
-       if (as->current_remaining_bytes == 0) {
-               if (as->done_status < 0) {
-                       /* error happened (overrun) */
-                       if (atmel_spi_use_dma(as, xfer))
-                               atmel_spi_stop_dma(as);
-               } else {
-                       /* only update length if no error */
-                       msg->actual_length += xfer->len;
-               }
-
-               if (atmel_spi_use_dma(as, xfer))
-                       if (!msg->is_dma_mapped)
-                               atmel_spi_dma_unmap_xfer(master, xfer);
-
-               if (xfer->delay_usecs)
-                       udelay(xfer->delay_usecs);
-
-               if (atmel_spi_xfer_is_last(msg, xfer) || as->done_status < 0) {
-                       /* report completed (or erroneous) message */
-                       atmel_spi_msg_done(master, as, msg, xfer->cs_change);
-               } else {
-                       if (xfer->cs_change) {
-                               cs_deactivate(as, msg->spi);
-                               udelay(1);
-                               cs_activate(as, msg->spi);
-                       }
-
-                       /*
-                        * Not done yet. Submit the next transfer.
-                        *
-                        * FIXME handle protocol options for xfer
-                        */
-                       atmel_spi_dma_next_xfer(master, msg);
-               }
-       } else {
-               /*
-                * Keep going, we still have data to send in
-                * the current transfer.
-                */
-               atmel_spi_dma_next_xfer(master, msg);
-       }
-
-tasklet_out:
-       atmel_spi_unlock(as);
 }
 
 /* Interrupt
  *
  * No need for locking in this Interrupt handler: done_status is the
- * only information modified. What we need is the update of this field
- * before tasklet runs. This is ensured by using barrier.
+ * only information modified.
  */
 static irqreturn_t
 atmel_spi_pio_interrupt(int irq, void *dev_id)
@@ -1107,8 +913,6 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
                 *
                 * We will also not process any remaining transfers in
                 * the message.
-                *
-                * All actions are done in tasklet with done_status indication
                 */
                as->done_status = -EIO;
                smp_wmb();
@@ -1116,7 +920,7 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
                /* Clear any overrun happening while cleaning up */
                spi_readl(as, SR);
 
-               tasklet_schedule(&as->tasklet);
+               complete(&as->xfer_completion);
 
        } else if (pending & SPI_BIT(RDRF)) {
                atmel_spi_lock(as);
@@ -1125,11 +929,10 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
                        ret = IRQ_HANDLED;
                        xfer = as->current_transfer;
                        atmel_spi_pump_pio_data(as, xfer);
-                       if (!as->current_remaining_bytes) {
-                               /* no more data to xfer, kick tasklet */
+                       if (!as->current_remaining_bytes)
                                spi_writel(as, IDR, pending);
-                               tasklet_schedule(&as->tasklet);
-                       }
+
+                       complete(&as->xfer_completion);
                }
 
                atmel_spi_unlock(as);
@@ -1147,116 +950,35 @@ atmel_spi_pdc_interrupt(int irq, void *dev_id)
 {
        struct spi_master       *master = dev_id;
        struct atmel_spi        *as = spi_master_get_devdata(master);
-       struct spi_message      *msg;
-       struct spi_transfer     *xfer;
        u32                     status, pending, imr;
        int                     ret = IRQ_NONE;
 
-       atmel_spi_lock(as);
-
-       xfer = as->current_transfer;
-       msg = list_entry(as->queue.next, struct spi_message, queue);
-
        imr = spi_readl(as, IMR);
        status = spi_readl(as, SR);
        pending = status & imr;
 
        if (pending & SPI_BIT(OVRES)) {
-               int timeout;
 
                ret = IRQ_HANDLED;
 
                spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
                                     | SPI_BIT(OVRES)));
 
-               /*
-                * When we get an overrun, we disregard the current
-                * transfer. Data will not be copied back from any
-                * bounce buffer and msg->actual_len will not be
-                * updated with the last xfer.
-                *
-                * We will also not process any remaning transfers in
-                * the message.
-                *
-                * First, stop the transfer and unmap the DMA buffers.
-                */
-               spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
-               if (!msg->is_dma_mapped)
-                       atmel_spi_dma_unmap_xfer(master, xfer);
-
-               /* REVISIT: udelay in irq is unfriendly */
-               if (xfer->delay_usecs)
-                       udelay(xfer->delay_usecs);
-
-               dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
-                        spi_readl(as, TCR), spi_readl(as, RCR));
-
-               /*
-                * Clean up DMA registers and make sure the data
-                * registers are empty.
-                */
-               spi_writel(as, RNCR, 0);
-               spi_writel(as, TNCR, 0);
-               spi_writel(as, RCR, 0);
-               spi_writel(as, TCR, 0);
-               for (timeout = 1000; timeout; timeout--)
-                       if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
-                               break;
-               if (!timeout)
-                       dev_warn(master->dev.parent,
-                                "timeout waiting for TXEMPTY");
-               while (spi_readl(as, SR) & SPI_BIT(RDRF))
-                       spi_readl(as, RDR);
-
                /* Clear any overrun happening while cleaning up */
                spi_readl(as, SR);
 
                as->done_status = -EIO;
-               atmel_spi_msg_done(master, as, msg, 0);
+
+               complete(&as->xfer_completion);
+
        } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
                ret = IRQ_HANDLED;
 
                spi_writel(as, IDR, pending);
 
-               if (as->current_remaining_bytes == 0) {
-                       msg->actual_length += xfer->len;
-
-                       if (!msg->is_dma_mapped)
-                               atmel_spi_dma_unmap_xfer(master, xfer);
-
-                       /* REVISIT: udelay in irq is unfriendly */
-                       if (xfer->delay_usecs)
-                               udelay(xfer->delay_usecs);
-
-                       if (atmel_spi_xfer_is_last(msg, xfer)) {
-                               /* report completed message */
-                               atmel_spi_msg_done(master, as, msg,
-                                               xfer->cs_change);
-                       } else {
-                               if (xfer->cs_change) {
-                                       cs_deactivate(as, msg->spi);
-                                       udelay(1);
-                                       cs_activate(as, msg->spi);
-                               }
-
-                               /*
-                                * Not done yet. Submit the next transfer.
-                                *
-                                * FIXME handle protocol options for xfer
-                                */
-                               atmel_spi_pdc_next_xfer(master, msg);
-                       }
-               } else {
-                       /*
-                        * Keep going, we still have data to send in
-                        * the current transfer.
-                        */
-                       atmel_spi_pdc_next_xfer(master, msg);
-               }
+               complete(&as->xfer_completion);
        }
 
-       atmel_spi_unlock(as);
-
        return ret;
 }
 
@@ -1264,17 +986,13 @@ static int atmel_spi_setup(struct spi_device *spi)
 {
        struct atmel_spi        *as;
        struct atmel_spi_device *asd;
-       u32                     scbr, csr;
+       u32                     csr;
        unsigned int            bits = spi->bits_per_word;
-       unsigned long           bus_hz;
        unsigned int            npcs_pin;
        int                     ret;
 
        as = spi_master_get_devdata(spi->master);
 
-       if (as->stopping)
-               return -ESHUTDOWN;
-
        if (spi->chip_select > spi->master->num_chipselect) {
                dev_dbg(&spi->dev,
                                "setup: invalid chipselect %u (%u defined)\n",
@@ -1290,33 +1008,7 @@ static int atmel_spi_setup(struct spi_device *spi)
                return -EINVAL;
        }
 
-       /* v1 chips start out at half the peripheral bus speed. */
-       bus_hz = clk_get_rate(as->clk);
-       if (!atmel_spi_is_v2(as))
-               bus_hz /= 2;
-
-       if (spi->max_speed_hz) {
-               /*
-                * Calculate the lowest divider that satisfies the
-                * constraint, assuming div32/fdiv/mbz == 0.
-                */
-               scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);
-
-               /*
-                * If the resulting divider doesn't fit into the
-                * register bitfield, we can't satisfy the constraint.
-                */
-               if (scbr >= (1 << SPI_SCBR_SIZE)) {
-                       dev_dbg(&spi->dev,
-                               "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
-                               spi->max_speed_hz, scbr, bus_hz/255);
-                       return -EINVAL;
-               }
-       } else
-               /* speed zero means "as slow as possible" */
-               scbr = 0xff;
-
-       csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
+       csr = SPI_BF(BITS, bits - 8);
        if (spi->mode & SPI_CPOL)
                csr |= SPI_BIT(CPOL);
        if (!(spi->mode & SPI_CPHA))
@@ -1352,19 +1044,13 @@ static int atmel_spi_setup(struct spi_device *spi)
                asd->npcs_pin = npcs_pin;
                spi->controller_state = asd;
                gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
-       } else {
-               atmel_spi_lock(as);
-               if (as->stay == spi)
-                       as->stay = NULL;
-               cs_deactivate(as, spi);
-               atmel_spi_unlock(as);
        }
 
        asd->csr = csr;
 
        dev_dbg(&spi->dev,
-               "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
-               bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);
+               "setup: bpw %u mode 0x%x -> csr%d %08x\n",
+               bits, spi->mode, spi->chip_select, csr);
 
        if (!atmel_spi_is_v2(as))
                spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
@@ -1372,103 +1058,218 @@ static int atmel_spi_setup(struct spi_device *spi)
        return 0;
 }
 
-static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+static int atmel_spi_one_transfer(struct spi_master *master,
+                                       struct spi_message *msg,
+                                       struct spi_transfer *xfer)
 {
        struct atmel_spi        *as;
-       struct spi_transfer     *xfer;
-       struct device           *controller = spi->master->dev.parent;
+       struct spi_device       *spi = msg->spi;
        u8                      bits;
+       u32                     len;
        struct atmel_spi_device *asd;
+       int                     timeout;
+       int                     ret;
 
-       as = spi_master_get_devdata(spi->master);
-
-       dev_dbg(controller, "new message %p submitted for %s\n",
-                       msg, dev_name(&spi->dev));
+       as = spi_master_get_devdata(master);
 
-       if (unlikely(list_empty(&msg->transfers)))
+       if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
+               dev_dbg(&spi->dev, "missing rx or tx buf\n");
                return -EINVAL;
+       }
 
-       if (as->stopping)
-               return -ESHUTDOWN;
+       if (xfer->bits_per_word) {
+               asd = spi->controller_state;
+               bits = (asd->csr >> 4) & 0xf;
+               if (bits != xfer->bits_per_word - 8) {
+                       dev_dbg(&spi->dev,
+                       "you can't yet change bits_per_word in transfers\n");
+                       return -ENOPROTOOPT;
+               }
+       }
 
-       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-               if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
-                       dev_dbg(&spi->dev, "missing rx or tx buf\n");
+       if (xfer->bits_per_word > 8) {
+               if (xfer->len % 2) {
+                       dev_dbg(&spi->dev,
+                       "buffer len should be 16 bits aligned\n");
                        return -EINVAL;
                }
+       }
 
-               if (xfer->bits_per_word) {
-                       asd = spi->controller_state;
-                       bits = (asd->csr >> 4) & 0xf;
-                       if (bits != xfer->bits_per_word - 8) {
-                               dev_dbg(&spi->dev,
-                                       "you can't yet change bits_per_word in transfers\n");
-                               return -ENOPROTOOPT;
+       /*
+        * DMA map early, for performance (empties dcache ASAP) and
+        * better fault reporting.
+        */
+       if ((!msg->is_dma_mapped)
+               && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
+               if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+                       return -ENOMEM;
+       }
+
+       atmel_spi_set_xfer_speed(as, msg->spi, xfer);
+
+       as->done_status = 0;
+       as->current_transfer = xfer;
+       as->current_remaining_bytes = xfer->len;
+       while (as->current_remaining_bytes) {
+               reinit_completion(&as->xfer_completion);
+
+               if (as->use_pdc) {
+                       atmel_spi_pdc_next_xfer(master, msg, xfer);
+               } else if (atmel_spi_use_dma(as, xfer)) {
+                       len = as->current_remaining_bytes;
+                       ret = atmel_spi_next_xfer_dma_submit(master,
+                                                               xfer, &len);
+                       if (ret) {
+                               dev_err(&spi->dev,
+                                       "unable to use DMA, fallback to PIO\n");
+                               atmel_spi_next_xfer_pio(master, xfer);
+                       } else {
+                               as->current_remaining_bytes -= len;
                        }
+               } else {
+                       atmel_spi_next_xfer_pio(master, xfer);
                }
 
-               if (xfer->bits_per_word > 8) {
-                       if (xfer->len % 2) {
-                               dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n");
-                               return -EINVAL;
-                       }
+               ret = wait_for_completion_timeout(&as->xfer_completion,
+                                                       SPI_DMA_TIMEOUT);
+               if (WARN_ON(ret == 0)) {
+                       dev_err(&spi->dev,
+                               "spi transfer timeout, err %d\n", ret);
+                       as->done_status = -EIO;
+               } else {
+                       ret = 0;
                }
 
-               /* FIXME implement these protocol options!! */
-               if (xfer->speed_hz < spi->max_speed_hz) {
-                       dev_dbg(&spi->dev, "can't change speed in transfer\n");
-                       return -ENOPROTOOPT;
+               if (as->done_status)
+                       break;
+       }
+
+       if (as->done_status) {
+               if (as->use_pdc) {
+                       dev_warn(master->dev.parent,
+                               "overrun (%u/%u remaining)\n",
+                               spi_readl(as, TCR), spi_readl(as, RCR));
+
+                       /*
+                        * Clean up DMA registers and make sure the data
+                        * registers are empty.
+                        */
+                       spi_writel(as, RNCR, 0);
+                       spi_writel(as, TNCR, 0);
+                       spi_writel(as, RCR, 0);
+                       spi_writel(as, TCR, 0);
+                       for (timeout = 1000; timeout; timeout--)
+                               if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
+                                       break;
+                       if (!timeout)
+                               dev_warn(master->dev.parent,
+                                        "timeout waiting for TXEMPTY");
+                       while (spi_readl(as, SR) & SPI_BIT(RDRF))
+                               spi_readl(as, RDR);
+
+                       /* Clear any overrun happening while cleaning up */
+                       spi_readl(as, SR);
+
+               } else if (atmel_spi_use_dma(as, xfer)) {
+                       atmel_spi_stop_dma(as);
                }
 
-               /*
-                * DMA map early, for performance (empties dcache ASAP) and
-                * better fault reporting.
-                */
-               if ((!msg->is_dma_mapped) && (atmel_spi_use_dma(as, xfer)
-                       || as->use_pdc)) {
-                       if (atmel_spi_dma_map_xfer(as, xfer) < 0)
-                               return -ENOMEM;
+               if (!msg->is_dma_mapped
+                       && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+                       atmel_spi_dma_unmap_xfer(master, xfer);
+
+               return 0;
+
+       } else {
+               /* only update length if no error */
+               msg->actual_length += xfer->len;
+       }
+
+       if (!msg->is_dma_mapped
+               && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+               atmel_spi_dma_unmap_xfer(master, xfer);
+
+       if (xfer->delay_usecs)
+               udelay(xfer->delay_usecs);
+
+       if (xfer->cs_change) {
+               if (list_is_last(&xfer->transfer_list,
+                                &msg->transfers)) {
+                       as->keep_cs = true;
+               } else {
+                       as->cs_active = !as->cs_active;
+                       if (as->cs_active)
+                               cs_activate(as, msg->spi);
+                       else
+                               cs_deactivate(as, msg->spi);
                }
        }
 
-#ifdef VERBOSE
+       return 0;
+}
+
+static int atmel_spi_transfer_one_message(struct spi_master *master,
+                                               struct spi_message *msg)
+{
+       struct atmel_spi *as;
+       struct spi_transfer *xfer;
+       struct spi_device *spi = msg->spi;
+       int ret = 0;
+
+       as = spi_master_get_devdata(master);
+
+       dev_dbg(&spi->dev, "new message %p submitted for %s\n",
+                                       msg, dev_name(&spi->dev));
+
+       if (unlikely(list_empty(&msg->transfers)))
+               return -EINVAL;
+
+       atmel_spi_lock(as);
+       cs_activate(as, spi);
+
+       as->cs_active = true;
+       as->keep_cs = false;
+
+       msg->status = 0;
+       msg->actual_length = 0;
+
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-               dev_dbg(controller,
+               ret = atmel_spi_one_transfer(master, msg, xfer);
+               if (ret)
+                       goto msg_done;
+       }
+
+       if (as->use_pdc)
+               atmel_spi_disable_pdc_transfer(as);
+
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               dev_dbg(&spi->dev,
                        "  xfer %p: len %u tx %p/%08x rx %p/%08x\n",
                        xfer, xfer->len,
                        xfer->tx_buf, xfer->tx_dma,
                        xfer->rx_buf, xfer->rx_dma);
        }
-#endif
 
-       msg->status = -EINPROGRESS;
-       msg->actual_length = 0;
+msg_done:
+       if (!as->keep_cs)
+               cs_deactivate(as, msg->spi);
 
-       atmel_spi_lock(as);
-       list_add_tail(&msg->queue, &as->queue);
-       if (!as->current_transfer)
-               atmel_spi_next_message(spi->master);
        atmel_spi_unlock(as);
 
-       return 0;
+       msg->status = as->done_status;
+       spi_finalize_current_message(spi->master);
+
+       return ret;
 }
 
 static void atmel_spi_cleanup(struct spi_device *spi)
 {
-       struct atmel_spi        *as = spi_master_get_devdata(spi->master);
        struct atmel_spi_device *asd = spi->controller_state;
        unsigned                gpio = (unsigned) spi->controller_data;
 
        if (!asd)
                return;
 
-       atmel_spi_lock(as);
-       if (as->stay == spi) {
-               as->stay = NULL;
-               cs_deactivate(as, spi);
-       }
-       atmel_spi_unlock(as);
-
        spi->controller_state = NULL;
        gpio_free(gpio);
        kfree(asd);
@@ -1510,7 +1311,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
        if (irq < 0)
                return irq;
 
-       clk = clk_get(&pdev->dev, "spi_clk");
+       clk = devm_clk_get(&pdev->dev, "spi_clk");
        if (IS_ERR(clk))
                return PTR_ERR(clk);
 
@@ -1527,7 +1328,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
        master->bus_num = pdev->id;
        master->num_chipselect = master->dev.of_node ? 0 : 4;
        master->setup = atmel_spi_setup;
-       master->transfer = atmel_spi_transfer;
+       master->transfer_one_message = atmel_spi_transfer_one_message;
        master->cleanup = atmel_spi_cleanup;
        platform_set_drvdata(pdev, master);
 
@@ -1543,7 +1344,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
                goto out_free;
 
        spin_lock_init(&as->lock);
-       INIT_LIST_HEAD(&as->queue);
 
        as->pdev = pdev;
        as->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -1555,6 +1355,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
        as->irq = irq;
        as->clk = clk;
 
+       init_completion(&as->xfer_completion);
+
        atmel_get_caps(as);
 
        as->use_dma = false;
@@ -1570,14 +1372,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
                dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
 
        if (as->use_pdc) {
-               ret = request_irq(irq, atmel_spi_pdc_interrupt, 0,
-                                       dev_name(&pdev->dev), master);
+               ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
+                                       0, dev_name(&pdev->dev), master);
        } else {
-               tasklet_init(&as->tasklet, atmel_spi_tasklet_func,
-                                       (unsigned long)master);
-
-               ret = request_irq(irq, atmel_spi_pio_interrupt, 0,
-                                       dev_name(&pdev->dev), master);
+               ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
+                                       0, dev_name(&pdev->dev), master);
        }
        if (ret)
                goto out_unmap_regs;
@@ -1603,7 +1402,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
        dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
                        (unsigned long)regs->start, irq);
 
-       ret = spi_register_master(master);
+       ret = devm_spi_register_master(&pdev->dev, master);
        if (ret)
                goto out_free_dma;
 
@@ -1617,15 +1416,11 @@ out_free_dma:
        spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
        clk_disable_unprepare(clk);
 out_free_irq:
-       free_irq(irq, master);
 out_unmap_regs:
 out_free_buffer:
-       if (!as->use_pdc)
-               tasklet_kill(&as->tasklet);
        dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
                        as->buffer_dma);
 out_free:
-       clk_put(clk);
        spi_master_put(master);
        return ret;
 }
@@ -1634,12 +1429,9 @@ static int atmel_spi_remove(struct platform_device *pdev)
 {
        struct spi_master       *master = platform_get_drvdata(pdev);
        struct atmel_spi        *as = spi_master_get_devdata(master);
-       struct spi_message      *msg;
-       struct spi_transfer     *xfer;
 
        /* reset the hardware and block queue progress */
        spin_lock_irq(&as->lock);
-       as->stopping = 1;
        if (as->use_dma) {
                atmel_spi_stop_dma(as);
                atmel_spi_release_dma(as);
@@ -1650,28 +1442,10 @@ static int atmel_spi_remove(struct platform_device *pdev)
        spi_readl(as, SR);
        spin_unlock_irq(&as->lock);
 
-       /* Terminate remaining queued transfers */
-       list_for_each_entry(msg, &as->queue, queue) {
-               list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-                       if (!msg->is_dma_mapped
-                               && (atmel_spi_use_dma(as, xfer)
-                                       || as->use_pdc))
-                               atmel_spi_dma_unmap_xfer(master, xfer);
-               }
-               msg->status = -ESHUTDOWN;
-               msg->complete(msg->context);
-       }
-
-       if (!as->use_pdc)
-               tasklet_kill(&as->tasklet);
        dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
                        as->buffer_dma);
 
        clk_disable_unprepare(as->clk);
-       clk_put(as->clk);
-       free_irq(as->irq, master);
-
-       spi_unregister_master(master);
 
        return 0;
 }
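
Editor's note: the bulk of the atmel-spi rework above replaces the driver-private message queue and tasklet with the SPI core's message pump plus a struct completion. transfer_one_message() starts each segment (PDC, dmaengine or PIO) and then sleeps in wait_for_completion_timeout() until the interrupt handler signals completion with complete(); sleeping is safe because transfer_one_message() is called from the core's message-pump thread rather than the old tasklet context. A stripped-down sketch of that hand-off, with illustrative names rather than the driver's:

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    struct xfer_ctx {
            struct completion done;     /* init_completion() once at setup time */
            int status;
    };

    /* Interrupt side: record the outcome and wake the waiter. */
    static void xfer_irq_done(struct xfer_ctx *ctx, int status)
    {
            ctx->status = status;
            complete(&ctx->done);
    }

    /* Thread side: run one segment and wait for the interrupt handler. */
    static int xfer_run(struct xfer_ctx *ctx)
    {
            reinit_completion(&ctx->done);

            /* ... program the hardware and enable its interrupt here ... */

            if (!wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(1000)))
                    return -EIO;        /* mirrors the SPI_DMA_TIMEOUT handling */

            return ctx->status;
    }
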
index 9025edd7dc45fde131c7c9c6004ce5434829c785..8a89dd1f265427da19089824d284432451c7537c 100644 (file)
@@ -347,8 +347,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
 
        clk_prepare_enable(bs->clk);
 
-       err = request_irq(bs->irq, bcm2835_spi_interrupt, 0,
-                       dev_name(&pdev->dev), master);
+       err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
+                               dev_name(&pdev->dev), master);
        if (err) {
                dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
                goto out_clk_disable;
@@ -361,13 +361,11 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
        err = devm_spi_register_master(&pdev->dev, master);
        if (err) {
                dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
-               goto out_free_irq;
+               goto out_clk_disable;
        }
 
        return 0;
 
-out_free_irq:
-       free_irq(bs->irq, master);
 out_clk_disable:
        clk_disable_unprepare(bs->clk);
 out_master_put:
@@ -380,8 +378,6 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
        struct spi_master *master = platform_get_drvdata(pdev);
        struct bcm2835_spi *bs = spi_master_get_devdata(master);
 
-       free_irq(bs->irq, master);
-
        /* Clear FIFOs, and disable the HW block */
        bcm2835_wr(bs, BCM2835_SPI_CS,
                   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
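
Editor's note: in the new spi-bcm63xx-hsspi driver added below, bcm63xx_hsspi_set_clk() programs the per-profile rate as DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz)); assuming bs->speed_hz holds the controller's base clock, this first rounds the base/requested ratio up to an integer divider and then writes 2048 divided by that divider, rounded up, as the clock control word. A small worked example (plain C outside the kernel; the 400 MHz base clock is only an assumption for illustration):

    #include <stdio.h>

    /* Same rounding as the kernel's DIV_ROUND_UP() macro. */
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long base_hz = 400000000;      /* assumed HSSPI base clock */
            unsigned long want_hz = 20000000;       /* requested transfer rate  */

            unsigned long div  = DIV_ROUND_UP(base_hz, want_hz);    /* 20  */
            unsigned long word = DIV_ROUND_UP(2048, div);           /* 103 */

            printf("divider %lu -> clock control word %lu\n", div, word);
            return 0;
    }
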
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
new file mode 100644 (file)
index 0000000..b528f9f
--- /dev/null
@@ -0,0 +1,475 @@
+/*
+ * Broadcom BCM63XX High Speed SPI Controller driver
+ *
+ * Copyright 2000-2010 Broadcom Corporation
+ * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+#define HSSPI_GLOBAL_CTRL_REG                  0x0
+#define GLOBAL_CTRL_CS_POLARITY_SHIFT          0
+#define GLOBAL_CTRL_CS_POLARITY_MASK           0x000000ff
+#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT         8
+#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK          0x0000ff00
+#define GLOBAL_CTRL_CLK_GATE_SSOFF             BIT(16)
+#define GLOBAL_CTRL_CLK_POLARITY               BIT(17)
+#define GLOBAL_CTRL_MOSI_IDLE                  BIT(18)
+
+#define HSSPI_GLOBAL_EXT_TRIGGER_REG           0x4
+
+#define HSSPI_INT_STATUS_REG                   0x8
+#define HSSPI_INT_STATUS_MASKED_REG            0xc
+#define HSSPI_INT_MASK_REG                     0x10
+
+#define HSSPI_PINGx_CMD_DONE(i)                        BIT((i * 8) + 0)
+#define HSSPI_PINGx_RX_OVER(i)                 BIT((i * 8) + 1)
+#define HSSPI_PINGx_TX_UNDER(i)                        BIT((i * 8) + 2)
+#define HSSPI_PINGx_POLL_TIMEOUT(i)            BIT((i * 8) + 3)
+#define HSSPI_PINGx_CTRL_INVAL(i)              BIT((i * 8) + 4)
+
+#define HSSPI_INT_CLEAR_ALL                    0xff001f1f
+
+#define HSSPI_PINGPONG_COMMAND_REG(x)          (0x80 + (x) * 0x40)
+#define PINGPONG_CMD_COMMAND_MASK              0xf
+#define PINGPONG_COMMAND_NOOP                  0
+#define PINGPONG_COMMAND_START_NOW             1
+#define PINGPONG_COMMAND_START_TRIGGER         2
+#define PINGPONG_COMMAND_HALT                  3
+#define PINGPONG_COMMAND_FLUSH                 4
+#define PINGPONG_CMD_PROFILE_SHIFT             8
+#define PINGPONG_CMD_SS_SHIFT                  12
+
+#define HSSPI_PINGPONG_STATUS_REG(x)           (0x84 + (x) * 0x40)
+
+#define HSSPI_PROFILE_CLK_CTRL_REG(x)          (0x100 + (x) * 0x20)
+#define CLK_CTRL_FREQ_CTRL_MASK                        0x0000ffff
+#define CLK_CTRL_SPI_CLK_2X_SEL                        BIT(14)
+#define CLK_CTRL_ACCUM_RST_ON_LOOP             BIT(15)
+
+#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x)       (0x104 + (x) * 0x20)
+#define SIGNAL_CTRL_LATCH_RISING               BIT(12)
+#define SIGNAL_CTRL_LAUNCH_RISING              BIT(13)
+#define SIGNAL_CTRL_ASYNC_INPUT_PATH           BIT(16)
+
+#define HSSPI_PROFILE_MODE_CTRL_REG(x)         (0x108 + (x) * 0x20)
+#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT      8
+#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT      12
+#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT      16
+#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT      18
+#define MODE_CTRL_MODE_3WIRE                   BIT(20)
+#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT                24
+
+#define HSSPI_FIFO_REG(x)                      (0x200 + (x) * 0x200)
+
+
+#define HSSPI_OP_CODE_SHIFT                    13
+#define HSSPI_OP_SLEEP                         (0 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ_WRITE                    (1 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_WRITE                         (2 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ                          (3 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_SETIRQ                                (4 << HSSPI_OP_CODE_SHIFT)
+
+#define HSSPI_BUFFER_LEN                       512
+#define HSSPI_OPCODE_LEN                       2
+
+#define HSSPI_MAX_PREPEND_LEN                  15
+
+#define HSSPI_MAX_SYNC_CLOCK                   30000000
+
+#define HSSPI_BUS_NUM                          1 /* 0 is legacy SPI */
+
+struct bcm63xx_hsspi {
+       struct completion done;
+       struct mutex bus_mutex;
+
+       struct platform_device *pdev;
+       struct clk *clk;
+       void __iomem *regs;
+       u8 __iomem *fifo;
+
+       u32 speed_hz;
+       u8 cs_polarity;
+};
+
+static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned cs,
+                                bool active)
+{
+       u32 reg;
+
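+       /* Force the CS line to the requested level by programming the
+        * per-CS polarity bit relative to the device's SPI_CS_HIGH setting.
+        */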
+       mutex_lock(&bs->bus_mutex);
+       reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+       reg &= ~BIT(cs);
+       if (active == !(bs->cs_polarity & BIT(cs)))
+               reg |= BIT(cs);
+
+       __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+       mutex_unlock(&bs->bus_mutex);
+}
+
+static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
+                                 struct spi_device *spi, int hz)
+{
+       unsigned profile = spi->chip_select;
+       u32 reg;
+
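+       /* FREQ_CTRL appears to select the SPI clock as a fraction (n/2048)
+        * of the PLL rate; derive it from the requested transfer speed.
+        */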
+       reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
+       __raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
+                    bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
+
+       reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+       if (hz > HSSPI_MAX_SYNC_CLOCK)
+               reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
+       else
+               reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
+       __raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+
+       mutex_lock(&bs->bus_mutex);
+       /* setup clock polarity */
+       reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+       reg &= ~GLOBAL_CTRL_CLK_POLARITY;
+       if (spi->mode & SPI_CPOL)
+               reg |= GLOBAL_CTRL_CLK_POLARITY;
+       __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+       mutex_unlock(&bs->bus_mutex);
+}
+
+static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+       struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+       unsigned chip_select = spi->chip_select;
+       u16 opcode = 0;
+       int pending = t->len;
+       int step_size = HSSPI_BUFFER_LEN;
+       const u8 *tx = t->tx_buf;
+       u8 *rx = t->rx_buf;
+
+       bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+       bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+
+       if (tx && rx)
+               opcode = HSSPI_OP_READ_WRITE;
+       else if (tx)
+               opcode = HSSPI_OP_WRITE;
+       else if (rx)
+               opcode = HSSPI_OP_READ;
+
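+       /* The 16-bit opcode shares the FIFO with outgoing data, so writes
+        * carry HSSPI_OPCODE_LEN fewer payload bytes per step.
+        */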
+       if (opcode != HSSPI_OP_READ)
+               step_size -= HSSPI_OPCODE_LEN;
+
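+       /* No prepend bytes; both read and write multi-data phases start
+        * right after the 2-byte opcode. The low byte (0xff) is presumably
+        * the filler pattern clocked out while reading.
+        */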
+       __raw_writel(0 << MODE_CTRL_PREPENDBYTE_CNT_SHIFT |
+                    2 << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT |
+                    2 << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT | 0xff,
+                    bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+       while (pending > 0) {
+               int curr_step = min_t(int, step_size, pending);
+
+               init_completion(&bs->done);
+               if (tx) {
+                       memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
+                       tx += curr_step;
+               }
+
+               __raw_writew(opcode | curr_step, bs->fifo);
+
+               /* enable interrupt */
+               __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+                            bs->regs + HSSPI_INT_MASK_REG);
+
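+               /* Issue the command against the inverted ("dummy") slave
+                * select; the real device is selected via the CS polarity
+                * workaround set up in bcm63xx_hsspi_transfer_one().
+                */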
+               /* start the transfer */
+               __raw_writel(!chip_select << PINGPONG_CMD_SS_SHIFT |
+                            chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+                            PINGPONG_COMMAND_START_NOW,
+                            bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+               if (wait_for_completion_timeout(&bs->done, HZ) == 0) {
+                       dev_err(&bs->pdev->dev, "transfer timed out!\n");
+                       return -ETIMEDOUT;
+               }
+
+               if (rx) {
+                       memcpy_fromio(rx, bs->fifo, curr_step);
+                       rx += curr_step;
+               }
+
+               pending -= curr_step;
+       }
+
+       return 0;
+}
+
+static int bcm63xx_hsspi_setup(struct spi_device *spi)
+{
+       struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+       u32 reg;
+
+       reg = __raw_readl(bs->regs +
+                         HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+       reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
+       if (spi->mode & SPI_CPHA)
+               reg |= SIGNAL_CTRL_LAUNCH_RISING;
+       else
+               reg |= SIGNAL_CTRL_LATCH_RISING;
+       __raw_writel(reg, bs->regs +
+                    HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+
+       mutex_lock(&bs->bus_mutex);
+       reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+       /* only change actual polarities if there is no transfer */
+       if ((reg & GLOBAL_CTRL_CS_POLARITY_MASK) == bs->cs_polarity) {
+               if (spi->mode & SPI_CS_HIGH)
+                       reg |= BIT(spi->chip_select);
+               else
+                       reg &= ~BIT(spi->chip_select);
+               __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+       }
+
+       if (spi->mode & SPI_CS_HIGH)
+               bs->cs_polarity |= BIT(spi->chip_select);
+       else
+               bs->cs_polarity &= ~BIT(spi->chip_select);
+
+       mutex_unlock(&bs->bus_mutex);
+
+       return 0;
+}
+
+static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+                                     struct spi_message *msg)
+{
+       struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+       struct spi_transfer *t;
+       struct spi_device *spi = msg->spi;
+       int status = -EINVAL;
+       int dummy_cs;
+       u32 reg;
+
+       /* This controller does not support keeping CS active during idle.
+        * To work around this, we use the following ugly hack:
+        *
+        * a. Invert the target chip select's polarity so it will be active.
+        * b. Select a "dummy" chip select to use as the hardware target.
+        * c. Invert the dummy chip select's polarity so it will be inactive
+        *    during the actual transfers.
+        * d. Tell the hardware to send to the dummy chip select. Thanks to
+        *    the multiplexed nature of SPI, the actual target will receive
+        *    the transfer and we will see its response.
+        *
+        * e. At the end, restore both polarities to their default values.
+        */
+
+       dummy_cs = !spi->chip_select;
+       bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
+
+       list_for_each_entry(t, &msg->transfers, transfer_list) {
+               status = bcm63xx_hsspi_do_txrx(spi, t);
+               if (status)
+                       break;
+
+               msg->actual_length += t->len;
+
+               if (t->delay_usecs)
+                       udelay(t->delay_usecs);
+
+               if (t->cs_change)
+                       bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+       }
+
+       mutex_lock(&bs->bus_mutex);
+       reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+       reg &= ~GLOBAL_CTRL_CS_POLARITY_MASK;
+       reg |= bs->cs_polarity;
+       __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+       mutex_unlock(&bs->bus_mutex);
+
+       msg->status = status;
+       spi_finalize_current_message(master);
+
+       return 0;
+}
+
+static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
+{
+       struct bcm63xx_hsspi *bs = (struct bcm63xx_hsspi *)dev_id;
+
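+       /* The IRQ line is requested as shared, so ignore interrupts that
+        * are not ours.
+        */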
+       if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
+               return IRQ_NONE;
+
+       __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+       __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+       complete(&bs->done);
+
+       return IRQ_HANDLED;
+}
+
+static int bcm63xx_hsspi_probe(struct platform_device *pdev)
+{
+       struct spi_master *master;
+       struct bcm63xx_hsspi *bs;
+       struct resource *res_mem;
+       void __iomem *regs;
+       struct device *dev = &pdev->dev;
+       struct clk *clk;
+       int irq, ret;
+       u32 reg, rate;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "no irq\n");
+               return -ENXIO;
+       }
+
+       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       regs = devm_ioremap_resource(dev, res_mem);
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
+
+       clk = devm_clk_get(dev, "hsspi");
+
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       rate = clk_get_rate(clk);
+       if (!rate)
+               return -EINVAL;
+
+       ret = clk_prepare_enable(clk);
+       if (ret)
+               return ret;
+
+       master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+       if (!master) {
+               ret = -ENOMEM;
+               goto out_disable_clk;
+       }
+
+       bs = spi_master_get_devdata(master);
+       bs->pdev = pdev;
+       bs->clk = clk;
+       bs->regs = regs;
+       bs->speed_hz = rate;
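+       /* Only ping-pong engine 0 and its FIFO are used by this driver. */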
+       bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
+
+       mutex_init(&bs->bus_mutex);
+
+       master->bus_num = HSSPI_BUS_NUM;
+       master->num_chipselect = 8;
+       master->setup = bcm63xx_hsspi_setup;
+       master->transfer_one_message = bcm63xx_hsspi_transfer_one;
+       master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+       master->bits_per_word_mask = SPI_BPW_MASK(8);
+       master->auto_runtime_pm = true;
+
+       platform_set_drvdata(pdev, master);
+
+       /* Initialize the hardware */
+       __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+       /* clean up any pending interrupts */
+       __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+
+       /* read out default CS polarities */
+       reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+       bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
+       __raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
+                    bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+       ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
+                              pdev->name, bs);
+
+       if (ret)
+               goto out_put_master;
+
+       /* register and we are done */
+       ret = devm_spi_register_master(dev, master);
+       if (ret)
+               goto out_put_master;
+
+       return 0;
+
+out_put_master:
+       spi_master_put(master);
+out_disable_clk:
+       clk_disable_unprepare(clk);
+       return ret;
+}
+
+static int bcm63xx_hsspi_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+       /* reset the hardware and block queue progress */
+       __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+       clk_disable_unprepare(bs->clk);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm63xx_hsspi_suspend(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+       spi_master_suspend(master);
+       clk_disable_unprepare(bs->clk);
+
+       return 0;
+}
+
+static int bcm63xx_hsspi_resume(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+       int ret;
+
+       ret = clk_prepare_enable(bs->clk);
+       if (ret)
+               return ret;
+
+       spi_master_resume(master);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops bcm63xx_hsspi_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_hsspi_suspend, bcm63xx_hsspi_resume)
+};
+
+static struct platform_driver bcm63xx_hsspi_driver = {
+       .driver = {
+               .name   = "bcm63xx-hsspi",
+               .owner  = THIS_MODULE,
+               .pm     = &bcm63xx_hsspi_pm_ops,
+       },
+       .probe          = bcm63xx_hsspi_probe,
+       .remove         = bcm63xx_hsspi_remove,
+};
+
+module_platform_driver(bcm63xx_hsspi_driver);
+
+MODULE_ALIAS("platform:bcm63xx_hsspi");
+MODULE_DESCRIPTION("Broadcom BCM63xx High Speed SPI Controller driver");
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_LICENSE("GPL");
index 469ecd8763581c3628c033e00adc47699e033f12..77286aef2adfc9e5495704405f4c76f45855b1a0 100644 (file)
@@ -169,8 +169,6 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
                               transfer_list);
        }
 
-       len -= prepend_len;
-
        init_completion(&bs->done);
 
        /* Fill in the Message control register */
@@ -205,13 +203,7 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
        if (!timeout)
                return -ETIMEDOUT;
 
-       /* read out all data */
-       rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
-
-       if (do_rx && rx_tail != len)
-               return -EIO;
-
-       if (!rx_tail)
+       if (!do_rx)
                return 0;
 
        len = 0;
@@ -345,22 +337,19 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "no irq\n");
-               ret = -ENXIO;
-               goto out;
+               return -ENXIO;
        }
 
-       clk = clk_get(dev, "spi");
+       clk = devm_clk_get(dev, "spi");
        if (IS_ERR(clk)) {
                dev_err(dev, "no clock for device\n");
-               ret = PTR_ERR(clk);
-               goto out;
+               return PTR_ERR(clk);
        }
 
        master = spi_alloc_master(dev, sizeof(*bs));
        if (!master) {
                dev_err(dev, "out of memory\n");
-               ret = -ENOMEM;
-               goto out_clk;
+               return -ENOMEM;
        }
 
        bs = spi_master_get_devdata(master);
@@ -408,7 +397,10 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
        }
 
        /* Initialize hardware */
-       clk_prepare_enable(bs->clk);
+       ret = clk_prepare_enable(bs->clk);
+       if (ret)
+               goto out_err;
+
        bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
 
        /* register and we are done */
@@ -427,9 +419,6 @@ out_clk_disable:
        clk_disable_unprepare(clk);
 out_err:
        spi_master_put(master);
-out_clk:
-       clk_put(clk);
-out:
        return ret;
 }
 
@@ -443,12 +432,11 @@ static int bcm63xx_spi_remove(struct platform_device *pdev)
 
        /* HW shutdown */
        clk_disable_unprepare(bs->clk);
-       clk_put(bs->clk);
 
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int bcm63xx_spi_suspend(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
@@ -465,29 +453,27 @@ static int bcm63xx_spi_resume(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
        struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+       int ret;
 
-       clk_prepare_enable(bs->clk);
+       ret = clk_prepare_enable(bs->clk);
+       if (ret)
+               return ret;
 
        spi_master_resume(master);
 
        return 0;
 }
+#endif
 
 static const struct dev_pm_ops bcm63xx_spi_pm_ops = {
-       .suspend        = bcm63xx_spi_suspend,
-       .resume         = bcm63xx_spi_resume,
+       SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_spi_suspend, bcm63xx_spi_resume)
 };
 
-#define BCM63XX_SPI_PM_OPS     (&bcm63xx_spi_pm_ops)
-#else
-#define BCM63XX_SPI_PM_OPS     NULL
-#endif
-
 static struct platform_driver bcm63xx_spi_driver = {
        .driver = {
                .name   = "bcm63xx-spi",
                .owner  = THIS_MODULE,
-               .pm     = BCM63XX_SPI_PM_OPS,
+               .pm     = &bcm63xx_spi_pm_ops,
        },
        .probe          = bcm63xx_spi_probe,
        .remove         = bcm63xx_spi_remove,
index c16bf853c3eb3f3a0c7ab3c3c8731cc179d30a15..c616e41521be18589ec89ab68dcfeec9cd477da7 100644 (file)
@@ -38,7 +38,7 @@
  *
  * Since this is software, the timings may not be exactly what your board's
  * chips need ... there may be several reasons you'd need to tweak timings
- * in these routines, not just make to make it faster or slower to match a
+ * in these routines, not just to make it faster or slower to match a
  * particular CPU clock rate.
  */
 
index 6f03d7e6435d82d367d568bd478af2aed9394a98..374ba4a48a9e1450dba5c46c062c71a3de6f00df 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  CLPS711X SPI bus driver
  *
- *  Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ *  Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -198,7 +198,7 @@ static int spi_clps711x_probe(struct platform_device *pdev)
                        ret = -EINVAL;
                        goto err_out;
                }
-               if (gpio_request(hw->chipselect[i], DRIVER_NAME)) {
+               if (devm_gpio_request(&pdev->dev, hw->chipselect[i], NULL)) {
                        dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
                        ret = -EINVAL;
                        goto err_out;
@@ -240,38 +240,21 @@ static int spi_clps711x_probe(struct platform_device *pdev)
        dev_err(&pdev->dev, "Failed to register master\n");
 
 err_out:
-       while (--i >= 0)
-               if (gpio_is_valid(hw->chipselect[i]))
-                       gpio_free(hw->chipselect[i]);
-
        spi_master_put(master);
 
        return ret;
 }
 
-static int spi_clps711x_remove(struct platform_device *pdev)
-{
-       int i;
-       struct spi_master *master = platform_get_drvdata(pdev);
-       struct spi_clps711x_data *hw = spi_master_get_devdata(master);
-
-       for (i = 0; i < master->num_chipselect; i++)
-               if (gpio_is_valid(hw->chipselect[i]))
-                       gpio_free(hw->chipselect[i]);
-
-       return 0;
-}
-
 static struct platform_driver clps711x_spi_driver = {
        .driver = {
                .name   = DRIVER_NAME,
                .owner  = THIS_MODULE,
        },
        .probe  = spi_clps711x_probe,
-       .remove = spi_clps711x_remove,
 };
 module_platform_driver(clps711x_spi_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
 MODULE_DESCRIPTION("CLPS711X SPI bus driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
index cc5b75d10c386145dad24ac595c5495e4b7564f0..cabed8f9119e1af30d80766420d548b94567c91c 100644 (file)
@@ -397,44 +397,31 @@ static int mcfqspi_probe(struct platform_device *pdev)
        mcfqspi = spi_master_get_devdata(master);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_dbg(&pdev->dev, "platform_get_resource failed\n");
-               status = -ENXIO;
+       mcfqspi->iobase = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mcfqspi->iobase)) {
+               status = PTR_ERR(mcfqspi->iobase);
                goto fail0;
        }
 
-       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-               dev_dbg(&pdev->dev, "request_mem_region failed\n");
-               status = -EBUSY;
-               goto fail0;
-       }
-
-       mcfqspi->iobase = ioremap(res->start, resource_size(res));
-       if (!mcfqspi->iobase) {
-               dev_dbg(&pdev->dev, "ioremap failed\n");
-               status = -ENOMEM;
-               goto fail1;
-       }
-
        mcfqspi->irq = platform_get_irq(pdev, 0);
        if (mcfqspi->irq < 0) {
                dev_dbg(&pdev->dev, "platform_get_irq failed\n");
                status = -ENXIO;
-               goto fail2;
+               goto fail0;
        }
 
-       status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, 0,
-                            pdev->name, mcfqspi);
+       status = devm_request_irq(&pdev->dev, mcfqspi->irq, mcfqspi_irq_handler,
+                               0, pdev->name, mcfqspi);
        if (status) {
                dev_dbg(&pdev->dev, "request_irq failed\n");
-               goto fail2;
+               goto fail0;
        }
 
-       mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk");
+       mcfqspi->clk = devm_clk_get(&pdev->dev, "qspi_clk");
        if (IS_ERR(mcfqspi->clk)) {
                dev_dbg(&pdev->dev, "clk_get failed\n");
                status = PTR_ERR(mcfqspi->clk);
-               goto fail3;
+               goto fail0;
        }
        clk_enable(mcfqspi->clk);
 
@@ -445,7 +432,7 @@ static int mcfqspi_probe(struct platform_device *pdev)
        status = mcfqspi_cs_setup(mcfqspi);
        if (status) {
                dev_dbg(&pdev->dev, "error initializing cs_control\n");
-               goto fail4;
+               goto fail1;
        }
 
        init_waitqueue_head(&mcfqspi->waitq);
@@ -459,10 +446,10 @@ static int mcfqspi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, master);
 
-       status = spi_register_master(master);
+       status = devm_spi_register_master(&pdev->dev, master);
        if (status) {
                dev_dbg(&pdev->dev, "spi_register_master failed\n");
-               goto fail5;
+               goto fail2;
        }
        pm_runtime_enable(mcfqspi->dev);
 
@@ -470,17 +457,10 @@ static int mcfqspi_probe(struct platform_device *pdev)
 
        return 0;
 
-fail5:
-       mcfqspi_cs_teardown(mcfqspi);
-fail4:
-       clk_disable(mcfqspi->clk);
-       clk_put(mcfqspi->clk);
-fail3:
-       free_irq(mcfqspi->irq, mcfqspi);
 fail2:
-       iounmap(mcfqspi->iobase);
+       mcfqspi_cs_teardown(mcfqspi);
 fail1:
-       release_mem_region(res->start, resource_size(res));
+       clk_disable(mcfqspi->clk);
 fail0:
        spi_master_put(master);
 
@@ -501,11 +481,6 @@ static int mcfqspi_remove(struct platform_device *pdev)
 
        mcfqspi_cs_teardown(mcfqspi);
        clk_disable(mcfqspi->clk);
-       clk_put(mcfqspi->clk);
-       free_irq(mcfqspi->irq, mcfqspi);
-       iounmap(mcfqspi->iobase);
-       release_mem_region(res->start, resource_size(res));
-       spi_unregister_master(master);
 
        return 0;
 }
index 50b2d88c81901c2b0b0ec1720f7e802ccf4924fb..5e7389faa2a0189472d71fedcba95ae8ecd1cf35 100644 (file)
@@ -396,10 +396,6 @@ static int davinci_spi_setup(struct spi_device *spi)
        dspi = spi_master_get_devdata(spi->master);
        pdata = &dspi->pdata;
 
-       /* if bits per word length is zero then set it default 8 */
-       if (!spi->bits_per_word)
-               spi->bits_per_word = 8;
-
        if (!(spi->mode & SPI_NO_CS)) {
                if ((pdata->chip_sel == NULL) ||
                    (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS))
@@ -853,7 +849,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct davinci_spi *dspi;
        struct davinci_spi_platform_data *pdata;
-       struct resource *r, *mem;
+       struct resource *r;
        resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
        resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
        int i = 0, ret = 0;
@@ -894,39 +890,33 @@ static int davinci_spi_probe(struct platform_device *pdev)
 
        dspi->pbase = r->start;
 
-       mem = request_mem_region(r->start, resource_size(r), pdev->name);
-       if (mem == NULL) {
-               ret = -EBUSY;
+       dspi->base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(dspi->base)) {
+               ret = PTR_ERR(dspi->base);
                goto free_master;
        }
 
-       dspi->base = ioremap(r->start, resource_size(r));
-       if (dspi->base == NULL) {
-               ret = -ENOMEM;
-               goto release_region;
-       }
-
        dspi->irq = platform_get_irq(pdev, 0);
        if (dspi->irq <= 0) {
                ret = -EINVAL;
-               goto unmap_io;
+               goto free_master;
        }
 
-       ret = request_threaded_irq(dspi->irq, davinci_spi_irq, dummy_thread_fn,
-                                0, dev_name(&pdev->dev), dspi);
+       ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
+                               dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
        if (ret)
-               goto unmap_io;
+               goto free_master;
 
        dspi->bitbang.master = master;
        if (dspi->bitbang.master == NULL) {
                ret = -ENODEV;
-               goto irq_free;
+               goto free_master;
        }
 
-       dspi->clk = clk_get(&pdev->dev, NULL);
+       dspi->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(dspi->clk)) {
                ret = -ENODEV;
-               goto irq_free;
+               goto free_master;
        }
        clk_prepare_enable(dspi->clk);
 
@@ -963,8 +953,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
                        goto free_clk;
 
                dev_info(&pdev->dev, "DMA: supported\n");
-               dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
-                               "event queue: %d\n", dma_rx_chan, dma_tx_chan,
+               dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, "
+                               "event queue: %d\n", &dma_rx_chan, &dma_tx_chan,
                                pdata->dma_event_q);
        }
 
@@ -1015,13 +1005,6 @@ free_dma:
        dma_release_channel(dspi->dma_tx);
 free_clk:
        clk_disable_unprepare(dspi->clk);
-       clk_put(dspi->clk);
-irq_free:
-       free_irq(dspi->irq, dspi);
-unmap_io:
-       iounmap(dspi->base);
-release_region:
-       release_mem_region(dspi->pbase, resource_size(r));
 free_master:
        spi_master_put(master);
 err:
@@ -1041,7 +1024,6 @@ static int davinci_spi_remove(struct platform_device *pdev)
 {
        struct davinci_spi *dspi;
        struct spi_master *master;
-       struct resource *r;
 
        master = platform_get_drvdata(pdev);
        dspi = spi_master_get_devdata(master);
@@ -1049,11 +1031,6 @@ static int davinci_spi_remove(struct platform_device *pdev)
        spi_bitbang_stop(&dspi->bitbang);
 
        clk_disable_unprepare(dspi->clk);
-       clk_put(dspi->clk);
-       free_irq(dspi->irq, dspi);
-       iounmap(dspi->base);
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(dspi->pbase, resource_size(r));
        spi_master_put(master);
 
        return 0;
index c7a74f0ef89285eb2f14926dd979145987509dc6..dd5bd468e9621698c7a130418d5a74994c474cbe 100644 (file)
@@ -433,21 +433,12 @@ static int falcon_sflash_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, priv);
 
-       ret = spi_register_master(master);
+       ret = devm_spi_register_master(&pdev->dev, master);
        if (ret)
                spi_master_put(master);
        return ret;
 }
 
-static int falcon_sflash_remove(struct platform_device *pdev)
-{
-       struct falcon_sflash *priv = platform_get_drvdata(pdev);
-
-       spi_unregister_master(priv->master);
-
-       return 0;
-}
-
 static const struct of_device_id falcon_sflash_match[] = {
        { .compatible = "lantiq,sflash-falcon" },
        {},
@@ -456,7 +447,6 @@ MODULE_DEVICE_TABLE(of, falcon_sflash_match);
 
 static struct platform_driver falcon_sflash_driver = {
        .probe  = falcon_sflash_probe,
-       .remove = falcon_sflash_remove,
        .driver = {
                .name   = DRV_NAME,
                .owner  = THIS_MODULE,
index 8641b03bdd7a26f6057914a05e2095c4a9d22d0c..ec79f726672a14f7644810101afe59e82322976c 100644 (file)
@@ -320,8 +320,10 @@ static void dspi_chipselect(struct spi_device *spi, int value)
        switch (value) {
        case BITBANG_CS_ACTIVE:
                pushr |= SPI_PUSHR_CONT;
+               break;
        case BITBANG_CS_INACTIVE:
                pushr &= ~SPI_PUSHR_CONT;
+               break;
        }
 
        writel(pushr, dspi->base + SPI_PUSHR);
@@ -373,9 +375,6 @@ static int dspi_setup(struct spi_device *spi)
        if (!spi->max_speed_hz)
                return -EINVAL;
 
-       if (!spi->bits_per_word)
-               spi->bits_per_word = 8;
-
        return dspi_setup_transfer(spi, NULL);
 }
 
index b80f2f70fef7c22174df30a97c311f2aa9693e5c..a5474ef9d2a0cf4ad7367ee1e0399fae2e982145 100644 (file)
@@ -206,7 +206,8 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
 #define MX51_ECSPI_STAT_RR             (1 <<  3)
 
 /* MX51 eCSPI */
-static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
+static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
+                                     unsigned int *fres)
 {
        /*
         * there are two 4-bit dividers, the pre-divider divides by
@@ -234,6 +235,10 @@ static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
 
        pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
                        __func__, fin, fspi, post, pre);
+
+       /* Resulting frequency for the SCLK line. */
+       *fres = (fin / (pre + 1)) >> post;
+
        return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
                (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
 }
@@ -264,6 +269,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
                struct spi_imx_config *config)
 {
        u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+       u32 clk = config->speed_hz, delay;
 
        /*
         * The hardware seems to have a race condition when changing modes. The
@@ -275,7 +281,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
        ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
 
        /* set clock speed */
-       ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz);
+       ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);
 
        /* set chip select to use */
        ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
@@ -297,6 +303,23 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
        writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
        writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
 
+       /*
+        * Wait until the changes in the configuration register CONFIGREG
+        * propagate into the hardware. It takes exactly one tick of the
+        * SCLK clock, but we will wait two SCLK cycles just to be sure. The
+        * effect of the delay it takes for the hardware to apply changes
+        * is noticeable if the SCLK clock runs very slowly. In such a case,
+        * if the SCLK polarity has to be inverted, the GPIO chip select might
+        * be asserted before the SCLK polarity changes, which would disrupt
+        * the SPI communication as the device on the other end would consider
+        * the change of SCLK polarity as a clock tick already.
+        */
+       delay = (2 * 1000000) / clk;
+       if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+               udelay(delay);
+       else                    /* SCLK is _very_ slow */
+               usleep_range(delay, delay + 10);
+
        return 0;
 }
 
index 3adebfa22e3d785b6bf7ffb2149e77652039c3c3..79e5aa2250c89d48541d8d9eb263410c2aa66ac6 100644 (file)
@@ -111,14 +111,6 @@ static int mxs_spi_setup_transfer(struct spi_device *dev,
        return 0;
 }
 
-static int mxs_spi_setup(struct spi_device *dev)
-{
-       if (!dev->bits_per_word)
-               dev->bits_per_word = 8;
-
-       return 0;
-}
-
 static u32 mxs_spi_cs_to_reg(unsigned cs)
 {
        u32 select = 0;
@@ -502,7 +494,6 @@ static int mxs_spi_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        master->transfer_one_message = mxs_spi_transfer_one;
-       master->setup = mxs_spi_setup;
        master->bits_per_word_mask = SPI_BPW_MASK(8);
        master->mode_bits = SPI_CPOL | SPI_CPHA;
        master->num_chipselect = 3;
index 7765b1999537a08e5c6d95eed39bdea4c8c9c9dd..cbc68848789d09aa808ab44c8e84f4d2a765ab8f 100644 (file)
@@ -1066,6 +1066,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
 
        pdata->num_chipselect = 1;
        pdata->enable_dma = true;
+       pdata->tx_chan_id = -1;
+       pdata->rx_chan_id = -1;
 
        return pdata;
 }
index 9e829cee73572bab01eae87f3e4a219a8e76c98f..d1e89bb352d8cd614a9fb31d2ce3d44dd171475e 100644 (file)
 #include <linux/spi/spi.h>
 #include <linux/spi/rspi.h>
 
-#define RSPI_SPCR              0x00
-#define RSPI_SSLP              0x01
-#define RSPI_SPPCR             0x02
-#define RSPI_SPSR              0x03
-#define RSPI_SPDR              0x04
-#define RSPI_SPSCR             0x08
-#define RSPI_SPSSR             0x09
-#define RSPI_SPBR              0x0a
-#define RSPI_SPDCR             0x0b
-#define RSPI_SPCKD             0x0c
-#define RSPI_SSLND             0x0d
-#define RSPI_SPND              0x0e
-#define RSPI_SPCR2             0x0f
-#define RSPI_SPCMD0            0x10
-#define RSPI_SPCMD1            0x12
-#define RSPI_SPCMD2            0x14
-#define RSPI_SPCMD3            0x16
-#define RSPI_SPCMD4            0x18
-#define RSPI_SPCMD5            0x1a
-#define RSPI_SPCMD6            0x1c
-#define RSPI_SPCMD7            0x1e
+#define RSPI_SPCR              0x00    /* Control Register */
+#define RSPI_SSLP              0x01    /* Slave Select Polarity Register */
+#define RSPI_SPPCR             0x02    /* Pin Control Register */
+#define RSPI_SPSR              0x03    /* Status Register */
+#define RSPI_SPDR              0x04    /* Data Register */
+#define RSPI_SPSCR             0x08    /* Sequence Control Register */
+#define RSPI_SPSSR             0x09    /* Sequence Status Register */
+#define RSPI_SPBR              0x0a    /* Bit Rate Register */
+#define RSPI_SPDCR             0x0b    /* Data Control Register */
+#define RSPI_SPCKD             0x0c    /* Clock Delay Register */
+#define RSPI_SSLND             0x0d    /* Slave Select Negation Delay Register */
+#define RSPI_SPND              0x0e    /* Next-Access Delay Register */
+#define RSPI_SPCR2             0x0f    /* Control Register 2 */
+#define RSPI_SPCMD0            0x10    /* Command Register 0 */
+#define RSPI_SPCMD1            0x12    /* Command Register 1 */
+#define RSPI_SPCMD2            0x14    /* Command Register 2 */
+#define RSPI_SPCMD3            0x16    /* Command Register 3 */
+#define RSPI_SPCMD4            0x18    /* Command Register 4 */
+#define RSPI_SPCMD5            0x1a    /* Command Register 5 */
+#define RSPI_SPCMD6            0x1c    /* Command Register 6 */
+#define RSPI_SPCMD7            0x1e    /* Command Register 7 */
+#define RSPI_SPBFCR            0x20    /* Buffer Control Register */
+#define RSPI_SPBFDR            0x22    /* Buffer Data Count Setting Register */
 
 /*qspi only */
-#define QSPI_SPBFCR            0x18
-#define QSPI_SPBDCR            0x1a
-#define QSPI_SPBMUL0           0x1c
-#define QSPI_SPBMUL1           0x20
-#define QSPI_SPBMUL2           0x24
-#define QSPI_SPBMUL3           0x28
-
-/* SPCR */
-#define SPCR_SPRIE             0x80
-#define SPCR_SPE               0x40
-#define SPCR_SPTIE             0x20
-#define SPCR_SPEIE             0x10
-#define SPCR_MSTR              0x08
-#define SPCR_MODFEN            0x04
-#define SPCR_TXMD              0x02
-#define SPCR_SPMS              0x01
-
-/* SSLP */
-#define SSLP_SSL1P             0x02
-#define SSLP_SSL0P             0x01
-
-/* SPPCR */
-#define SPPCR_MOIFE            0x20
-#define SPPCR_MOIFV            0x10
+#define QSPI_SPBFCR            0x18    /* Buffer Control Register */
+#define QSPI_SPBDCR            0x1a    /* Buffer Data Count Register */
+#define QSPI_SPBMUL0           0x1c    /* Transfer Data Length Multiplier Setting Register 0 */
+#define QSPI_SPBMUL1           0x20    /* Transfer Data Length Multiplier Setting Register 1 */
+#define QSPI_SPBMUL2           0x24    /* Transfer Data Length Multiplier Setting Register 2 */
+#define QSPI_SPBMUL3           0x28    /* Transfer Data Length Multiplier Setting Register 3 */
+
+/* SPCR - Control Register */
+#define SPCR_SPRIE             0x80    /* Receive Interrupt Enable */
+#define SPCR_SPE               0x40    /* Function Enable */
+#define SPCR_SPTIE             0x20    /* Transmit Interrupt Enable */
+#define SPCR_SPEIE             0x10    /* Error Interrupt Enable */
+#define SPCR_MSTR              0x08    /* Master/Slave Mode Select */
+#define SPCR_MODFEN            0x04    /* Mode Fault Error Detection Enable */
+/* RSPI on SH only */
+#define SPCR_TXMD              0x02    /* TX Only Mode (vs. Full Duplex) */
+#define SPCR_SPMS              0x01    /* 3-wire Mode (vs. 4-wire) */
+/* QSPI on R-Car M2 only */
+#define SPCR_WSWAP             0x02    /* Word Swap of read-data for DMAC */
+#define SPCR_BSWAP             0x01    /* Byte Swap of read-data for DMAC */
+
+/* SSLP - Slave Select Polarity Register */
+#define SSLP_SSL1P             0x02    /* SSL1 Signal Polarity Setting */
+#define SSLP_SSL0P             0x01    /* SSL0 Signal Polarity Setting */
+
+/* SPPCR - Pin Control Register */
+#define SPPCR_MOIFE            0x20    /* MOSI Idle Value Fixing Enable */
+#define SPPCR_MOIFV            0x10    /* MOSI Idle Fixed Value */
 #define SPPCR_SPOM             0x04
-#define SPPCR_SPLP2            0x02
-#define SPPCR_SPLP             0x01
-
-/* SPSR */
-#define SPSR_SPRF              0x80
-#define SPSR_SPTEF             0x20
-#define SPSR_PERF              0x08
-#define SPSR_MODF              0x04
-#define SPSR_IDLNF             0x02
-#define SPSR_OVRF              0x01
-
-/* SPSCR */
-#define SPSCR_SPSLN_MASK       0x07
-
-/* SPSSR */
-#define SPSSR_SPECM_MASK       0x70
-#define SPSSR_SPCP_MASK                0x07
-
-/* SPDCR */
-#define SPDCR_SPLW             0x20
-#define SPDCR_SPRDTD           0x10
+#define SPPCR_SPLP2            0x02    /* Loopback Mode 2 (non-inverting) */
+#define SPPCR_SPLP             0x01    /* Loopback Mode (inverting) */
+
+#define SPPCR_IO3FV            0x04    /* Single-/Dual-SPI Mode IO3 Output Fixed Value */
+#define SPPCR_IO2FV            0x02    /* Single-/Dual-SPI Mode IO2 Output Fixed Value */
+
+/* SPSR - Status Register */
+#define SPSR_SPRF              0x80    /* Receive Buffer Full Flag */
+#define SPSR_TEND              0x40    /* Transmit End */
+#define SPSR_SPTEF             0x20    /* Transmit Buffer Empty Flag */
+#define SPSR_PERF              0x08    /* Parity Error Flag */
+#define SPSR_MODF              0x04    /* Mode Fault Error Flag */
+#define SPSR_IDLNF             0x02    /* RSPI Idle Flag */
+#define SPSR_OVRF              0x01    /* Overrun Error Flag */
+
+/* SPSCR - Sequence Control Register */
+#define SPSCR_SPSLN_MASK       0x07    /* Sequence Length Specification */
+
+/* SPSSR - Sequence Status Register */
+#define SPSSR_SPECM_MASK       0x70    /* Command Error Mask */
+#define SPSSR_SPCP_MASK                0x07    /* Command Pointer Mask */
+
+/* SPDCR - Data Control Register */
+#define SPDCR_TXDMY            0x80    /* Dummy Data Transmission Enable */
+#define SPDCR_SPLW1            0x40    /* Access Width Specification (RZ) */
+#define SPDCR_SPLW0            0x20    /* Access Width Specification (RZ) */
+#define SPDCR_SPLLWORD         (SPDCR_SPLW1 | SPDCR_SPLW0)
+#define SPDCR_SPLWORD          SPDCR_SPLW1
+#define SPDCR_SPLBYTE          SPDCR_SPLW0
+#define SPDCR_SPLW             0x20    /* Access Width Specification (SH) */
+#define SPDCR_SPRDTD           0x10    /* Receive Transmit Data Select */
 #define SPDCR_SLSEL1           0x08
 #define SPDCR_SLSEL0           0x04
-#define SPDCR_SLSEL_MASK       0x0c
+#define SPDCR_SLSEL_MASK       0x0c    /* SSL1 Output Select */
 #define SPDCR_SPFC1            0x02
 #define SPDCR_SPFC0            0x01
+#define SPDCR_SPFC_MASK                0x03    /* Frame Count Setting (1-4) */
 
-/* SPCKD */
-#define SPCKD_SCKDL_MASK       0x07
+/* SPCKD - Clock Delay Register */
+#define SPCKD_SCKDL_MASK       0x07    /* Clock Delay Setting (1-8) */
 
-/* SSLND */
-#define SSLND_SLNDL_MASK       0x07
+/* SSLND - Slave Select Negation Delay Register */
+#define SSLND_SLNDL_MASK       0x07    /* SSL Negation Delay Setting (1-8) */
 
-/* SPND */
-#define SPND_SPNDL_MASK                0x07
+/* SPND - Next-Access Delay Register */
+#define SPND_SPNDL_MASK                0x07    /* Next-Access Delay Setting (1-8) */
 
-/* SPCR2 */
-#define SPCR2_PTE              0x08
-#define SPCR2_SPIE             0x04
-#define SPCR2_SPOE             0x02
-#define SPCR2_SPPE             0x01
+/* SPCR2 - Control Register 2 */
+#define SPCR2_PTE              0x08    /* Parity Self-Test Enable */
+#define SPCR2_SPIE             0x04    /* Idle Interrupt Enable */
+#define SPCR2_SPOE             0x02    /* Odd Parity Enable (vs. Even) */
+#define SPCR2_SPPE             0x01    /* Parity Enable */
 
-/* SPCMDn */
-#define SPCMD_SCKDEN           0x8000
-#define SPCMD_SLNDEN           0x4000
-#define SPCMD_SPNDEN           0x2000
-#define SPCMD_LSBF             0x1000
-#define SPCMD_SPB_MASK         0x0f00
+/* SPCMDn - Command Registers */
+#define SPCMD_SCKDEN           0x8000  /* Clock Delay Setting Enable */
+#define SPCMD_SLNDEN           0x4000  /* SSL Negation Delay Setting Enable */
+#define SPCMD_SPNDEN           0x2000  /* Next-Access Delay Enable */
+#define SPCMD_LSBF             0x1000  /* LSB First */
+#define SPCMD_SPB_MASK         0x0f00  /* Data Length Setting */
 #define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
 #define SPCMD_SPB_8BIT         0x0000  /* qspi only */
 #define SPCMD_SPB_16BIT                0x0100
 #define SPCMD_SPB_20BIT                0x0000
 #define SPCMD_SPB_24BIT                0x0100
 #define SPCMD_SPB_32BIT                0x0200
-#define SPCMD_SSLKP            0x0080
-#define SPCMD_SSLA_MASK                0x0030
-#define SPCMD_BRDV_MASK                0x000c
-#define SPCMD_CPOL             0x0002
-#define SPCMD_CPHA             0x0001
-
-/* SPBFCR */
-#define SPBFCR_TXRST           0x80    /* qspi only */
-#define SPBFCR_RXRST           0x40    /* qspi only */
+#define SPCMD_SSLKP            0x0080  /* SSL Signal Level Keeping */
+#define SPCMD_SPIMOD_MASK      0x0060  /* SPI Operating Mode (QSPI only) */
+#define SPCMD_SPIMOD1          0x0040
+#define SPCMD_SPIMOD0          0x0020
+#define SPCMD_SPIMOD_SINGLE    0
+#define SPCMD_SPIMOD_DUAL      SPCMD_SPIMOD0
+#define SPCMD_SPIMOD_QUAD      SPCMD_SPIMOD1
+#define SPCMD_SPRW             0x0010  /* SPI Read/Write Access (Dual/Quad) */
+#define SPCMD_SSLA_MASK                0x0030  /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_BRDV_MASK                0x000c  /* Bit Rate Division Setting */
+#define SPCMD_CPOL             0x0002  /* Clock Polarity Setting */
+#define SPCMD_CPHA             0x0001  /* Clock Phase Setting */
+
+/* SPBFCR - Buffer Control Register */
+#define SPBFCR_TXRST           0x80    /* Transmit Buffer Data Reset (qspi only) */
+#define SPBFCR_RXRST           0x40    /* Receive Buffer Data Reset (qspi only) */
+#define SPBFCR_TXTRG_MASK      0x30    /* Transmit Buffer Data Triggering Number */
+#define SPBFCR_RXTRG_MASK      0x07    /* Receive Buffer Data Triggering Number */
+
+#define DUMMY_DATA             0x00
 
 struct rspi_data {
        void __iomem *addr;
@@ -158,7 +186,8 @@ struct rspi_data {
        wait_queue_head_t wait;
        spinlock_t lock;
        struct clk *clk;
-       unsigned char spsr;
+       u8 spsr;
+       u16 spcmd;
        const struct spi_ops *ops;
 
        /* for dmaengine */
@@ -170,34 +199,35 @@ struct rspi_data {
        unsigned dma_callbacked:1;
 };
 
-static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
+static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
 {
        iowrite8(data, rspi->addr + offset);
 }
 
-static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
+static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
 {
        iowrite16(data, rspi->addr + offset);
 }
 
-static void rspi_write32(struct rspi_data *rspi, u32 data, u16 offset)
+static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
 {
        iowrite32(data, rspi->addr + offset);
 }
 
-static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
+static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
 {
        return ioread8(rspi->addr + offset);
 }
 
-static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
+static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
 {
        return ioread16(rspi->addr + offset);
 }
 
 /* optional functions */
 struct spi_ops {
-       int (*set_config_register)(struct rspi_data *rspi, int access_size);
+       int (*set_config_register)(const struct rspi_data *rspi,
+                                  int access_size);
        int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
                        struct spi_transfer *t);
        int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
@@ -208,7 +238,8 @@ struct spi_ops {
 /*
  * functions for RSPI
  */
-static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+static int rspi_set_config_register(const struct rspi_data *rspi,
+                                   int access_size)
 {
        int spbr;
 
@@ -231,7 +262,7 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
        rspi_write8(rspi, 0x00, RSPI_SPCR2);
 
        /* Sets SPCMD */
-       rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
+       rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | rspi->spcmd,
                     RSPI_SPCMD0);
 
        /* Sets RSPI mode */
@@ -243,7 +274,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
 /*
  * functions for QSPI
  */
-static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
+static int qspi_set_config_register(const struct rspi_data *rspi,
+                                   int access_size)
 {
        u16 spcmd;
        int spbr;
@@ -268,10 +300,10 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
                spcmd = SPCMD_SPB_8BIT;
        else if (access_size == 16)
                spcmd = SPCMD_SPB_16BIT;
-       else if (access_size == 32)
+       else
                spcmd = SPCMD_SPB_32BIT;
 
-       spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SSLKP | SPCMD_SPNDEN;
+       spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | rspi->spcmd | SPCMD_SPNDEN;
 
        /* Resets transfer data length */
        rspi_write32(rspi, 0, QSPI_SPBMUL0);
@@ -292,12 +324,12 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
 
 #define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
 
-static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
+static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
 {
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
 }
 
-static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
+static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
 {
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
 }
@@ -316,12 +348,12 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
        return 0;
 }
 
-static void rspi_assert_ssl(struct rspi_data *rspi)
+static void rspi_assert_ssl(const struct rspi_data *rspi)
 {
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
 }
 
-static void rspi_negate_ssl(struct rspi_data *rspi)
+static void rspi_negate_ssl(const struct rspi_data *rspi)
 {
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
 }
@@ -330,9 +362,7 @@ static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
                         struct spi_transfer *t)
 {
        int remain = t->len;
-       u8 *data;
-
-       data = (u8 *)t->tx_buf;
+       const u8 *data = t->tx_buf;
        while (remain > 0) {
                rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
                            RSPI_SPCR);
@@ -348,7 +378,7 @@ static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
                remain--;
        }
 
-       /* Waiting for the last transmition */
+       /* Waiting for the last transmission */
        rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
 
        return 0;
@@ -358,12 +388,11 @@ static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
                         struct spi_transfer *t)
 {
        int remain = t->len;
-       u8 *data;
+       const u8 *data = t->tx_buf;
 
        rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
        rspi_write8(rspi, 0x00, QSPI_SPBFCR);
 
-       data = (u8 *)t->tx_buf;
        while (remain > 0) {
 
                if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
@@ -383,7 +412,7 @@ static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
                remain--;
        }
 
-       /* Waiting for the last transmition */
+       /* Waiting for the last transmission */
        rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
 
        return 0;
@@ -399,8 +428,8 @@ static void rspi_dma_complete(void *arg)
        wake_up_interruptible(&rspi->wait);
 }
 
-static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
-                          struct dma_chan *chan,
+static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf,
+                          unsigned len, struct dma_chan *chan,
                           enum dma_transfer_direction dir)
 {
        sg_init_table(sg, 1);
@@ -440,12 +469,13 @@ static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
 static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
 {
        struct scatterlist sg;
-       void *buf = NULL;
+       const void *buf = NULL;
        struct dma_async_tx_descriptor *desc;
        unsigned len;
        int ret = 0;
 
        if (rspi->dma_width_16bit) {
+               void *tmp;
                /*
                 * If DMAC bus width is 16-bit, the driver allocates a dummy
                 * buffer. And, the driver converts original data into the
@@ -454,13 +484,14 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
                 *  DMAC data:     1st byte, dummy, 2nd byte, dummy ...
                 */
                len = t->len * 2;
-               buf = kmalloc(len, GFP_KERNEL);
-               if (!buf)
+               tmp = kmalloc(len, GFP_KERNEL);
+               if (!tmp)
                        return -ENOMEM;
-               rspi_memory_to_8bit(buf, t->tx_buf, t->len);
+               rspi_memory_to_8bit(tmp, t->tx_buf, t->len);
+               buf = tmp;
        } else {
                len = t->len;
-               buf = (void *)t->tx_buf;
+               buf = t->tx_buf;
        }
 
        if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
@@ -508,9 +539,9 @@ end_nomap:
        return ret;
 }
 
-static void rspi_receive_init(struct rspi_data *rspi)
+static void rspi_receive_init(const struct rspi_data *rspi)
 {
-       unsigned char spsr;
+       u8 spsr;
 
        spsr = rspi_read8(rspi, RSPI_SPSR);
        if (spsr & SPSR_SPRF)
@@ -528,7 +559,7 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
 
        rspi_receive_init(rspi);
 
-       data = (u8 *)t->rx_buf;
+       data = t->rx_buf;
        while (remain > 0) {
                rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
                            RSPI_SPCR);
@@ -539,7 +570,7 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
                        return -ETIMEDOUT;
                }
                /* dummy write for generate clock */
-               rspi_write16(rspi, 0x00, RSPI_SPDR);
+               rspi_write16(rspi, DUMMY_DATA, RSPI_SPDR);
 
                if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
                        dev_err(&rspi->master->dev,
@@ -556,9 +587,9 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
        return 0;
 }
 
-static void qspi_receive_init(struct rspi_data *rspi)
+static void qspi_receive_init(const struct rspi_data *rspi)
 {
-       unsigned char spsr;
+       u8 spsr;
 
        spsr = rspi_read8(rspi, RSPI_SPSR);
        if (spsr & SPSR_SPRF)
@@ -575,7 +606,7 @@ static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
 
        qspi_receive_init(rspi);
 
-       data = (u8 *)t->rx_buf;
+       data = t->rx_buf;
        while (remain > 0) {
 
                if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
@@ -584,7 +615,7 @@ static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
                        return -ETIMEDOUT;
                }
                /* dummy write for generate clock */
-               rspi_write8(rspi, 0x00, RSPI_SPDR);
+               rspi_write8(rspi, DUMMY_DATA, RSPI_SPDR);
 
                if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
                        dev_err(&rspi->master->dev,
@@ -704,7 +735,7 @@ end_nomap:
        return ret;
 }
 
-static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
+static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
 {
        if (t->tx_buf && rspi->chan_tx)
                return 1;
@@ -771,10 +802,14 @@ static int rspi_setup(struct spi_device *spi)
 {
        struct rspi_data *rspi = spi_master_get_devdata(spi->master);
 
-       if (!spi->bits_per_word)
-               spi->bits_per_word = 8;
        rspi->max_speed_hz = spi->max_speed_hz;
 
+       rspi->spcmd = SPCMD_SSLKP;
+       if (spi->mode & SPI_CPOL)
+               rspi->spcmd |= SPCMD_CPOL;
+       if (spi->mode & SPI_CPHA)
+               rspi->spcmd |= SPCMD_CPHA;
+
        set_config_register(rspi, 8);
 
        return 0;
@@ -802,10 +837,10 @@ static void rspi_cleanup(struct spi_device *spi)
 
 static irqreturn_t rspi_irq(int irq, void *_sr)
 {
-       struct rspi_data *rspi = (struct rspi_data *)_sr;
-       unsigned long spsr;
+       struct rspi_data *rspi = _sr;
+       u8 spsr;
        irqreturn_t ret = IRQ_NONE;
-       unsigned char disable_irq = 0;
+       u8 disable_irq = 0;
 
        rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
        if (spsr & SPSR_SPRF)
@@ -825,7 +860,7 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
 static int rspi_request_dma(struct rspi_data *rspi,
                                      struct platform_device *pdev)
 {
-       struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
+       const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dma_cap_mask_t mask;
        struct dma_slave_config cfg;
@@ -887,11 +922,8 @@ static int rspi_remove(struct platform_device *pdev)
 {
        struct rspi_data *rspi = platform_get_drvdata(pdev);
 
-       spi_unregister_master(rspi->master);
        rspi_release_dma(rspi);
-       free_irq(platform_get_irq(pdev, 0), rspi);
-       clk_put(rspi->clk);
-       iounmap(rspi->addr);
+       clk_disable(rspi->clk);
 
        return 0;
 }
@@ -903,7 +935,7 @@ static int rspi_probe(struct platform_device *pdev)
        struct rspi_data *rspi;
        int ret, irq;
        char clk_name[16];
-       struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
+       const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
        const struct spi_ops *ops;
        const struct platform_device_id *id_entry = pdev->id_entry;
 
@@ -913,12 +945,6 @@ static int rspi_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "there is no set_config_register\n");
                return -ENODEV;
        }
-       /* get base addr */
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (unlikely(res == NULL)) {
-               dev_err(&pdev->dev, "invalid resource\n");
-               return -EINVAL;
-       }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
@@ -936,19 +962,20 @@ static int rspi_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, rspi);
        rspi->ops = ops;
        rspi->master = master;
-       rspi->addr = ioremap(res->start, resource_size(res));
-       if (rspi->addr == NULL) {
-               dev_err(&pdev->dev, "ioremap error.\n");
-               ret = -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       rspi->addr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(rspi->addr)) {
+               ret = PTR_ERR(rspi->addr);
                goto error1;
        }
 
        snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
-       rspi->clk = clk_get(&pdev->dev, clk_name);
+       rspi->clk = devm_clk_get(&pdev->dev, clk_name);
        if (IS_ERR(rspi->clk)) {
                dev_err(&pdev->dev, "cannot get clock\n");
                ret = PTR_ERR(rspi->clk);
-               goto error2;
+               goto error1;
        }
        clk_enable(rspi->clk);
 
@@ -965,37 +992,36 @@ static int rspi_probe(struct platform_device *pdev)
        master->setup = rspi_setup;
        master->transfer = rspi_transfer;
        master->cleanup = rspi_cleanup;
+       master->mode_bits = SPI_CPHA | SPI_CPOL;
 
-       ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
+       ret = devm_request_irq(&pdev->dev, irq, rspi_irq, 0,
+                              dev_name(&pdev->dev), rspi);
        if (ret < 0) {
                dev_err(&pdev->dev, "request_irq error\n");
-               goto error3;
+               goto error2;
        }
 
        rspi->irq = irq;
        ret = rspi_request_dma(rspi, pdev);
        if (ret < 0) {
                dev_err(&pdev->dev, "rspi_request_dma failed.\n");
-               goto error4;
+               goto error3;
        }
 
-       ret = spi_register_master(master);
+       ret = devm_spi_register_master(&pdev->dev, master);
        if (ret < 0) {
                dev_err(&pdev->dev, "spi_register_master error.\n");
-               goto error4;
+               goto error3;
        }
 
        dev_info(&pdev->dev, "probed\n");
 
        return 0;
 
-error4:
-       rspi_release_dma(rspi);
-       free_irq(irq, rspi);
 error3:
-       clk_put(rspi->clk);
+       rspi_release_dma(rspi);
 error2:
-       iounmap(rspi->addr);
+       clk_disable(rspi->clk);
 error1:
        spi_master_put(master);
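
The rspi changes above are a straightforward conversion to device-managed resources: devm_ioremap_resource() already validates a NULL resource, and devm_clk_get(), devm_request_irq() and devm_spi_register_master() are released automatically on driver detach, so remove() and the probe error path only have to undo what is still acquired by hand (the clock enable and the DMA channels). A minimal sketch of the same probe pattern, assuming a hypothetical "foo" platform device with one MMIO region, one clock and one IRQ (all foo_* names are illustrative, not from this patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct foo_priv {
		void __iomem *base;
		struct clk *clk;
	};

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		return IRQ_HANDLED;		/* hypothetical handler */
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;
		struct resource *res;
		int irq, ret;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		priv->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->base))
			return PTR_ERR(priv->base);	/* handles res == NULL too */

		priv->clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(priv->clk))
			return PTR_ERR(priv->clk);

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				       dev_name(&pdev->dev), priv);
		if (ret)
			return ret;

		platform_set_drvdata(pdev, priv);
		return 0;	/* everything above is released automatically */
	}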
 
index 9eda21d739c627706a40e101f5ea0b9eed9e4fab..1edffed9e098ee775a5ed7bbcc2b111fc796652d 100644 (file)
@@ -254,9 +254,6 @@ error:
 
 static int sc18is602_setup(struct spi_device *spi)
 {
-       if (!spi->bits_per_word)
-               spi->bits_per_word = 8;
-
        if (spi->mode & ~(SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST))
                return -EINVAL;
 
@@ -319,7 +316,7 @@ static int sc18is602_probe(struct i2c_client *client,
        master->transfer_one_message = sc18is602_transfer_one;
        master->dev.of_node = np;
 
-       error = spi_register_master(master);
+       error = devm_spi_register_master(dev, master);
        if (error)
                goto error_reg;
 
@@ -330,16 +327,6 @@ error_reg:
        return error;
 }
 
-static int sc18is602_remove(struct i2c_client *client)
-{
-       struct sc18is602 *hw = i2c_get_clientdata(client);
-       struct spi_master *master = hw->master;
-
-       spi_unregister_master(master);
-
-       return 0;
-}
-
 static const struct i2c_device_id sc18is602_id[] = {
        { "sc18is602", sc18is602 },
        { "sc18is602b", sc18is602b },
@@ -353,7 +340,6 @@ static struct i2c_driver sc18is602_driver = {
                .name = "sc18is602",
        },
        .probe = sc18is602_probe,
-       .remove = sc18is602_remove,
        .id_table = sc18is602_id,
 };
 
index 292567ab4c6c41df0eb981c8692d0454e98bf8ed..40179d20196679d36d64b23ac6684d7f5dfec19e 100644 (file)
@@ -353,4 +353,4 @@ module_platform_driver(hspi_driver);
 MODULE_DESCRIPTION("SuperH HSPI bus driver");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
-MODULE_ALIAS("platform:sh_spi");
+MODULE_ALIAS("platform:sh-hspi");
index c74298cf70e2406d8972f4fe75cc2fcd59a15864..ac8795f2e7009a96f517043520d261c32a6ad7b4 100644 (file)
@@ -152,7 +152,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
        size_t k;
 
        if (!WARN_ON(!spi_hz || !parent_rate))
-               div = parent_rate / spi_hz;
+               div = DIV_ROUND_UP(parent_rate, spi_hz);
 
        /* TODO: make more fine grained */
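
Rounding the divider up rather than down guarantees the generated clock never exceeds the rate the SPI device asked for. As a purely illustrative example: with parent_rate = 66 MHz and spi_hz = 26 MHz, plain integer division gives div = 2 (33 MHz, faster than requested), while DIV_ROUND_UP(66 MHz, 26 MHz) = 3 yields 22 MHz, which stays within the requested limit.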
 
index c120a70094f20f131cf0ef5e7a3861e57f9eba39..86a17d60a68c0f24b495c7257460d2fe558d2dcb 100644 (file)
@@ -358,9 +358,6 @@ static int spi_sh_setup(struct spi_device *spi)
 {
        struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
 
-       if (!spi->bits_per_word)
-               spi->bits_per_word = 8;
-
        pr_debug("%s: enter\n", __func__);
 
        spi_sh_write(ss, 0xfe, SPI_SH_CR1);     /* SPI cycle stop */
index ed5e501c465276b6d39318818fa5d668d2685668..e430689c3837360f02e5e6d7e70a40ceae712777 100644 (file)
@@ -536,16 +536,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 
 static int spi_sirfsoc_setup(struct spi_device *spi)
 {
-       struct sirfsoc_spi *sspi;
-
        if (!spi->max_speed_hz)
                return -EINVAL;
 
-       sspi = spi_master_get_devdata(spi->master);
-
-       if (!spi->bits_per_word)
-               spi->bits_per_word = 8;
-
        return spi_sirfsoc_setup_transfer(spi, NULL);
 }
 
index 4396bd44854063d9488cfd5dadc863c40c9e1dbf..286cf8d6764beac9f7e705692ffc3c055c836c3d 100644 (file)
@@ -417,10 +417,8 @@ out:
 static int ti_qspi_runtime_resume(struct device *dev)
 {
        struct ti_qspi      *qspi;
-       struct spi_master       *master;
 
-       master = dev_get_drvdata(dev);
-       qspi = spi_master_get_devdata(master);
+       qspi = dev_get_drvdata(dev);
        ti_qspi_restore_ctx(qspi);
 
        return 0;
@@ -516,13 +514,9 @@ free_master:
 
 static int ti_qspi_remove(struct platform_device *pdev)
 {
-       struct spi_master *master;
-       struct ti_qspi *qspi;
+       struct ti_qspi *qspi = platform_get_drvdata(pdev);
        int ret;
 
-       master = platform_get_drvdata(pdev);
-       qspi = spi_master_get_devdata(master);
-
        ret = pm_runtime_get_sync(qspi->dev);
        if (ret < 0) {
                dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
@@ -534,8 +528,6 @@ static int ti_qspi_remove(struct platform_device *pdev)
        pm_runtime_put(qspi->dev);
        pm_runtime_disable(&pdev->dev);
 
-       spi_unregister_master(master);
-
        return 0;
 }
 
index 446131308acb26a1fac3ed8bfc1aad9e287404d5..9322de9e13fbbf4263832838821bed04c7694822 100644 (file)
@@ -466,12 +466,6 @@ static void pch_spi_reset(struct spi_master *master)
 
 static int pch_spi_setup(struct spi_device *pspi)
 {
-       /* check bits per word */
-       if (pspi->bits_per_word == 0) {
-               pspi->bits_per_word = 8;
-               dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__);
-       }
-
        /* Check baud rate setting */
        /* if baud rate of chip is greater than
           max we can support, return error */
index 349ebba4b1992afdf703e689022dfbf67b07f626..63613a96233c3f1eddc77846affea03ffe92a852 100644 (file)
@@ -370,6 +370,17 @@ static void spi_dev_set_name(struct spi_device *spi)
                     spi->chip_select);
 }
 
+static int spi_dev_check(struct device *dev, void *data)
+{
+       struct spi_device *spi = to_spi_device(dev);
+       struct spi_device *new_spi = data;
+
+       if (spi->master == new_spi->master &&
+           spi->chip_select == new_spi->chip_select)
+               return -EBUSY;
+       return 0;
+}
+
 /**
  * spi_add_device - Add spi_device allocated with spi_alloc_device
  * @spi: spi_device to register
@@ -384,7 +395,6 @@ int spi_add_device(struct spi_device *spi)
        static DEFINE_MUTEX(spi_add_lock);
        struct spi_master *master = spi->master;
        struct device *dev = master->dev.parent;
-       struct device *d;
        int status;
 
        /* Chipselects are numbered 0..max; validate. */
@@ -404,12 +414,10 @@ int spi_add_device(struct spi_device *spi)
         */
        mutex_lock(&spi_add_lock);
 
-       d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
-       if (d != NULL) {
+       status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
+       if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
-               put_device(d);
-               status = -EBUSY;
                goto done;
        }
 
@@ -591,8 +599,10 @@ static int spi_transfer_one_message(struct spi_master *master,
                        goto out;
                }
 
-               if (ret > 0)
+               if (ret > 0) {
+                       ret = 0;
                        wait_for_completion(&master->xfer_completion);
+               }
 
                trace_spi_transfer_stop(msg, xfer);
 
@@ -632,7 +642,7 @@ out:
  *
  * Called by SPI drivers using the core transfer_one_message()
  * implementation to notify it that the current interrupt driven
- * transfer has finised and the next one may be scheduled.
+ * transfer has finished and the next one may be scheduled.
  */
 void spi_finalize_current_transfer(struct spi_master *master)
 {
@@ -685,7 +695,7 @@ static void spi_pump_messages(struct kthread_work *work)
        }
        /* Extract head of queue */
        master->cur_msg =
-           list_entry(master->queue.next, struct spi_message, queue);
+               list_first_entry(&master->queue, struct spi_message, queue);
 
        list_del_init(&master->cur_msg->queue);
        if (master->busy)
@@ -735,7 +745,9 @@ static void spi_pump_messages(struct kthread_work *work)
        ret = master->transfer_one_message(master, master->cur_msg);
        if (ret) {
                dev_err(&master->dev,
-                       "failed to transfer one message from queue\n");
+                       "failed to transfer one message from queue: %d\n", ret);
+               master->cur_msg->status = ret;
+               spi_finalize_current_message(master);
                return;
        }
 }
@@ -791,11 +803,8 @@ struct spi_message *spi_get_next_queued_message(struct spi_master *master)
 
        /* get a pointer to the next message, if any */
        spin_lock_irqsave(&master->queue_lock, flags);
-       if (list_empty(&master->queue))
-               next = NULL;
-       else
-               next = list_entry(master->queue.next,
-                                 struct spi_message, queue);
+       next = list_first_entry_or_null(&master->queue, struct spi_message,
+                                       queue);
        spin_unlock_irqrestore(&master->queue_lock, flags);
 
        return next;
@@ -1596,15 +1605,11 @@ int spi_setup(struct spi_device *spi)
 }
 EXPORT_SYMBOL_GPL(spi_setup);
 
-static int __spi_async(struct spi_device *spi, struct spi_message *message)
+static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 {
        struct spi_master *master = spi->master;
        struct spi_transfer *xfer;
 
-       message->spi = spi;
-
-       trace_spi_message_submit(message);
-
        if (list_empty(&message->transfers))
                return -EINVAL;
        if (!message->complete)
@@ -1667,9 +1672,8 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
                if (xfer->rx_buf && !xfer->rx_nbits)
                        xfer->rx_nbits = SPI_NBITS_SINGLE;
                /* check transfer tx/rx_nbits:
-                * 1. keep the value is not out of single, dual and quad
-                * 2. keep tx/rx_nbits is contained by mode in spi_device
-                * 3. if SPI_3WIRE, tx/rx_nbits should be in single
+                * 1. check the value matches one of single, dual and quad
+                * 2. check tx/rx_nbits match the mode in spi_device
                 */
                if (xfer->tx_buf) {
                        if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
@@ -1682,9 +1686,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
                        if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
                                !(spi->mode & SPI_TX_QUAD))
                                return -EINVAL;
-                       if ((spi->mode & SPI_3WIRE) &&
-                               (xfer->tx_nbits != SPI_NBITS_SINGLE))
-                               return -EINVAL;
                }
                /* check transfer rx_nbits */
                if (xfer->rx_buf) {
@@ -1698,13 +1699,22 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
                        if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
                                !(spi->mode & SPI_RX_QUAD))
                                return -EINVAL;
-                       if ((spi->mode & SPI_3WIRE) &&
-                               (xfer->rx_nbits != SPI_NBITS_SINGLE))
-                               return -EINVAL;
                }
        }
 
        message->status = -EINPROGRESS;
+
+       return 0;
+}
+
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+       struct spi_master *master = spi->master;
+
+       message->spi = spi;
+
+       trace_spi_message_submit(message);
+
        return master->transfer(spi, message);
 }
 
@@ -1743,6 +1753,10 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
        int ret;
        unsigned long flags;
 
+       ret = __spi_validate(spi, message);
+       if (ret != 0)
+               return ret;
+
        spin_lock_irqsave(&master->bus_lock_spinlock, flags);
 
        if (master->bus_lock_flag)
@@ -1791,6 +1805,10 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
        int ret;
        unsigned long flags;
 
+       ret = __spi_validate(spi, message);
+       if (ret != 0)
+               return ret;
+
        spin_lock_irqsave(&master->bus_lock_spinlock, flags);
 
        ret = __spi_async(spi, message);
index 53fee2f9a498866c6a19d3d6c3ca1d078fd7206e..8dfdd2732bdc329b3865c010d0afdb8e1e248337 100644 (file)
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
        return 0;
 }
 
-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
+                           void *accel_priv)
 {
        return ClassifyPacket(netdev_priv(dev), skb);
 }
index 235d2b1ec593c4dfba7a83c4eb5fd0f578e0c5c4..eedffed17e391d3243443c443ff9338bf494c440 100644 (file)
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 }
 
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
+static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
+                               void *accel_priv)
 {
        return (u16)smp_processor_id();
 }
index 17659bb04befc24fabded5439be74aba9fdcb5d1..dd69e344e4099c852ffd0bd5ef11ef859a0e274c 100644 (file)
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
        return dscp >> 5;
 }
 
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+                           void *accel_priv)
 {
        struct adapter  *padapter = rtw_netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
index aa3397620342d20beba92abc732845273d3663ca..2c29db6a247e4788256461100bed11ac79648d26 100644 (file)
@@ -477,9 +477,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
                        const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
 extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
 extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
-extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
-               const unsigned char *path,
-               struct cifs_sb_info *cifs_sb, unsigned int xid);
+extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
+                             struct cifs_sb_info *cifs_sb,
+                             struct cifs_fattr *fattr,
+                             const unsigned char *path);
 extern int mdfour(unsigned char *, unsigned char *, int);
 extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
                        const struct nls_table *codepage);
index 124aa0230c1b8738edb8d04ca5d764f0a6cc5b12..d707edb6b852695f0de3993c9ef9cd12a3f909f3 100644 (file)
@@ -4010,7 +4010,7 @@ QFileInfoRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc) {
-               cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+               cifs_dbg(FYI, "Send error in QFileInfo = %d\n", rc);
        } else {                /* decode response */
                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
@@ -4179,7 +4179,7 @@ UnixQFileInfoRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc) {
-               cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+               cifs_dbg(FYI, "Send error in UnixQFileInfo = %d\n", rc);
        } else {                /* decode response */
                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
@@ -4263,7 +4263,7 @@ UnixQPathInfoRetry:
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc) {
-               cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+               cifs_dbg(FYI, "Send error in UnixQPathInfo = %d\n", rc);
        } else {                /* decode response */
                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
index 11ff5f116b20e663bf5e0428a940b437d9b232aa..a514e0a65f69b5936fbd60934b33938666f70dd0 100644 (file)
@@ -193,7 +193,7 @@ check_name(struct dentry *direntry)
 static int
 cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
               struct tcon_link *tlink, unsigned oflags, umode_t mode,
-              __u32 *oplock, struct cifs_fid *fid, int *created)
+              __u32 *oplock, struct cifs_fid *fid)
 {
        int rc = -ENOENT;
        int create_options = CREATE_NOT_DIR;
@@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
                                .device = 0,
                };
 
-               *created |= FILE_CREATED;
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
                        args.uid = current_fsuid();
                        if (inode->i_mode & S_ISGID)
@@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
        cifs_add_pending_open(&fid, tlink, &open);
 
        rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
-                           &oplock, &fid, opened);
+                           &oplock, &fid);
 
        if (rc) {
                cifs_del_pending_open(&open);
                goto out;
        }
 
+       if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+               *opened |= FILE_CREATED;
+
        rc = finish_open(file, direntry, generic_file_open, opened);
        if (rc) {
                if (server->ops->close)
@@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
        struct TCP_Server_Info *server;
        struct cifs_fid fid;
        __u32 oplock;
-       int created = FILE_CREATED;
 
        cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n",
                 inode, direntry->d_name.name, direntry);
@@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
                server->ops->new_lease_key(&fid);
 
        rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
-                           &oplock, &fid, &created);
+                           &oplock, &fid);
        if (!rc && server->ops->close)
                server->ops->close(xid, tcon, &fid);
 
index 36f9ebb93ceba676c363cdf7b973f3936a9dde8f..49719b8228e58bd44b373fd4b39d844be9e2043b 100644 (file)
@@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 
        /* check for Minshall+French symlinks */
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
-               int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+               int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
+                                              full_path);
                if (tmprc)
                        cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
        }
@@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
 
        /* check for Minshall+French symlinks */
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
-               tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+               tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
+                                          full_path);
                if (tmprc)
                        cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
        }
index cc0234710ddbb780cae5037b717a93d8a7d3d54d..92aee08483a52e100011fb05e1fcfc5255b340ac 100644 (file)
@@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
 
 
 int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
-                  const unsigned char *path,
-                  struct cifs_sb_info *cifs_sb, unsigned int xid)
+CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
+                  struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+                  const unsigned char *path)
 {
-       int rc = 0;
+       int rc;
        u8 *buf = NULL;
        unsigned int link_len = 0;
        unsigned int bytes_read = 0;
-       struct cifs_tcon *ptcon;
 
        if (!CIFSCouldBeMFSymlink(fattr))
                /* it's not a symlink */
                return 0;
 
        buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
-       if (!buf) {
-               rc = -ENOMEM;
-               goto out;
-       }
+       if (!buf)
+               return -ENOMEM;
 
-       ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
-       if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
-               rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
-                                                &bytes_read, cifs_sb, xid);
+       if (tcon->ses->server->ops->query_mf_symlink)
+               rc = tcon->ses->server->ops->query_mf_symlink(path, buf,
+                                               &bytes_read, cifs_sb, xid);
        else
-               goto out;
+               rc = -ENOSYS;
 
-       if (rc != 0)
+       if (rc)
                goto out;
 
        if (bytes_read == 0) /* not a symlink */
index 8b5e2584c840903bc9aeffab1a31aa0a2149b595..af903128891cfa0365f7fac36ba8fc052cc584a4 100644 (file)
@@ -1907,10 +1907,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                        }
                }
        }
-       if (op == EPOLL_CTL_DEL && is_file_epoll(tf.file)) {
-               tep = tf.file->private_data;
-               mutex_lock_nested(&tep->mtx, 1);
-       }
 
        /*
         * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
index 4410cc3d6ee2cc817f947e09475e565cebb07211..3384dc4bed4034921beffb84783327024e4cf22e 100644 (file)
@@ -4218,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
         */
        map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
        newex.ee_block = cpu_to_le32(map->m_lblk);
-       cluster_offset = EXT4_LBLK_CMASK(sbi, map->m_lblk);
+       cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
 
        /*
         * If we are doing bigalloc, check to see if the extent returned
index b7fc035a6943cc40f649f1cca7be5858fbcf1cbc..73f3e4ee403793e1ae39c75b5d652f64c6a4b52a 100644 (file)
@@ -986,6 +986,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
+       struct address_space *mapping = inode->i_mapping;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;
@@ -1006,6 +1007,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */
 
+       /*
+        * Now since we are holding a deferred (CW) lock at this point, you
+        * might be wondering why this is ever needed. There is a case however
+        * where we've granted a deferred local lock against a cached exclusive
+        * glock. That is ok provided all granted local locks are deferred, but
+        * it also means that it is possible to encounter pages which are
+        * cached and possibly also mapped. So here we check for that and sort
+        * them out ahead of the dio. The glock state machine will take care of
+        * everything else.
+        *
+        * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
+        * the first place, mapping->nrpages will always be zero.
+        */
+       if (mapping->nrpages) {
+               loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
+               loff_t len = iov_length(iov, nr_segs);
+               loff_t end = PAGE_ALIGN(offset + len) - 1;
+
+               rv = 0;
+               if (len == 0)
+                       goto out;
+               if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+                       unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
+               rv = filemap_write_and_wait_range(mapping, lstart, end);
+               if (rv)
+                       return rv;
+               truncate_inode_pages_range(mapping, lstart, end);
+       }
+
        rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, gfs2_get_block_direct,
                                  NULL, NULL, 0);
index c8420f7e4db604da3663da61c5ae4556b6439783..6f7a47c052592145d7601c0e9ab0341c4dd8c4d7 100644 (file)
@@ -1655,6 +1655,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
        struct task_struct *gh_owner = NULL;
        char flags_buf[32];
 
+       rcu_read_lock();
        if (gh->gh_owner_pid)
                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
        gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
@@ -1664,6 +1665,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
                       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
                       gh_owner ? gh_owner->comm : "(ended)",
                       (void *)gh->gh_ip);
+       rcu_read_unlock();
        return 0;
 }
 
index db908f697139cfffbca462d3a7528e13139576b5..f88dcd92501098e4a9bca28724da34e8df5a0154 100644 (file)
@@ -192,8 +192,11 @@ static void inode_go_sync(struct gfs2_glock *gl)
 
        if (ip && !S_ISREG(ip->i_inode.i_mode))
                ip = NULL;
-       if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
-               unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+       if (ip) {
+               if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+                       unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+               inode_dio_wait(&ip->i_inode);
+       }
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return;
 
@@ -410,6 +413,9 @@ static int inode_go_lock(struct gfs2_holder *gh)
                        return error;
        }
 
+       if (gh->gh_state != LM_ST_DEFERRED)
+               inode_dio_wait(&ip->i_inode);
+
        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_state == LM_ST_EXCLUSIVE)) {
index 610613fb65b552dccfdf9853ac96d2f667bd36d4..9dcb9777a5f80eb32bcfce88aca3a8026776f047 100644 (file)
@@ -551,10 +551,10 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
        struct buffer_head *bh = bd->bd_bh;
        struct gfs2_glock *gl = bd->bd_gl;
 
-       gfs2_remove_from_ail(bd);
-       bd->bd_bh = NULL;
        bh->b_private = NULL;
        bd->bd_blkno = bh->b_blocknr;
+       gfs2_remove_from_ail(bd); /* drops ref on bh */
+       bd->bd_bh = NULL;
        bd->bd_ops = &gfs2_revoke_lops;
        sdp->sd_log_num_revoke++;
        atomic_inc(&gl->gl_revokes);
index 932415050540e2a1bdefc6d957e68ef7a0d82d01..52f177be3bf861309ed2439d33da7617bba8dba8 100644 (file)
@@ -258,6 +258,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
        struct address_space *mapping = bh->b_page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct gfs2_bufdata *bd = bh->b_private;
+       int was_pinned = 0;
 
        if (test_clear_buffer_pinned(bh)) {
                trace_gfs2_pin(bd, 0);
@@ -273,12 +274,16 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
                        tr->tr_num_databuf_rm++;
                }
                tr->tr_touched = 1;
+               was_pinned = 1;
                brelse(bh);
        }
        if (bd) {
                spin_lock(&sdp->sd_ail_lock);
                if (bd->bd_tr) {
                        gfs2_trans_add_revoke(sdp, bd);
+               } else if (was_pinned) {
+                       bh->b_private = NULL;
+                       kmem_cache_free(gfs2_bufdata_cachep, bd);
                }
                spin_unlock(&sdp->sd_ail_lock);
        }
index 82303b4749582cd3c00d402a9f42b9972b1de677..52fa88314f5cdf8ef98ed95e44791a5065cbfccd 100644 (file)
@@ -1366,8 +1366,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
        if (IS_ERR(s))
                goto error_bdev;
 
-       if (s->s_root)
+       if (s->s_root) {
+               /*
+                * s_umount nests inside bd_mutex during
+                * __invalidate_device().  blkdev_put() acquires
+                * bd_mutex and can't be called under s_umount.  Drop
+                * s_umount temporarily.  This is safe as we're
+                * holding an active reference.
+                */
+               up_write(&s->s_umount);
                blkdev_put(bdev, mode);
+               down_write(&s->s_umount);
+       }
 
        memset(&args, 0, sizeof(args));
        args.ar_quota = GFS2_QUOTA_DEFAULT;
index 739e0a52dedadea67eb595f4b03619ce3e3383a3..5549d69ddb45a2038ece8e24085e0f9ef6ce9984 100644 (file)
@@ -110,7 +110,7 @@ xfs_attr3_rmt_verify(
        if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
                return false;
        if (be32_to_cpu(rmt->rm_offset) +
-                               be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX)
+                               be32_to_cpu(rmt->rm_bytes) > XATTR_SIZE_MAX)
                return false;
        if (rmt->rm_owner == 0)
                return false;
index 1394106ed22db9db61542f5183023f98ef1ca817..82e0dab46ee52cc991a7130d8f671e011eef10a3 100644 (file)
@@ -287,6 +287,7 @@ xfs_bmapi_allocate(
        INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
        queue_work(xfs_alloc_wq, &args->work);
        wait_for_completion(&done);
+       destroy_work_on_stack(&args->work);
        return args->result;
 }
 
index c602c7718421ded2f2bbe50f1e76edc39ac2b6f2..ddabed1f51c22227df9fdc1a7d050ca3b395b470 100644 (file)
@@ -169,7 +169,8 @@ struct acpi_device_flags {
        u32 ejectable:1;
        u32 power_manageable:1;
        u32 match_driver:1;
-       u32 reserved:27;
+       u32 no_hotplug:1;
+       u32 reserved:26;
 };
 
 /* File System */
@@ -344,6 +345,7 @@ extern struct kobject *acpi_kobj;
 extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
 void acpi_bus_private_data_handler(acpi_handle, void *);
 int acpi_bus_get_private_data(acpi_handle, void **);
+void acpi_bus_no_hotplug(acpi_handle handle);
 extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
 extern int register_acpi_notifier(struct notifier_block *);
 extern int unregister_acpi_notifier(struct notifier_block *);
index 87578c109e4869bbecf5cce3a7530a0c2fb80637..49376aec2fbb8a9e054b9605fd9de36e4cd39c5f 100644 (file)
        {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
-       {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+       {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
index d9a550bf3e8e8a770c4198bf96bb2b81e34a3b6e..ce2a1f5f9a1e0226d933d28606823239e4467526 100644 (file)
@@ -769,7 +769,8 @@ struct netdev_phys_port_id {
  *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *     Required can not be NULL.
  *
- * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+ *                         void *accel_priv);
 *     Called to decide which queue to use when device supports multiple
  *     transmit queues.
  *
@@ -990,7 +991,8 @@ struct net_device_ops {
        netdev_tx_t             (*ndo_start_xmit) (struct sk_buff *skb,
                                                   struct net_device *dev);
        u16                     (*ndo_select_queue)(struct net_device *dev,
-                                                   struct sk_buff *skb);
+                                                   struct sk_buff *skb,
+                                                   void *accel_priv);
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
        void                    (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 }
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                   struct sk_buff *skb);
+                                   struct sk_buff *skb,
+                                   void *accel_priv);
 u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
@@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct sk_buff *newskb);
 int dev_queue_xmit(struct sk_buff *skb);
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
@@ -1912,6 +1916,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
        return dev->header_ops->parse(skb, haddr);
 }
 
+static inline int dev_rebuild_header(struct sk_buff *skb)
+{
+       const struct net_device *dev = skb->dev;
+
+       if (!dev->header_ops || !dev->header_ops->rebuild)
+               return 0;
+       return dev->header_ops->rebuild(skb);
+}
+
 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 static inline int unregister_gifconf(unsigned int family)
@@ -2417,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier);
 int dev_get_phys_port_id(struct net_device *dev,
                         struct netdev_phys_port_id *ppid);
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                       struct netdev_queue *txq, void *accel_priv);
+                       struct netdev_queue *txq);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 
 extern int             netdev_budget;
@@ -3008,6 +3021,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
        dev->gso_max_size = size;
 }
 
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+                                       int pulled_hlen, u16 mac_offset,
+                                       int mac_len)
+{
+       skb->protocol = protocol;
+       skb->encapsulation = 1;
+       skb_push(skb, pulled_hlen);
+       skb_reset_transport_header(skb);
+       skb->mac_header = mac_offset;
+       skb->network_header = skb->mac_header + mac_len;
+       skb->mac_len = mac_len;
+}
+
 static inline bool netif_is_macvlan(struct net_device *dev)
 {
        return dev->priv_flags & IFF_MACVLAN;
index 939428ad25acb506297c1726faaee83d60d047c4..8e3e66ac0a5215d221042e15e631fb2fe2fb51d1 100644 (file)
@@ -24,6 +24,11 @@ extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
 #ifdef CONFIG_PROVE_LOCKING
 extern int lockdep_rtnl_is_held(void);
+#else
+static inline int lockdep_rtnl_is_held(void)
+{
+       return 1;
+}
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 
 /**
index 215b5ea1cb302c43f0e8b9532fbe3ce59ff0f612..6f69b3f914fbf94c2d6d42beea62748a29a42d08 100644 (file)
@@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
        skb->mac_header += offset;
 }
 
+static inline void skb_pop_mac_header(struct sk_buff *skb)
+{
+       skb->mac_header = skb->network_header;
+}
+
 static inline void skb_probe_transport_header(struct sk_buff *skb,
                                              const int offset_hint)
 {
@@ -2526,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb)
  * Ethernet MAC Drivers should call this function in their hard_xmit()
  * function immediately before giving the sk_buff to the MAC hardware.
  *
+ * Specifically, one should make absolutely sure that this function is
+ * called before TX completion of this packet can trigger.  Otherwise
+ * the packet could potentially already be freed.
+ *
  * @skb: A socket buffer.
  */
 static inline void skb_tx_timestamp(struct sk_buff *skb)
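
The paragraph added to the skb_tx_timestamp() comment is about ordering: once a descriptor is visible to the hardware, the TX completion path may free the skb at any moment, so the timestamp must be taken first. A hedged sketch of the intended call order in a driver's transmit routine (the foo_* helpers are hypothetical):

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_ring *ring = netdev_priv(dev);

		foo_fill_tx_descriptor(ring, skb);	/* hypothetical: map and queue */
		skb_tx_timestamp(skb);			/* before the HW can complete it */
		foo_ring_doorbell(ring);		/* hand the packet to hardware */
		return NETDEV_TX_OK;
	}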
index 8c62ba74dd91d9800439038d86298f2e2477f1fe..a1d4ca290862d766d5460f198503ae4530df3f45 100644 (file)
@@ -75,6 +75,7 @@ struct spi_device {
        struct spi_master       *master;
        u32                     max_speed_hz;
        u8                      chip_select;
+       u8                      bits_per_word;
        u16                     mode;
 #define        SPI_CPHA        0x01                    /* clock phase */
 #define        SPI_CPOL        0x02                    /* clock polarity */
@@ -92,7 +93,6 @@ struct spi_device {
 #define        SPI_TX_QUAD     0x200                   /* transmit with 4 wires */
 #define        SPI_RX_DUAL     0x400                   /* receive with 2 wires */
 #define        SPI_RX_QUAD     0x800                   /* receive with 4 wires */
-       u8                      bits_per_word;
        int                     irq;
        void                    *controller_state;
        void                    *controller_data;
@@ -277,15 +277,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @unprepare_transfer_hardware: there are currently no more messages on the
  *     queue so the subsystem notifies the driver that it may relax the
  *     hardware by issuing this call
- * @set_cs: assert or deassert chip select, true to assert.  May be called
+ * @set_cs: set the logic level of the chip select line.  May be called
  *          from interrupt context.
  * @prepare_message: set up the controller to transfer a single message,
  *                   for example doing DMA mapping.  Called from threaded
  *                   context.
- * @transfer_one: transfer a single spi_transfer. When the
- *               driver is finished with this transfer it must call
- *               spi_finalize_current_transfer() so the subsystem can issue
- *                the next transfer
+ * @transfer_one: transfer a single spi_transfer.
+ *                  - return 0 if the transfer is finished,
+ *                  - return 1 if the transfer is still in progress. When
+ *                    the driver is finished with this transfer it must
+ *                    call spi_finalize_current_transfer() so the subsystem
+ *                    can issue the next transfer
  * @unprepare_message: undo any work done by prepare_message().
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *     number. Any individual value may be -ENOENT for CS lines that
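
The new wording for @transfer_one spells out the contract that the queued-core change earlier in this merge relies on: return 0 when the transfer already completed inside the callback, return 1 when it is still running, in which case the core blocks on xfer_completion until the driver calls spi_finalize_current_transfer(). A minimal sketch of an interrupt-driven driver using that contract (foo_* names and helpers are hypothetical):

	static int foo_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
	{
		struct foo_priv *priv = spi_master_get_devdata(master);

		foo_hw_start_transfer(priv, xfer);	/* hypothetical: kick FIFO/DMA */
		return 1;				/* still in progress, core waits */
	}

	static irqreturn_t foo_complete_irq(int irq, void *dev_id)
	{
		struct spi_master *master = dev_id;

		spi_finalize_current_transfer(master);	/* wakes the waiting core */
		return IRQ_HANDLED;
	}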
@@ -576,8 +578,8 @@ struct spi_transfer {
        dma_addr_t      rx_dma;
 
        unsigned        cs_change:1;
-       u8              tx_nbits;
-       u8              rx_nbits;
+       unsigned        tx_nbits:3;
+       unsigned        rx_nbits:3;
 #define        SPI_NBITS_SINGLE        0x01 /* 1bit transfer */
 #define        SPI_NBITS_DUAL          0x02 /* 2bits transfer */
 #define        SPI_NBITS_QUAD          0x04 /* 4bits transfer */
@@ -847,7 +849,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
        ssize_t                 status;
        u16                     result;
 
-       status = spi_write_then_read(spi, &cmd, 1, (u8 *) &result, 2);
+       status = spi_write_then_read(spi, &cmd, 1, &result, 2);
 
        /* return negative errno or unsigned value */
        return (status < 0) ? status : result;
index 31e2de7d57c5d6c210e1b445b19551407fd31b6c..c0f0a13ed8183e58029aec1d12097aa1efb085f0 100644 (file)
 #define LLC_S_PF_IS_1(pdu)     ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
 
 #define PDU_SUPV_GET_Nr(pdu)   ((pdu->ctrl_2 & 0xFE) >> 1)
-#define PDU_GET_NEXT_Vr(sn)    (++sn & ~LLC_2_SEQ_NBR_MODULO)
+#define PDU_GET_NEXT_Vr(sn)    (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)
 
 /* FRMR information field macros */
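
The PDU_GET_NEXT_Vr() fix above is a classic macro-hygiene change: the old body pre-incremented its argument, so merely evaluating the macro modified the caller's sequence-number variable. The new body is a pure expression over (sn). Illustration (hypothetical caller, not from this patch):

	u8 vr = llc_sk(sk)->vR;		/* hypothetical current receive state */
	u8 next;

	/* old: expanded to (++vr & ~LLC_2_SEQ_NBR_MODULO); vr changed as a side effect */
	next = PDU_GET_NEXT_Vr(vr);	/* new form: vr is left untouched */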
 
index 67b5d0068273e64ac94125861699e790a9d06e9d..0a248b323d875c08ca456d62ed46c028781e9432 100644 (file)
@@ -1046,9 +1046,6 @@ struct sctp_outq {
 
        /* Corked? */
        char cork;
-
-       /* Is this structure empty?  */
-       char empty;
 };
 
 void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
index 2f3f7ea8c77b8a653b0972302ead53319f13e5fc..fe421e8a431bcf91dd1d5e845b5d06ce81bbe23d 100644 (file)
@@ -983,6 +983,8 @@ struct drm_radeon_cs {
 #define RADEON_INFO_SI_CP_DMA_COMPUTE  0x17
 /* CIK macrotile mode array */
 #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY   0x18
+/* query the number of render backends */
+#define RADEON_INFO_SI_BACKEND_ENABLED_MASK    0x19
 
 
 struct drm_radeon_info {
index ecc88592ecbe94acfbacd1eda9b5907a832470da..bd24470d24a2c7a3145af21094e03f67b7ce0761 100644 (file)
@@ -464,7 +464,8 @@ struct input_keymap_entry {
 #define KEY_BRIGHTNESS_ZERO    244     /* brightness off, use ambient */
 #define KEY_DISPLAY_OFF                245     /* display device to off state */
 
-#define KEY_WIMAX              246
+#define KEY_WWAN               246     /* Wireless WAN (LTE, UMTS, GSM, etc.) */
+#define KEY_WIMAX              KEY_WWAN
 #define KEY_RFKILL             247     /* Key that controls all radios */
 
 #define KEY_MICMUTE            248     /* Mute / unmute the microphone */
index 5bff0814776870e2974e6e436ad99f99f264e05a..bbc4d660221ac4e514e24d49ce140b25ad5364d2 100644 (file)
@@ -208,9 +208,10 @@ get_write_lock:
                if (mapping_cap_account_dirty(mapping)) {
                        unsigned long addr;
                        struct file *file = get_file(vma->vm_file);
+                       /* mmap_region may free vma; grab the info now */
+                       vm_flags = vma->vm_flags;
 
-                       addr = mmap_region(file, start, size,
-                                       vma->vm_flags, pgoff);
+                       addr = mmap_region(file, start, size, vm_flags, pgoff);
                        fput(file);
                        if (IS_ERR_VALUE(addr)) {
                                err = addr;
@@ -218,7 +219,7 @@ get_write_lock:
                                BUG_ON(addr != start);
                                err = 0;
                        }
-                       goto out;
+                       goto out_freed;
                }
                mutex_lock(&mapping->i_mmap_mutex);
                flush_dcache_mmap_lock(mapping);
@@ -253,6 +254,7 @@ get_write_lock:
 out:
        if (vma)
                vm_flags = vma->vm_flags;
+out_freed:
        if (likely(!has_write_lock))
                up_read(&mm->mmap_sem);
        else
index 7de1bf85f6833422e16161445b71e328fad2e1f6..95d1acb0f3d237f55f5f1d6405de5774f0f9fc6a 100644 (file)
@@ -883,9 +883,6 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                goto out_unlock;
        }
 
-       /* mmap_sem prevents this happening but warn if that changes */
-       WARN_ON(pmd_trans_migrating(pmd));
-
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(src_ptl);
@@ -1157,7 +1154,7 @@ alloc:
                new_page = NULL;
 
        if (unlikely(!new_page)) {
-               if (is_huge_zero_pmd(orig_pmd)) {
+               if (!page) {
                        ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
                                        address, pmd, orig_pmd, haddr);
                } else {
@@ -1184,7 +1181,7 @@ alloc:
 
        count_vm_event(THP_FAULT_ALLOC);
 
-       if (is_huge_zero_pmd(orig_pmd))
+       if (!page)
                clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
        else
                copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
@@ -1210,7 +1207,7 @@ alloc:
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache_pmd(vma, address, pmd);
-               if (is_huge_zero_pmd(orig_pmd)) {
+               if (!page) {
                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                        put_huge_zero_page();
                } else {
index bf5e8945714944f896e2dc79b33b75a491da4794..7f1a356153c013349ce520c7763002838c02fb76 100644 (file)
@@ -338,7 +338,7 @@ struct mem_cgroup {
 static size_t memcg_size(void)
 {
        return sizeof(struct mem_cgroup) +
-               nr_node_ids * sizeof(struct mem_cgroup_per_node);
+               nr_node_ids * sizeof(struct mem_cgroup_per_node *);
 }
 
 /* internal only representation about the status of kmem accounting. */
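
The memcg_size() change above fixes a sizeof() applied to the wrong type: the tail of struct mem_cgroup stores per-node pointers, not embedded structures, so the old expression over-allocated. Sizing a flexible tail from the array element expression avoids this class of bug entirely; a generic sketch (the foo/bar types are illustrative only):

	struct foo {
		int n;
		struct bar *items[];	/* flexible array of pointers */
	};

	/* sizeof on the element expression cannot drift out of sync with the
	 * declaration the way a spelled-out type name can. */
	size_t sz = sizeof(struct foo) + count * sizeof(((struct foo *)0)->items[0]);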
index db08af92c6fce92d9e0d61ca5752aa003c52c3f2..fabe55046c1d7b9638172aa8e26ecb4f3d6ed4b9 100644 (file)
@@ -938,6 +938,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
                                BUG_ON(!PageHWPoison(p));
                                return SWAP_FAIL;
                        }
+                       /*
+                        * We pinned the head page for hwpoison handling,
+                        * now we split the thp and we are interested in
+                        * the hwpoisoned raw page, so move the refcount
+                        * to it.
+                        */
+                       if (hpage != p) {
+                               put_page(hpage);
+                               get_page(p);
+                       }
                        /* THP is split, so ppage should be the real poisoned page. */
                        ppage = p;
                }
index d480cd6fc475854259bdd51021d5125dbdfbe479..192e6eebe4f240e4a8ece7cc0e10e902ff29f308 100644 (file)
@@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page)
 
 /**
  * munlock_vma_page - munlock a vma page
- * @page - page to be unlocked
+ * @page - page to be unlocked, either a normal page or THP page head
+ *
+ * returns the size of the page as a page mask (0 for normal page,
+ *         HPAGE_PMD_NR - 1 for THP head page)
  *
  * called from munlock()/munmap() path with page supposedly on the LRU.
  * When we munlock a page, because the vma where we found the page is being
@@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page)
  */
 unsigned int munlock_vma_page(struct page *page)
 {
-       unsigned int page_mask = 0;
+       unsigned int nr_pages;
 
        BUG_ON(!PageLocked(page));
 
        if (TestClearPageMlocked(page)) {
-               unsigned int nr_pages = hpage_nr_pages(page);
+               nr_pages = hpage_nr_pages(page);
                mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
-               page_mask = nr_pages - 1;
                if (!isolate_lru_page(page))
                        __munlock_isolated_page(page);
                else
                        __munlock_isolation_failed(page);
+       } else {
+               nr_pages = hpage_nr_pages(page);
        }
 
-       return page_mask;
+       /*
+        * Regardless of the original PageMlocked flag, we determine nr_pages
+        * after touching the flag. This leaves a possible race with a THP page
+        * split, such that a whole THP page was munlocked, but nr_pages == 1.
+        * Returning a smaller mask due to that is OK, the worst that can
+        * happen is subsequent useless scanning of the former tail pages.
+        * The NR_MLOCK accounting can however become broken.
+        */
+       return nr_pages - 1;
 }
 
 /**
@@ -286,10 +298,12 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
        int i;
        int nr = pagevec_count(pvec);
-       int delta_munlocked = -nr;
+       int delta_munlocked;
        struct pagevec pvec_putback;
        int pgrescued = 0;
 
+       pagevec_init(&pvec_putback, 0);
+
        /* Phase 1: page isolation */
        spin_lock_irq(&zone->lru_lock);
        for (i = 0; i < nr; i++) {
@@ -318,18 +332,21 @@ skip_munlock:
                        /*
                         * We won't be munlocking this page in the next phase
                         * but we still need to release the follow_page_mask()
-                        * pin.
+                        * pin. We cannot do it under lru_lock however. If it's
+                        * the last pin, __page_cache_release would deadlock.
                         */
+                       pagevec_add(&pvec_putback, pvec->pages[i]);
                        pvec->pages[i] = NULL;
-                       put_page(page);
-                       delta_munlocked++;
                }
        }
+       delta_munlocked = -nr + pagevec_count(&pvec_putback);
        __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
        spin_unlock_irq(&zone->lru_lock);
 
+       /* Now we can release pins of pages that we are not munlocking */
+       pagevec_release(&pvec_putback);
+
        /* Phase 2: page munlock */
-       pagevec_init(&pvec_putback, 0);
        for (i = 0; i < nr; i++) {
                struct page *page = pvec->pages[i];
 
@@ -440,7 +457,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 
        while (start < end) {
                struct page *page = NULL;
-               unsigned int page_mask, page_increm;
+               unsigned int page_mask;
+               unsigned long page_increm;
                struct pagevec pvec;
                struct zone *zone;
                int zoneid;
@@ -490,7 +508,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                                goto next;
                        }
                }
-               page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+               /* It's a bug to munlock in the middle of a THP page */
+               VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
+               page_increm = 1 + page_mask;
                start += page_increm * PAGE_SIZE;
 next:
                cond_resched();
index 762896ebfcf505348a659c83d8720b48e8b58c86..47c908f1f626a4d4a2de96b9f4a648ad386d4357 100644 (file)
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = {
        .parse   = eth_header_parse,
 };
 
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+                                    unsigned short type,
+                                    const void *daddr, const void *saddr,
+                                    unsigned int len)
+{
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+       struct net_device *real_dev = vlan->real_dev;
+
+       return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+       .create  = vlan_passthru_hard_header,
+       .rebuild = dev_rebuild_header,
+       .parse   = eth_header_parse,
+};
+
 static struct device_type vlan_type = {
        .name   = "vlan",
 };
@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev)
 
        dev->needed_headroom = real_dev->needed_headroom;
        if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
-               dev->header_ops      = real_dev->header_ops;
+               dev->header_ops      = &vlan_passthru_header_ops;
                dev->hard_header_len = real_dev->hard_header_len;
        } else {
                dev->header_ops      = &vlan_header_ops;
index a2b480a908723a37d47d14bd76e63628518e6561..b9c8a6eedf4537e4216f09e65f71ef72a55d4ed3 100644 (file)
@@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        hard_iface->bat_iv.ogm_buff = ogm_buff;
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
-       batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
-       batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
-       batadv_ogm_packet->header.ttl = 2;
+       batadv_ogm_packet->packet_type = BATADV_IV_OGM;
+       batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
+       batadv_ogm_packet->ttl = 2;
        batadv_ogm_packet->flags = BATADV_NO_FLAGS;
        batadv_ogm_packet->reserved = 0;
        batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
@@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
        batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
-       batadv_ogm_packet->header.ttl = BATADV_TTL;
+       batadv_ogm_packet->ttl = BATADV_TTL;
 }
 
 /* when do we schedule our own ogm to be sent */
@@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                           fwd_str, (packet_num > 0 ? "aggregated " : ""),
                           batadv_ogm_packet->orig,
                           ntohl(batadv_ogm_packet->seqno),
-                          batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
+                          batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
                           (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
                            "on" : "off"),
                           hard_iface->net_dev->name,
@@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
        /* multihomed peer assumed
         * non-primary OGMs are only broadcasted on their interface
         */
-       if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
+       if ((directlink && (batadv_ogm_packet->ttl == 1)) ||
            (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
                /* FIXME: what about aggregated packets ? */
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
                           (forw_packet->own ? "Sending own" : "Forwarding"),
                           batadv_ogm_packet->orig,
                           ntohl(batadv_ogm_packet->seqno),
-                          batadv_ogm_packet->header.ttl,
+                          batadv_ogm_packet->ttl,
                           forw_packet->if_incoming->net_dev->name,
                           forw_packet->if_incoming->net_dev->dev_addr);
 
@@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
                 */
                if ((!directlink) &&
                    (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
-                   (batadv_ogm_packet->header.ttl != 1) &&
+                   (batadv_ogm_packet->ttl != 1) &&
 
                    /* own packets originating non-primary
                     * interfaces leave only that interface
@@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
                 * interface only - we still can aggregate
                 */
                if ((directlink) &&
-                   (new_bat_ogm_packet->header.ttl == 1) &&
+                   (new_bat_ogm_packet->ttl == 1) &&
                    (forw_packet->if_incoming == if_incoming) &&
 
                    /* packets from direct neighbors or
@@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        uint16_t tvlv_len;
 
-       if (batadv_ogm_packet->header.ttl <= 1) {
+       if (batadv_ogm_packet->ttl <= 1) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
                return;
        }
@@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 
        tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
 
-       batadv_ogm_packet->header.ttl--;
+       batadv_ogm_packet->ttl--;
        memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
 
        /* apply hop penalty */
@@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Forwarding packet: tq: %i, ttl: %i\n",
-                  batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
+                  batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
 
        /* switch off the primaries' first hop flag when forwarding */
        batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
@@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
 
        if (dup_status == BATADV_NO_DUP) {
-               orig_node->last_ttl = batadv_ogm_packet->header.ttl;
-               neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
+               orig_node->last_ttl = batadv_ogm_packet->ttl;
+               neigh_node->last_ttl = batadv_ogm_packet->ttl;
        }
 
        batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
@@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
         * packet in an aggregation.  Here we expect that the padding
         * is always zero (or not 0x01)
         */
-       if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
+       if (batadv_ogm_packet->packet_type != BATADV_IV_OGM)
                return;
 
        /* could be changed by schedule_own_packet() */
@@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                   if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
                   batadv_ogm_packet->prev_sender,
                   ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
-                  batadv_ogm_packet->header.ttl,
-                  batadv_ogm_packet->header.version, has_directlink_flag);
+                  batadv_ogm_packet->ttl,
+                  batadv_ogm_packet->version, has_directlink_flag);
 
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
         * seqno and similar ttl as the non-duplicate
         */
        sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
-       similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+       similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl;
        if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
                            (sameseq && similar_ttl)))
                batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
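The pattern across these hunks is mechanical: the shared struct batadv_header is removed and its three members are promoted into each packet struct, so every "header." access collapses by one level. A minimal sketch of the resulting initialization, using only fields shown in this diff:

    struct batadv_ogm_packet *ogm;

    ogm = (struct batadv_ogm_packet *)ogm_buff;
    /* previously ogm->header.packet_type / .version / .ttl */
    ogm->packet_type = BATADV_IV_OGM;
    ogm->version     = BATADV_COMPAT_VERSION;
    ogm->ttl         = BATADV_TTL;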
index 6c8c3934bd7b44bac8643683683c494e003f1ed2..b316a4cb6f147dbbafadb51cdccf687cef500373 100644 (file)
@@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
 
        unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
 
-       switch (unicast_4addr_packet->u.header.packet_type) {
+       switch (unicast_4addr_packet->u.packet_type) {
        case BATADV_UNICAST:
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
                           "* encapsulated within a UNICAST packet\n");
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
                        break;
                default:
                        batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
-                                  unicast_4addr_packet->u.header.packet_type);
+                                  unicast_4addr_packet->u.packet_type);
                }
                break;
        case BATADV_BCAST:
@@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
        default:
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
                           "* encapsulated within an unknown packet type (0x%x)\n",
-                          unicast_4addr_packet->u.header.packet_type);
+                          unicast_4addr_packet->u.packet_type);
        }
 }
 
index 271d321b3a04063b4f9dfe476c7c07d9e2e50048..6ddb6145ffb564be9397728e83a4040f071c4a96 100644 (file)
@@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
                batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
                                   skb->len + ETH_HLEN);
 
-               packet->header.ttl--;
+               packet->ttl--;
                batadv_send_skb_packet(skb, neigh_node->if_incoming,
                                       neigh_node->addr);
                ret = true;
@@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                goto out_err;
 
        /* Create one header to be copied to all fragments */
-       frag_header.header.packet_type = BATADV_UNICAST_FRAG;
-       frag_header.header.version = BATADV_COMPAT_VERSION;
-       frag_header.header.ttl = BATADV_TTL;
+       frag_header.packet_type = BATADV_UNICAST_FRAG;
+       frag_header.version = BATADV_COMPAT_VERSION;
+       frag_header.ttl = BATADV_TTL;
        frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
        frag_header.reserved = 0;
        frag_header.no = 0;
index 29ae4efe3543e4bfc6ff013b9c97628b9e395209..130cc3217e2b0fb091dccfad6a4f41d4b0c9dbc9 100644 (file)
@@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
                goto free_skb;
        }
 
-       if (icmp_header->header.packet_type != BATADV_ICMP) {
+       if (icmp_header->packet_type != BATADV_ICMP) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
                len = -EINVAL;
@@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
 
        icmp_header->uid = socket_client->index;
 
-       if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+       if (icmp_header->version != BATADV_COMPAT_VERSION) {
                icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
-               icmp_header->header.version = BATADV_COMPAT_VERSION;
+               icmp_header->version = BATADV_COMPAT_VERSION;
                batadv_socket_add_packet(socket_client, icmp_header,
                                         packet_len);
                goto free_skb;
index c51a5e568f0a80c08beb4ded5b34b1aff8d5cfdf..1511f64a6ceaae34d26860cd4642cde71c5af9a0 100644 (file)
@@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
 
-       if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+       if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: incompatible batman version (%i)\n",
-                          batadv_ogm_packet->header.version);
+                          batadv_ogm_packet->version);
                goto err_free;
        }
 
        /* all receive handlers return whether they received or reused
         * the supplied skb. if not, we have to free the skb.
         */
-       idx = batadv_ogm_packet->header.packet_type;
+       idx = batadv_ogm_packet->packet_type;
        ret = (*batadv_rx_handler[idx])(skb, hard_iface);
 
        if (ret == NET_RX_DROP)
@@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void)
        BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
        BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
        BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
-       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
-       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);
 
        /* broadcast packet */
        batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
@@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
        skb_reserve(skb, ETH_HLEN);
        tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
        unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
-       unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
-       unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
-       unicast_tvlv_packet->header.ttl = BATADV_TTL;
+       unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
+       unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
+       unicast_tvlv_packet->ttl = BATADV_TTL;
        unicast_tvlv_packet->reserved = 0;
        unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
        unicast_tvlv_packet->align = 0;
index 351e199bc0afff37aa9c1a4b3f40201956dfde91..511d7e1eea38b6d6e13247df0f8d4399deea8382 100644 (file)
@@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
 {
        if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
                return false;
-       if (orig_node->last_ttl != ogm_packet->header.ttl + 1)
+       if (orig_node->last_ttl != ogm_packet->ttl + 1)
                return false;
        if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
                return false;
@@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
        coded_packet = (struct batadv_coded_packet *)skb_dest->data;
        skb_reset_mac_header(skb_dest);
 
-       coded_packet->header.packet_type = BATADV_CODED;
-       coded_packet->header.version = BATADV_COMPAT_VERSION;
-       coded_packet->header.ttl = packet1->header.ttl;
+       coded_packet->packet_type = BATADV_CODED;
+       coded_packet->version = BATADV_COMPAT_VERSION;
+       coded_packet->ttl = packet1->ttl;
 
        /* Info about first unicast packet */
        memcpy(coded_packet->first_source, first_source, ETH_ALEN);
@@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
        memcpy(coded_packet->second_source, second_source, ETH_ALEN);
        memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
        coded_packet->second_crc = packet_id2;
-       coded_packet->second_ttl = packet2->header.ttl;
+       coded_packet->second_ttl = packet2->ttl;
        coded_packet->second_ttvn = packet2->ttvn;
        coded_packet->coded_len = htons(coding_len);
 
@@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb,
        /* We only handle unicast packets */
        payload = skb_network_header(skb);
        packet = (struct batadv_unicast_packet *)payload;
-       if (packet->header.packet_type != BATADV_UNICAST)
+       if (packet->packet_type != BATADV_UNICAST)
                goto out;
 
        /* Try to find a coding opportunity and send the skb if one is found */
@@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
        /* Check for supported packet type */
        payload = skb_network_header(skb);
        packet = (struct batadv_unicast_packet *)payload;
-       if (packet->header.packet_type != BATADV_UNICAST)
+       if (packet->packet_type != BATADV_UNICAST)
                goto out;
 
        /* Find existing nc_path or create a new */
@@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
                ttvn = coded_packet_tmp.second_ttvn;
        } else {
                orig_dest = coded_packet_tmp.first_orig_dest;
-               ttl = coded_packet_tmp.header.ttl;
+               ttl = coded_packet_tmp.ttl;
                ttvn = coded_packet_tmp.first_ttvn;
        }
 
@@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 
        /* Create decoded unicast packet */
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       unicast_packet->header.packet_type = BATADV_UNICAST;
-       unicast_packet->header.version = BATADV_COMPAT_VERSION;
-       unicast_packet->header.ttl = ttl;
+       unicast_packet->packet_type = BATADV_UNICAST;
+       unicast_packet->version = BATADV_COMPAT_VERSION;
+       unicast_packet->ttl = ttl;
        memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
        unicast_packet->ttvn = ttvn;
 
index 207459b62966d0975bca70894fcf5ce266a5da29..2dd8f2422550c3157112eb4ce5a27362e6caf8fb 100644 (file)
@@ -155,6 +155,7 @@ enum batadv_tvlv_type {
        BATADV_TVLV_ROAM        = 0x05,
 };
 
+#pragma pack(2)
 /* the destination hardware field in the ARP frame is used to
  * transport the claim type and the group id
  */
@@ -163,24 +164,20 @@ struct batadv_bla_claim_dst {
        uint8_t type;           /* bla_claimframe */
        __be16 group;           /* group id */
 };
-
-struct batadv_header {
-       uint8_t  packet_type;
-       uint8_t  version;  /* batman version field */
-       uint8_t  ttl;
-       /* the parent struct has to add a byte after the header to make
-        * everything 4 bytes aligned again
-        */
-};
+#pragma pack()
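#pragma pack(2) caps member alignment at two bytes, which is what lets these odd-length headers sit flush against the payload that follows them. A standalone sketch of the effect, using a hypothetical struct rather than one of the batman-adv packet types:

    #include <stdint.h>
    #include <stdio.h>

    #pragma pack(2)
    struct packed_hdr {
        uint8_t  type;
        uint8_t  version;
        uint8_t  ttl;
        uint8_t  flags;
        uint16_t len;
        uint32_t seqno; /* offset 6 under pack(2); offset 8 without it */
    };
    #pragma pack()

    int main(void)
    {
        /* prints 10; without pack(2) the struct would be padded to 12 */
        printf("%zu\n", sizeof(struct packed_hdr));
        return 0;
    }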
 
 /**
  * struct batadv_ogm_packet - ogm (routing protocol) packet
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @flags: contains routing relevant flags - see enum batadv_iv_flags
  * @tvlv_len: length of tvlv data following the ogm header
  */
 struct batadv_ogm_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
        uint8_t  flags;
        __be32   seqno;
        uint8_t  orig[ETH_ALEN];
@@ -196,29 +193,51 @@ struct batadv_ogm_packet {
 #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
 
 /**
- * batadv_icmp_header - common ICMP header
- * @header: common batman header
+ * batadv_icmp_header - common members among all the ICMP packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @msg_type: ICMP packet type
  * @dst: address of the destination node
  * @orig: address of the source node
  * @uid: local ICMP socket identifier
+ * @align: not used - useful for alignment purposes only
+ *
+ * This structure is used for ICMP packet parsing only; it is never sent
+ * over the wire. The alignment field at the end is there to ensure that
+ * members are padded the same way as they are in real packets.
  */
 struct batadv_icmp_header {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
        uint8_t  msg_type; /* see ICMP message types above */
        uint8_t  dst[ETH_ALEN];
        uint8_t  orig[ETH_ALEN];
        uint8_t  uid;
+       uint8_t  align[3];
 };
 
 /**
  * batadv_icmp_packet - ICMP packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
  * @reserved: not used - useful for alignment
  * @seqno: ICMP sequence number
  */
 struct batadv_icmp_packet {
-       struct batadv_icmp_header icmph;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
+       uint8_t  msg_type; /* see ICMP message types above */
+       uint8_t  dst[ETH_ALEN];
+       uint8_t  orig[ETH_ALEN];
+       uint8_t  uid;
        uint8_t  reserved;
        __be16   seqno;
 };
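Because struct batadv_icmp_header is only a parse-time view of the real ICMP packets, its members must land at the same offsets as in struct batadv_icmp_packet (and batadv_icmp_packet_rr below); the trailing align[3] merely keeps sizeof() consistent with the padded wire structs. A sketch of a compile-time check in the spirit of the BUILD_BUG_ON() calls this series touches in batadv_recv_handler_init() -- illustrative only, not part of the patch:

    BUILD_BUG_ON(offsetof(struct batadv_icmp_header, msg_type) !=
                 offsetof(struct batadv_icmp_packet, msg_type));
    BUILD_BUG_ON(offsetof(struct batadv_icmp_header, dst) !=
                 offsetof(struct batadv_icmp_packet, dst));
    BUILD_BUG_ON(offsetof(struct batadv_icmp_header, uid) !=
                 offsetof(struct batadv_icmp_packet, uid));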
@@ -227,13 +246,25 @@ struct batadv_icmp_packet {
 
 /**
  * batadv_icmp_packet_rr - ICMP RouteRecord packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
  * @rr_cur: number of entries in the rr array
  * @seqno: ICMP sequence number
  * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-       struct batadv_icmp_header icmph;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
+       uint8_t  msg_type; /* see ICMP message types above */
+       uint8_t  dst[ETH_ALEN];
+       uint8_t  orig[ETH_ALEN];
+       uint8_t  uid;
        uint8_t  rr_cur;
        __be16   seqno;
        uint8_t  rr[BATADV_RR_LEN][ETH_ALEN];
@@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr {
  */
 #pragma pack(2)
 
+/**
+ * struct batadv_unicast_packet - unicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @ttvn: translation table version number
+ * @dest: originator destination of the unicast packet
+ */
 struct batadv_unicast_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;
+       uint8_t  ttl;
        uint8_t  ttvn; /* destination translation table version number */
        uint8_t  dest[ETH_ALEN];
        /* "4 bytes boundary + 2 bytes" long to make the payload after the
@@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet {
 
 /**
  * struct batadv_frag_packet - fragmented packet
- * @header: common batman packet header with type, compatversion, and ttl
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @dest: final destination used when routing fragments
  * @orig: originator of the fragment used when merging the packet
  * @no: fragment number within this sequence
@@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet {
  * @total_size: size of the merged packet
  */
 struct batadv_frag_packet {
-       struct  batadv_header header;
+       uint8_t packet_type;
+       uint8_t version;  /* batman version field */
+       uint8_t ttl;
 #if defined(__BIG_ENDIAN_BITFIELD)
        uint8_t no:4;
        uint8_t reserved:4;
@@ -305,8 +350,19 @@ struct batadv_frag_packet {
        __be16  total_size;
 };
 
+/**
+ * struct batadv_bcast_packet - broadcast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @orig: originator of the broadcast packet
+ */
 struct batadv_bcast_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;  /* batman version field */
+       uint8_t  ttl;
        uint8_t  reserved;
        __be32   seqno;
        uint8_t  orig[ETH_ALEN];
@@ -315,11 +371,11 @@ struct batadv_bcast_packet {
         */
 };
 
-#pragma pack()
-
 /**
  * struct batadv_coded_packet - network coded packet
- * @header: common batman packet header and ttl of first included packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @reserved: Align following fields to 2-byte boundaries
  * @first_source: original source of first included packet
  * @first_orig_dest: original destination of first included packet
@@ -334,7 +390,9 @@ struct batadv_bcast_packet {
  * @coded_len: length of network coded part of the payload
  */
 struct batadv_coded_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;  /* batman version field */
+       uint8_t  ttl;
        uint8_t  first_ttvn;
        /* uint8_t  first_dest[ETH_ALEN]; - saved in mac header destination */
        uint8_t  first_source[ETH_ALEN];
@@ -349,9 +407,13 @@ struct batadv_coded_packet {
        __be16   coded_len;
 };
 
+#pragma pack()
+
 /**
  * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @reserved: reserved field (for packet alignment)
  * @src: address of the source
  * @dst: address of the destination
@@ -359,7 +421,9 @@ struct batadv_coded_packet {
  * @align: 2 bytes to align the header to a 4 byte boundary
  */
 struct batadv_unicast_tvlv_packet {
-       struct batadv_header header;
+       uint8_t  packet_type;
+       uint8_t  version;  /* batman version field */
+       uint8_t  ttl;
        uint8_t  reserved;
        uint8_t  dst[ETH_ALEN];
        uint8_t  src[ETH_ALEN];
@@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data {
  * struct batadv_tvlv_tt_change - translation table diff data
  * @flags: status indicators concerning the non-mesh client (see
  *  batadv_tt_client_flags)
- * @reserved: reserved field
+ * @reserved: reserved field - useful for alignment purposes only
  * @addr: mac address of non-mesh client that triggered this tt change
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_tt_change {
        uint8_t flags;
-       uint8_t reserved;
+       uint8_t reserved[3];
        uint8_t addr[ETH_ALEN];
        __be16 vid;
 };
index d4114d775ad61f659b59437c67aa7054d9a88f92..46278bfb8fdb9469e12f8fbdc8922e49f81f8014 100644 (file)
@@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
                memcpy(icmph->dst, icmph->orig, ETH_ALEN);
                memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
                icmph->msg_type = BATADV_ECHO_REPLY;
-               icmph->header.ttl = BATADV_TTL;
+               icmph->ttl = BATADV_TTL;
 
                res = batadv_send_skb_to_orig(skb, orig_node, NULL);
                if (res != NET_XMIT_DROP)
@@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
        icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
        /* send TTL exceeded if packet is an echo request (traceroute) */
-       if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
+       if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
                pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
-                        icmp_packet->icmph.orig, icmp_packet->icmph.dst);
+                        icmp_packet->orig, icmp_packet->dst);
                goto out;
        }
 
@@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
                goto out;
 
        /* get routing information */
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
+       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
        if (!orig_node)
                goto out;
 
@@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
 
        icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
-       memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
-       memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+       memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr,
               ETH_ALEN);
-       icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
-       icmp_packet->icmph.header.ttl = BATADV_TTL;
+       icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
+       icmp_packet->ttl = BATADV_TTL;
 
        if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = NET_RX_SUCCESS;
@@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                return batadv_recv_my_icmp_packet(bat_priv, skb);
 
        /* TTL exceeded */
-       if (icmph->header.ttl < 2)
+       if (icmph->ttl < 2)
                return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
 
        /* get routing information */
@@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
        icmph = (struct batadv_icmp_header *)skb->data;
 
        /* decrement ttl */
-       icmph->header.ttl--;
+       icmph->ttl--;
 
        /* route it */
        if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
        /* TTL exceeded */
-       if (unicast_packet->header.ttl < 2) {
+       if (unicast_packet->ttl < 2) {
                pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
                         ethhdr->h_source, unicast_packet->dest);
                goto out;
@@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 
        /* decrement ttl */
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       unicast_packet->header.ttl--;
+       unicast_packet->ttl--;
 
-       switch (unicast_packet->header.packet_type) {
+       switch (unicast_packet->packet_type) {
        case BATADV_UNICAST_4ADDR:
                hdr_len = sizeof(struct batadv_unicast_4addr_packet);
                break;
@@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
        unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
 
-       is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR;
+       is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
        /* the caller function should have already pulled 2 bytes */
        if (is4addr)
                hdr_size = sizeof(*unicast_4addr_packet);
@@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
        if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
                goto out;
 
-       if (bcast_packet->header.ttl < 2)
+       if (bcast_packet->ttl < 2)
                goto out;
 
        orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
index c83be5ebaa285dffe64d051850e87f2fca02624b..fba4dcfcfac21b6e9c6686c30fc6ca8a1a08f3fa 100644 (file)
@@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
                return false;
 
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       unicast_packet->header.version = BATADV_COMPAT_VERSION;
+       unicast_packet->version = BATADV_COMPAT_VERSION;
        /* batman packet type: unicast */
-       unicast_packet->header.packet_type = BATADV_UNICAST;
+       unicast_packet->packet_type = BATADV_UNICAST;
        /* set unicast ttl */
-       unicast_packet->header.ttl = BATADV_TTL;
+       unicast_packet->ttl = BATADV_TTL;
        /* copy the destination for faster routing */
        memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
        /* set the destination tt version number */
@@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
                goto out;
 
        uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
-       uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+       uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
        memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
        uc_4addr_packet->subtype = packet_subtype;
        uc_4addr_packet->reserved = 0;
@@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 
        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
-       bcast_packet->header.ttl--;
+       bcast_packet->ttl--;
 
        skb_reset_mac_header(newskb);
 
index 36f050876f8260245a008de079cf0974fa97b76e..a8f99d1486c0441e9aa192e81d7522ad451f4bd7 100644 (file)
@@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb,
                        goto dropped;
 
                bcast_packet = (struct batadv_bcast_packet *)skb->data;
-               bcast_packet->header.version = BATADV_COMPAT_VERSION;
-               bcast_packet->header.ttl = BATADV_TTL;
+               bcast_packet->version = BATADV_COMPAT_VERSION;
+               bcast_packet->ttl = BATADV_TTL;
 
                /* batman packet type: broadcast */
-               bcast_packet->header.packet_type = BATADV_BCAST;
+               bcast_packet->packet_type = BATADV_BCAST;
                bcast_packet->reserved = 0;
 
                /* hw address of first interface is the orig mac because only
@@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
                         struct sk_buff *skb, struct batadv_hard_iface *recv_if,
                         int hdr_size, struct batadv_orig_node *orig_node)
 {
-       struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
+       struct batadv_bcast_packet *batadv_bcast_packet;
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        __be16 ethertype = htons(ETH_P_BATMAN);
        struct vlan_ethhdr *vhdr;
@@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
        unsigned short vid;
        bool is_bcast;
 
-       is_bcast = (batadv_header->packet_type == BATADV_BCAST);
+       batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
+       is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
 
        /* check if enough space is available for pulling, and pull */
        if (!pskb_may_pull(skb, hdr_size))
@@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
        skb_pull_rcsum(skb, hdr_size);
        skb_reset_mac_header(skb);
 
-       vid = batadv_get_vid(skb, hdr_size);
+       /* clean the netfilter state now that the batman-adv header has been
+        * removed
+        */
+       nf_reset(skb);
+
+       vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
index 4add57d4857f11e5ea9edfa13aae1f46dab3e4f9..ff625fedbc5eeb08a7569c01973f5eb1ddc71eb1 100644 (file)
@@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
                return;
 
        tt_change_node->change.flags = flags;
-       tt_change_node->change.reserved = 0;
+       memset(tt_change_node->change.reserved, 0,
+              sizeof(tt_change_node->change.reserved));
        memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
        tt_change_node->change.vid = htons(common->vid);
 
@@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
                               ETH_ALEN);
                        tt_change->flags = tt_common_entry->flags;
                        tt_change->vid = htons(tt_common_entry->vid);
-                       tt_change->reserved = 0;
+                       memset(tt_change->reserved, 0,
+                              sizeof(tt_change->reserved));
 
                        tt_num_entries++;
                        tt_change++;
index 6a6c8bb4fd72d4f2d4294b9f7fed772d81e57b93..7552f9e3089ce790040268f1ce0a067d6ef728cb 100644 (file)
@@ -940,8 +940,22 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);
 
-       if (hci_pi(sk)->channel == HCI_CHANNEL_RAW &&
-           bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+       if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+               /* No permission check is needed for user channel
+                * since that gets enforced when binding the socket.
+                *
+                * However check that the packet type is valid.
+                */
+               if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+                   bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+                   bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+                       err = -EINVAL;
+                       goto drop;
+               }
+
+               skb_queue_tail(&hdev->raw_q, skb);
+               queue_work(hdev->workqueue, &hdev->tx_work);
+       } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);
@@ -972,14 +986,6 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                        goto drop;
                }
 
-               if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
-                   bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
-                   bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
-                   bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
-                       err = -EINVAL;
-                       goto drop;
-               }
-
                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }
index 4c214b2b88efa01d81be93a8b8d77e57229c1fa4..ef66365b7354da9f2fe2c4d87d2056d9a781f3c8 100644 (file)
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
        u32 old;
        struct net_bridge_mdb_htable *mdb;
 
-       spin_lock(&br->multicast_lock);
+       spin_lock_bh(&br->multicast_lock);
        if (!netif_running(br->dev))
                goto unlock;
 
@@ -2030,7 +2030,7 @@ rollback:
        }
 
 unlock:
-       spin_unlock(&br->multicast_lock);
+       spin_unlock_bh(&br->multicast_lock);
 
        return err;
 }
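Switching to spin_lock_bh() matters here because (assuming the usual bridge locking rules, where br->multicast_lock is also taken from multicast timers running in softirq context) taking the lock from process context with bottom halves enabled risks a deadlock if such a timer fires on the same CPU. The resulting pattern:

    spin_lock_bh(&br->multicast_lock);  /* BHs off: the timer cannot preempt us */
    /* ... resize/rehash the mdb table ... */
    spin_unlock_bh(&br->multicast_lock);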
index ba3b7ea5ebb3139cca38e82ac6f5f5e346f4a4e0..0ce469e5ec8057c674901b404db13061fe8b7392 100644 (file)
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                       struct netdev_queue *txq, void *accel_priv)
+                       struct netdev_queue *txq)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
        int rc = NETDEV_TX_OK;
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        dev_queue_xmit_nit(skb, dev);
 
                skb_len = skb->len;
-               if (accel_priv)
-                       rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-               else
                        rc = ops->ndo_start_xmit(skb, dev);
 
                trace_net_dev_xmit(skb, rc, dev, skb_len);
-               if (rc == NETDEV_TX_OK && txq)
+               if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                return rc;
        }
@@ -2627,10 +2624,7 @@ gso:
                        dev_queue_xmit_nit(nskb, dev);
 
                skb_len = nskb->len;
-               if (accel_priv)
-                       rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-               else
-                       rc = ops->ndo_start_xmit(nskb, dev);
+               rc = ops->ndo_start_xmit(nskb, dev);
                trace_net_dev_xmit(nskb, rc, dev, skb_len);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        if (rc & ~NETDEV_TX_MASK)
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
  *      the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
        struct net_device *dev = skb->dev;
        struct netdev_queue *txq;
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
        skb_update_prio(skb);
 
-       txq = netdev_pick_tx(dev, skb);
+       txq = netdev_pick_tx(dev, skb, accel_priv);
        q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
                        if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
-                               rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+                               rc = dev_hard_start_xmit(skb, dev, txq);
                                __this_cpu_dec(xmit_recursion);
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
@@ -2892,8 +2886,19 @@ out:
        rcu_read_unlock_bh();
        return rc;
 }
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+       return __dev_queue_xmit(skb, NULL);
+}
 EXPORT_SYMBOL(dev_queue_xmit);
 
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+       return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
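dev_hard_start_xmit() loses its accel_priv argument; accelerated transmits instead enter through the new dev_queue_xmit_accel() wrapper into __dev_queue_xmit(), which hands accel_priv to netdev_pick_tx(). A usage sketch for a hypothetical lower-device forwarding path (the caller and the accel variable are illustrative, not taken from this patch):

    /* accel points at the lower device's offload state, if any */
    if (accel)
        err = dev_queue_xmit_accel(skb, accel);
    else
        err = dev_queue_xmit(skb);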
+
 
 /*=======================================================================
                        Receiver routines
@@ -4500,7 +4505,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 {
        struct netdev_adjacent *upper;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
 
        upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
 
index d6ef173225008ec1946ed232f781cc63754b9575..2fc5beaf578349cd543621a460bf2bb4eaa9d221 100644 (file)
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                   struct sk_buff *skb)
+                                   struct sk_buff *skb,
+                                   void *accel_priv)
 {
        int queue_index = 0;
 
        if (dev->real_num_tx_queues != 1) {
                const struct net_device_ops *ops = dev->netdev_ops;
                if (ops->ndo_select_queue)
-                       queue_index = ops->ndo_select_queue(dev, skb);
+                       queue_index = ops->ndo_select_queue(dev, skb,
+                                                           accel_priv);
                else
                        queue_index = __netdev_pick_tx(dev, skb);
-               queue_index = dev_cap_txqueue(dev, queue_index);
+
+               if (!accel_priv)
+                       queue_index = dev_cap_txqueue(dev, queue_index);
        }
 
        skb_set_queue_mapping(skb, queue_index);
index 36b1443f9ae4b38cdd7b15f645f08032acac06d0..932c6d7cf6668e073a90432281037b51cf7dfecc 100644 (file)
@@ -1275,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
 
        if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
                            skb->len) < 0 &&
-           dev->header_ops->rebuild(skb))
+           dev_rebuild_header(skb))
                return 0;
 
        return dev_queue_xmit(skb);
index 8f971990677cd48026f651b2dba01d01190bfff6..19fe9c717cedb56051ff6cb5effca0274fd946d8 100644 (file)
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
 
-               txq = netdev_pick_tx(dev, skb);
+               txq = netdev_pick_tx(dev, skb, NULL);
 
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                                            !vlan_hw_offload_capable(netif_skb_features(skb),
                                                                     skb->vlan_proto)) {
                                                skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
-                                               if (unlikely(!skb))
-                                                       break;
+                                               if (unlikely(!skb)) {
+                                                       /* This is actually a packet drop, but we
+                                                        * don't want the code at the end of this
+                                                        * function to try and re-queue a NULL skb.
+                                                        */
+                                                       status = NETDEV_TX_OK;
+                                                       goto unlock_txq;
+                                               }
                                                skb->vlan_tci = 0;
                                        }
 
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
+                       unlock_txq:
                                __netif_tx_unlock(txq);
 
                                if (status == NETDEV_TX_OK)
index 4c6bdf97a6577d190332b85654769a92ce553111..595ddf0459db79c724b8eab2c8b0f6a0a7d1df97 100644 (file)
@@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = {
        .llseek  = noop_llseek,
 };
 
-static __init int setup_jprobe(void)
-{
-       int ret = register_jprobe(&dccp_send_probe);
-
-       if (ret) {
-               request_module("dccp");
-               ret = register_jprobe(&dccp_send_probe);
-       }
-       return ret;
-}
-
 static __init int dccpprobe_init(void)
 {
        int ret = -ENOMEM;
@@ -174,7 +163,13 @@ static __init int dccpprobe_init(void)
        if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
                goto err0;
 
-       ret = setup_jprobe();
+       ret = register_jprobe(&dccp_send_probe);
+       if (ret) {
+               ret = request_module("dccp");
+               if (!ret)
+                       ret = register_jprobe(&dccp_send_probe);
+       }
+
        if (ret)
                goto err1;
 
index 459e200c08a40e4e737798582524cd973064b143..a2d2456a557a7621059a5b0bbe033168492f8fe7 100644 (file)
@@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb,
                        hc06_ptr += 3;
                } else {
                        /* compress nothing */
-                       memcpy(hc06_ptr, &hdr, 4);
+                       memcpy(hc06_ptr, hdr, 4);
                        /* replace the top byte with new ECN | DSCP format */
                        *hc06_ptr = tmp;
                        hc06_ptr += 4;
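The one-character change above fixes a classic pointer slip: hdr already points at the uncompressed IPv6 header, so memcpy(hc06_ptr, &hdr, 4) copied the first four bytes of the pointer variable itself instead of the header's version/traffic-class/flow-label word. The corrected sequence, as in the hunk:

    memcpy(hc06_ptr, hdr, 4);   /* copy the header bytes, not &hdr */
    *hc06_ptr = tmp;            /* then overwrite byte 0 with the new ECN | DSCP */
    hc06_ptr += 4;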
index e5d436188464eba55eca773f4fb88fd0f0634e97..2cd02f32f99f568c8275389aefd754700a77efa6 100644 (file)
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
        netdev_features_t enc_features;
        int ghl = GRE_HEADER_SECTION;
        struct gre_base_hdr *greh;
+       u16 mac_offset = skb->mac_header;
        int mac_len = skb->mac_len;
        __be16 protocol = skb->protocol;
        int tnl_hlen;
@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
        } else
                csum = false;
 
+       if (unlikely(!pskb_may_pull(skb, ghl)))
+               goto out;
+
        /* setup inner skb. */
        skb->protocol = greh->protocol;
        skb->encapsulation = 0;
 
-       if (unlikely(!pskb_may_pull(skb, ghl)))
-               goto out;
-
        __skb_pull(skb, ghl);
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb_inner_network_offset(skb));
@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
        /* segment inner packet. */
        enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
        segs = skb_mac_gso_segment(skb, enc_features);
-       if (!segs || IS_ERR(segs))
+       if (!segs || IS_ERR(segs)) {
+               skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
                goto out;
+       }
 
        skb = segs;
        tnl_hlen = skb_tnl_header_len(skb);
index 56a964a553d2c739a03b880acd7158a4c9714b66..a0f52dac8940de45ffaa53291c82950b2e6c4c99 100644 (file)
@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 
        r->id.idiag_sport = inet->inet_sport;
        r->id.idiag_dport = inet->inet_dport;
+
+       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
        r->id.idiag_src[0] = inet->inet_rcv_saddr;
        r->id.idiag_dst[0] = inet->inet_daddr;
 
@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
 
        r->idiag_family       = tw->tw_family;
        r->idiag_retrans      = 0;
+
        r->id.idiag_if        = tw->tw_bound_dev_if;
        sock_diag_save_cookie(tw, r->id.idiag_cookie);
+
        r->id.idiag_sport     = tw->tw_sport;
        r->id.idiag_dport     = tw->tw_dport;
+
+       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
        r->id.idiag_src[0]    = tw->tw_rcv_saddr;
        r->id.idiag_dst[0]    = tw->tw_daddr;
+
        r->idiag_state        = tw->tw_substate;
        r->idiag_timer        = 3;
        r->idiag_expires      = jiffies_to_msecs(tmo);
@@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
 
        r->id.idiag_sport = inet->inet_sport;
        r->id.idiag_dport = ireq->ir_rmt_port;
+
+       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
        r->id.idiag_src[0] = ireq->ir_loc_addr;
        r->id.idiag_dst[0] = ireq->ir_rmt_addr;
+
        r->idiag_expires = jiffies_to_msecs(tmo);
        r->idiag_rqueue = 0;
        r->idiag_wqueue = 0;
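The added memset() calls matter because idiag_src and idiag_dst are 16-byte, IPv6-sized arrays while the IPv4 paths only write word [0]; zeroing them first keeps the remaining 12 bytes from carrying uninitialized kernel data out to userspace. The pattern, as used in each of the three fill helpers above:

    memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
    memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

    r->id.idiag_src[0] = inet->inet_rcv_saddr;  /* IPv4: word 0 only */
    r->id.idiag_dst[0] = inet->inet_daddr;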
index d7aea4c5b9400efec37d15db3c5a896769d0da68..e560ef34cf4bd2efe9f1f7a64424441117a4502a 100644 (file)
@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
                                  iph->saddr, iph->daddr, tpi->key);
 
        if (tunnel) {
+               skb_pop_mac_header(skb);
                ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
                return PACKET_RCVD;
        }
index 912402752f2ffce701d697de1bb7a32b05c3b482..df184616493f707ea8a0ea353a33d3a5aa981fc7 100644 (file)
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
 
        if (cork->length + length > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
-                              mtu-exthdrlen);
+                              mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }
 
@@ -1151,7 +1151,8 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                         mtu : 0xFFFF;
 
        if (cork->length + size > maxnonfragsize - fragheaderlen) {
-               ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
+               ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
+                              mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }
 
index f140048334ce21f38d86ac4a2fdf770df5330d78..a7e4729e974b3206f57e3b3da4026a77432768e1 100644 (file)
@@ -2478,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
                                       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
+       u16 mac_offset = skb->mac_header;
        int mac_len = skb->mac_len;
        int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
        __be16 protocol = skb->protocol;
@@ -2497,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
        /* segment inner packet. */
        enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
        segs = skb_mac_gso_segment(skb, enc_features);
-       if (!segs || IS_ERR(segs))
+       if (!segs || IS_ERR(segs)) {
+               skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+                                    mac_len);
                goto out;
+       }
 
        outer_hlen = skb_tnl_header_len(skb);
        skb = segs;
index 83206de2bc7679dc20e4fdcf34f8b1f5cd7831ef..79c62bdcd3c549b7dc07c74449d30b77296134eb 100644 (file)
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
+       int offset;
+       __wsum csum;
+
+       if (skb->encapsulation &&
+           skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
+               segs = skb_udp_tunnel_segment(skb, features);
+               goto out;
+       }
 
        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                goto out;
        }
 
+       /* Do software UFO. Complete and fill in the UDP checksum as
+        * HW cannot do checksum of UDP packets sent as multiple
+        * IP fragments.
+        */
+       offset = skb_checksum_start_offset(skb);
+       csum = skb_checksum(skb, offset, skb->len - offset, 0);
+       offset += skb->csum_offset;
+       *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+       skb->ip_summed = CHECKSUM_NONE;
+
        /* Fragment the skb. IP headers of the fragments are updated in
         * inet_gso_segment()
         */
-       if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
-               segs = skb_udp_tunnel_segment(skb, features);
-       else {
-               int offset;
-               __wsum csum;
-
-               /* Do software UFO. Complete and fill in the UDP checksum as
-                * HW cannot do checksum of UDP packets sent as multiple
-                * IP fragments.
-                */
-               offset = skb_checksum_start_offset(skb);
-               csum = skb_checksum(skb, offset, skb->len - offset, 0);
-               offset += skb->csum_offset;
-               *(__sum16 *)(skb->data + offset) = csum_fold(csum);
-               skb->ip_summed = CHECKSUM_NONE;
-
-               segs = skb_segment(skb, features);
-       }
+       segs = skb_segment(skb, features);
 out:
        return segs;
 }
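The software-UFO branch above completes the UDP checksum by summing the payload and folding it into 16 bits before the skb is segmented, since the hardware cannot checksum a datagram that leaves as multiple IP fragments. Roughly what csum_fold() does with the 32-bit running sum -- a sketch of the generic implementation, not the arch-optimized variants:

    static inline __sum16 fold_sketch(__wsum csum)
    {
        u32 sum = (__force u32)csum;

        sum = (sum & 0xffff) + (sum >> 16); /* fold the high half in */
        sum = (sum & 0xffff) + (sum >> 16); /* absorb any carry */
        return (__force __sum16)~sum;
    }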
index d5fa5b8c443ecbbcb0ed09a10db19b06730b7d8b..abe46a4228ce96655d1aacec77052a89e70cfdd4 100644 (file)
@@ -1671,7 +1671,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 {
        struct in6_addr addr;
-       if (ifp->prefix_len == 127) /* RFC 6164 */
+       if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
        if (ipv6_addr_any(&addr))
@@ -1682,7 +1682,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
 {
        struct in6_addr addr;
-       if (ifp->prefix_len == 127) /* RFC 6164 */
+       if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
        if (ipv6_addr_any(&addr))
@@ -2509,7 +2509,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
        struct inet6_ifaddr *ifp;
 
        ifp = ipv6_add_addr(idev, addr, NULL, plen,
-                           scope, IFA_F_PERMANENT, 0, 0);
+                           scope, IFA_F_PERMANENT,
+                           INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
        if (!IS_ERR(ifp)) {
                spin_lock_bh(&ifp->lock);
                ifp->flags &= ~IFA_F_TENTATIVE;
@@ -2637,7 +2638,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
 #endif
 
 
-       ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
+       ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
+                           INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
        if (!IS_ERR(ifp)) {
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
                addrconf_dad_start(ifp);
@@ -3456,7 +3458,12 @@ restart:
                                         &inet6_addr_lst[i], addr_lst) {
                        unsigned long age;
 
-                       if (ifp->flags & IFA_F_PERMANENT)
+                       /* An IFA_F_PERMANENT address can still have a finite
+                        * preferred lifetime: preferred_lft may be set to a
+                        * non-zero, non-infinite value while valid_lft stays
+                        * infinite, so only skip it when preferred_lft really
+                        * is INFINITY_LIFE_TIME.
+                        */
+                       if ((ifp->flags & IFA_F_PERMANENT) &&
+                           (ifp->prefered_lft == INFINITY_LIFE_TIME))
                                continue;
 
                        spin_lock(&ifp->lock);
@@ -3481,7 +3488,8 @@ restart:
                                        ifp->flags |= IFA_F_DEPRECATED;
                                }
 
-                               if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))
+                               if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
+                                   (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
                                        next = ifp->tstamp + ifp->valid_lft * HZ;
 
                                spin_unlock(&ifp->lock);
@@ -3761,7 +3769,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
        put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
                      ifa->idev->dev->ifindex);
 
-       if (!(ifa->flags&IFA_F_PERMANENT)) {
+       if (!((ifa->flags&IFA_F_PERMANENT) &&
+             (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
                preferred = ifa->prefered_lft;
                valid = ifa->valid_lft;
                if (preferred != INFINITY_LIFE_TIME) {
index 4acdb63495dbe2484de9d278b810401e6d26fc6a..e6f931997996ef46efa1272b16e261bf8f6609a7 100644 (file)
@@ -1193,11 +1193,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 
        fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
                        (opt ? opt->opt_nflen : 0);
-       maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
+       maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+                    sizeof(struct frag_hdr);
 
        if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
-               if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
-                       ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
+               unsigned int maxnonfragsize, headersize;
+
+               headersize = sizeof(struct ipv6hdr) +
+                            (opt ? opt->tot_len : 0) +
+                            (dst_allfrag(&rt->dst) ?
+                             sizeof(struct frag_hdr) : 0) +
+                            rt->rt6i_nfheader_len;
+
+               maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
+                                mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+
+               /* dontfrag active */
+               if ((cork->length + length > mtu - headersize) && dontfrag &&
+                   (sk->sk_protocol == IPPROTO_UDP ||
+                    sk->sk_protocol == IPPROTO_RAW)) {
+                       ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
+                                                  sizeof(struct ipv6hdr));
+                       goto emsgsize;
+               }
+
+               if (cork->length + length > maxnonfragsize - headersize) {
+emsgsize:
+                       ipv6_local_error(sk, EMSGSIZE, fl6,
+                                        mtu - headersize +
+                                        sizeof(struct ipv6hdr));
                        return -EMSGSIZE;
                }
        }
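
The fragmentation limits above are pure arithmetic on the MTU and the header sizes: maxfraglen rounds the payload room down to a multiple of 8 and reserves space for the per-fragment header. A standalone sketch of that calculation (plain userspace C, with an assumed 1500-byte MTU and a bare 40-byte IPv6 header, no extension headers) shows the numbers involved:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;          /* assumed link MTU */
	unsigned int fragheaderlen = 40;  /* bare IPv6 header, no options (assumption) */
	unsigned int frag_hdr_len = 8;    /* size of the IPv6 fragment header */
	unsigned int maxfraglen;

	/* Same arithmetic as the maxfraglen line above: round the payload
	 * room down to a multiple of 8, then keep space for the fragment
	 * header that each fragment will carry.
	 */
	maxfraglen = ((mtu - fragheaderlen) & ~7u) + fragheaderlen - frag_hdr_len;

	printf("mtu=%u fragheaderlen=%u -> maxfraglen=%u\n",
	       mtu, fragheaderlen, maxfraglen);  /* prints 1488 */
	return 0;
}
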
@@ -1222,12 +1246,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
         * --yoshfuji
         */
 
-       if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
-                                          sk->sk_protocol == IPPROTO_RAW)) {
-               ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
-               return -EMSGSIZE;
-       }
-
        skb = skb_peek_tail(&sk->sk_write_queue);
        cork->length += length;
        if (((length > mtu) ||
index d6062325db08411207fd63b435ec1e5e4e0f2001..7881965a824840a763a7e1183efcf21153f87b47 100644 (file)
@@ -103,16 +103,25 @@ struct ip6_tnl_net {
 
 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 {
-       struct pcpu_tstats sum = { 0 };
+       struct pcpu_tstats tmp, sum = { 0 };
        int i;
 
        for_each_possible_cpu(i) {
+               unsigned int start;
                const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
 
-               sum.rx_packets += tstats->rx_packets;
-               sum.rx_bytes   += tstats->rx_bytes;
-               sum.tx_packets += tstats->tx_packets;
-               sum.tx_bytes   += tstats->tx_bytes;
+               do {
+                       start = u64_stats_fetch_begin_bh(&tstats->syncp);
+                       tmp.rx_packets = tstats->rx_packets;
+                       tmp.rx_bytes = tstats->rx_bytes;
+                       tmp.tx_packets = tstats->tx_packets;
+                       tmp.tx_bytes = tstats->tx_bytes;
+               } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+               sum.rx_packets += tmp.rx_packets;
+               sum.rx_bytes   += tmp.rx_bytes;
+               sum.tx_packets += tmp.tx_packets;
+               sum.tx_bytes   += tmp.tx_bytes;
        }
        dev->stats.rx_packets = sum.rx_packets;
        dev->stats.rx_bytes   = sum.rx_bytes;
@@ -824,8 +833,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
                }
 
                tstats = this_cpu_ptr(t->dev->tstats);
+               u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
+               u64_stats_update_end(&tstats->syncp);
 
                netif_rx(skb);
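
The fetch/retry loop above is the usual seqcount pattern: the writer makes the sequence odd while it touches the counters, and the reader retries until it sees the same even value before and after taking its copy. A minimal single-threaded userspace analogue (invented names, C11 atomics; the kernel's u64_stats/syncp helpers additionally supply the required memory barriers and per-arch variants) might look like:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for pcpu_tstats + syncp, not the kernel types.  This
 * sketch only shows the retry structure and is exercised single-threaded.
 */
struct tstats {
	atomic_uint seq;	/* even = stable, odd = update in progress */
	uint64_t rx_packets;
	uint64_t rx_bytes;
};

static void tstats_update(struct tstats *s, uint64_t len)
{
	atomic_fetch_add(&s->seq, 1);	/* begin: sequence becomes odd */
	s->rx_packets++;
	s->rx_bytes += len;
	atomic_fetch_add(&s->seq, 1);	/* end: sequence becomes even again */
}

static void tstats_snapshot(struct tstats *s, uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*packets = s->rx_packets;
		*bytes   = s->rx_bytes;
		/* retry if a writer was active or slipped in meanwhile */
	} while ((start & 1) || start != atomic_load(&s->seq));
}

int main(void)
{
	struct tstats s = { .seq = 0 };
	uint64_t p, b;

	tstats_update(&s, 1500);
	tstats_snapshot(&s, &p, &b);
	printf("rx_packets=%llu rx_bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}
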
 
index ed94ba61dda0ec1c3e2b170fe74eb2334b752700..7b42d5ef868deaa193094a85a00b502aeaba0bdd 100644 (file)
@@ -75,26 +75,6 @@ struct vti6_net {
        struct ip6_tnl __rcu **tnls[2];
 };
 
-static struct net_device_stats *vti6_get_stats(struct net_device *dev)
-{
-       struct pcpu_tstats sum = { 0 };
-       int i;
-
-       for_each_possible_cpu(i) {
-               const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
-               sum.rx_packets += tstats->rx_packets;
-               sum.rx_bytes   += tstats->rx_bytes;
-               sum.tx_packets += tstats->tx_packets;
-               sum.tx_bytes   += tstats->tx_bytes;
-       }
-       dev->stats.rx_packets = sum.rx_packets;
-       dev->stats.rx_bytes   = sum.rx_bytes;
-       dev->stats.tx_packets = sum.tx_packets;
-       dev->stats.tx_bytes   = sum.tx_bytes;
-       return &dev->stats;
-}
-
 #define for_each_vti6_tunnel_rcu(start) \
        for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
@@ -331,8 +311,10 @@ static int vti6_rcv(struct sk_buff *skb)
                }
 
                tstats = this_cpu_ptr(t->dev->tstats);
+               u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
+               u64_stats_update_end(&tstats->syncp);
 
                skb->mark = 0;
                secpath_reset(skb);
@@ -716,7 +698,7 @@ static const struct net_device_ops vti6_netdev_ops = {
        .ndo_start_xmit = vti6_tnl_xmit,
        .ndo_do_ioctl   = vti6_ioctl,
        .ndo_change_mtu = vti6_change_mtu,
-       .ndo_get_stats  = vti6_get_stats,
+       .ndo_get_stats64 = ip_tunnel_get_stats64,
 };
 
 /**
@@ -750,12 +732,18 @@ static void vti6_dev_setup(struct net_device *dev)
 static inline int vti6_dev_init_gen(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
+       int i;
 
        t->dev = dev;
        t->net = dev_net(dev);
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *stats;
+               stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&stats->syncp);
+       }
        return 0;
 }
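
vti6_dev_init_gen() now initialises the per-CPU syncp of every possible CPU right after allocation, before any counter is touched. A rough userspace analogue of that allocate-then-initialise-every-slot pattern (per-thread slots and a mutex standing in for the seqcount, names invented) could be:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented per-worker counters; the mutex stands in for the seqcount
 * that u64_stats_init() prepares in the kernel code above.
 */
struct tstats {
	pthread_mutex_t lock;
	unsigned long long rx_packets;
	unsigned long long rx_bytes;
};

/* Allocate one slot per worker and initialise every slot's lock up front,
 * mirroring alloc_percpu() followed by the for_each_possible_cpu() loop.
 */
static struct tstats *alloc_stats(unsigned int nworkers)
{
	struct tstats *stats = calloc(nworkers, sizeof(*stats));
	unsigned int i;

	if (!stats)
		return NULL;
	for (i = 0; i < nworkers; i++)
		pthread_mutex_init(&stats[i].lock, NULL);
	return stats;
}

int main(void)
{
	unsigned int i, nworkers = 4;
	struct tstats *stats = alloc_stats(nworkers);

	if (!stats)
		return 1;
	printf("initialised %u per-worker stat slots\n", nworkers);
	for (i = 0; i < nworkers; i++)
		pthread_mutex_destroy(&stats[i].lock);
	free(stats);
	return 0;
}
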
 
index a0a48ac3403f3a9284786aa5b8f00dc03696e2e0..4b4944c3e4c4473c8b1ca4fbce8700a8af988604 100644 (file)
@@ -1905,9 +1905,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
                else
                        rt->rt6i_gateway = *dest;
                rt->rt6i_flags = ort->rt6i_flags;
-               if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
-                   (RTF_DEFAULT | RTF_ADDRCONF))
-                       rt6_set_from(rt, ort);
+               rt6_set_from(rt, ort);
                rt->rt6i_metric = 0;
 
 #ifdef CONFIG_IPV6_SUBTREES
index 366fbba3359ab790e1b22917931acdbdb1daebf3..d3005b34476a82f8172cfe8807507d561d307938 100644 (file)
@@ -702,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb)
                }
 
                tstats = this_cpu_ptr(tunnel->dev->tstats);
+               u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
+               u64_stats_update_end(&tstats->syncp);
 
                netif_rx(skb);
 
@@ -924,7 +926,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                if (tunnel->parms.iph.daddr && skb_dst(skb))
                        skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 
-               if (skb->len > mtu) {
+               if (skb->len > mtu && !skb_is_gso(skb)) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                        ip_rt_put(rt);
                        goto tx_error;
@@ -966,8 +968,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
        tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
        skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
-       if (IS_ERR(skb))
+       if (IS_ERR(skb)) {
+               ip_rt_put(rt);
                goto out;
+       }
 
        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
                            ttl, df, !net_eq(tunnel->net, dev_net(dev)));
index 7b01b9f5846c845bcf4f04b21cfbcb5786c19421..c71b699eb555165e124ec21109e37212587c7741 100644 (file)
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
        unsigned long cpu_flags;
        size_t copied = 0;
        u32 peek_seq = 0;
-       u32 *seq;
+       u32 *seq, skb_len;
        unsigned long used;
        int target;     /* Read at least this many bytes */
        long timeo;
@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                }
                continue;
        found_ok_skb:
+               skb_len = skb->len;
                /* Ok so how much can we use? */
                used = skb->len - offset;
                if (len < used)
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                }
 
                /* Partial read */
-               if (used + offset < skb->len)
+               if (used + offset < skb_len)
                        continue;
        } while (len > 0);
 
index 36c3a4cbcabf66b2a0864414b3c23ea2896b7c4e..a0757913046eab8eee0c99104d8900db4e338c3e 100644 (file)
@@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev)
 }
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
-                                        struct sk_buff *skb)
+                                        struct sk_buff *skb,
+                                        void *accel_priv)
 {
        return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 };
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
-                                         struct sk_buff *skb)
+                                         struct sk_buff *skb,
+                                         void *accel_priv)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
index c558b246ef0036c38e6c8a00b338b43c46913f78..ca7fa7f0613dba22cdc0e82d8a96e1de9ff011dc 100644 (file)
@@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 {
        struct sta_info *sta = tx->sta;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
        struct ieee80211_local *local = tx->local;
 
        if (unlikely(!sta))
@@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
                     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
                int ac = skb_get_queue_mapping(tx->skb);
 
-               /* only deauth, disassoc and action are bufferable MMPDUs */
-               if (ieee80211_is_mgmt(hdr->frame_control) &&
-                   !ieee80211_is_deauth(hdr->frame_control) &&
-                   !ieee80211_is_disassoc(hdr->frame_control) &&
-                   !ieee80211_is_action(hdr->frame_control)) {
-                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-                       return TX_CONTINUE;
-               }
-
                ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
                       sta->sta.addr, sta->sta.aid, ac);
                if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+
        if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
                return TX_CONTINUE;
 
+       /* only deauth, disassoc and action are bufferable MMPDUs */
+       if (ieee80211_is_mgmt(hdr->frame_control) &&
+           !ieee80211_is_deauth(hdr->frame_control) &&
+           !ieee80211_is_disassoc(hdr->frame_control) &&
+           !ieee80211_is_action(hdr->frame_control)) {
+               if (tx->flags & IEEE80211_TX_UNICAST)
+                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+               return TX_CONTINUE;
+       }
+
        if (tx->flags & IEEE80211_TX_UNICAST)
                return ieee80211_tx_h_unicast_ps_buf(tx);
        else
index c8beafd401aa283f0befa0494e02649359bab83f..5a355a46d1dc4a70e82fadcd509ddd4fae34c3e1 100644 (file)
@@ -63,6 +63,7 @@
 #include <net/ip_vs.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 
@@ -97,6 +98,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return;
 
+       /* Applications may adjust TCP seqs */
+       if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
+           !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
+               return;
+
        /*
         * The connection is not yet in the hashtable, so we update it.
         * CIP->VIP will remain the same, so leave the tuple in
index 17c1bcb182c6b58a782744e23bfe671de160bf92..f6e2ae91a80badd697a1f77e299dc332bc34a8d1 100644 (file)
@@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
        if (off == 0)
                return 0;
 
+       if (unlikely(!seqadj)) {
+               WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
+               return 0;
+       }
+
        set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
 
        spin_lock_bh(&ct->lock);
index 902fb0a6b38ad9baac15c3fc3f17a3e695510db8..7a394df0deb7686fa7fe4da162ff77d3c03435c5 100644 (file)
@@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net)
 void nf_conntrack_tstamp_pernet_fini(struct net *net)
 {
        nf_conntrack_tstamp_fini_sysctl(net);
-       nf_ct_extend_unregister(&tstamp_extend);
 }
 
 int nf_conntrack_tstamp_init(void)
index f02b3605823e5616bc70f128a896e76d8f984ff2..1fb2258c35357c8c6f5072ae6593de2d53ab9f03 100644 (file)
@@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb,
                         struct nf_conntrack_expect *exp)
 {
        char buffer[sizeof("4294967296 65635")];
+       struct nf_conn *ct = exp->master;
+       union nf_inet_addr newaddr;
        u_int16_t port;
        unsigned int ret;
 
        /* Reply comes from server. */
+       newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
+
        exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
        exp->dir = IP_CT_DIR_REPLY;
        exp->expectfn = nf_nat_follow_master;
@@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb,
        }
 
        if (port == 0) {
-               nf_ct_helper_log(skb, exp->master, "all ports in use");
+               nf_ct_helper_log(skb, ct, "all ports in use");
                return NF_DROP;
        }
 
-       ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
-                                      protoff, matchoff, matchlen, buffer,
-                                      strlen(buffer));
+       /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
+        * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
+        * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
+        * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
+        * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
+        *
+        * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
+        *                        255.255.255.255==4294967295, 10 digits)
+        * P:         bound port (min 1 d, max 5d (65535))
+        * F:         filename   (min 1 d )
+        * S:         size       (min 1 d )
+        * 0x01, \n:  terminators
+        */
+       /* AAA = "us", i.e. where the server normally talks to. */
+       snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
+       pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
+                buffer, &newaddr.ip, port);
+
+       ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
+                                      matchlen, buffer, strlen(buffer));
        if (ret != NF_ACCEPT) {
-               nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
+               nf_ct_helper_log(skb, ct, "cannot mangle packet");
                nf_ct_unexpect_related(exp);
        }
+
        return ret;
 }
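
The buffer in the helper above is sized from a string literal long enough for the worst case, a 10-digit address plus a 5-digit port, so the snprintf() can never truncate. A small userspace check of that sizing idea (hypothetical values: 4294967295 is the largest IPv4 address as a host-order integer and 65535 the largest port) could be:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Worst case: 10-digit address, one space, 5-digit port, NUL. */
	char buffer[sizeof("4294967295 65535")];
	uint32_t addr = 0xffffffffu;	/* 255.255.255.255 in host order */
	uint16_t port = 65535;
	int n;

	n = snprintf(buffer, sizeof(buffer), "%u %u",
		     (unsigned int)addr, (unsigned int)port);

	/* 17-byte buffer: 16 characters written plus the terminating NUL. */
	printf("wrote %d chars into a %zu-byte buffer: \"%s\"\n",
	       n, sizeof(buffer), buffer);
	return 0;
}
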
 
index f93b7d06f4be9525ad4ab4314a106b01e85749ba..71a9f49a768b887dec1b361b78147016cb2b9307 100644 (file)
@@ -312,6 +312,9 @@ static int nf_tables_table_enable(struct nft_table *table)
        int err, i = 0;
 
        list_for_each_entry(chain, &table->chains, list) {
+               if (!(chain->flags & NFT_BASE_CHAIN))
+                       continue;
+
                err = nf_register_hook(&nft_base_chain(chain)->ops);
                if (err < 0)
                        goto err;
@@ -321,6 +324,9 @@ static int nf_tables_table_enable(struct nft_table *table)
        return 0;
 err:
        list_for_each_entry(chain, &table->chains, list) {
+               if (!(chain->flags & NFT_BASE_CHAIN))
+                       continue;
+
                if (i-- <= 0)
                        break;
 
@@ -333,8 +339,10 @@ static int nf_tables_table_disable(struct nft_table *table)
 {
        struct nft_chain *chain;
 
-       list_for_each_entry(chain, &table->chains, list)
-               nf_unregister_hook(&nft_base_chain(chain)->ops);
+       list_for_each_entry(chain, &table->chains, list) {
+               if (chain->flags & NFT_BASE_CHAIN)
+                       nf_unregister_hook(&nft_base_chain(chain)->ops);
+       }
 
        return 0;
 }
@@ -2098,17 +2106,21 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                                   struct netlink_callback *cb)
 {
        const struct nft_set *set;
-       unsigned int idx = 0, s_idx = cb->args[0];
+       unsigned int idx, s_idx = cb->args[0];
        struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
 
        if (cb->args[1])
                return skb->len;
 
        list_for_each_entry(table, &ctx->afi->tables, list) {
-               if (cur_table && cur_table != table)
-                       continue;
+               if (cur_table) {
+                       if (cur_table != table)
+                               continue;
 
+                       cur_table = NULL;
+               }
                ctx->table = table;
+               idx = 0;
                list_for_each_entry(set, &ctx->table->sets, list) {
                        if (idx < s_idx)
                                goto cont;
@@ -2370,7 +2382,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
        enum nft_registers dreg;
 
        dreg = nft_type_to_reg(set->dtype);
-       return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+       return nft_validate_data_load(ctx, dreg, &elem->data,
+                                     set->dtype == NFT_DATA_VERDICT ?
+                                     NFT_DATA_VERDICT : NFT_DATA_VALUE);
 }
 
 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
index 3c4b69e5fe17348b422f390bf79ab4269043fe5d..a155d19a225edcfb4b1a550ebc898f40d51c564d 100644 (file)
@@ -1053,6 +1053,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net)
 #ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
 #endif
+       nf_log_unset(net, &nfulnl_logger);
 }
 
 static struct pernet_operations nfnl_log_net_ops = {
index 8e0bb75e7c51e61092fb904dc1d93befe1e7320d..55c939f5371fabf35ad996efb51ea52a29630703 100644 (file)
@@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr,
 {
        struct nft_exthdr *priv = nft_expr_priv(expr);
        struct nft_data *dest = &data[priv->dreg];
-       unsigned int offset;
+       unsigned int offset = 0;
        int err;
 
        err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
index 872529105abc7c3a2e41b9d579e934e019e8e5ff..83b9927e7d19f3b14c8db4b199385122843d6764 100644 (file)
@@ -384,7 +384,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
 {
        dev->dep_link_up = true;
 
-       if (!dev->active_target) {
+       if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) {
                struct nfc_target *target;
 
                target = nfc_find_target(dev, target_idx);
index b4c8b0022feeebea1aec424542e37a8391d87297..ba2dffeff60876ca669993d1863dcbb6cb76a740 100644 (file)
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
        ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
        /* due to this, we will claim to support iWARP devices unless we
           check node_type. */
-       if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
+       if (ret || !cm_id->device ||
+           cm_id->device->node_type != RDMA_NODE_IB_CA)
                ret = -EADDRNOTAVAIL;
 
        rdsdebug("addr %pI4 ret %d node type %d\n",
index 33af77246bfeb90c6b31bfe6163dc2c9cfbda787..62ced6516c586f5c7b5cb779e079ad57b72b7518 100644 (file)
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        if (msg->msg_name) {
                struct sockaddr_rose *srose;
+               struct full_sockaddr_rose *full_srose = msg->msg_name;
 
                memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
                srose = msg->msg_name;
@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
                srose->srose_addr   = rose->dest_addr;
                srose->srose_call   = rose->dest_call;
                srose->srose_ndigis = rose->dest_ndigis;
-               if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
-                       struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
-                       for (n = 0 ; n < rose->dest_ndigis ; n++)
-                               full_srose->srose_digis[n] = rose->dest_digis[n];
-                       msg->msg_namelen = sizeof(struct full_sockaddr_rose);
-               } else {
-                       if (rose->dest_ndigis >= 1) {
-                               srose->srose_ndigis = 1;
-                               srose->srose_digi = rose->dest_digis[0];
-                       }
-                       msg->msg_namelen = sizeof(struct sockaddr_rose);
-               }
+               for (n = 0 ; n < rose->dest_ndigis ; n++)
+                       full_srose->srose_digis[n] = rose->dest_digis[n];
+               msg->msg_namelen = sizeof(struct full_sockaddr_rose);
        }
 
        skb_free_datagram(sk, skb);
index 5c5edf56adbd4fc9a4d64a509a9103511f995db4..11fe1a416433f1fe2ea46c86ee753826e523b716 100644 (file)
@@ -77,16 +77,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
                                     &csum_idx_gen, &csum_hash_info);
                if (IS_ERR(pc))
                        return PTR_ERR(pc);
-               p = to_tcf_csum(pc);
                ret = ACT_P_CREATED;
        } else {
-               p = to_tcf_csum(pc);
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &csum_hash_info);
+               if (bind) /* don't override defaults */
+                       return 0;
+               tcf_hash_release(pc, bind, &csum_hash_info);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
 
+       p = to_tcf_csum(pc);
        spin_lock_bh(&p->tcf_lock);
        p->tcf_action = parm->action;
        p->update_flags = parm->update_flags;
index 5645a4d32abdd187f59d9f6301842e35d08762df..eb9ba60ebab4394527d10f25511b36b5aff84a32 100644 (file)
@@ -102,10 +102,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
                        return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &gact_hash_info);
+               if (bind) /* don't override defaults */
+                       return 0;
+               tcf_hash_release(pc, bind, &gact_hash_info);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
 
        gact = to_gact(pc);
index 882a89762f77c2edb8b6d10d0c8ef69cf3e39aac..dcbfe8ce04a6a30ee9ce273f8e82196a5ba4a319 100644 (file)
@@ -141,10 +141,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                        return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
-               if (!ovr) {
-                       tcf_ipt_release(to_ipt(pc), bind);
+               if (bind) /* don't override defaults */
+                       return 0;
+               tcf_ipt_release(to_ipt(pc), bind);
+
+               if (!ovr)
                        return -EEXIST;
-               }
        }
        ipt = to_ipt(pc);
 
index 6a15ace002411a8c11efeed6156d72324b9eb2ff..76869538d0287148e48bf43660d3e243912a6e30 100644 (file)
@@ -70,15 +70,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                                     &nat_idx_gen, &nat_hash_info);
                if (IS_ERR(pc))
                        return PTR_ERR(pc);
-               p = to_tcf_nat(pc);
                ret = ACT_P_CREATED;
        } else {
-               p = to_tcf_nat(pc);
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &nat_hash_info);
+               if (bind)
+                       return 0;
+               tcf_hash_release(pc, bind, &nat_hash_info);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
+       p = to_tcf_nat(pc);
 
        spin_lock_bh(&p->tcf_lock);
        p->old_addr = parm->old_addr;
index 03b67674169c5db79eb546d93d0b4833324786d3..7aa2dcd989f842976c57d449452d2e0458a7b32c 100644 (file)
@@ -84,10 +84,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                ret = ACT_P_CREATED;
        } else {
                p = to_pedit(pc);
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &pedit_hash_info);
+               tcf_hash_release(pc, bind, &pedit_hash_info);
+               if (bind)
+                       return 0;
+               if (!ovr)
                        return -EEXIST;
-               }
+
                if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
                        keys = kmalloc(ksize, GFP_KERNEL);
                        if (keys == NULL)
index 16a62c36928a78110923d36d5b87808936d8f90a..ef246d87e68bbdf628bc00eebb05e9cb88a3cb87 100644 (file)
@@ -177,10 +177,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                        if (bind) {
                                police->tcf_bindcnt += 1;
                                police->tcf_refcnt += 1;
+                               return 0;
                        }
                        if (ovr)
                                goto override;
-                       return ret;
+                       /* not replacing */
+                       return -EEXIST;
                }
        }
 
index 31157d3e729c8c29e8bb89ce03fa68c5ad8e7f39..f7b45ab85388748e9f730939b7f214952f8df84e 100644 (file)
@@ -142,10 +142,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
                ret = ACT_P_CREATED;
        } else {
                d = to_defact(pc);
-               if (!ovr) {
-                       tcf_simp_release(d, bind);
+
+               if (bind)
+                       return 0;
+               tcf_simp_release(d, bind);
+               if (!ovr)
                        return -EEXIST;
-               }
+
                reset_policy(d, defdata, parm);
        }
 
index 35ea643b4325562d74c4c05d9950a4f8a8a7ad32..8fe9d25c3008ece001d4da815cdf4f50c893fdab 100644 (file)
@@ -120,10 +120,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                ret = ACT_P_CREATED;
        } else {
                d = to_skbedit(pc);
-               if (!ovr) {
-                       tcf_hash_release(pc, bind, &skbedit_hash_info);
+               if (bind)
+                       return 0;
+               tcf_hash_release(pc, bind, &skbedit_hash_info);
+               if (!ovr)
                        return -EEXIST;
-               }
        }
 
        spin_lock_bh(&d->tcf_lock);
index 922a09406ba70573499877ac3e561c9ee7b61a5c..7fc899a943a8fa8368415bc0c6c8a939bd042963 100644 (file)
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
-               ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+               ret = dev_hard_start_xmit(skb, dev, txq);
 
        HARD_TX_UNLOCK(dev, txq);
 
index f51ba985a36eaaf0b021adba700cf16d42394e6f..59268f6e2c36a0b32ac6b768dea15b7b6be0021d 100644 (file)
@@ -208,8 +208,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
        INIT_LIST_HEAD(&q->retransmit);
        INIT_LIST_HEAD(&q->sacked);
        INIT_LIST_HEAD(&q->abandoned);
-
-       q->empty = 1;
 }
 
 /* Free the outqueue structure and any related pending chunks.
@@ -332,7 +330,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
                                SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
                        else
                                SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
-                       q->empty = 0;
                        break;
                }
        } else {
@@ -654,7 +651,6 @@ redo:
                        if (chunk->fast_retransmit == SCTP_NEED_FRTX)
                                chunk->fast_retransmit = SCTP_DONT_FRTX;
 
-                       q->empty = 0;
                        q->asoc->stats.rtxchunks++;
                        break;
                }
@@ -1065,8 +1061,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 
                        sctp_transport_reset_timers(transport);
 
-                       q->empty = 0;
-
                        /* Only let one DATA chunk get bundled with a
                         * COOKIE-ECHO chunk.
                         */
@@ -1275,29 +1269,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
                 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
                 asoc->adv_peer_ack_point);
 
-       /* See if all chunks are acked.
-        * Make sure the empty queue handler will get run later.
-        */
-       q->empty = (list_empty(&q->out_chunk_list) &&
-                   list_empty(&q->retransmit));
-       if (!q->empty)
-               goto finish;
-
-       list_for_each_entry(transport, transport_list, transports) {
-               q->empty = q->empty && list_empty(&transport->transmitted);
-               if (!q->empty)
-                       goto finish;
-       }
-
-       pr_debug("%s: sack queue is empty\n", __func__);
-finish:
-       return q->empty;
+       return sctp_outq_is_empty(q);
 }
 
-/* Is the outqueue empty?  */
+/* Is the outqueue empty?
+ * The queue is empty when there is no pending data, no in-flight data,
+ * and no pending retransmissions.
+ */
 int sctp_outq_is_empty(const struct sctp_outq *q)
 {
-       return q->empty;
+       return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+              list_empty(&q->retransmit);
 }
 
 /********************************************************************
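
The rewritten sctp_outq_is_empty() derives emptiness from counters and lists the queue already maintains, instead of keeping a separate q->empty flag in sync at every enqueue, transmit and ack site. A toy illustration of the same idea (invented queue type, not the SCTP structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-in for the outqueue: counters that are maintained anyway. */
struct outq {
	size_t out_qlen;		/* queued, not yet transmitted */
	size_t outstanding_bytes;	/* in flight, awaiting acknowledgement */
	size_t retransmit_count;	/* queued for retransmission */
};

/* Emptiness is a pure function of existing state; there is no separate
 * "empty" flag that every enqueue/transmit/ack path would have to update.
 */
static bool outq_is_empty(const struct outq *q)
{
	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
	       q->retransmit_count == 0;
}

int main(void)
{
	struct outq q = { 0 };

	printf("empty=%d\n", outq_is_empty(&q));	/* 1 */
	q.outstanding_bytes = 100;
	printf("empty=%d\n", outq_is_empty(&q));	/* 0 */
	return 0;
}
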
index 69cd9bf3f561d64d377ddb65091b98e1017e15cf..13b9877458201fa4d5bb14a36892825272229050 100644 (file)
@@ -1498,6 +1498,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
                int type;
 
                head = head->next;
+               buf->next = NULL;
 
                /* Ensure bearer is still enabled */
                if (unlikely(!b_ptr->active))
index c081a7632302ca0798c193b039214466c3180184..d43f3182b1d4cc1628817d882c9d006006e27e1e 100644 (file)
@@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk,
        return p_ptr;
 }
 
-int tipc_deleteport(u32 ref)
+int tipc_deleteport(struct tipc_port *p_ptr)
 {
-       struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
-       tipc_withdraw(ref, 0, NULL);
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
+       tipc_withdraw(p_ptr, 0, NULL);
 
-       tipc_ref_discard(ref);
-       tipc_port_unlock(p_ptr);
+       spin_lock_bh(p_ptr->lock);
+       tipc_ref_discard(p_ptr->ref);
+       spin_unlock_bh(p_ptr->lock);
 
        k_cancel_timer(&p_ptr->timer);
        if (p_ptr->connected) {
@@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
 }
 
 
-int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
+                struct tipc_name_seq const *seq)
 {
-       struct tipc_port *p_ptr;
        struct publication *publ;
        u32 key;
-       int res = -EINVAL;
 
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
+       if (p_ptr->connected)
                return -EINVAL;
+       key = p_ptr->ref + p_ptr->pub_count + 1;
+       if (key == p_ptr->ref)
+               return -EADDRINUSE;
 
-       if (p_ptr->connected)
-               goto exit;
-       key = ref + p_ptr->pub_count + 1;
-       if (key == ref) {
-               res = -EADDRINUSE;
-               goto exit;
-       }
        publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
                                    scope, p_ptr->ref, key);
        if (publ) {
                list_add(&publ->pport_list, &p_ptr->publications);
                p_ptr->pub_count++;
                p_ptr->published = 1;
-               res = 0;
+               return 0;
        }
-exit:
-       tipc_port_unlock(p_ptr);
-       return res;
+       return -EINVAL;
 }
 
-int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
+                 struct tipc_name_seq const *seq)
 {
-       struct tipc_port *p_ptr;
        struct publication *publ;
        struct publication *tpubl;
        int res = -EINVAL;
 
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
        if (!seq) {
                list_for_each_entry_safe(publ, tpubl,
                                         &p_ptr->publications, pport_list) {
@@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
        }
        if (list_empty(&p_ptr->publications))
                p_ptr->published = 0;
-       tipc_port_unlock(p_ptr);
        return res;
 }
 
index 9122535973430edff99e8ee6070a091975dea457..34f12bd4074e49ff8b4c7809bfe0bc706941ec7c 100644 (file)
@@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err);
 
 void tipc_acknowledge(u32 port_ref, u32 ack);
 
-int tipc_deleteport(u32 portref);
+int tipc_deleteport(struct tipc_port *p_ptr);
 
 int tipc_portimportance(u32 portref, unsigned int *importance);
 int tipc_set_portimportance(u32 portref, unsigned int importance);
@@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
 int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
 int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
 
-int tipc_publish(u32 portref, unsigned int scope,
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
                 struct tipc_name_seq const *name_seq);
-int tipc_withdraw(u32 portref, unsigned int scope,
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
                  struct tipc_name_seq const *name_seq);
 
 int tipc_connect(u32 portref, struct tipc_portid const *port);
index 3b61851bb9276ec733f7ab8c9d2179a5dd7fef4d..e741416d1d24d4655cd4096b07f84cd978c90cc7 100644 (file)
@@ -354,7 +354,7 @@ static int release(struct socket *sock)
         * Delete TIPC port; this ensures no more messages are queued
         * (also disconnects an active connection & sends a 'FIN-' to peer)
         */
-       res = tipc_deleteport(tport->ref);
+       res = tipc_deleteport(tport);
 
        /* Discard any remaining (connection-based) messages in receive queue */
        __skb_queue_purge(&sk->sk_receive_queue);
@@ -386,30 +386,46 @@ static int release(struct socket *sock)
  */
 static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
 {
+       struct sock *sk = sock->sk;
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
-       u32 portref = tipc_sk_port(sock->sk)->ref;
+       struct tipc_port *tport = tipc_sk_port(sock->sk);
+       int res = -EINVAL;
 
-       if (unlikely(!uaddr_len))
-               return tipc_withdraw(portref, 0, NULL);
+       lock_sock(sk);
+       if (unlikely(!uaddr_len)) {
+               res = tipc_withdraw(tport, 0, NULL);
+               goto exit;
+       }
 
-       if (uaddr_len < sizeof(struct sockaddr_tipc))
-               return -EINVAL;
-       if (addr->family != AF_TIPC)
-               return -EAFNOSUPPORT;
+       if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+               res = -EINVAL;
+               goto exit;
+       }
+       if (addr->family != AF_TIPC) {
+               res = -EAFNOSUPPORT;
+               goto exit;
+       }
 
        if (addr->addrtype == TIPC_ADDR_NAME)
                addr->addr.nameseq.upper = addr->addr.nameseq.lower;
-       else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
-               return -EAFNOSUPPORT;
+       else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+               res = -EAFNOSUPPORT;
+               goto exit;
+       }
 
        if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
            (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
-           (addr->addr.nameseq.type != TIPC_CFG_SRV))
-               return -EACCES;
+           (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
+               res = -EACCES;
+               goto exit;
+       }
 
-       return (addr->scope > 0) ?
-               tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
-               tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
+       res = (addr->scope > 0) ?
+               tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
+               tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
+exit:
+       release_sock(sk);
+       return res;
 }
 
 /**
index a271c27fac774ce987c0db6f1330ffbfca6dc7f7..722da616438cd1e933fb4d5e60a2c4846f44d186 100644 (file)
@@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init(
        /* find payload start allowing for extended bitmap(s) */
 
        if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
+               if ((unsigned long)iterator->_arg -
+                   (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
+                   (unsigned long)iterator->_max_length)
+                       return -EINVAL;
                while (get_unaligned_le32(iterator->_arg) &
                                        (1 << IEEE80211_RADIOTAP_EXT)) {
                        iterator->_arg += sizeof(uint32_t);
index 65f800890d70d857c9b42d379caad887f5a98c74..d3c5bd7c6b513cd376a2e37857cb8f19919b5a43 100644 (file)
@@ -632,6 +632,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
        }
 #endif
 
+       if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+               WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
+               bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+                                      wdev->ssid, wdev->ssid_len,
+                                      WLAN_CAPABILITY_ESS,
+                                      WLAN_CAPABILITY_ESS);
+               if (bss)
+                       cfg80211_hold_bss(bss_from_pub(bss));
+       }
+
        if (wdev->current_bss) {
                cfg80211_unhold_bss(wdev->current_bss);
                cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
@@ -649,16 +659,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                return;
        }
 
-       if (!bss) {
-               WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
-               bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
-                                      wdev->ssid, wdev->ssid_len,
-                                      WLAN_CAPABILITY_ESS,
-                                      WLAN_CAPABILITY_ESS);
-               if (WARN_ON(!bss))
-                       return;
-               cfg80211_hold_bss(bss_from_pub(bss));
-       }
+       if (WARN_ON(!bss))
+               return;
 
        wdev->current_bss = bss_from_pub(bss);
 
index 6625699f497c7f3889f7b82b325a315c9e51774a..57b0b49f4e6e23a9c6dd974e3f9dd78664f1f59a 100644 (file)
@@ -234,6 +234,14 @@ static int inode_alloc_security(struct inode *inode)
        return 0;
 }
 
+static void inode_free_rcu(struct rcu_head *head)
+{
+       struct inode_security_struct *isec;
+
+       isec = container_of(head, struct inode_security_struct, rcu);
+       kmem_cache_free(sel_inode_cache, isec);
+}
+
 static void inode_free_security(struct inode *inode)
 {
        struct inode_security_struct *isec = inode->i_security;
@@ -244,8 +252,16 @@ static void inode_free_security(struct inode *inode)
                list_del_init(&isec->list);
        spin_unlock(&sbsec->isec_lock);
 
-       inode->i_security = NULL;
-       kmem_cache_free(sel_inode_cache, isec);
+       /*
+        * The inode may still be referenced in a path walk and
+        * a call to selinux_inode_permission() can be made
+        * after inode_free_security() is called. Ideally, the VFS
+        * wouldn't do this, but fixing that is a much harder
+        * job. For now, simply free the i_security via RCU, and
+        * leave the current inode->i_security pointer intact.
+        * The inode will be freed after the RCU grace period too.
+        */
+       call_rcu(&isec->rcu, inode_free_rcu);
 }
 
 static int file_alloc_security(struct file *file)
index b1dfe104945078ead53647c247c46aa6134fac2e..078e553f52f27a03ab83490f20bb2353782d2ef2 100644 (file)
@@ -38,7 +38,10 @@ struct task_security_struct {
 
 struct inode_security_struct {
        struct inode *inode;    /* back pointer to inode object */
-       struct list_head list;  /* list of inode_security_struct */
+       union {
+               struct list_head list;  /* list of inode_security_struct */
+               struct rcu_head rcu;    /* for freeing the inode_security_struct */
+       };
        u32 task_sid;           /* SID of creating task */
        u32 sid;                /* SID of this object */
        u16 sclass;             /* security class of this object */