Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
author David S. Miller <davem@davemloft.net>
Sat, 31 May 2014 00:54:47 +0000 (17:54 -0700)
committer David S. Miller <davem@davemloft.net>
Sat, 31 May 2014 00:54:47 +0000 (17:54 -0700)
Pablo Neira Ayuso says:

====================
Netfilter/IPVS updates for net-next

This small patchset contains three accumulated Netfilter/IPVS updates.
They are:

1) Refactor common NAT code by encapsulating it in a helper
   function, similar to what we do for other conntrack extensions,
   from Florian Westphal (a sketch of the idea follows the sign-off
   below).

2) A minor format string mismatch fix for IPVS, from Masanari Iida.

3) Add quota support to the netfilter accounting infrastructure: you
   can now attach quotas to accounting objects via the nfnetlink
   interface and use them from iptables, and you can also listen to
   quota notifications from userspace (a usage sketch follows the
   sign-off below). This enhancement is from Mathieu Poirier.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
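
For item (1), a minimal sketch of the kind of helper described there,
written against the existing conntrack extension API (nfct_nat(),
nf_ct_ext_add(), NF_CT_EXT_NAT, nf_ct_is_confirmed()); the helper name
and exact shape are illustrative and may differ from the code in the
series:

    /* Kernel-internal sketch; such a helper would sit next to the
     * other NAT/conntrack extension helpers. */
    #include <net/netfilter/nf_conntrack.h>
    #include <net/netfilter/nf_conntrack_extend.h>
    #include <net/netfilter/nf_nat.h>

    static inline struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
    {
            struct nf_conn_nat *nat = nfct_nat(ct);

            if (nat)                /* extension already attached */
                    return nat;

            /* Conntrack extensions can only be added while the entry
             * is still unconfirmed. */
            if (!nf_ct_is_confirmed(ct))
                    nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

            return nat;
    }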
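
For item (3), a minimal userspace sketch of listening for quota
notifications over nfnetlink. It assumes the notification group added
by the series is exposed as NFNLGRP_ACCT_QUOTA in
<linux/netfilter/nfnetlink.h>; a real listener would parse the
nfgenmsg/nlattr payload instead of only counting bytes:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/netfilter/nfnetlink.h>

    int main(void)
    {
            struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
            int grp = NFNLGRP_ACCT_QUOTA;   /* assumed group name */
            char buf[8192];
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NETFILTER);

            if (fd < 0)
                    return 1;
            if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                    return 1;
            /* Subscribe to the accounting quota notification group. */
            if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                           &grp, sizeof(grp)) < 0)
                    return 1;

            for (;;) {
                    ssize_t len = recv(fd, buf, sizeof(buf), 0);

                    if (len <= 0)
                            break;
                    /* Each message signals that an accounting object
                     * crossed its configured quota. */
                    printf("quota notification: %zd bytes\n", len);
            }
            close(fd);
            return 0;
    }
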
2012 files changed:
Documentation/ABI/testing/sysfs-bus-pci
Documentation/ABI/testing/sysfs-class-net
Documentation/ABI/testing/sysfs-class-net-queues [new file with mode: 0644]
Documentation/ABI/testing/sysfs-class-net-statistics [new file with mode: 0644]
Documentation/DocBook/80211.tmpl
Documentation/DocBook/media/Makefile
Documentation/devicetree/bindings/arm/arch_timer.txt
Documentation/devicetree/bindings/ata/apm-xgene.txt
Documentation/devicetree/bindings/clock/at91-clock.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
Documentation/devicetree/bindings/dma/ti-edma.txt
Documentation/devicetree/bindings/net/arc_emac.txt
Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
Documentation/devicetree/bindings/net/broadcom-systemport.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/can/xilinx_can.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
Documentation/devicetree/bindings/net/fixed-link.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/mdio-gpio.txt
Documentation/devicetree/bindings/net/micrel-ks8851.txt
Documentation/devicetree/bindings/net/micrel-ksz9021.txt [deleted file]
Documentation/devicetree/bindings/net/micrel-ksz90x1.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/pn544.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/st21nfca.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/trf7970a.txt
Documentation/devicetree/bindings/net/socfpga-dwmac.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/net/via-rhine.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
Documentation/driver-model/devres.txt
Documentation/input/elantech.txt
Documentation/kernel-parameters.txt
Documentation/networking/bonding.txt
Documentation/networking/can.txt
Documentation/networking/cdc_mbim.txt [new file with mode: 0644]
Documentation/networking/filter.txt
Documentation/networking/packet_mmap.txt
Documentation/networking/scaling.txt
MAINTAINERS
Makefile
arch/arc/kernel/entry.S
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am335x-igep0033.dtsi
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am3517.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/armada-370-db.dts
arch/arm/boot/dts/armada-370-xp.dtsi
arch/arm/boot/dts/armada-375-db.dts
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/armada-xp-db.dts
arch/arm/boot/dts/armada-xp-gp.dts
arch/arm/boot/dts/armada-xp-matrix.dts
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/at91-sama5d3_xplained.dts
arch/arm/boot/dts/at91sam9261.dtsi
arch/arm/boot/dts/at91sam9rl.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra7xx-clocks.dtsi
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx27-apf27.dts
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/imx50.dtsi
arch/arm/boot/dts/imx51.dtsi
arch/arm/boot/dts/imx53-m53evk.dts
arch/arm/boot/dts/imx53-mba53.dts
arch/arm/boot/dts/imx53-qsb-common.dtsi
arch/arm/boot/dts/imx53-tx53-x03x.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
arch/arm/boot/dts/imx6q-gw5400-a.dts
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sl-evk.dts
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/kirkwood-b3.dts
arch/arm/boot/dts/kirkwood-cloudbox.dts
arch/arm/boot/dts/kirkwood-dreamplug.dts
arch/arm/boot/dts/kirkwood-laplug.dts
arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
arch/arm/boot/dts/kirkwood-ns2-common.dtsi
arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
arch/arm/boot/dts/kirkwood-nsa310.dts
arch/arm/boot/dts/kirkwood-nsa310a.dts
arch/arm/boot/dts/kirkwood-openblocks_a6.dts
arch/arm/boot/dts/kirkwood-openblocks_a7.dts
arch/arm/boot/dts/kirkwood-t5325.dts
arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/omap2420.dtsi
arch/arm/boot/dts/omap2430.dtsi
arch/arm/boot/dts/omap3-beagle-xm-ab.dts [new file with mode: 0644]
arch/arm/boot/dts/omap3-cm-t3x30.dtsi
arch/arm/boot/dts/omap3-devkit8000.dts
arch/arm/boot/dts/omap3-igep.dtsi
arch/arm/boot/dts/omap3-igep0020.dts
arch/arm/boot/dts/omap3-lilly-a83x.dtsi
arch/arm/boot/dts/omap3-sb-t35.dtsi
arch/arm/boot/dts/omap3-sbc-t3517.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/r8a7740.dtsi
arch/arm/boot/dts/r8a7790-lager.dts
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/rk3188.dtsi
arch/arm/boot/dts/sama5d3.dtsi
arch/arm/boot/dts/sama5d3_mci2.dtsi
arch/arm/boot/dts/sama5d3_tcb1.dtsi
arch/arm/boot/dts/sama5d3_uart.dtsi
arch/arm/boot/dts/sh73a0.dtsi
arch/arm/boot/dts/ste-ccu8540.dts
arch/arm/boot/dts/stih415-pinctrl.dtsi
arch/arm/boot/dts/stih416-pinctrl.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/tegra124.dtsi
arch/arm/boot/dts/vf610-twr.dts
arch/arm/boot/dts/vf610.dtsi
arch/arm/boot/dts/vt8500.dtsi
arch/arm/boot/dts/wm8650.dtsi
arch/arm/boot/dts/wm8850.dtsi
arch/arm/boot/dts/zynq-7000.dtsi
arch/arm/boot/dts/zynq-zc702.dts
arch/arm/boot/dts/zynq-zc706.dts
arch/arm/common/bL_switcher.c
arch/arm/common/edma.c
arch/arm/common/mcpm_entry.c
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/sunxi_defconfig
arch/arm/configs/u300_defconfig
arch/arm/configs/u8500_defconfig
arch/arm/include/asm/cputype.h
arch/arm/include/asm/div64.h
arch/arm/include/asm/mcpm.h
arch/arm/include/asm/tlb.h
arch/arm/include/asm/xen/page.h
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/Makefile
arch/arm/kernel/calls.S
arch/arm/kernel/head.S
arch/arm/kernel/iwmmxt.S
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/pj4-cp0.c
arch/arm/kernel/sys_oabi-compat.c
arch/arm/kvm/Kconfig
arch/arm/kvm/mmu.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-omap2/board-rx51-video.c
arch/arm/mach-omap2/clkt_dpll.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/omap-headsmp.S
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-orion5x/common.h
arch/arm/mach-pxa/include/mach/hx4700.h
arch/arm/mach-rockchip/platsmp.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-lager.c
arch/arm/mach-shmobile/clock-r8a7778.c
arch/arm/mach-spear/time.c
arch/arm/mach-tegra/Kconfig
arch/arm/mach-tegra/board-paz00.c
arch/arm/mach-vexpress/dcscb.c
arch/arm/mach-vexpress/spc.c
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/vfp/vfpdouble.c
arch/arm/vfp/vfpsingle.c
arch/arm64/Kconfig
arch/arm64/boot/dts/apm-storm.dtsi
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/tlb.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/early_printk.c
arch/arm64/kernel/irq.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/time.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/mmu.c
arch/hexagon/include/asm/barrier.h [deleted file]
arch/ia64/include/asm/tlb.h
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/entry.S
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/metag/include/asm/barrier.h
arch/metag/include/asm/processor.h
arch/metag/include/uapi/asm/Kbuild
arch/metag/include/uapi/asm/resource.h [deleted file]
arch/mips/bcm47xx/sprom.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/dec/ecc-berr.c
arch/mips/dec/kn02xa-berr.c
arch/mips/dec/prom/Makefile
arch/mips/dec/prom/call_o32.S [deleted file]
arch/mips/fw/lib/call_o32.S
arch/mips/fw/sni/sniprom.c
arch/mips/include/asm/dec/prom.h
arch/mips/include/asm/rm9k-ocd.h [deleted file]
arch/mips/include/asm/syscall.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/proc.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/lantiq/dts/easy50712.dts
arch/mips/lib/csum_partial.S
arch/mips/lib/delay.c
arch/mips/lib/strncpy_user.S
arch/mips/loongson/Kconfig
arch/mips/loongson/lemote-2f/clock.c
arch/mips/mm/tlb-funcs.S
arch/mips/mm/tlbex.c
arch/mips/ralink/dts/mt7620a_eval.dts
arch/mips/ralink/dts/rt2880_eval.dts
arch/mips/ralink/dts/rt3052_eval.dts
arch/mips/ralink/dts/rt3883_eval.dts
arch/parisc/Kconfig
arch/parisc/include/asm/processor.h
arch/parisc/include/uapi/asm/Kbuild
arch/parisc/include/uapi/asm/resource.h [deleted file]
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/sys_parisc.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/syscall_table.S
arch/parisc/kernel/traps.c
arch/parisc/mm/fault.c
arch/powerpc/boot/main.c
arch/powerpc/boot/ops.h
arch/powerpc/boot/ps3.c
arch/powerpc/include/asm/opal.h
arch/powerpc/include/uapi/asm/setup.h
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/time.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/mm/hash_native_64.c
arch/powerpc/perf/hv-24x7.c
arch/powerpc/perf/hv-gpci.c
arch/powerpc/platforms/powernv/eeh-ioda.c
arch/powerpc/platforms/powernv/opal-dump.c
arch/powerpc/platforms/powernv/opal-elog.c
arch/powerpc/platforms/powernv/opal-flash.c
arch/powerpc/platforms/powernv/opal-sysparam.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/sysdev/fsl_soc.c
arch/powerpc/sysdev/ppc4xx_pci.c
arch/s390/crypto/aes_s390.c
arch/s390/crypto/des_s390.c
arch/s390/include/asm/ccwgroup.h
arch/s390/include/asm/tlb.h
arch/s390/net/bpf_jit_comp.c
arch/sh/include/asm/tlb.h
arch/sparc/include/asm/checksum_32.h
arch/sparc/include/asm/checksum_64.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/tsb.h
arch/sparc/kernel/head_64.S
arch/sparc/kernel/ktlb.S
arch/sparc/kernel/nmi.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/sys32.S
arch/sparc/kernel/sysfs.c
arch/sparc/kernel/unaligned_64.c
arch/sparc/lib/NG2memcpy.S
arch/sparc/mm/fault_64.c
arch/sparc/mm/gup.c
arch/sparc/mm/init_64.c
arch/sparc/mm/tlb.c
arch/sparc/mm/tsb.c
arch/um/include/asm/tlb.h
arch/um/include/shared/os.h
arch/um/kernel/physmem.c
arch/um/os-Linux/file.c
arch/um/os-Linux/main.c
arch/um/os-Linux/mem.c
arch/x86/Makefile
arch/x86/boot/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/include/asm/checksum_64.h
arch/x86/include/asm/hpet.h
arch/x86/include/asm/hugetlb.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/mcheck/threshold.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/cpu/rdrand.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/ldt.c
arch/x86/kernel/process_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smp.c
arch/x86/kernel/traps.c
arch/x86/kernel/vsmp_64.c
arch/x86/kernel/vsyscall_gtod.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lguest/boot.c
arch/x86/lib/msr.c
arch/x86/math-emu/errors.c
arch/x86/net/bpf_jit.S
arch/x86/net/bpf_jit_comp.c
arch/x86/platform/efi/early_printk.c
arch/x86/platform/olpc/olpc-xo1-pm.c
arch/x86/power/hibernate_64.c
arch/x86/vdso/vdso-layout.lds.S
arch/x86/vdso/vdso32-setup.c
arch/x86/xen/enlighten.c
arch/x86/xen/irq.c
arch/xtensa/Kconfig
arch/xtensa/boot/dts/kc705.dts [new file with mode: 0644]
arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi [new file with mode: 0644]
arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
arch/xtensa/boot/dts/xtfpga.dtsi
arch/xtensa/include/asm/bootparam.h
arch/xtensa/include/asm/fixmap.h [new file with mode: 0644]
arch/xtensa/include/asm/highmem.h
arch/xtensa/include/asm/pgtable.h
arch/xtensa/include/asm/sysmem.h [new file with mode: 0644]
arch/xtensa/include/asm/tlbflush.h
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/smp.c
arch/xtensa/kernel/xtensa_ksyms.c
arch/xtensa/mm/Makefile
arch/xtensa/mm/cache.c
arch/xtensa/mm/highmem.c [new file with mode: 0644]
arch/xtensa/mm/init.c
arch/xtensa/mm/mmu.c
arch/xtensa/mm/tlb.c
arch/xtensa/platforms/iss/Makefile
arch/xtensa/platforms/xt2000/setup.c
block/blk-cgroup.c
crypto/crypto_user.c
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/ac.c
drivers/acpi/acpi_platform.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/cm_sbs.c [new file with mode: 0644]
drivers/acpi/ec.c
drivers/acpi/video.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_imx.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/pata_arasan_cf.c
drivers/ata/pata_at91.c
drivers/ata/pata_samsung_cf.c
drivers/atm/idt77252.c
drivers/base/dd.c
drivers/base/platform.c
drivers/block/floppy.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btmrvl_sdio.h
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_h4.c
drivers/bus/mvebu-mbus.c
drivers/char/agp/frontend.c
drivers/char/random.c
drivers/char/tpm/tpm_ppi.c
drivers/clk/bcm/clk-kona-setup.c
drivers/clk/bcm/clk-kona.c
drivers/clk/bcm/clk-kona.h
drivers/clk/clk-divider.c
drivers/clk/clk.c
drivers/clk/shmobile/clk-mstp.c
drivers/clk/socfpga/clk-pll.c
drivers/clk/socfpga/clk.c
drivers/clk/tegra/clk-pll.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/ti/clk-43xx.c
drivers/clk/versatile/clk-vexpress-osc.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/zevio-timer.c
drivers/connector/cn_proc.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpufreq/unicore2-cpufreq.c
drivers/crypto/caam/error.c
drivers/dma/Kconfig
drivers/dma/edma.c
drivers/dma/fsl-edma.c
drivers/dma/sirf-dma.c
drivers/firmware/iscsi_ibft.c
drivers/gpio/gpio-ich.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_dma.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_family.h
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ucode.h
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rv770_dma.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-multitouch.c
drivers/hid/hid-sensor-hub.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/coretemp.c
drivers/hwmon/emc1403.c
drivers/hwmon/ltc2945.c
drivers/hwmon/vexpress.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-nomadik.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/idle/intel_idle.c
drivers/iio/adc/Kconfig
drivers/iio/adc/at91_adc.c
drivers/iio/adc/exynos_adc.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/industrialio-buffer.c
drivers/iio/light/cm32181.c
drivers/iio/light/cm36651.c
drivers/infiniband/hw/cxgb4/Kconfig
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/tca8418_keypad.c
drivers/input/misc/bma150.c
drivers/input/misc/da9055_onkey.c
drivers/input/misc/soc_button_array.c
drivers/input/mouse/elantech.c
drivers/input/mouse/elantech.h
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/input/serio/serio.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/ads7846.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-crossbar.c
drivers/irqchip/irq-gic.c
drivers/isdn/hisax/hfc4s8s_l1.c
drivers/isdn/hisax/icc.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/mISDN/l1oip_core.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-mpath.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/md/md.c
drivers/md/raid10.c
drivers/media/i2c/ov7670.c
drivers/media/i2c/s5c73m3/s5c73m3-core.c
drivers/media/media-device.c
drivers/media/platform/Kconfig
drivers/media/platform/davinci/vpbe_display.c
drivers/media/platform/davinci/vpfe_capture.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_display.c
drivers/media/platform/exynos4-is/fimc-core.c
drivers/media/tuners/fc2580.c
drivers/media/tuners/fc2580_priv.h
drivers/media/usb/dvb-usb-v2/Makefile
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/gspca/sonixb.c
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
drivers/memory/mvebu-devbus.c
drivers/mfd/rtsx_pcr.c
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mtd/nand/davinci_nand.c
drivers/mtd/ubi/block.c
drivers/mtd/ubi/wl.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_alb.h
drivers/net/bonding/bond_debugfs.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bond_options.h
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bond_sysfs_slave.c
drivers/net/bonding/bonding.h
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/dev.c
drivers/net/can/mcp251x.c [deleted file]
drivers/net/can/mscan/Kconfig
drivers/net/can/rcar_can.c [new file with mode: 0644]
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/sja1000/sja1000_isa.c
drivers/net/can/slcan.c
drivers/net/can/softing/softing_main.c
drivers/net/can/spi/Kconfig [new file with mode: 0644]
drivers/net/can/spi/Makefile [new file with mode: 0644]
drivers/net/can/spi/mcp251x.c [new file with mode: 0644]
drivers/net/can/usb/Kconfig
drivers/net/can/usb/Makefile
drivers/net/can/usb/gs_usb.c [new file with mode: 0644]
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/xilinx_can.c [new file with mode: 0644]
drivers/net/dsa/mv88e6123_61_65.c
drivers/net/dsa/mv88e6131.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/3com/3c509.c
drivers/net/ethernet/3com/3c589_cs.c
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/alteon/acenic.c
drivers/net/ethernet/altera/Kconfig
drivers/net/ethernet/altera/Makefile
drivers/net/ethernet/altera/altera_msgdma.c
drivers/net/ethernet/altera/altera_msgdma.h
drivers/net/ethernet/altera/altera_msgdmahw.h
drivers/net/ethernet/altera/altera_sgdma.c
drivers/net/ethernet/altera/altera_sgdma.h
drivers/net/ethernet/altera/altera_sgdmahw.h
drivers/net/ethernet/altera/altera_tse.h
drivers/net/ethernet/altera/altera_tse_ethtool.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/altera/altera_utils.c
drivers/net/ethernet/altera/altera_utils.h
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/ariadne.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/nmclan_cs.c
drivers/net/ethernet/arc/emac.h
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/Makefile
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bcmsysport.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/bcmsysport.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/Kconfig
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_cq.h
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/ec_bhf.c [new file with mode: 0644]
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_hw.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/faraday/ftmac100.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/ucc_geth_ethtool.c
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/icplus/ipg.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/e1000e/80003es2lan.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/e1000e/mac.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/nvm.c
drivers/net/ethernet/intel/e1000e/param.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/e1000e/phy.h
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_register.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/Makefile
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.h
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_alloc.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_osdep.h
drivers/net/ethernet/intel/i40evf/i40e_prototype.h
drivers/net/ethernet/intel/i40evf/i40e_register.h
drivers/net/ethernet/intel/i40evf/i40e_status.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_82575.h
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.c
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_mac.c
drivers/net/ethernet/intel/igb/e1000_mac.h
drivers/net/ethernet/intel/igb/e1000_mbx.c
drivers/net/ethernet/intel/igb/e1000_mbx.h
drivers/net/ethernet/intel/igb/e1000_nvm.c
drivers/net/ethernet/intel/igb/e1000_nvm.h
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_hwmon.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/profile.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/reset.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/micrel/ks8695net.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/microchip/enc28j60.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/packetengines/yellowfin.c
drivers/net/ethernet/qlogic/Kconfig
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/siena_sriov.c
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/smsc/smc91c92_cs.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/cpsw-phy-sel.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/via/Kconfig
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/fakelb.c
drivers/net/irda/Kconfig
drivers/net/irda/w83977af_ir.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/ntb_netdev.c
drivers/net/phy/at803x.c
drivers/net/phy/fixed.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/smsc.c
drivers/net/phy/vitesse.c
drivers/net/ppp/ppp_generic.c
drivers/net/rionet.c
drivers/net/slip/slip.c
drivers/net/team/team.c
drivers/net/team/team_mode_loadbalance.c
drivers/net/tun.c
drivers/net/usb/catc.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/hso.c
drivers/net/usb/huawei_cdc_ncm.c
drivers/net/usb/ipheth.c
drivers/net/usb/kaweth.c
drivers/net/usb/pegasus.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rtl8150.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vxlan.c
drivers/net/wan/sdla.c
drivers/net/wimax/i2400m/driver.c
drivers/net/wireless/ath/ar5523/ar5523.c
drivers/net/wireless/ath/ath10k/bmi.c
drivers/net/wireless/ath/ath10k/bmi.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/txrx.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath6kl/Kconfig
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/core.c
drivers/net/wireless/ath/ath6kl/debug.c
drivers/net/wireless/ath/ath6kl/debug.h
drivers/net/wireless/ath/ath6kl/hif.c
drivers/net/wireless/ath/ath6kl/hif.h
drivers/net/wireless/ath/ath6kl/htc_mbox.c
drivers/net/wireless/ath/ath6kl/htc_pipe.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath6kl/main.c
drivers/net/wireless/ath/ath6kl/sdio.c
drivers/net/wireless/ath/ath6kl/target.h
drivers/net/wireless/ath/ath6kl/txrx.c
drivers/net/wireless/ath/ath6kl/usb.c
drivers/net/wireless/ath/ath6kl/wmi.c
drivers/net/wireless/ath/ath6kl/wmi.h
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ahb.c
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
drivers/net/wireless/ath/ath9k/ar9340_initvals.h
drivers/net/wireless/ath/ath9k/ar953x_initvals.h
drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/common-debug.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/common-debug.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/debug_sta.c
drivers/net/wireless/ath/ath9k/dfs.c
drivers/net/wireless/ath/ath9k/dfs_debug.h
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_debug.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/ath/dfs_pattern_detector.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/rx_reorder.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/b43/Kconfig
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/bus.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/b43/phy_common.h
drivers/net/wireless/b43/phy_g.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/radio_2056.c
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/b43/tables_nphy.h
drivers/net/wireless/b43/wa.c
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/chip.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/brcm80211/brcmfmac/nvram.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmutil/d11.c
drivers/net/wireless/brcm80211/include/brcmu_d11.h
drivers/net/wireless/brcm80211/include/brcmu_wifi.h
drivers/net/wireless/cw1200/sta.c
drivers/net/wireless/cw1200/sta.h
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/iwlegacy/3945.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/dvm/Makefile
drivers/net/wireless/iwlwifi/dvm/calib.c
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/devices.c
drivers/net/wireless/iwlwifi/dvm/led.h
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/power.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/dvm/scan.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/dvm/tt.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-agn-hw.h
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-debug.c
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-io.c
drivers/net/wireless/iwlwifi/iwl-io.h
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-phy-db.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/Makefile
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h [deleted file]
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/quota.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sf.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tt.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/libertas/defs.h
drivers/net/wireless/libertas/rx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11ac.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n.h
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/README
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/debugfs.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_rx.c
drivers/net/wireless/mwifiex/sta_tx.c
drivers/net/wireless/mwifiex/tdls.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/orinoco/hw.c
drivers/net/wireless/orinoco/hw.h
drivers/net/wireless/orinoco/orinoco_usb.c
drivers/net/wireless/orinoco/wext.c
drivers/net/wireless/p54/main.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rsi/rsi_91x_mac80211.c
drivers/net/wireless/rsi/rsi_91x_mgmt.c
drivers/net/wireless/rsi/rsi_mgmt.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rtl818x/rtl8180/Makefile
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl8187/dev.c
drivers/net/wireless/rtl818x/rtl818x.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/rtlwifi/rtl8192se/hw.h
drivers/net/wireless/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
drivers/net/wireless/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/rtlwifi/rtl8723be/hw.h
drivers/net/wireless/rtlwifi/rtl8723be/sw.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/ti/wl1251/acx.c
drivers/net/wireless/ti/wl1251/cmd.c
drivers/net/wireless/ti/wl1251/event.c
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wl1251/spi.c
drivers/net/wireless/ti/wlcore/debugfs.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/sdio.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nfc/Kconfig
drivers/nfc/Makefile
drivers/nfc/pn544/i2c.c
drivers/nfc/st21nfca/Kconfig [new file with mode: 0644]
drivers/nfc/st21nfca/Makefile [new file with mode: 0644]
drivers/nfc/st21nfca/i2c.c [new file with mode: 0644]
drivers/nfc/st21nfca/st21nfca.c [new file with mode: 0644]
drivers/nfc/st21nfca/st21nfca.h [new file with mode: 0644]
drivers/nfc/trf7970a.c
drivers/of/base.c
drivers/of/irq.c
drivers/of/of_mdio.c
drivers/of/platform.c
drivers/of/selftest.c
drivers/of/testcase-data/tests-interrupts.dtsi
drivers/pci/host/pci-mvebu.c
drivers/pci/hotplug/shpchp_ctrl.c
drivers/pci/pci.c
drivers/phy/Kconfig
drivers/phy/Makefile
drivers/phy/phy-core.c
drivers/pinctrl/pinctrl-as3722.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-tb10x.c
drivers/pinctrl/sh-pfc/pfc-r8a7790.c
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
drivers/pnp/pnpacpi/core.c
drivers/pnp/pnpbios/bioscalls.c
drivers/pnp/quirks.c
drivers/power/reset/vexpress-poweroff.c
drivers/ptp/Kconfig
drivers/ptp/ptp_clock.c
drivers/regulator/pbias-regulator.c
drivers/rtc/rtc-hym8563.c
drivers/rtc/rtc-pcf8523.c
drivers/s390/cio/chsc.c
drivers/s390/net/claw.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/ctcm_sysfs.c
drivers/s390/net/lcs.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/hpsa.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_netlink.c
drivers/scsi/virtio_scsi.c
drivers/sh/Makefile
drivers/sh/pm_runtime.c
drivers/spi/spi-atmel.c
drivers/spi/spi-bfin5xx.c
drivers/spi/spi-pxa2xx-dma.c
drivers/spi/spi-qup.c
drivers/spi/spi-sh-hspi.c
drivers/spi/spi-sirf.c
drivers/spi/spi.c
drivers/staging/comedi/drivers/usbdux.c
drivers/staging/et131x/et131x.c
drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/iio/resolver/ad2s1200.c
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/imx-tve.c
drivers/staging/media/davinci_vpfe/vpfe_video.c
drivers/staging/media/sn9c102/sn9c102_devtable.h
drivers/staging/netlogic/xlr_net.c
drivers/staging/octeon/ethernet.c
drivers/staging/rtl8723au/os_dep/os_intfs.c
drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
drivers/staging/rtl8821ae/core.c
drivers/staging/wlan-ng/cfg80211.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/target_core_device.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/tty/hvc/hvc_console.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dma.c
drivers/tty/serial/samsung.c
drivers/tty/serial/serial_core.c
drivers/tty/tty_buffer.c
drivers/usb/chipidea/core.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/at91_udc.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/u_ether.c
drivers/usb/gadget/zero.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ohci-hub.c
drivers/usb/host/ohci-pci.c
drivers/usb/host/ohci.h
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/omap2430.c
drivers/usb/phy/phy-am335x-control.c
drivers/usb/phy/phy-fsm-usb.c
drivers/usb/phy/phy.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/usb-serial.c
drivers/usb/storage/shuttle_usbat.c
drivers/usb/storage/unusual_devs.h
drivers/usb/wusbcore/mmc.c
drivers/usb/wusbcore/wa-xfer.c
drivers/uwb/drp.c
drivers/xen/events/events_fifo.c
fs/affs/super.c
fs/aio.c
fs/autofs4/root.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/file.c
fs/btrfs/inode-map.c
fs/btrfs/ioctl.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/locks.c
fs/ceph/super.h
fs/cifs/inode.c
fs/compat.c
fs/coredump.c
fs/dcache.c
fs/exec.c
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/extents_status.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fcntl.c
fs/fuse/control.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/hugetlbfs/inode.c
fs/kernfs/dir.c
fs/kernfs/file.c
fs/locks.c
fs/namei.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/notify/fanotify/fanotify_user.c
fs/ocfs2/dlm/dlmmaster.c
fs/open.c
fs/posix_acl.c
fs/sysfs/file.c
fs/sysfs/mount.c
fs/ubifs/super.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_attr_list.c
fs/xfs/xfs_attr_remote.c
fs/xfs/xfs_da_btree.h
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_log.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_sb.c
fs/xfs/xfs_super.c
include/asm-generic/fixmap.h
include/asm-generic/resource.h
include/asm-generic/word-at-a-time.h
include/drm/drm_pciids.h
include/drm/i915_pciids.h
include/dt-bindings/clk/at91.h [deleted file]
include/dt-bindings/clock/at91.h [new file with mode: 0644]
include/dt-bindings/clock/tegra124-car.h
include/linux/ath9k_platform.h
include/linux/can/core.h
include/linux/can/dev.h
include/linux/can/led.h
include/linux/can/platform/cc770.h
include/linux/can/platform/mcp251x.h
include/linux/can/platform/rcar_can.h [new file with mode: 0644]
include/linux/can/platform/sja1000.h
include/linux/can/platform/ti_hecc.h
include/linux/can/skb.h
include/linux/cgroup.h
include/linux/crc7.h
include/linux/dcache.h
include/linux/filter.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/hugetlb.h
include/linux/if_link.h
include/linux/if_macvlan.h
include/linux/if_vlan.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/kernfs.h
include/linux/libata.h
include/linux/linkage.h
include/linux/mfd/rtsx_common.h
include/linux/mfd/rtsx_pci.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/mm.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netlink.h
include/linux/nl802154.h
include/linux/of.h
include/linux/of_irq.h
include/linux/of_mdio.h
include/linux/perf_event.h
include/linux/phy.h
include/linux/phy/phy.h
include/linux/phy_fixed.h
include/linux/platform_data/st21nfca.h [new file with mode: 0644]
include/linux/regulator/consumer.h
include/linux/rfkill-gpio.h
include/linux/rtnetlink.h
include/linux/sched.h
include/linux/serio.h
include/linux/skbuff.h
include/linux/slub_def.h
include/linux/sock_diag.h
include/linux/spi/at86rf230.h
include/linux/ssb/ssb.h
include/linux/tcp.h
include/linux/udp.h
include/linux/usb/cdc_ncm.h
include/net/addrconf.h
include/net/af_ieee802154.h
include/net/af_vsock.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/cfg80211.h
include/net/checksum.h
include/net/dsa.h
include/net/ieee802154.h
include/net/ieee802154_netdev.h
include/net/inet_ecn.h
include/net/inet_hashtables.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_checksum.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/mac80211.h
include/net/net_namespace.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nft_meta.h [new file with mode: 0644]
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/nfc/digital.h
include/net/nfc/hci.h
include/net/nfc/nfc.h
include/net/pkt_cls.h
include/net/protocol.h
include/net/regulatory.h
include/net/sch_generic.h
include/net/snmp.h
include/net/sock.h
include/net/tcp.h
include/net/tso.h [new file with mode: 0644]
include/net/udp.h
include/net/vxlan.h
include/net/xfrm.h
include/trace/events/ext4.h
include/trace/events/module.h
include/uapi/asm-generic/fcntl.h
include/uapi/asm-generic/resource.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/audit.h
include/uapi/linux/can.h
include/uapi/linux/can/bcm.h
include/uapi/linux/can/error.h
include/uapi/linux/can/gw.h
include/uapi/linux/can/netlink.h
include/uapi/linux/can/raw.h
include/uapi/linux/fuse.h
include/uapi/linux/if_fddi.h
include/uapi/linux/if_link.h
include/uapi/linux/input.h
include/uapi/linux/l2tp.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/nfc.h
include/uapi/linux/nl80211.h
include/uapi/linux/openvswitch.h
include/uapi/linux/tipc.h
include/uapi/linux/tipc_config.h
include/uapi/linux/udp.h
init/main.c
kernel/audit.c
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/context_tracking.c
kernel/events/core.c
kernel/hrtimer.c
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/locking/lockdep.c
kernel/module.c
kernel/power/snapshot.c
kernel/power/suspend.c
kernel/printk/printk.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cpupri.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/seccomp.c
kernel/softirq.c
kernel/sysctl.c
kernel/timer.c
kernel/trace/ftrace.c
kernel/trace/trace_events_trigger.c
kernel/tracepoint.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/crc7.c
lib/dump_stack.c
lib/test_bpf.c [new file with mode: 0644]
mm/Kconfig
mm/compaction.c
mm/filemap.c
mm/hugetlb.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/mremap.c
mm/page-writeback.c
mm/percpu.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slub.c
mm/truncate.c
mm/util.c
mm/vmacache.c
mm/vmscan.c
net/8021q/vlan.c
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/appletalk/ddp.c
net/atm/svc.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/debugfs.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/batman-adv/main.h
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/lib.c
net/bluetooth/mgmt.c
net/bridge/Makefile
net/bridge/br.c
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_notify.c [deleted file]
net/bridge/br_private.h
net/bridge/br_sysfs_if.c
net/bridge/br_vlan.c
net/bridge/netfilter/Kconfig
net/bridge/netfilter/Makefile
net/bridge/netfilter/nft_meta_bridge.c [new file with mode: 0644]
net/can/af_can.c
net/can/af_can.h
net/can/gw.c
net/can/proc.c
net/ceph/messenger.c
net/ceph/osdmap.c
net/core/Makefile
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/pktgen.c
net/core/ptp_classifier.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/core/tso.c [new file with mode: 0644]
net/core/utils.c
net/dcb/dcbnl.c
net/dccp/ipv4.c
net/dccp/proto.c
net/dccp/sysctl.c
net/dccp/timer.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/dn_fib.c
net/decnet/netfilter/dn_rtmsg.c
net/dsa/dsa.c
net/dsa/slave.c
net/ieee802154/6lowpan_rtnl.c
net/ieee802154/dgram.c
net/ieee802154/header_ops.c
net/ieee802154/ieee802154.h
net/ieee802154/netlink.c
net/ieee802154/nl-mac.c
net/ieee802154/nl_policy.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/fib_semantics.c
net/ipv4/gre_demux.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/inetpeer.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_highspeed.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_hybla.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_lp.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv4/tcp_yeah.c
net/ipv4/udp.c
net/ipv4/udplite.c
net/ipv4/xfrm4_output.c
net/ipv4/xfrm4_protocol.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/af_inet6.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_checksum.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/ping.c
net/ipv6/proc.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp.c
net/ipv6/udplite.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_protocol.c
net/ipx/af_ipx.c
net/ipx/ipx_route.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/mac80211/Makefile
net/mac80211/aes_ccm.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/debugfs.h
net/mac80211/debugfs_netdev.h
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_sync.c
net/mac80211/michael.h
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/tdls.c [new file with mode: 0644]
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mac80211/wpa.c
net/mac802154/Kconfig
net/mac802154/Makefile
net/mac802154/llsec.c [new file with mode: 0644]
net/mac802154/llsec.h [new file with mode: 0644]
net/mac802154/mac802154.h
net/mac802154/mac_cmd.c
net/mac802154/mib.c
net/mac802154/rx.c
net/mac802154/wpan.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink.c
net/netfilter/nft_ct.c
net/netfilter/nft_hash.c
net/netfilter/nft_lookup.c
net/netfilter/nft_meta.c
net/netfilter/nft_rbtree.c
net/netfilter/xt_bpf.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/nfc/digital.h
net/nfc/digital_core.c
net/nfc/digital_technology.c
net/nfc/hci/command.c
net/nfc/hci/core.c
net/nfc/llcp_commands.c
net/nfc/llcp_core.c
net/nfc/nci/core.c
net/nfc/nci/ntf.c
net/nfc/nfc.h
net/nfc/rawsock.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_netlink.h
net/openvswitch/flow_table.c
net/openvswitch/flow_table.h
net/openvswitch/vport-gre.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-vxlan.c
net/openvswitch/vport.h
net/packet/diag.c
net/phonet/pn_netlink.c
net/rds/ib_send.c
net/rds/iw_send.c
net/rds/iw_sysctl.c
net/rds/rdma_transport.c
net/rds/sysctl.c
net/rds/tcp_listen.c
net/rfkill/rfkill-gpio.c
net/rxrpc/ar-key.c
net/sched/act_api.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_bpf.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_hhf.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/proc.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sunrpc/xprtsock.c
net/tipc/Makefile
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/config.c
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/eth_media.c
net/tipc/handler.c [deleted file]
net/tipc/ib_media.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/name_table.c
net/tipc/net.c
net/tipc/net.h
net/tipc/netlink.c
net/tipc/node.c
net/tipc/node.h
net/tipc/node_subscr.c
net/tipc/node_subscr.h
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/tipc/socket.h
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/wireless/Kconfig
net/wireless/ap.c
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/ethtool.c
net/wireless/ibss.c
net/wireless/mesh.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/rdev-ops.h
net/wireless/reg.c
net/wireless/reg.h
net/wireless/scan.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-compat.c
net/wireless/wext-compat.h
net/wireless/wext-sme.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_proc.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/checksyscalls.sh
scripts/sortextable.c
security/apparmor/include/apparmor.h
security/apparmor/lib.c
security/device_cgroup.c
security/selinux/hooks.c
sound/isa/sb/sb_mixer.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_priv.h
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/alc5623.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/cs42l73.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8962.h
sound/soc/fsl/fsl_esai.c
sound/soc/fsl/fsl_spdif.h
sound/soc/fsl/imx-audmux.c
sound/soc/intel/sst-acpi.c
sound/soc/intel/sst-baytrail-dsp.c
sound/soc/intel/sst-baytrail-ipc.c
sound/soc/intel/sst-dsp-priv.h
sound/soc/intel/sst-dsp.c
sound/soc/intel/sst-dsp.h
sound/soc/intel/sst-firmware.c
sound/soc/intel/sst-haswell-dsp.c
sound/soc/intel/sst-haswell-ipc.c
sound/soc/intel/sst-haswell-ipc.h
sound/soc/intel/sst-haswell-pcm.c
sound/soc/jz4740/Makefile
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/src.c
sound/soc/sh/rcar/ssi.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/usb/card.c
sound/usb/card.h
sound/usb/endpoint.c
sound/usb/pcm.c
sound/usb/usbaudio.h
tools/Makefile
tools/lib/api/fs/debugfs.c
tools/lib/lockdep/Makefile
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/net/bpf_dbg.c
tools/net/bpf_jit_disasm.c
tools/perf/Makefile.perf
tools/perf/arch/x86/tests/dwarf-unwind.c
tools/perf/arch/x86/tests/regs_load.S
tools/perf/builtin-kvm.c
tools/perf/builtin-record.c
tools/perf/config/Makefile
tools/perf/tests/make
tools/perf/util/data.c
tools/perf/util/machine.c
tools/perf/util/symbol-elf.c
tools/power/acpi/Makefile
tools/testing/selftests/net/Makefile
virt/kvm/arm/vgic.c
virt/kvm/assigned-dev.c
virt/kvm/async_pf.c

index a3c5a6685036103e7ec677272acdc4620d0302f6..ab8d76dfaa8096bbdf42bacf31336b856c07959b 100644 (file)
@@ -117,7 +117,7 @@ Description:
 
 What:          /sys/bus/pci/devices/.../vpd
 Date:          February 2008
-Contact:       Ben Hutchings <bhutchings@solarflare.com>
+Contact:       Ben Hutchings <bwh@kernel.org>
 Description:
                A file named vpd in a device directory will be a
                binary file containing the Vital Product Data for the
index d922060e455d5647e06c4f1182306d42c514a604..416c5d59f52eaf02081b88901cb0f165713e7e6a 100644 (file)
@@ -169,6 +169,14 @@ Description:
                "unknown", "notpresent", "down", "lowerlayerdown", "testing",
                "dormant", "up".
 
+What:          /sys/class/net/<iface>/phys_port_id
+Date:          July 2013
+KernelVersion: 3.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface's unique physical port identifier within
+               the NIC, as a string.
+
 What:          /sys/class/net/<iface>/speed
 Date:          October 2009
 KernelVersion: 2.6.33
diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
new file mode 100644 (file)
index 0000000..5e9aeb9
--- /dev/null
@@ -0,0 +1,79 @@
+What:          /sys/class/<iface>/queues/rx-<queue>/rps_cpus
+Date:          March 2010
+KernelVersion: 2.6.35
+Contact:       netdev@vger.kernel.org
+Description:
+               Mask of the CPU(s) currently enabled to participate in the
+               Receive Packet Steering packet processing flow for this
+               network device queue. Possible values depend on the number
+               of available CPU(s) in the system.
+
+What:          /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
+Date:          April 2010
+KernelVersion: 2.6.35
+Contact:       netdev@vger.kernel.org
+Description:
+               Number of Receive Packet Steering flows currently being
+               processed by this particular network device receive queue.
+
+What:          /sys/class/<iface>/queues/tx-<queue>/tx_timeout
+Date:          November 2011
+KernelVersion: 3.3
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of transmit timeout events seen by this
+               network interface transmit queue.
+
+What:          /sys/class/<iface>/queues/tx-<queue>/xps_cpus
+Date:          November 2010
+KernelVersion: 2.6.38
+Contact:       netdev@vger.kernel.org
+Description:
+               Mask of the CPU(s) currently enabled to participate in the
+               Transmit Packet Steering packet processing flow for this
+               network device transmit queue. Possible values depend on the
+               number of available CPU(s) in the system.
+
+What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
+Date:          November 2011
+KernelVersion: 3.3
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the hold time in milliseconds to measure the slack
+               of this particular network device transmit queue.
+               Default value is 1000.
+
+What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
+Date:          November 2011
+KernelVersion: 3.3
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of bytes (objects) in flight on this
+               network device transmit queue.
+
+What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
+Date:          November 2011
+KernelVersion: 3.3
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the current limit of bytes allowed to be queued
+               on this network device transmit queue. This value is clamped
+               to be within the bounds defined by limit_max and limit_min.
+
+What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
+Date:          November 2011
+KernelVersion: 3.3
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the absolute maximum limit of bytes allowed to be
+               queued on this network device transmit queue. See
+               include/linux/dynamic_queue_limits.h for the default value.
+
+What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
+Date:          November 2011
+KernelVersion: 3.3
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the absolute minimum limit of bytes allowed to be
+               queued on this network device transmit queue. Default value is
+               0.
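
The byte_queue_limits attributes above are plain sysfs files. A minimal C sketch for reading them follows; the interface name (eth0), the queue name (tx-0) and the /sys/class/net/<iface>/queues/... location are assumptions for a typical system, not part of the ABI text above.

/* bql_dump.c - print the byte_queue_limits attributes of one TX queue.
 * The interface (eth0) and queue (tx-0) names are examples only. */
#include <stdio.h>

static long read_bql(const char *attr)
{
    char path[256];
    long val = -1;
    FILE *f;

    snprintf(path, sizeof(path),
             "/sys/class/net/eth0/queues/tx-0/byte_queue_limits/%s", attr);
    f = fopen(path, "r");
    if (!f)
        return -1;
    if (fscanf(f, "%ld", &val) != 1)
        val = -1;
    fclose(f);
    return val;
}

int main(void)
{
    /* limit is clamped to [limit_min, limit_max] as described above */
    printf("limit=%ld min=%ld max=%ld inflight=%ld\n",
           read_bql("limit"), read_bql("limit_min"),
           read_bql("limit_max"), read_bql("inflight"));
    return 0;
}

Writing a new value back to limit_min or limit_max with the same pattern (fopen in "w" mode, as root) adjusts the clamp at run time.
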
diff --git a/Documentation/ABI/testing/sysfs-class-net-statistics b/Documentation/ABI/testing/sysfs-class-net-statistics
new file mode 100644 (file)
index 0000000..397118d
--- /dev/null
@@ -0,0 +1,201 @@
+What:          /sys/class/<iface>/statistics/collisions
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of collisions seen by this network device.
+               This value might not be relevant for all MAC layers.
+
+What:          /sys/class/<iface>/statistics/multicast
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of multicast packets received by this
+               network device.
+
+What:          /sys/class/<iface>/statistics/rx_bytes
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of bytes received by this network device.
+               See the network driver for the exact meaning of when this
+               value is incremented.
+
+What:          /sys/class/<iface>/statistics/rx_compressed
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of compressed packets received by this
+               network device. This value might only be relevant for interfaces
+               that support packet compression (e.g: PPP).
+
+What:          /sys/class/<iface>/statistics/rx_crc_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packets received with a CRC (FCS) error
+               by this network device. Note that the specific meaning might
+               depend on the MAC layer used by the interface.
+
+What:          /sys/class/<iface>/statistics/rx_dropped
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packets received by the network device
+               but dropped and not forwarded to the upper layers for
+               packet processing. See the network driver for the exact
+               meaning of this value.
+
+What:          /sys/class/<iface>/statistics/rx_fifo_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of receive FIFO errors seen by this
+               network device. See the network driver for the exact
+               meaning of this value.
+
+What:          /sys/class/<iface>/statistics/rx_frame_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of received frames with errors, such as
+               alignment errors. Note that the specific meaning depends on
+               the MAC layer protocol used. See the network driver for
+               the exact meaning of this value.
+
+What:          /sys/class/<iface>/statistics/rx_length_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of received packets with a length
+               error (oversized or undersized). See the network driver for the
+               exact meaning of this value.
+
+What:          /sys/class/<iface>/statistics/rx_missed_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of received packets that have been missed
+               due to a lack of capacity on the receive side. See the network
+               driver for the exact meaning of this value.
+
+What:          /sys/class/<iface>/statistics/rx_over_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of received packets that are oversized
+               compared to what the network device is configured to accept
+               (e.g: larger than MTU). See the network driver for the exact
+               meaning of this value.
+
+What:          /sys/class/<iface>/statistics/rx_packets
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the total number of good packets received by this
+               network device.
+
+What:          /sys/class/<iface>/statistics/tx_aborted_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packets that have been aborted
+               during transmission by a network device (e.g: because of
+               a medium collision). See the network driver for the exact
+               meaning of this value.
+
+What:          /sys/class/<iface>/statistics/tx_bytes
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of bytes transmitted by a network
+               device. See the network driver for the exact meaning of this
+               value, in particular whether this accounts for all successfully
+               transmitted packets or all packets that have been queued for
+               transmission.
+
+What:          /sys/class/<iface>/statistics/tx_carrier_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packets that could not be transmitted
+               because of carrier errors (e.g: physical link down). See the
+               network driver for the exact meaning of this value.
+
+What:          /sys/class/<iface>/statistics/tx_compressed
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of transmitted compressed packets. Note
+               this might only be relevant for devices that support
+               compression (e.g: PPP).
+
+What:          /sys/class/<iface>/statistics/tx_dropped
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packets dropped during transmission.
+               See the driver for the exact reasons as to why the packets were
+               dropped.
+
+What:          /sys/class/<iface>/statistics/tx_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packet errors during transmission by
+               a network device. See the driver for the exact reasons as to
+               why the packets were dropped.
+
+What:          /sys/class/<iface>/statistics/tx_fifo_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packets having caused a transmit
+               FIFO error. See the driver for the exact reasons as to why the
+               packets were dropped.
+
+What:          /sys/class/<iface>/statistics/tx_heartbeat_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packets transmitted that have been
+               reported as heartbeat errors. See the driver for the exact
+               reasons as to why the packets were dropped.
+
+What:          /sys/class/<iface>/statistics/tx_packets
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packets transmitted by a network
+               device. See the driver for whether this reports the number of all
+               attempted or successful transmissions.
+
+What:          /sys/class/<iface>/statistics/tx_window_errors
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the number of packets not successfully transmitted
+               due to a window collision. The specific meaning depends on the
+               MAC layer used.  On Ethernet this is usually used to report
+               late collision errors.
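
As a hedged illustration of how the statistics counters above are usually consumed (the interface name eth0 and the /sys/class/net/<iface>/statistics/ location are assumptions for a typical system), the following sketch samples rx_bytes twice and prints an approximate receive rate:

/* rx_rate.c - sample rx_bytes twice and print an approximate receive rate.
 * The interface name (eth0) is an example; adjust the path as needed. */
#include <stdio.h>
#include <unistd.h>

static unsigned long long read_counter(const char *path)
{
    unsigned long long v = 0;
    FILE *f = fopen(path, "r");

    if (!f)
        return 0;
    if (fscanf(f, "%llu", &v) != 1)
        v = 0;
    fclose(f);
    return v;
}

int main(void)
{
    const char *path = "/sys/class/net/eth0/statistics/rx_bytes";
    unsigned long long before = read_counter(path);

    sleep(1);
    printf("~%llu bytes/s received\n", read_counter(path) - before);
    return 0;
}
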
index 044b76436e8373ae601f9c60bd274754f3df6033..d9b9416c989fd81f0a9a338b59fc9093d96479a4 100644 (file)
 !Finclude/net/cfg80211.h wdev_priv
 !Finclude/net/cfg80211.h ieee80211_iface_limit
 !Finclude/net/cfg80211.h ieee80211_iface_combination
+!Finclude/net/cfg80211.h cfg80211_check_combinations
       </chapter>
       <chapter>
       <title>Actions and configuration</title>
index f9fd615427fbd4c0a718d66ed1b7de006ab796e6..1d27f0a1abd1e1872b0e05693ab35d6ecd64b0f2 100644 (file)
@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
 #
 
 install_media_images = \
-       $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+       $(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
 
 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
        $(Q)base64 -d $< >$@
index 06fc7602593a9d38a4cec97538948b03b5ce2676..37b2cafa4e52703b516d8c163653ae58106ad990 100644 (file)
@@ -19,6 +19,9 @@ to deliver its interrupts via SPIs.
 
 - clock-frequency : The frequency of the main counter, in Hz. Optional.
 
+- always-on : a boolean property. If present, the timer is powered through an
+  always-on power domain, therefore it never loses context.
+
 Example:
 
        timer {
index 7bcfbf59810e5a5c105740c7dca63844d7791ef2..a668f0e7d0018b76841127db20845a7fd45affd6 100644 (file)
@@ -24,6 +24,7 @@ Required properties:
   * "sata-phy" for the SATA 6.0Gbps PHY
 
 Optional properties:
+- dma-coherent         : Present if dma operations are coherent
 - status               : Shall be "ok" if enabled or "disabled" if disabled.
                          Default is "ok".
 
@@ -55,6 +56,7 @@ Example:
                              <0x0 0x1f22e000 0x0 0x1000>,
                              <0x0 0x1f227000 0x0 0x1000>;
                        interrupts = <0x0 0x87 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sataclk 0>;
                        phys = <&phy2 0>;
@@ -69,6 +71,7 @@ Example:
                              <0x0 0x1f23e000 0x0 0x1000>,
                              <0x0 0x1f237000 0x0 0x1000>;
                        interrupts = <0x0 0x88 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sataclk 0>;
                        phys = <&phy3 0>;
index cd5e23912888cf4350750fe0adc908fb895e3d08..6794cdc96d8fdf44513a4ad2310fe67a2a93712d 100644 (file)
@@ -62,7 +62,7 @@ Required properties for PMC node:
 - interrupt-controller : tell that the PMC is an interrupt controller.
 - #interrupt-cells : must be set to 1. The first cell encodes the interrupt id,
        and reflect the bit position in the PMC_ER/DR/SR registers.
-       You can use the dt macros defined in dt-bindings/clk/at91.h.
+       You can use the dt macros defined in dt-bindings/clock/at91.h.
        0 (AT91_PMC_MOSCS) -> main oscillator ready
        1 (AT91_PMC_LOCKA) -> PLL A ready
        2 (AT91_PMC_LOCKB) -> PLL B ready
index 5992dceec7af7d1e9d1ac8f329b831d48e409d87..02a25d99ca61bfb33a46a2ce8e547b7f22d3c73a 100644 (file)
@@ -43,7 +43,7 @@ Example
                clock-output-names =
                        "tpu0", "mmcif1", "sdhi3", "sdhi2",
                         "sdhi1", "sdhi0", "mmcif0";
-               renesas,clock-indices = <
+               clock-indices = <
                        R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3
                        R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0
                        R8A7790_CLK_MMCIF0
index 9fbbdb783a72d50600755a074cbc11cfba97f713..68ff2137bae7261e84409a472fbf432989fca0d6 100644 (file)
@@ -29,6 +29,6 @@ edma: edma@49000000 {
        dma-channels = <64>;
        ti,edma-regions = <4>;
        ti,edma-slots = <256>;
-       ti,edma-xbar-event-map = <1 12
-                                 2 13>;
+       ti,edma-xbar-event-map = /bits/ 16 <1 12
+                                           2 13>;
 };
index 7fbb027218a126002312a829c6cd273ac715b030..a1d71eb43b209485ac7bec4832119f25fe68a49e 100644 (file)
@@ -4,11 +4,15 @@ Required properties:
 - compatible: Should be "snps,arc-emac"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the EMAC interrupts
-- clock-frequency: CPU frequency. It is needed to calculate and set polling
-period of EMAC.
 - max-speed: see ethernet.txt file in the same directory.
 - phy: see ethernet.txt file in the same directory.
 
+Clock handling:
+The clock frequency is needed to calculate and set polling period of EMAC.
+It must be provided by one of:
+- clock-frequency: CPU frequency.
+- clocks: reference to the clock supplying the EMAC.
+
 Child nodes of the driver are the individual PHY devices connected to the
 MDIO bus. They must have a "reg" property giving the PHY address on the MDIO bus.
 
@@ -19,7 +23,11 @@ Examples:
                reg = <0xc0fc2000 0x3c>;
                interrupts = <6>;
                mac-address = [ 00 11 22 33 44 55 ];
+
                clock-frequency = <80000000>;
+               /* or */
+               clocks = <&emac_clock>;
+
                max-speed = <100>;
                phy = <&phy0>;
 
index f2febb94550e8868b32497001f3a6514feddddfb..451fef26b4dfaf05b6782099e54816d52a52baa8 100644 (file)
@@ -24,7 +24,7 @@ Optional properties:
 - fixed-link: When the GENET interface is connected to a MoCA hardware block or
   when operating in a RGMII to RGMII type of connection, or when the MDIO bus is
   voluntarily disabled, this property should be used to describe the "fixed link".
-  See Documentation/devicetree/bindings/net/fsl-tsec-phy.txt for information on
+  See Documentation/devicetree/bindings/net/fixed-link.txt for information on
   the property specifics
 
 Required child nodes:
diff --git a/Documentation/devicetree/bindings/net/broadcom-systemport.txt b/Documentation/devicetree/bindings/net/broadcom-systemport.txt
new file mode 100644 (file)
index 0000000..c183ea9
--- /dev/null
@@ -0,0 +1,29 @@
+* Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT)
+
+Required properties:
+- compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
+- reg: address and length of the register set for the device.
+- interrupts: interrupts for the device, first cell must be for the rx
+  interrupts, and the second cell should be for the transmit queues
+- local-mac-address: Ethernet MAC address (48 bits) of this adapter
+- phy-mode: Should be a string describing the PHY interface to the
+  Ethernet switch/PHY, see Documentation/devicetree/bindings/net/ethernet.txt
+- fixed-link: see Documentation/devicetree/bindings/net/fixed-link.txt for
+  the property specific details
+
+Optional properties:
+- systemport,num-tier2-arb: number of tier 2 arbiters, an integer
+- systemport,num-tier1-arb: number of tier 1 arbiters, an integer
+- systemport,num-txq: number of HW transmit queues, an integer
+- systemport,num-rxq: number of HW receive queues, an integer
+
+Example:
+ethernet@f04a0000 {
+       compatible = "brcm,systemport-v1.00";
+       reg = <0xf04a0000 0x4650>;
+       local-mac-address = [ 00 11 22 33 44 55 ];
+       fixed-link = <0 1 1000 0 0>;
+       phy-mode = "gmii";
+       interrupts = <0x0 0x16 0x0>,
+               <0x0 0x17 0x0>;
+};
diff --git a/Documentation/devicetree/bindings/net/can/xilinx_can.txt b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
new file mode 100644 (file)
index 0000000..fe38847
--- /dev/null
@@ -0,0 +1,44 @@
+Xilinx Axi CAN/Zynq CANPS controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible           : Should be "xlnx,zynq-can-1.0" for Zynq CAN
+                         controllers and "xlnx,axi-can-1.00.a" for Axi CAN
+                         controllers.
+- reg                  : Physical base address and size of the Axi CAN/Zynq
+                         CANPS registers map.
+- interrupts           : Property with a value describing the interrupt
+                         number.
+- interrupt-parent     : Must be core interrupt controller
+- clock-names          : List of input clock names - "can_clk", "pclk"
+                         (for CANPS), "can_clk", "s_axi_aclk" (for AXI CAN)
+                         (See clock bindings for details).
+- clocks               : Clock phandles (see clock bindings for details).
+- tx-fifo-depth                : CAN TX FIFO depth.
+- rx-fifo-depth                : CAN RX FIFO depth.
+
+
+Example:
+
+For Zynq CANPS Dts file:
+       zynq_can_0: can@e0008000 {
+                       compatible = "xlnx,zynq-can-1.0";
+                       clocks = <&clkc 19>, <&clkc 36>;
+                       clock-names = "can_clk", "pclk";
+                       reg = <0xe0008000 0x1000>;
+                       interrupts = <0 28 4>;
+                       interrupt-parent = <&intc>;
+                       tx-fifo-depth = <0x40>;
+                       rx-fifo-depth = <0x40>;
+               };
+For Axi CAN Dts file:
+       axi_can_0: axi-can@40000000 {
+                       compatible = "xlnx,axi-can-1.00.a";
+                       clocks = <&clkc 0>, <&clkc 1>;
+                       clock-names = "can_clk", "s_axi_aclk";
+                       reg = <0x40000000 0x10000>;
+                       interrupt-parent = <&intc>;
+                       interrupts = <0 59 1>;
+                       tx-fifo-depth = <0x40>;
+                       rx-fifo-depth = <0x40>;
+               };
index 7ff57a119f81f449cfc2afb5ef988183071a8aa8..764c0c79b43d391435b847614c8d6c7aa4f06653 100644 (file)
@@ -2,7 +2,9 @@ TI CPSW Phy mode Selection Device Tree Bindings
 -----------------------------------------------
 
 Required properties:
-- compatible           : Should be "ti,am3352-cpsw-phy-sel"
+- compatible           : Should be "ti,am3352-cpsw-phy-sel" for the am335x platform,
+                         "ti,dra7xx-cpsw-phy-sel" for the dra7xx platform, and
+                         "ti,am43xx-cpsw-phy-sel" for the am43xx platform
 - reg                  : physical base address and size of the cpsw
                          registers map
 - reg-names            : names of the register map given in "reg" node
diff --git a/Documentation/devicetree/bindings/net/fixed-link.txt b/Documentation/devicetree/bindings/net/fixed-link.txt
new file mode 100644 (file)
index 0000000..82bf7e0
--- /dev/null
@@ -0,0 +1,42 @@
+Fixed link Device Tree binding
+------------------------------
+
+Some Ethernet MACs have a "fixed link", and are not connected to a
+normal MDIO-managed PHY device. For those situations, a Device Tree
+binding allows describing a "fixed link".
+
+Such a fixed link situation is described by creating a 'fixed-link'
+sub-node of the Ethernet MAC device node, with the following
+properties:
+
+* 'speed' (integer, mandatory), to indicate the link speed. Accepted
+  values are 10, 100 and 1000
+* 'full-duplex' (boolean, optional), to indicate that full duplex is
+  used. When absent, half duplex is assumed.
+* 'pause' (boolean, optional), to indicate that pause should be
+  enabled.
+* 'asym-pause' (boolean, optional), to indicate that asym_pause should
+  be enabled.
+
+Old, deprecated 'fixed-link' binding:
+
+* A 'fixed-link' property in the Ethernet MAC node, with 5 cells, of the
+  form <a b c d e> with the following accepted values:
+  - a: emulated PHY ID, choose any but unique among all specified
+    fixed-links, from 0 to 31
+  - b: duplex configuration: 0 for half duplex, 1 for full duplex
+  - c: link speed in Mbits/sec, accepted values are: 10, 100 and 1000
+  - d: pause configuration: 0 for no pause, 1 for pause
+  - e: asymmetric pause configuration: 0 for no asymmetric pause, 1 for
+    asymmetric pause
+
+Example:
+
+ethernet@0 {
+       ...
+       fixed-link {
+             speed = <1000>;
+             full-duplex;
+       };
+       ...
+};
index 737cdef4f9036eb6069b9f536f137351dc42b137..be6ea8960f208c72b7d69e0286c56712661ccb2c 100644 (file)
@@ -42,10 +42,7 @@ Properties:
     interrupt.  For TSEC and eTSEC devices, the first interrupt is
     transmit, the second is receive, and the third is error.
   - phy-handle : See ethernet.txt file in the same directory.
-  - fixed-link : <a b c d e> where a is emulated phy id - choose any,
-    but unique to the all specified fixed-links, b is duplex - 0 half,
-    1 full, c is link speed - d#10/d#100/d#1000, d is pause - 0 no
-    pause, 1 pause, e is asym_pause - 0 no asym_pause, 1 asym_pause.
+  - fixed-link : See fixed-link.txt in the same directory.
   - phy-connection-type : See ethernet.txt file in the same directory.
     This property is only really needed if the connection is of type
     "rgmii-id", as all other connection types are detected by hardware.
diff --git a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
new file mode 100644 (file)
index 0000000..d3bbdde
--- /dev/null
@@ -0,0 +1,23 @@
+* AT86RF230 IEEE 802.15.4 *
+
+Required properties:
+  - compatible:                should be "atmel,at86rf230", "atmel,at86rf231",
+                       "atmel,at86rf233" or "atmel,at86rf212"
+  - spi-max-frequency: maximal bus speed, should be set to 7500000 depending
+                       on sync or async operation mode
+  - reg:               the chipselect index
+  - interrupts:                the interrupt generated by the device
+
+Optional properties:
+  - reset-gpio:                GPIO spec for the rstn pin
+  - sleep-gpio:                GPIO spec for the slp_tr pin
+
+Example:
+
+       at86rf231@0 {
+               compatible = "atmel,at86rf231";
+               spi-max-frequency = <7500000>;
+               reg = <0>;
+               interrupts = <19 1>;
+               interrupt-parent = <&gpio3>;
+       };
index c79bab025369af4bb6320ba0e90f7fb386942cc1..8dbcf8295c6c9ceaa4eb7518694acf3b09095dc1 100644 (file)
@@ -14,7 +14,7 @@ node.
 Example:
 
 aliases {
-       mdio-gpio0 = <&mdio0>;
+       mdio-gpio0 = &mdio0;
 };
 
 mdio0: mdio {
index d54d0cc794871b29cbbbf9fbedd6242defa63677..bbdf9a7359a2ef021c3d6f780e2c14e7f3434740 100644 (file)
@@ -1,9 +1,18 @@
-Micrel KS8851 Ethernet mac
+Micrel KS8851 Ethernet mac (MLL)
 
 Required properties:
-- compatible = "micrel,ks8851-ml" of parallel interface
+- compatible = "micrel,ks8851-mll" for the parallel interface
 - reg : 2 physical address and size of registers for data and command
 - interrupts : interrupt connection
 
+Micrel KS8851 Ethernet mac (SPI)
+
+Required properties:
+- compatible = "micrel,ks8851" or the deprecated "ks8851"
+- reg : chip select number
+- interrupts : interrupt connection
+
 Optional properties:
-- vdd-supply:  supply for Ethernet mac
+- vdd-supply: analog 3.3V supply for Ethernet mac
+- vdd-io-supply: digital 1.8V IO supply for Ethernet mac
+- reset-gpios: reset_n input pin
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
deleted file mode 100644 (file)
index 997a63f..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-Micrel KSZ9021 Gigabit Ethernet PHY
-
-Some boards require special tuning values, particularly when it comes to
-clock delays.  You can specify clock delay values by adding
-micrel-specific properties to an Ethernet OF device node.
-
-All skew control options are specified in picoseconds.  The minimum
-value is 0, and the maximum value is 3000.
-
-Optional properties:
- - rxc-skew-ps : Skew control of RXC pad
- - rxdv-skew-ps : Skew control of RX CTL pad
- - txc-skew-ps : Skew control of TXC pad
- - txen-skew-ps : Skew control of TX_CTL pad
- - rxd0-skew-ps : Skew control of RX data 0 pad
- - rxd1-skew-ps : Skew control of RX data 1 pad
- - rxd2-skew-ps : Skew control of RX data 2 pad
- - rxd3-skew-ps : Skew control of RX data 3 pad
- - txd0-skew-ps : Skew control of TX data 0 pad
- - txd1-skew-ps : Skew control of TX data 1 pad
- - txd2-skew-ps : Skew control of TX data 2 pad
- - txd3-skew-ps : Skew control of TX data 3 pad
-
-Examples:
-
-       /* Attach to an Ethernet device with autodetected PHY */
-       &enet {
-               rxc-skew-ps = <3000>;
-               rxdv-skew-ps = <0>;
-               txc-skew-ps = <3000>;
-               txen-skew-ps = <0>;
-               status = "okay";
-       };
-
-       /* Attach to an explicitly-specified PHY */
-       mdio {
-               phy0: ethernet-phy@0 {
-                       rxc-skew-ps = <3000>;
-                       rxdv-skew-ps = <0>;
-                       txc-skew-ps = <3000>;
-                       txen-skew-ps = <0>;
-                       reg = <0>;
-               };
-       };
-       ethernet@70000 {
-               status = "okay";
-               phy = <&phy0>;
-               phy-mode = "rgmii-id";
-       };
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
new file mode 100644 (file)
index 0000000..692076f
--- /dev/null
@@ -0,0 +1,83 @@
+Micrel KSZ9021/KSZ9031 Gigabit Ethernet PHY
+
+Some boards require special tuning values, particularly when it comes to
+clock delays. You can specify clock delay values by adding
+micrel-specific properties to an Ethernet OF device node.
+
+Note that these settings are applied after any phy-specific fixup from
+phy_fixup_list (see phy_init_hw() from drivers/net/phy/phy_device.c),
+and therefore may overwrite them.
+
+KSZ9021:
+
+  All skew control options are specified in picoseconds. The minimum
+  value is 0, the maximum value is 3000, and it is incremented in 200ps
+  steps.
+
+  Optional properties:
+
+    - rxc-skew-ps : Skew control of RXC pad
+    - rxdv-skew-ps : Skew control of RX CTL pad
+    - txc-skew-ps : Skew control of TXC pad
+    - txen-skew-ps : Skew control of TX CTL pad
+    - rxd0-skew-ps : Skew control of RX data 0 pad
+    - rxd1-skew-ps : Skew control of RX data 1 pad
+    - rxd2-skew-ps : Skew control of RX data 2 pad
+    - rxd3-skew-ps : Skew control of RX data 3 pad
+    - txd0-skew-ps : Skew control of TX data 0 pad
+    - txd1-skew-ps : Skew control of TX data 1 pad
+    - txd2-skew-ps : Skew control of TX data 2 pad
+    - txd3-skew-ps : Skew control of TX data 3 pad
+
+KSZ9031:
+
+  All skew control options are specified in picoseconds. The minimum
+  value is 0, and the maximum is property-dependent. The increment
+  step is 60ps.
+
+  Optional properties:
+
+    Maximum value of 1860:
+
+      - rxc-skew-ps : Skew control of RX clock pad
+      - txc-skew-ps : Skew control of TX clock pad
+
+    Maximum value of 900:
+
+      - rxdv-skew-ps : Skew control of RX CTL pad
+      - txen-skew-ps : Skew control of TX CTL pad
+      - rxd0-skew-ps : Skew control of RX data 0 pad
+      - rxd1-skew-ps : Skew control of RX data 1 pad
+      - rxd2-skew-ps : Skew control of RX data 2 pad
+      - rxd3-skew-ps : Skew control of RX data 3 pad
+      - txd0-skew-ps : Skew control of TX data 0 pad
+      - txd1-skew-ps : Skew control of TX data 1 pad
+      - txd2-skew-ps : Skew control of TX data 2 pad
+      - txd3-skew-ps : Skew control of TX data 3 pad
+
+Examples:
+
+       /* Attach to an Ethernet device with autodetected PHY */
+       &enet {
+               rxc-skew-ps = <3000>;
+               rxdv-skew-ps = <0>;
+               txc-skew-ps = <3000>;
+               txen-skew-ps = <0>;
+               status = "okay";
+       };
+
+       /* Attach to an explicitly-specified PHY */
+       mdio {
+               phy0: ethernet-phy@0 {
+                       rxc-skew-ps = <3000>;
+                       rxdv-skew-ps = <0>;
+                       txc-skew-ps = <3000>;
+                       txen-skew-ps = <0>;
+                       reg = <0>;
+               };
+       };
+       ethernet@70000 {
+               status = "okay";
+               phy = <&phy0>;
+               phy-mode = "rgmii-id";
+       };
diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt
new file mode 100644 (file)
index 0000000..dab69f3
--- /dev/null
@@ -0,0 +1,35 @@
+* NXP Semiconductors PN544 NFC Controller
+
+Required properties:
+- compatible: Should be "nxp,pn544-i2c".
+- clock-frequency: I²C work frequency.
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- enable-gpios: Output GPIO pin used for enabling/disabling the PN544
+- firmware-gpios: Output GPIO pin used to enter firmware download mode
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBone with PN544 on I2C2):
+
+&i2c2 {
+
+       status = "okay";
+
+       pn544: pn544@28 {
+
+               compatible = "nxp,pn544-i2c";
+
+               reg = <0x28>;
+               clock-frequency = <400000>;
+
+               interrupt-parent = <&gpio1>;
+               interrupts = <17 GPIO_ACTIVE_HIGH>;
+
+               enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+               firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
new file mode 100644 (file)
index 0000000..4724fe6
--- /dev/null
@@ -0,0 +1,33 @@
+* STMicroelectronics SAS. ST21NFCA NFC Controller
+
+Required properties:
+- compatible: Should be "st,st21nfca-i2c".
+- clock-frequency: I²C work frequency.
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- enable-gpios: Output GPIO pin used for enabling/disabling the ST21NFCA
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
+
+&i2c2 {
+
+       status = "okay";
+
+       st21nfca: st21nfca@1 {
+
+               compatible = "st,st21nfca-i2c";
+
+               reg = <0x01>;
+               clock-frequency = <400000>;
+
+               interrupt-parent = <&gpio5>;
+               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+               enable-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+       };
+};
index 8dd3ef7bc56b560b1b4eb29c25feaf9f5d1785cc..1e436133685f91f470ba7f63e302503422bd818e 100644 (file)
@@ -12,6 +12,7 @@ Required properties:
 Optional SoC Specific Properties:
 - pinctrl-names: Contains only one value - "default".
 - pinctrl-0: Specifies the pin control groups used for this controller.
+- autosuspend-delay: Specify autosuspend delay in milliseconds.
 
 Example (for ARM-based BeagleBone with TRF7970A on SPI1):
 
@@ -29,6 +30,7 @@ Example (for ARM-based BeagleBone with TRF7970A on SPI1):
                ti,enable-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>,
                                  <&gpio2 5 GPIO_ACTIVE_LOW>;
                vin-supply = <&ldo3_reg>;
+               autosuspend-delay = <30000>;
                status = "okay";
        };
 };
index 636f0ac4e22388b4c8934681f7a7fc3712d30a0f..2a60cd3e8d5ddb7bdf3b2caad2bc414a3d8566e0 100644 (file)
@@ -23,5 +23,5 @@ gmac0: ethernet@ff700000 {
        interrupt-names = "macirq";
        mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
        clocks = <&emac_0_clk>;
-       clocks-names = "stmmaceth";
+       clock-names = "stmmaceth";
 };
index 80c1fb8bfbb8bd778a6682fa75d863ce51d3c0e4..a2acd2b26baf78c8aafc3948d7dc7cb012c09db5 100644 (file)
@@ -33,7 +33,7 @@ Optional properties:
 - max-frame-size: See ethernet.txt file in the same directory
 - clocks: If present, the first clock should be the GMAC main clock,
   further clocks may be specified in derived bindings.
-- clocks-names: One name for each entry in the clocks property, the
+- clock-names: One name for each entry in the clocks property, the
   first one should be "stmmaceth".
 
 Examples:
diff --git a/Documentation/devicetree/bindings/net/via-rhine.txt b/Documentation/devicetree/bindings/net/via-rhine.txt
new file mode 100644 (file)
index 0000000..334eca2
--- /dev/null
@@ -0,0 +1,17 @@
+* VIA Rhine 10/100 Network Controller
+
+Required properties:
+- compatible : Should be "via,vt8500-rhine" for integrated
+       Rhine controllers found in VIA VT8500, WonderMedia WM8950
+       and similar. These are listed as 1106:3106 rev. 0x84 on the
+       virtual PCI bus under vendor-provided kernels.
+- reg : Address and length of the io space
+- interrupts : Should contain the controller interrupt line
+
+Examples:
+
+ethernet@d8004000 {
+       compatible = "via,vt8500-rhine";
+       reg = <0xd8004000 0x100>;
+       interrupts = <10>;
+};
index 4bd5be0e5e7dd51eaf7cf23a92a2bf884dd264f1..26bcb18f4e609288d006eeae5bbf496730e2921f 100644 (file)
@@ -83,7 +83,7 @@ Example:
                reg             = <0xfe61f080 0x4>;
                reg-names       = "irqmux";
                interrupts      = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-               interrupts-names = "irqmux";
+               interrupt-names = "irqmux";
                ranges          = <0 0xfe610000 0x5000>;
 
                PIO0: gpio@fe610000 {
@@ -165,7 +165,7 @@ sdhci0:sdhci@fe810000{
        interrupt-parent = <&PIO3>;
        #interrupt-cells = <2>;
        interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */
-       interrupts-names = "card-detect";
+       interrupt-names = "card-detect";
        pinctrl-names = "default";
        pinctrl-0       = <&pinctrl_mmc>;
 };
index 569b26c4a81ee25e1f141329f90903dcb28ab4e4..60ca07996458576e2fcc6f85a334e13fcec5a2c7 100644 (file)
@@ -47,7 +47,7 @@ mcasp0: mcasp0@1d00000 {
        reg = <0x100000 0x3000>;
        reg-names = "mpu";
        interrupts = <82>, <83>;
-       interrupts-names = "tx", "rx";
+       interrupt-names = "tx", "rx";
        op-mode = <0>;          /* MCASP_IIS_MODE */
        tdm-slots = <2>;
        serial-dir = <
index 74c66dee3e146445b5b1593670dc52473f527165..eff12be5e789cf91bb4a5d4a21bab7f7d1b7d32c 100644 (file)
@@ -13,6 +13,9 @@ Required properties:
     "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
 
 - reg - <int> -  I2C slave address
+- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
+  DVDD-supply : power supplies for the device as covered in
+  Documentation/devicetree/bindings/regulator/regulator.txt
 
 
 Optional properties:
@@ -24,9 +27,6 @@ Optional properties:
         3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
        If this node is not mentioned or if the value is unknown, then
        micbias is set to 2.0V.
-- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
-  DVDD-supply : power supplies for the device as covered in
-  Documentation/devicetree/bindings/regulator/regulator.txt
 
 CODEC output pins:
   * HPL
index 4f7897e99cba8a8fc7b5a33343825cfb94ce2d68..c74e04494ade32bd60b0cef1aafe725352afb0ca 100644 (file)
@@ -308,3 +308,8 @@ SLAVE DMA ENGINE
 
 SPI
   devm_spi_register_master()
+
+MDIO
+  devm_mdiobus_alloc()
+  devm_mdiobus_alloc_size()
+  devm_mdiobus_free()
index 5602eb71ad5d7318e50952846d4f82edd3dcc239..e1ae127ed099d4934e1d7fb95f1a8ba819c1da9b 100644 (file)
@@ -504,9 +504,12 @@ byte 5:
 * reg_10
 
    bit   7   6   5   4   3   2   1   0
-         0   0   0   0   0   0   0   A
+         0   0   0   0   R   F   T   A
 
          A: 1 = enable absolute tracking
+         T: 1 = enable two finger mode auto correct
+         F: 1 = disable ABS Position Filter
+         R: 1 = enable real hardware resolution
 
 6.2 Native absolute mode 6 byte packet format
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
index 43842177b771d72e67e90361f79b28966435787f..30a8ad0dae535cf1670a77d7f4c869e5ffc4c6dc 100644 (file)
@@ -2218,10 +2218,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        noreplace-smp   [X86-32,SMP] Don't replace SMP instructions
                        with UP alternatives
 
-       nordrand        [X86] Disable the direct use of the RDRAND
-                       instruction even if it is supported by the
-                       processor.  RDRAND is still available to user
-                       space applications.
+       nordrand        [X86] Disable kernel use of the RDRAND and
+                       RDSEED instructions even if they are supported
+                       by the processor.  RDRAND and RDSEED are still
+                       available to user space applications.
 
        noresume        [SWSUSP] Disables resume and restores original swap
                        space.
index a383c00392d03f2e316f7fe7dd954c7d8b71f274..9c723ecd00251534a0b011c4e0ffbd053242454e 100644 (file)
@@ -585,13 +585,19 @@ mode
        balance-tlb or 5
 
                Adaptive transmit load balancing: channel bonding that
-               does not require any special switch support.  The
-               outgoing traffic is distributed according to the
-               current load (computed relative to the speed) on each
-               slave.  Incoming traffic is received by the current
-               slave.  If the receiving slave fails, another slave
-               takes over the MAC address of the failed receiving
-               slave.
+               does not require any special switch support.
+
+               In tlb_dynamic_lb=1 mode, the outgoing traffic is
+               distributed according to the current load (computed
+               relative to the speed) on each slave.
+
+               In tlb_dynamic_lb=0 mode, the load balancing based on
+               current load is disabled and the load is distributed
+               only using the hash distribution.
+
+               Incoming traffic is received by the current slave.
+               If the receiving slave fails, another slave takes over
+               the MAC address of the failed receiving slave.
 
                Prerequisite:
 
@@ -736,6 +742,28 @@ primary_reselect
 
        This option was added for bonding version 3.6.0.
 
+tlb_dynamic_lb
+
+       Specifies if dynamic shuffling of flows is enabled in tlb
+       mode. The value has no effect on any other modes.
+
+       The default behavior of tlb mode is to shuffle active flows across
+       slaves based on the load in that interval. This gives good
+       load-balancing characteristics but can cause packet reordering. If
+       re-ordering is a concern, use this variable to disable flow shuffling
+       and rely on load balancing provided solely by the hash distribution.
+       xmit_hash_policy can be used to select the appropriate hashing for
+       the setup.
+
+       The sysfs entry can be used to change the setting per bond device
+       and the initial value is derived from the module parameter. The
+       sysfs entry is allowed to be changed only if the bond device is
+       down.
+
+       The default value is "1", which enables flow shuffling, while "0"
+       disables it. This option was added in bonding driver 3.7.1.
+
+
 updelay
 
        Specifies the time, in milliseconds, to wait before enabling a
@@ -769,7 +797,7 @@ use_carrier
 xmit_hash_policy
 
        Selects the transmit hash policy to use for slave selection in
-       balance-xor and 802.3ad modes.  Possible values are:
+       balance-xor, 802.3ad, and tlb modes.  Possible values are:
 
        layer2
 
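
A minimal sketch of toggling tlb_dynamic_lb through the per-bond sysfs entry mentioned in the hunk above. The bond name (bond0) and the /sys/class/net/<bond>/bonding/tlb_dynamic_lb path are assumptions based on the usual bonding sysfs layout; the bond must be down and the program run as root.

/* tlb_lb_off.c - disable flow shuffling on an (assumed) bond0.
 * Path and bond name are assumptions; run as root with the bond down. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/sys/class/net/bond0/bonding/tlb_dynamic_lb", "w");

    if (!f) {
        perror("tlb_dynamic_lb");
        return 1;
    }
    fputs("0\n", f);    /* "0": rely solely on the hash distribution */
    return fclose(f) ? 1 : 0;
}
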
index 2fa44cbe81b73433f40db59ea50dc4ebbca917aa..cdd381c5311d1ad9de20df4db52b6e1bdfac21b9 100644 (file)
@@ -469,6 +469,41 @@ solution for a couple of reasons:
   having this 'send only' use-case we may remove the receive list in the
   Kernel to save a little (really a very little!) CPU usage.
 
+  4.1.1.1 CAN filter usage optimisation
+
+  The CAN filters are processed in per-device filter lists at CAN frame
+  reception time. To reduce the number of checks that need to be performed
+  while walking through the filter lists the CAN core provides an optimized
+  filter handling when the filter subscription focusses on a single CAN ID.
+
+  For the 2048 possible SFF CAN identifiers the identifier is used as an index
+  to access the corresponding subscription list without any further checks.
+  For the 2^29 possible EFF CAN identifiers a 10 bit XOR folding is used as
+  hash function to retrieve the EFF table index.
+
+  To benefit from the optimized filters for single CAN identifiers the
+  CAN_SFF_MASK or CAN_EFF_MASK has to be set in can_filter.mask together
+  with the CAN_EFF_FLAG and CAN_RTR_FLAG bits. A set CAN_EFF_FLAG bit in
+  can_filter.mask makes it unambiguous whether an SFF or an EFF CAN ID is
+  subscribed. E.g. in the example from above
+
+    rfilter[0].can_id   = 0x123;
+    rfilter[0].can_mask = CAN_SFF_MASK;
+
+  both SFF frames with CAN ID 0x123 and EFF frames with 0xXXXXX123 can pass.
+
+  To filter for only 0x123 (SFF) and 0x12345678 (EFF) CAN identifiers the
+  filter has to be defined in this way to benefit from the optimized filters:
+
+    struct can_filter rfilter[2];
+
+    rfilter[0].can_id   = 0x123;
+    rfilter[0].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_SFF_MASK);
+    rfilter[1].can_id   = 0x12345678 | CAN_EFF_FLAG;
+    rfilter[1].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_EFF_MASK);
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
+
   4.1.2 RAW socket option CAN_RAW_ERR_FILTER
 
   As described in chapter 3.4 the CAN interface driver can generate so
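
The optimized single-ID filters from section 4.1.1.1 above can be exercised with a short CAN_RAW program. This is a sketch only; the interface name (can0) is an assumption.

/* can_filter_opt.c - subscribe to exactly 0x123 (SFF) and 0x12345678 (EFF)
 * using the optimized single-ID filter form from section 4.1.1.1.
 * The interface name "can0" is an example. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
    struct sockaddr_can addr = { .can_family = AF_CAN };
    struct can_filter rfilter[2];
    struct ifreq ifr;
    int s;

    s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
    if (s < 0) {
        perror("socket");
        return 1;
    }

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
    if (ioctl(s, SIOCGIFINDEX, &ifr) < 0) {
        perror("SIOCGIFINDEX");
        return 1;
    }
    addr.can_ifindex = ifr.ifr_ifindex;

    /* single-ID subscriptions: full mask plus EFF/RTR flags set */
    rfilter[0].can_id   = 0x123;
    rfilter[0].can_mask = CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_SFF_MASK;
    rfilter[1].can_id   = 0x12345678 | CAN_EFF_FLAG;
    rfilter[1].can_mask = CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_EFF_MASK;

    if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
                   &rfilter, sizeof(rfilter)) < 0) {
        perror("CAN_RAW_FILTER");
        return 1;
    }

    if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("bind");
        return 1;
    }

    /* From here on, read() delivers only frames for the two subscribed IDs. */
    close(s);
    return 0;
}
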
diff --git a/Documentation/networking/cdc_mbim.txt b/Documentation/networking/cdc_mbim.txt
new file mode 100644 (file)
index 0000000..a15ea60
--- /dev/null
@@ -0,0 +1,339 @@
+     cdc_mbim - Driver for CDC MBIM Mobile Broadband modems
+    ========================================================
+
+The cdc_mbim driver supports USB devices conforming to the "Universal
+Serial Bus Communications Class Subclass Specification for Mobile
+Broadband Interface Model" [1], which is a further development of
+"Universal Serial Bus Communications Class Subclass Specifications for
+Network Control Model Devices" [2] optimized for Mobile Broadband
+devices, aka "3G/LTE modems".
+
+
+Command Line Parameters
+=======================
+
+The cdc_mbim driver has no parameters of its own.  But the probing
+behaviour for NCM 1.0 backwards compatible MBIM functions (an
+"NCM/MBIM function" as defined in section 3.2 of [1]) is affected
+by a cdc_ncm driver parameter:
+
+prefer_mbim
+-----------
+Type:          Boolean
+Valid Range:   N/Y (0-1)
+Default Value: Y (MBIM is preferred)
+
+This parameter sets the system policy for NCM/MBIM functions.  Such
+functions will be handled by either the cdc_ncm driver or the cdc_mbim
+driver depending on the prefer_mbim setting.  Setting prefer_mbim=N
+makes the cdc_mbim driver ignore these functions and lets the cdc_ncm
+driver handle them instead.
+
+The parameter is writable, and can be changed at any time. A manual
+unbind/bind is required to make the change effective for NCM/MBIM
+functions bound to the "wrong" driver.
+
+
+Basic usage
+===========
+
+MBIM functions are inactive when unmanaged. The cdc_mbim driver only
+provides a userspace interface to the MBIM control channel, and will
+not participate in the management of the function. This implies that a
+userspace MBIM management application is always required to enable an
+MBIM function.
+
+Such userspace applications include, but are not limited to:
+ - mbimcli (included with the libmbim [3] library), and
+ - ModemManager [4]
+
+Establishing an MBIM IP session requires at least these actions by the
+management application:
+ - open the control channel
+ - configure network connection settings
+ - connect to network
+ - configure IP interface
+
+Management application development
+----------------------------------
+The driver <-> userspace interfaces are described below.  The MBIM
+control channel protocol is described in [1].
+
+
+MBIM control channel userspace ABI
+==================================
+
+/dev/cdc-wdmX character device
+------------------------------
+The driver creates a two-way pipe to the MBIM function control channel
+using the cdc-wdm driver as a subdriver.  The userspace end of the
+control channel pipe is a /dev/cdc-wdmX character device.
+
+The cdc_mbim driver does not process or police messages on the control
+channel.  The channel is fully delegated to the userspace management
+application.  It is therefore up to this application to ensure that it
+complies with all the control channel requirements in [1].
+
+The cdc-wdmX device is created as a child of the MBIM control
+interface USB device.  The character device associated with a specific
+MBIM function can be looked up using sysfs.  For example:
+
+ bjorn@nemi:~$ ls /sys/bus/usb/drivers/cdc_mbim/2-4:2.12/usbmisc
+ cdc-wdm0
+
+ bjorn@nemi:~$ grep . /sys/bus/usb/drivers/cdc_mbim/2-4:2.12/usbmisc/cdc-wdm0/dev
+ 180:0
+
+
+USB configuration descriptors
+-----------------------------
+The wMaxControlMessage field of the CDC MBIM functional descriptor
+limits the maximum control message size. The management application is
+responsible for negotiating a control message size complying with the
+requirements in section 9.3.1 of [1], taking this descriptor field
+into consideration.
+
+The userspace application can access the CDC MBIM functional
+descriptor of an MBIM function using either of the two USB
+configuration descriptor kernel interfaces described in [6] or [7].
+
+See also the ioctl documentation below.
+
+
+Fragmentation
+-------------
+The userspace application is responsible for all control message
+fragmentation and defragmentation, as described in section 9.5 of [1].
+
+
+/dev/cdc-wdmX write()
+---------------------
+The MBIM control messages from the management application *must not*
+exceed the negotiated control message size.
+
+
+/dev/cdc-wdmX read()
+--------------------
+The management application *must* accept control messages of up to the
+negotiated control message size.
+
+
+/dev/cdc-wdmX ioctl()
+---------------------
+IOCTL_WDM_MAX_COMMAND: Get Maximum Command Size
+This ioctl returns the wMaxControlMessage field of the CDC MBIM
+functional descriptor for MBIM devices.  This is intended as a
+convenience, eliminating the need to parse the USB descriptors from
+userspace.
+
+       #include <stdio.h>
+       #include <fcntl.h>
+       #include <unistd.h>
+       #include <sys/ioctl.h>
+       #include <linux/types.h>
+       #include <linux/usb/cdc-wdm.h>
+
+       int main()
+       {
+               __u16 max;
+               int fd = open("/dev/cdc-wdm0", O_RDWR);
+
+               if (fd < 0)
+                       return 1;
+
+               /* read the wMaxControlMessage limit from the descriptor */
+               if (!ioctl(fd, IOCTL_WDM_MAX_COMMAND, &max))
+                       printf("wMaxControlMessage is %d\n", max);
+
+               close(fd);
+               return 0;
+       }
+
+
+Custom device services
+----------------------
+The MBIM specification allows vendors to freely define additional
+services.  This is fully supported by the cdc_mbim driver.
+
+Support for new MBIM services, including vendor specified services, is
+implemented entirely in userspace, like the rest of the MBIM control
+protocol.
+
+New services should be registered in the MBIM Registry [5].
+
+
+
+MBIM data channel userspace ABI
+===============================
+
+wwanY network device
+--------------------
+The cdc_mbim driver represents the MBIM data channel as a single
+network device of the "wwan" type. This network device is initially
+mapped to MBIM IP session 0.
+
+
+Multiplexed IP sessions (IPS)
+-----------------------------
+MBIM allows multiplexing up to 256 IP sessions over a single USB data
+channel.  The cdc_mbim driver models such IP sessions as 802.1q VLAN
+subdevices of the master wwanY device, mapping MBIM IP session Z to
+VLAN ID Z for all values of Z greater than 0.
+
+The device maximum Z is given in the MBIM_DEVICE_CAPS_INFO structure
+described in section 10.5.1 of [1].
+
+The userspace management application is responsible for adding new
+VLAN links prior to establishing MBIM IP sessions where the SessionId
+is greater than 0. These links can be added by using the normal VLAN
+kernel interfaces, either ioctl or netlink.
+
+For example, adding a link for an MBIM IP session with SessionId 3:
+
+  ip link add link wwan0 name wwan0.3 type vlan id 3
+
+The driver will automatically map the "wwan0.3" network device to MBIM
+IP session 3.
+
+
+Device Service Streams (DSS)
+----------------------------
+MBIM also allows up to 256 non-IP data streams to be multiplexed over
+the same shared USB data channel.  The cdc_mbim driver models these
+sessions as another set of 802.1q VLAN subdevices of the master wwanY
+device, mapping MBIM DSS session A to VLAN ID (256 + A) for all values
+of A.
+
+The device maximum A is given in the MBIM_DEVICE_SERVICES_INFO
+structure described in section 10.5.29 of [1].
+
+The DSS VLAN subdevices are used as a practical interface between the
+shared MBIM data channel and an MBIM DSS-aware userspace application.
+They are not intended to be presented as-is to an end user. The
+assumption is that a userspace application initiating a DSS session
+also takes care of the necessary framing of the DSS data, presenting
+the stream to the end user in an appropriate way for the stream type.
+
+The network device ABI requires a dummy ethernet header for every DSS
+data frame being transported.  The contents of this header are
+arbitrary, with the following exceptions:
+ - TX frames using an IP protocol (0x0800 or 0x86dd) will be dropped
+ - RX frames will have the protocol field set to ETH_P_802_3 (but will
+   not be properly formatted 802.3 frames)
+ - RX frames will have the destination address set to the hardware
+   address of the master device
+
+The DSS supporting userspace management application is responsible for
+adding the dummy ethernet header on TX and stripping it on RX.
+
+This is a simple example using tools commonly available, exporting
+DssSessionId 5 as a pty character device pointed to by a /dev/nmea
+symlink:
+
+  ip link add link wwan0 name wwan0.dss5 type vlan id 261
+  ip link set dev wwan0.dss5 up
+  socat INTERFACE:wwan0.dss5,type=2 PTY:,echo=0,link=/dev/nmea
+
+This is only an example, most suitable for testing out a DSS
+service. Userspace applications supporting specific MBIM DSS services
+are expected to use the tools and programming interfaces required by
+that service.
+
+Note that adding VLAN links for DSS sessions is entirely optional.  A
+management application may instead choose to bind a packet socket
+directly to the master network device, using the received VLAN tags to
+map frames to the correct DSS session and adding 18-byte VLAN ethernet
+headers with the appropriate tag on TX.  In this case using a socket
+filter is recommended, matching only the DSS VLAN subset. This avoids
+unnecessary copying of unrelated IP session data to userspace.  For
+example:
+
+  static struct sock_filter dssfilter[] = {
+       /* use special negative offsets to get VLAN tag */
+       BPF_STMT(BPF_LD|BPF_B|BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
+       BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 1, 0, 6), /* true */
+
+       /* verify DSS VLAN range */
+       BPF_STMT(BPF_LD|BPF_H|BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG),
+       BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 256, 0, 4),     /* 256 is first DSS VLAN */
+       BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 512, 3, 0),     /* 511 is last DSS VLAN */
+
+       /* verify ethertype */
+       BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 2 * ETH_ALEN),
+       BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, ETH_P_802_3, 0, 1),
+
+       BPF_STMT(BPF_RET|BPF_K, (u_int)-1),     /* accept */
+       BPF_STMT(BPF_RET|BPF_K, 0),             /* ignore */
+  };
+
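+  A packet socket could then attach this filter with the classic
+  SO_ATTACH_FILTER socket option before binding to the master device.
+  The sketch below is purely illustrative and not part of the driver; it
+  assumes the dssfilter[] array above, uses "wwan0" as an example device
+  name, and keeps error handling to a minimum:
+
+    #include <arpa/inet.h>
+    #include <net/if.h>
+    #include <sys/socket.h>
+    #include <linux/filter.h>
+    #include <linux/if_ether.h>
+    #include <linux/if_packet.h>
+
+    static int open_dss_socket(void)
+    {
+        struct sock_fprog prog = {
+                .len    = sizeof(dssfilter) / sizeof(dssfilter[0]),
+                .filter = dssfilter,
+        };
+        struct sockaddr_ll addr = {
+                .sll_family   = AF_PACKET,
+                .sll_protocol = htons(ETH_P_ALL),
+                .sll_ifindex  = if_nametoindex("wwan0"),  /* example name */
+        };
+        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+
+        if (fd < 0)
+                return -1;
+
+        /* deliver only frames matching the DSS VLAN filter */
+        setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
+        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+        return fd;
+    }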
+
+
+Tagged IP session 0 VLAN
+------------------------
+As described above, MBIM IP session 0 is treated as special by the
+driver.  It is initially mapped to untagged frames on the wwanY
+network device.
+
+This mapping implies a few restrictions on multiplexed IPS and DSS
+sessions, which may not always be practical:
+ - no IPS or DSS session can use a frame size greater than the MTU on
+   IP session 0
+ - no IPS or DSS session can be in the up state unless the network
+   device representing IP session 0 is also up
+
+These problems can be avoided by optionally making the driver map IP
+session 0 to a VLAN subdevice, similar to all other IP sessions.  This
+behaviour is triggered by adding a VLAN link for the magic VLAN ID
+4094.  The driver will then immediately start mapping MBIM IP session
+0 to this VLAN, and will drop untagged frames on the master wwanY
+device.
+
+Tip: It might be less confusing to the end user to name this VLAN
+subdevice after the MBIM SessionID instead of the VLAN ID.  For
+example:
+
+  ip link add link wwan0 name wwan0.0 type vlan id 4094
+
+
+VLAN mapping
+------------
+
+Summarizing the cdc_mbim driver mapping described above, we have this
+relationship between VLAN tags on the wwanY network device and MBIM
+sessions on the shared USB data channel:
+
+  VLAN ID       MBIM type   MBIM SessionID           Notes
+  ---------------------------------------------------------
+  untagged      IPS         0                        a)
+  1 - 255       IPS         1 - 255 <VLANID>
+  256 - 511     DSS         0 - 255 <VLANID - 256>
+  512 - 4093                                         b)
+  4094          IPS         0                        c)
+
+    a) if no VLAN ID 4094 link exists, else dropped
+    b) unsupported VLAN range, unconditionally dropped
+    c) if a VLAN ID 4094 link exists, else dropped
+
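+The same relationship, expressed as an illustrative C helper (not driver
+code; the names are made up for this example):
+
+  enum mbim_chan { MBIM_IPS, MBIM_DSS };
+
+  /* map a VLAN ID on wwanY to the MBIM session it represents;
+   * returns 0 on success, -1 for the unsupported 512 - 4093 range
+   */
+  static int vlan_to_mbim(int vid, enum mbim_chan *type, int *session)
+  {
+        if (vid == 0 || vid == 4094) {  /* untagged (a) or magic VLAN (c) */
+                *type = MBIM_IPS;
+                *session = 0;
+        } else if (vid >= 1 && vid <= 255) {
+                *type = MBIM_IPS;
+                *session = vid;
+        } else if (vid >= 256 && vid <= 511) {
+                *type = MBIM_DSS;
+                *session = vid - 256;
+        } else {
+                return -1;              /* note b: unconditionally dropped */
+        }
+        return 0;
+  }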
+
+
+
+References
+==========
+
+[1] USB Implementers Forum, Inc. - "Universal Serial Bus
+      Communications Class Subclass Specification for Mobile Broadband
+      Interface Model", Revision 1.0 (Errata 1), May 1, 2013
+      - http://www.usb.org/developers/docs/devclass_docs/
+
+[2] USB Implementers Forum, Inc. - "Universal Serial Bus
+      Communications Class Subclass Specifications for Network Control
+      Model Devices", Revision 1.0 (Errata 1), November 24, 2010
+      - http://www.usb.org/developers/docs/devclass_docs/
+
+[3] libmbim - "a glib-based library for talking to WWAN modems and
+      devices which speak the Mobile Interface Broadband Model (MBIM)
+      protocol"
+      - http://www.freedesktop.org/wiki/Software/libmbim/
+
+[4] ModemManager - "a DBus-activated daemon which controls mobile
+      broadband (2G/3G/4G) devices and connections"
+      - http://www.freedesktop.org/wiki/Software/ModemManager/
+
+[5] "MBIM (Mobile Broadband Interface Model) Registry"
+       - http://compliance.usb.org/mbim/
+
+[6] "/proc/bus/usb filesystem output"
+       - Documentation/usb/proc_usb_info.txt
+
+[7] "/sys/bus/usb/devices/.../descriptors"
+       - Documentation/ABI/stable/sysfs-bus-usb
index 82e1cb0b3da8efc9c76e2a21d424d41d6d6ebe20..58c443926647e7a24c212b0240f82567e5cadfc8 100644 (file)
@@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table:
   mark                                  skb->mark
   queue                                 skb->queue_mapping
   hatype                                skb->dev->type
-  rxhash                                skb->rxhash
+  rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
   vlan_tci                              vlan_tx_tag_get(skb)
   vlan_pr                               vlan_tx_tag_present(skb)
@@ -613,7 +613,7 @@ Some core changes of the new internal format:
 
   Therefore, BPF calling convention is defined as:
 
-    * R0       - return value from in-kernel function
+    * R0       - return value from in-kernel function, and exit value for BPF program
     * R1 - R5  - arguments from BPF program to in-kernel function
     * R6 - R9  - callee saved registers that in-kernel function will preserve
     * R10      - read-only frame pointer to access stack
@@ -659,9 +659,140 @@ Some core changes of the new internal format:
 - Introduces bpf_call insn and register passing convention for zero overhead
   calls from/to other kernel functions:
 
-  After a kernel function call, R1 - R5 are reset to unreadable and R0 has a
-  return type of the function. Since R6 - R9 are callee saved, their state is
-  preserved across the call.
+  Before an in-kernel function call, the internal BPF program needs to
+  place function arguments into the R1 to R5 registers to satisfy the
+  calling convention; the interpreter will then take them from the
+  registers and pass them to the in-kernel function. If the R1 - R5
+  registers are mapped to the CPU registers used for argument passing on
+  the given architecture, the JIT compiler doesn't need to emit extra
+  moves. Function arguments will be in the correct registers and the
+  BPF_CALL instruction will be JITed as a single 'call' HW instruction.
+  This calling convention was picked to cover common call situations
+  without a performance penalty.
+
+  After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has
+  a return value of the function. Since R6 - R9 are callee saved, their state
+  is preserved across the call.
+
+  For example, consider three C functions:
+
+  u64 f1() { return (*_f2)(1); }
+  u64 f2(u64 a) { return f3(a + 1, a); }
+  u64 f3(u64 a, u64 b) { return a - b; }
+
+  GCC can compile f1, f3 into x86_64:
+
+  f1:
+    movl $1, %edi
+    movq _f2(%rip), %rax
+    jmp  *%rax
+  f3:
+    movq %rdi, %rax
+    subq %rsi, %rax
+    ret
+
+  Function f2 in BPF may look like:
+
+  f2:
+    bpf_mov R2, R1
+    bpf_add R1, 1
+    bpf_call f3
+    bpf_exit
+
+  If f2 is JITed and the pointer stored to '_f2', the calls f1 -> f2 -> f3 and
+  returns will be seamless. Without JIT, the __sk_run_filter() interpreter
+  needs to be used to call into f2.
+
+  For practical reasons all BPF programs have only one argument 'ctx' which is
+  already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
+  can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
+  are currently not supported, but these restrictions can be lifted if necessary
+  in the future.
+
+  On 64-bit architectures all registers map to HW registers one to one. For
+  example, the x86_64 JIT compiler can map them as ...
+
+    R0 - rax
+    R1 - rdi
+    R2 - rsi
+    R3 - rdx
+    R4 - rcx
+    R5 - r8
+    R6 - rbx
+    R7 - r13
+    R8 - r14
+    R9 - r15
+    R10 - rbp
+
+  ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing
+  and rbx, r12 - r15 are callee saved.
+
+  Then the following internal BPF pseudo-program:
+
+    bpf_mov R6, R1 /* save ctx */
+    bpf_mov R2, 2
+    bpf_mov R3, 3
+    bpf_mov R4, 4
+    bpf_mov R5, 5
+    bpf_call foo
+    bpf_mov R7, R0 /* save foo() return value */
+    bpf_mov R1, R6 /* restore ctx for next call */
+    bpf_mov R2, 6
+    bpf_mov R3, 7
+    bpf_mov R4, 8
+    bpf_mov R5, 9
+    bpf_call bar
+    bpf_add R0, R7
+    bpf_exit
+
+  After JIT to x86_64 it may look like:
+
+    push %rbp
+    mov %rsp,%rbp
+    sub $0x228,%rsp
+    mov %rbx,-0x228(%rbp)
+    mov %r13,-0x220(%rbp)
+    mov %rdi,%rbx
+    mov $0x2,%esi
+    mov $0x3,%edx
+    mov $0x4,%ecx
+    mov $0x5,%r8d
+    callq foo
+    mov %rax,%r13
+    mov %rbx,%rdi
+    mov $0x2,%esi
+    mov $0x3,%edx
+    mov $0x4,%ecx
+    mov $0x5,%r8d
+    callq bar
+    add %r13,%rax
+    mov -0x228(%rbp),%rbx
+    mov -0x220(%rbp),%r13
+    leaveq
+    retq
+
+  In this example, that is equivalent to the following C code:
+
+    u64 bpf_filter(u64 ctx)
+    {
+        return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9);
+    }
+
+  In-kernel functions foo() and bar() with the prototype u64 (*)(u64 arg1,
+  u64 arg2, u64 arg3, u64 arg4, u64 arg5) will receive arguments in the
+  proper registers and place their return value into '%rax', which is R0 in
+  BPF. Prologue and epilogue are emitted by the JIT and are implicit in the
+  interpreter. R0-R5 are scratch registers, so a BPF program that needs
+  values to survive a call must preserve them itself, as defined by the
+  calling convention.
+
+  For example the following program is invalid:
+
+    bpf_mov R1, 1
+    bpf_call foo
+    bpf_mov R0, R1
+    bpf_exit
+
+  After the call the registers R1-R5 contain junk values and cannot be read.
+  In the future a BPF verifier can be used to validate internal BPF programs.
 
 Also in the new design, BPF is limited to 4096 insns, which means that any
 program will terminate quickly and will only call a fixed number of kernel
@@ -676,6 +807,25 @@ A program, that is translated internally consists of the following elements:
 
   op:16, jt:8, jf:8, k:32    ==>    op:8, a_reg:4, x_reg:4, off:16, imm:32
 
+So far 87 internal BPF instructions have been implemented. The 8-bit 'op'
+opcode field has room for new instructions, some of which may use a
+16/24/32 byte encoding. New instructions must be a multiple of 8 bytes to
+preserve backward compatibility.
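+
+For orientation, the 64-bit encoding shown above roughly corresponds to a C
+struct like the sketch below (the field names follow the notation above;
+the exact in-kernel definition and its name may differ):
+
+  struct bpf_insn_sketch {
+        __u8    code;           /* op: opcode, 8 bits */
+        __u8    a_reg:4;        /* destination register */
+        __u8    x_reg:4;        /* source register */
+        __s16   off;            /* signed 16 bit offset */
+        __s32   imm;            /* signed 32 bit immediate constant */
+  };                            /* 8 bytes per instruction */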
+
+Internal BPF is a general purpose RISC instruction set. Not every register
+and every instruction is used during the translation from original BPF to
+the new format. For example, socket filters do not use the 'exclusive add'
+instruction, but tracing filters may use it to maintain event counters.
+Register R9 is not used by socket filters either, but more complex filters
+may run out of registers and have to resort to spill/fill to the stack.
+
+Internal BPF can be used as a generic assembler for last step performance
+optimizations; socket filters and seccomp already use it as such an
+assembler, and tracing filters may use it to generate code from within the
+kernel. Such in-kernel usage may not be bound by security considerations,
+since the generated internal BPF code may only optimize an internal code
+path and never be exposed to user space. Safety of internal BPF can come
+from a verifier (TBD). In the use cases described here, it may be used as a
+safe instruction set.
+
 Just like the original BPF, the new format runs within a controlled environment,
 is deterministic and the kernel can easily prove that. The safety of the program
 can be determined in two steps: first step does depth-first-search to disallow
@@ -683,6 +833,20 @@ loops and other CFG validation; second step starts from the first insn and
 descends all possible paths. It simulates execution of every insn and observes
 the state change of registers and stack.
 
+Testing
+-------
+
+Next to the BPF toolchain, the kernel also ships a test module that contains
+various test cases for classic and internal BPF that can be executed against
+the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and
+enabled via Kconfig:
+
+  CONFIG_TEST_BPF=m
+
+After the module has been built and installed, the test suite can be executed
+by loading the 'test_bpf' module via insmod or modprobe. Results of the test
+cases, including timings in nsec, can be found in the kernel log (dmesg).
+
 Misc
 ----
 
index 6fea79efb4cbfd31cc1d0155aef320b94b89da9e..38112d512f47db9ec9a70edae7c7df83b7d13119 100644 (file)
@@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets.
 
 Currently implemented fanout policies are:
 
-  - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+  - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash
   - PACKET_FANOUT_LB: schedule to socket by round-robin
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
index ca6977f5b2ed066f49823c0d7c0129a9a16b0820..99ca40e8e810888d30bbb9726eb2de2e537c3e79 100644 (file)
@@ -429,7 +429,7 @@ RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into
 (therbert@google.com)
 
 Accelerated RFS was introduced in 2.6.35. Original patches were
-submitted by Ben Hutchings (bhutchings@solarflare.com)
+submitted by Ben Hutchings (bwh@kernel.org)
 
 Authors:
 Tom Herbert (therbert@google.com)
index e67ea244204163a5d0eb9e43239c5ccd4394bae1..dd33abf44766f9f2de4b78da612de96c4615257c 100644 (file)
@@ -537,7 +537,7 @@ L:  linux-alpha@vger.kernel.org
 F:     arch/alpha/
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:     Vince Bridgers <vbridgers2013@gmail.com
+M:     Vince Bridgers <vbridgers2013@gmail.com>
 L:     netdev@vger.kernel.org
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:     Maintained
@@ -1888,19 +1888,20 @@ F:      drivers/net/ethernet/broadcom/bnx2.*
 F:     drivers/net/ethernet/broadcom/bnx2_*
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M:     Ariel Elior <ariele@broadcom.com>
+M:     Ariel Elior <ariel.elior@qlogic.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
 
-BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
+BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Christian Daudt <bcm@fixthebug.org>
 M:     Matt Porter <mporter@linaro.org>
 L:     bcm-kernel-feedback-list@broadcom.com
-T:     git git://git.github.com/broadcom/bcm11351
+T:     git git://github.com/broadcom/mach-bcm
 S:     Maintained
 F:     arch/arm/mach-bcm/
 F:     arch/arm/boot/dts/bcm113*
+F:     arch/arm/boot/dts/bcm216*
 F:     arch/arm/boot/dts/bcm281*
 F:     arch/arm/configs/bcm_defconfig
 F:     drivers/mmc/host/sdhci_bcm_kona.c
@@ -1967,6 +1968,12 @@ S:       Maintained
 F:     drivers/bcma/
 F:     include/linux/bcma/
 
+BROADCOM SYSTEMPORT ETHERNET DRIVER
+M:     Florian Fainelli <f.fainelli@gmail.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/ethernet/broadcom/bcmsysport.*
+
 BROCADE BFA FC SCSI DRIVER
 M:     Anil Gurumurthy <anil.gurumurthy@qlogic.com>
 M:     Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
@@ -2222,9 +2229,8 @@ F:        drivers/platform/chrome/
 CISCO VIC ETHERNET NIC DRIVER
 M:     Christian Benvenuti <benve@cisco.com>
 M:     Sujith Sankar <ssujith@cisco.com>
-M:     Govindarajulu Varadarajan <govindarajulu90@gmail.com>
+M:     Govindarajulu Varadarajan <_govind@gmx.com>
 M:     Neel Patel <neepatel@cisco.com>
-M:     Nishank Trivedi <nistrive@cisco.com>
 S:     Supported
 F:     drivers/net/ethernet/cisco/enic/
 
@@ -2245,12 +2251,6 @@ L:       linux-usb@vger.kernel.org
 S:     Maintained
 F:     drivers/usb/host/ohci-ep93xx.c
 
-CIRRUS LOGIC CS4270 SOUND DRIVER
-M:     Timur Tabi <timur@tabi.org>
-L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:     Odd Fixes
-F:     sound/soc/codecs/cs4270*
-
 CIRRUS LOGIC AUDIO CODEC DRIVERS
 M:     Brian Austin <brian.austin@cirrus.com>
 M:     Paul Handrigan <Paul.Handrigan@cirrus.com>
@@ -3485,6 +3485,12 @@ S:       Maintained
 F:     drivers/extcon/
 F:     Documentation/extcon/
 
+EXYNOS DP DRIVER
+M:     Jingoo Han <jg1.han@samsung.com>
+L:     dri-devel@lists.freedesktop.org
+S:     Maintained
+F:     drivers/gpu/drm/exynos/exynos_dp*
+
 EXYNOS MIPI DISPLAY DRIVERS
 M:     Inki Dae <inki.dae@samsung.com>
 M:     Donghwa Lee <dh09.lee@samsung.com>
@@ -3550,7 +3556,7 @@ F:        include/scsi/libfcoe.h
 F:     include/uapi/scsi/fc/
 
 FILE LOCKING (flock() and fcntl()/lockf())
-M:     Jeff Layton <jlayton@redhat.com>
+M:     Jeff Layton <jlayton@poochiereds.net>
 M:     J. Bruce Fields <bfields@fieldses.org>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
@@ -4812,6 +4818,14 @@ L:       linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:     kernel/irq/
+
+IRQCHIP DRIVERS
+M:     Thomas Gleixner <tglx@linutronix.de>
+M:     Jason Cooper <jason@lakedaemon.net>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
+T:     git git://git.infradead.org/users/jcooper/linux.git irqchip/core
 F:     drivers/irqchip/
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
@@ -5108,14 +5122,19 @@ F:      drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:     Christoffer Dall <christoffer.dall@linaro.org>
+M:     Marc Zyngier <marc.zyngier@arm.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu
 W:     http://systems.cs.columbia.edu/projects/kvm-arm
 S:     Supported
 F:     arch/arm/include/uapi/asm/kvm*
 F:     arch/arm/include/asm/kvm*
 F:     arch/arm/kvm/
+F:     virt/kvm/arm/
+F:     include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M:     Christoffer Dall <christoffer.dall@linaro.org>
 M:     Marc Zyngier <marc.zyngier@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu
@@ -5479,15 +5498,15 @@ F:      Documentation/hwmon/ltc4261
 F:     drivers/hwmon/ltc4261.c
 
 LTP (Linux Test Project)
-M:     Shubham Goyal <shubham@linux.vnet.ibm.com>
 M:     Mike Frysinger <vapier@gentoo.org>
 M:     Cyril Hrubis <chrubis@suse.cz>
-M:     Caspar Zhang <caspar@casparzhang.com>
 M:     Wanlong Gao <gaowanlong@cn.fujitsu.com>
+M:     Jan Stancek <jstancek@redhat.com>
+M:     Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
+M:     Alexey Kodanev <alexey.kodanev@oracle.com>
 L:     ltp-list@lists.sourceforge.net (subscribers-only)
-W:     http://ltp.sourceforge.net/
+W:     http://linux-test-project.github.io/
 T:     git git://github.com/linux-test-project/ltp.git
-T:     git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
 S:     Maintained
 
 M32R ARCHITECTURE
@@ -6500,10 +6519,10 @@ T:      git git://openrisc.net/~jonas/linux
 F:     arch/openrisc/
 
 OPENVSWITCH
-M:     Jesse Gross <jesse@nicira.com>
+M:     Pravin Shelar <pshelar@nicira.com>
 L:     dev@openvswitch.org
 W:     http://openvswitch.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:     Maintained
 F:     net/openvswitch/
 
@@ -7277,7 +7296,6 @@ F:        drivers/video/aty/aty128fb.c
 RALINK RT2X00 WIRELESS LAN DRIVER
 P:     rt2x00 project
 M:     Ivo van Doorn <IvDoorn@gmail.com>
-M:     Gertjan van Wingerde <gwingerde@gmail.com>
 M:     Helmut Schaa <helmut.schaa@googlemail.com>
 L:     linux-wireless@vger.kernel.org
 L:     users@rt2x00.serialmonkey.com (moderated for non-subscribers)
@@ -7293,7 +7311,7 @@ F:        Documentation/blockdev/ramdisk.txt
 F:     drivers/block/brd.c
 
 RANDOM NUMBER DRIVER
-M:     Theodore Ts'o" <tytso@mit.edu>
+M:     "Theodore Ts'o" <tytso@mit.edu>
 S:     Maintained
 F:     drivers/char/random.c
 
@@ -7674,7 +7692,6 @@ F:        drivers/clk/samsung/
 SAMSUNG SXGBE DRIVERS
 M:     Byungho An <bh74.an@samsung.com>
 M:     Girish K S <ks.giri@samsung.com>
-M:     Siva Reddy Kallam <siva.kallam@samsung.com>
 M:     Vipul Pandya <vipul.pandya@samsung.com>
 S:     Supported
 L:     netdev@vger.kernel.org
@@ -9098,6 +9115,9 @@ F:        arch/um/os-Linux/drivers/
 
 TURBOCHANNEL SUBSYSTEM
 M:     "Maciej W. Rozycki" <macro@linux-mips.org>
+M:     Ralf Baechle <ralf@linux-mips.org>
+L:     linux-mips@linux-mips.org
+Q:     http://patchwork.linux-mips.org/project/linux-mips/list/
 S:     Maintained
 F:     drivers/tc/
 F:     include/linux/tc.h
@@ -9951,7 +9971,7 @@ F:        drivers/net/hamradio/*scc.c
 F:     drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
-M:     Seth Jennings <sjenning@linux.vnet.ibm.com>
+M:     Seth Jennings <sjennings@variantweb.net>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zbud.c
@@ -9996,7 +10016,7 @@ F:       mm/zsmalloc.c
 F:     include/linux/zsmalloc.h
 
 ZSWAP COMPRESSED SWAP CACHING
-M:     Seth Jennings <sjenning@linux.vnet.ibm.com>
+M:     Seth Jennings <sjennings@variantweb.net>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index 60ccbfe750a2641db2e43635885985b9e49318ed..9d993787afe08acfe8ef397d7fc80a2fb357dd47 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc6
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index 819dd5f7eb055ec87f082188e3d7ebb4a8b683aa..29b82adbf0b401685b269f81a22d29ffd4fd97a5 100644 (file)
@@ -614,11 +614,13 @@ resume_user_mode_begin:
 
 resume_kernel_mode:
 
-#ifdef CONFIG_PREEMPT
-
-       ; This is a must for preempt_schedule_irq()
+       ; Disable Interrupts from this point on
+       ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
+       ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
        IRQ_DISABLE     r9
 
+#ifdef CONFIG_PREEMPT
+
        ; Can't preempt if preemption disabled
        GET_CURR_THR_INFO_FROM_SP   r10
        ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
index ab438cb5af5570f5aae9b3215b9c73586ce80427..db3c5414223e7298346c6338665263d5f51c0e3c 100644 (file)
@@ -30,9 +30,9 @@ config ARM
        select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_TRACEHOOK
        select HAVE_BPF_JIT
+       select HAVE_CC_STACKPROTECTOR
        select HAVE_CONTEXT_TRACKING
        select HAVE_C_RECORDMCOUNT
-       select HAVE_CC_STACKPROTECTOR
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_API_DEBUG
        select HAVE_DMA_ATTRS
@@ -311,6 +311,7 @@ config ARCH_MULTIPLATFORM
        select ARM_HAS_SG_CHAIN
        select ARM_PATCH_PHYS_VIRT
        select AUTO_ZRELADDR
+       select CLKSRC_OF
        select COMMON_CLK
        select GENERIC_CLOCKEVENTS
        select MULTI_IRQ_HANDLER
@@ -422,8 +423,8 @@ config ARCH_EFM32
        bool "Energy Micro efm32"
        depends on !MMU
        select ARCH_REQUIRE_GPIOLIB
-       select AUTO_ZRELADDR
        select ARM_NVIC
+       select AUTO_ZRELADDR
        select CLKSRC_OF
        select COMMON_CLK
        select CPU_V7M
@@ -511,8 +512,8 @@ config ARCH_IXP4XX
        bool "IXP4xx-based"
        depends on MMU
        select ARCH_HAS_DMA_SET_COHERENT_MASK
-       select ARCH_SUPPORTS_BIG_ENDIAN
        select ARCH_REQUIRE_GPIOLIB
+       select ARCH_SUPPORTS_BIG_ENDIAN
        select CLKSRC_MMIO
        select CPU_XSCALE
        select DMABOUNCE if PCI
@@ -1110,9 +1111,9 @@ config ARM_NR_BANKS
        default 8
 
 config IWMMXT
-       bool "Enable iWMMXt support" if !CPU_PJ4
-       depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
-       default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
+       bool "Enable iWMMXt support"
+       depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
+       default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B
        help
          Enable support for iWMMXt context switching at run time if
          running on a CPU that supports it.
@@ -1575,8 +1576,8 @@ config BIG_LITTLE
 config BL_SWITCHER
        bool "big.LITTLE switcher support"
        depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
-       select CPU_PM
        select ARM_CPU_SUSPEND
+       select CPU_PM
        help
          The big.LITTLE "switcher" provides the core functionality to
          transparently handle transition between a cluster of A15's
@@ -1920,9 +1921,9 @@ config XEN
        depends on CPU_V7 && !CPU_V6
        depends on !GENERIC_ATOMIC64
        depends on MMU
+       select ARCH_DMA_ADDR_T_64BIT
        select ARM_PSCI
        select SWIOTLB_XEN
-       select ARCH_DMA_ADDR_T_64BIT
        help
          Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
index 4a2fc0bf6fc913683c29bc2113b85b1f3db9bdd3..eab8ecbe69c1d271369700a9f7aac14c628685b0 100644 (file)
@@ -1030,9 +1030,9 @@ config DEBUG_UART_PHYS
        default 0x40100000 if DEBUG_PXA_UART1
        default 0x42000000 if ARCH_GEMINI
        default 0x7c0003f8 if FOOTBRIDGE
-       default 0x80230000 if DEBUG_PICOXCELL_UART
        default 0x80070000 if DEBUG_IMX23_UART
        default 0x80074000 if DEBUG_IMX28_UART
+       default 0x80230000 if DEBUG_PICOXCELL_UART
        default 0x808c0000 if ARCH_EP93XX
        default 0x90020000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
        default 0xb0090000 if DEBUG_VEXPRESS_UART0_CRX
@@ -1096,22 +1096,22 @@ config DEBUG_UART_VIRT
        default 0xfeb26000 if DEBUG_RK3X_UART1
        default 0xfeb30c00 if DEBUG_KEYSTONE_UART0
        default 0xfeb31000 if DEBUG_KEYSTONE_UART1
-       default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE
-       default 0xfed60000 if DEBUG_RK29_UART0
-       default 0xfed64000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
-       default 0xfed68000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
        default 0xfec02000 if DEBUG_SOCFPGA_UART
+       default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE
        default 0xfec20000 if DEBUG_DAVINCI_DMx_UART0
        default 0xfed0c000 if DEBUG_DAVINCI_DA8XX_UART1
        default 0xfed0d000 if DEBUG_DAVINCI_DA8XX_UART2
        default 0xfed12000 if ARCH_KIRKWOOD
+       default 0xfed60000 if DEBUG_RK29_UART0
+       default 0xfed64000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
+       default 0xfed68000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
        default 0xfedc0000 if ARCH_EP93XX
        default 0xfee003f8 if FOOTBRIDGE
        default 0xfee20000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
-       default 0xfef36000 if DEBUG_HIGHBANK_UART
        default 0xfee82340 if ARCH_IOP13XX
        default 0xfef00000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
        default 0xfef00003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
+       default 0xfef36000 if DEBUG_HIGHBANK_UART
        default 0xfefff700 if ARCH_IOP33X
        default 0xff003000 if DEBUG_U300_UART
        default DEBUG_UART_PHYS if !MMU
index 35c146f31e46effa1b3b64cd69fd3a3b76ab38cb..377b7c3640337ed994107814836d16909ddbd447 100644 (file)
@@ -51,10 +51,9 @@ dtb-$(CONFIG_ARCH_AT91)      += sama5d36ek.dtb
 
 dtb-$(CONFIG_ARCH_ATLAS6) += atlas6-evb.dtb
 dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
+dtb-$(CONFIG_ARCH_BCM_5301X) += bcm4708-netgear-r6250.dtb
 dtb-$(CONFIG_ARCH_BCM_MOBILE) += bcm28155-ap.dtb \
        bcm21664-garnet.dtb
-dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
-dtb-$(CONFIG_ARCH_BCM_5301X) += bcm4708-netgear-r6250.dtb
 dtb-$(CONFIG_ARCH_BERLIN) += \
        berlin2-sony-nsz-gs7.dtb        \
        berlin2cd-google-chromecast.dtb
@@ -246,6 +245,7 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
        omap3-sbc-t3730.dtb \
        omap3-devkit8000.dtb \
        omap3-beagle-xm.dtb \
+       omap3-beagle-xm-ab.dtb \
        omap3-evm.dtb \
        omap3-evm-37xx.dtb \
        omap3-ldp.dtb \
@@ -294,13 +294,6 @@ dtb-$(CONFIG_ARCH_PRIMA2) += prima2-evb.dtb
 dtb-$(CONFIG_ARCH_QCOM) += qcom-msm8660-surf.dtb \
        qcom-msm8960-cdp.dtb \
        qcom-apq8074-dragonboard.dtb
-dtb-$(CONFIG_ARCH_U8500) += ste-snowball.dtb \
-       ste-hrefprev60-stuib.dtb \
-       ste-hrefprev60-tvk.dtb \
-       ste-hrefv60plus-stuib.dtb \
-       ste-hrefv60plus-tvk.dtb \
-       ste-ccu8540.dtb \
-       ste-ccu9540.dtb
 dtb-$(CONFIG_ARCH_S3C24XX) += s3c2416-smdk2416.dtb
 dtb-$(CONFIG_ARCH_S3C64XX) += s3c6410-mini6410.dtb \
        s3c6410-smdk6410.dtb
@@ -369,9 +362,16 @@ dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
        tegra30-cardhu-a04.dtb \
        tegra114-dalmore.dtb \
        tegra124-venice2.dtb
+dtb-$(CONFIG_ARCH_U300) += ste-u300.dtb
+dtb-$(CONFIG_ARCH_U8500) += ste-snowball.dtb \
+       ste-hrefprev60-stuib.dtb \
+       ste-hrefprev60-tvk.dtb \
+       ste-hrefv60plus-stuib.dtb \
+       ste-hrefv60plus-tvk.dtb \
+       ste-ccu8540.dtb \
+       ste-ccu9540.dtb
 dtb-$(CONFIG_ARCH_VERSATILE) += versatile-ab.dtb \
        versatile-pb.dtb
-dtb-$(CONFIG_ARCH_U300) += ste-u300.dtb
 dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2p-ca5s.dtb \
        vexpress-v2p-ca9.dtb \
        vexpress-v2p-ca15-tc1.dtb \
index e3f27ec317182b887961c0a58ca49f66407935d8..2e7d932887b50185e95df69513986635e1403b39 100644 (file)
 &usb {
        status = "okay";
 
-       control@44e10000 {
+       control@44e10620 {
                status = "okay";
        };
 
                dr_mode = "host";
        };
 
-       dma-controller@07402000  {
+       dma-controller@47402000  {
                status = "okay";
        };
 };
index 28ae040e7c3d90b9094afc8c6543cf4749ffe5b0..6028217ace0fab2fb2476d36980521cab3db07e3 100644 (file)
 
        am335x_evm_audio_pins: am335x_evm_audio_pins {
                pinctrl-single,pins = <
-                       0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_rx_dv.mcasp1_aclkx */
-                       0x110 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_txd3.mcasp1_fsx */
+                       0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
+                       0x110 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_rxerr.mcasp1_fsx */
                        0x108 (PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* mii1_col.mcasp1_axr2 */
                        0x144 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* rmii1_ref_clk.mcasp1_axr3 */
                >;
 &usb {
        status = "okay";
 
-       control@44e10000 {
+       control@44e10620 {
                status = "okay";
        };
 
                dr_mode = "host";
        };
 
-       dma-controller@07402000  {
+       dma-controller@47402000  {
                status = "okay";
        };
 };
index ec08f6f677c3eb4a2025a096b30e941efbd706a0..ab238850a7b21947de099158a6712dda1ce39fcb 100644 (file)
 &usb {
        status = "okay";
 
-       control@44e10000 {
+       control@44e10620 {
                status = "okay";
        };
 
                dr_mode = "host";
        };
 
-       dma-controller@07402000  {
+       dma-controller@47402000  {
                status = "okay";
        };
 };
index 7063311a58d96785dd297f89b982970a59a185bb..9f22c189f6361194a5d5705a7fae1ba19235043d 100644 (file)
                reg = <0 0 0>; /* CS0, offset 0 */
                nand-bus-width = <8>;
                ti,nand-ecc-opt = "bch8";
-               gpmc,device-nand = "true";
                gpmc,device-width = <1>;
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
 &usb {
        status = "okay";
 
-       control@44e10000 {
+       control@44e10620 {
                status = "okay";
        };
 
                dr_mode = "host";
        };
 
-       dma-controller@07402000  {
+       dma-controller@47402000  {
                status = "okay";
        };
 };
index 9770e35f25361644ade6c8846ba5cb7571822f0c..f1eea4af40eda05916dfa73ad8ffc48627d490a2 100644 (file)
@@ -72,7 +72,7 @@
        };
 
        /*
-        * The soc node represents the soc top level view. It is uses for IPs
+        * The soc node represents the soc top level view. It is used for IPs
         * that are not memory mapped in the MPU view or for the MPU itself.
         */
        soc {
@@ -94,8 +94,8 @@
 
        /*
         * XXX: Use a flat representation of the AM33XX interconnect.
-        * The real AM33XX interconnect network is quite complex.Since
-        * that will not bring real advantage to represent that in DT
+        * The real AM33XX interconnect network is quite complex. Since
+        * it will not bring real advantage to represent that in DT
         * for the moment, just use a fake OCP bus entry to represent
         * the whole bus hierarchy.
         */
                        compatible = "ti,edma3";
                        ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
                        reg =   <0x49000000 0x10000>,
-                               <0x44e10f90 0x10>;
+                               <0x44e10f90 0x40>;
                        interrupts = <12 13 14>;
                        #dma-cells = <1>;
                        dma-channels = <64>;
                mac: ethernet@4a100000 {
                        compatible = "ti,cpsw";
                        ti,hwmods = "cpgmac0";
+                       clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+                       clock-names = "fck", "cpts";
                        cpdma_channels = <8>;
                        ale_entries = <1024>;
                        bd_ram_size = <0x2000>;
                              <0x46000000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <80>, <81>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 8>,
                                <&edma 9>;
                              <0x46400000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <82>, <83>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 10>,
                                <&edma 11>;
index 788391f916844130d8d6cdb2722479163863a707..5a452fdd7c5d9711cec9f6d0196f90b0726fefc3 100644 (file)
        };
 };
 
+&iva {
+       status = "disabled";
+};
+
+&mailbox {
+       status = "disabled";
+};
+
+&mmu_isp {
+       status = "disabled";
+};
+
+&smartreflex_mpu_iva {
+       status = "disabled";
+};
+
 /include/ "am35xx-clocks.dtsi"
 /include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
index 36d523a268314d3e1948dd894ae6b07141ac946e..03a2255051260ce50f8480f974316996dbae57f4 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ti,hwmods = "cpgmac0";
+                       clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+                       clock-names = "fck", "cpts";
                        status = "disabled";
                        cpdma_channels = <8>;
                        ale_entries = <1024>;
                              <0x46000000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <80>, <81>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 8>,
                               <&edma 9>;
                              <0x46400000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <82>, <83>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 10>,
                               <&edma 11>;
index df8798e8bd255bbf789da0b44f5229b9f3dc80dd..a055f7f0f14ae5a3e080489c9c424bd37be0e1c0 100644 (file)
        status = "okay";
 };
 
+&gpio5 {
+       status = "okay";
+       ti,no-reset-on-init;
+};
+
 &mmc1 {
        status = "okay";
        vmmc-supply = <&vmmcsd_fixed>;
index 82f238a9063ffe47d10dbe083f136e1876f8efd2..3383c4b668035737e1812777fed34a03f5f9ef7c 100644 (file)
@@ -67,6 +67,7 @@
                        i2c@11000 {
                                pinctrl-0 = <&i2c0_pins>;
                                pinctrl-names = "default";
+                               clock-frequency = <100000>;
                                status = "okay";
                                audio_codec: audio-codec@4a {
                                        compatible = "cirrus,cs42l51";
index bbb40f62037dbaf67ac8a585a17817997e8c1a17..bb77970c0b1223499137ef80079ad39446919f42 100644 (file)
                                #size-cells = <0>;
                                compatible = "marvell,orion-mdio";
                                reg = <0x72004 0x4>;
+                               clocks = <&gateclk 4>;
                        };
 
                        eth1: ethernet@74000 {
index 9378d3136b41d7b37f11abdf01186c019758f7f1..0451124e8ebf49af45b34072ae69a02a6fa133a9 100644 (file)
                                };
                        };
 
+                       sata@a0000 {
+                               status = "okay";
+                               nr-ports = <2>;
+                       };
+
                        nand: nand@d0000 {
                                pinctrl-0 = <&nand_pins>;
                                pinctrl-names = "default";
index a064f59da02d566b4bfb994376e53e6d51693797..ca8813bb99ba627d52e7c9d163005d39481f3e1a 100644 (file)
                                #size-cells = <0>;
                                compatible = "marvell,orion-mdio";
                                reg = <0x72004 0x4>;
+                               clocks = <&gateclk 4>;
                        };
 
                        coredivclk: clock@e4250 {
index 448373c4b0e534c1d2ce08592981f5d38bad39a8..90f0bf6f92715c7335072bda5b508e7800fa89a4 100644 (file)
@@ -49,7 +49,7 @@
                        /* Device Bus parameters are required */
 
                        /* Read parameters */
-                       devbus,bus-width    = <8>;
+                       devbus,bus-width    = <16>;
                        devbus,turn-off-ps  = <60000>;
                        devbus,badr-skew-ps = <0>;
                        devbus,acc-first-ps = <124000>;
index 61bda687f782f65485f958adb8d8ec2822ebde35..0c756421ae6aa5f504b72898c16875bdaab5f8d3 100644 (file)
@@ -59,7 +59,7 @@
                        /* Device Bus parameters are required */
 
                        /* Read parameters */
-                       devbus,bus-width    = <8>;
+                       devbus,bus-width    = <16>;
                        devbus,turn-off-ps  = <60000>;
                        devbus,badr-skew-ps = <0>;
                        devbus,acc-first-ps = <124000>;
                        ethernet@70000 {
                                status = "okay";
                                phy = <&phy0>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
                        ethernet@74000 {
                                status = "okay";
                                phy = <&phy1>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
                        ethernet@30000 {
                                status = "okay";
                                phy = <&phy2>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
                        ethernet@34000 {
                                status = "okay";
                                phy = <&phy3>;
-                               phy-mode = "rgmii-id";
+                               phy-mode = "qsgmii";
                        };
 
                        /* Front-side USB slot */
index c2242745b9b87a29afcfcc2fc77bf9178d5814f9..3bb8c008b14c822b26487d1c185ef18673aff1cf 100644 (file)
                        ethernet@30000 {
                                status = "okay";
                                phy-mode = "sgmii";
+                               fixed-link {
+                                       speed = <1000>;
+                                       full-duplex;
+                               };
                        };
 
                        pcie-controller {
index 985948ce67b3271a65c9b4a413a99e7d255b12e7..5d42feb3104983a2ee9b21be33b59cc2e66a1bcd 100644 (file)
@@ -39,7 +39,7 @@
                        /* Device Bus parameters are required */
 
                        /* Read parameters */
-                       devbus,bus-width    = <8>;
+                       devbus,bus-width    = <16>;
                        devbus,turn-off-ps  = <60000>;
                        devbus,badr-skew-ps = <0>;
                        devbus,acc-first-ps = <124000>;
index ce1375595e5f27117c37881bcafa5fc5805b1455..4537259ce5299baf8cf68978d7c2d106c5f67c32 100644 (file)
@@ -34,7 +34,7 @@
                        };
 
                        spi0: spi@f0004000 {
-                               cs-gpios = <&pioD 13 0>;
+                               cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
                                status = "okay";
                        };
 
@@ -79,7 +79,7 @@
                        };
 
                        spi1: spi@f8008000 {
-                               cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>;
+                               cs-gpios = <&pioC 25 0>;
                                status = "okay";
                        };
 
index e21dda0e8986574b2531c1675eca1b4de164822d..3be973e9889a2b0ef29cc45657ad6eeb056e7f5f 100644 (file)
@@ -10,7 +10,7 @@
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        model = "Atmel AT91SAM9261 family SoC";
index 63e1784d272c556974023fcfd7d46ba7a0510e3d..92a52faebef77cd8eda572d11504c0f83362a428 100644 (file)
@@ -8,7 +8,7 @@
 
 #include "skeleton.dtsi"
 #include <dt-bindings/pinctrl/at91.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
 
index 1c0f8e1893aed11eb3dfb8ca43360cbed2e961f6..149b5509993588aa17971d6fbc7a56f314c658e5 100644 (file)
@@ -80,7 +80,7 @@
        };
 
        /*
-        * The soc node represents the soc top level view. It is uses for IPs
+        * The soc node represents the soc top level view. It is used for IPs
         * that are not memory mapped in the MPU view or for the MPU itself.
         */
        soc {
@@ -94,7 +94,7 @@
        /*
         * XXX: Use a flat representation of the SOC interconnect.
         * The real OMAP interconnect network is quite complex.
-        * Since that will not bring real advantage to represent that in DT for
+        * Since it will not bring real advantage to represent that in DT for
         * the moment, just use a fake OCP bus entry to represent the whole bus
         * hierarchy.
         */
index e96da9a898ad5cda61bc6b1b3195179c5c513eb6..cfb8fc753f5037087d7bbdfa6744d1a8fc283ec9 100644 (file)
                #clock-cells = <0>;
                compatible = "ti,mux-clock";
                clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
-               ti,bit-shift = <28>;
+               ti,bit-shift = <24>;
                reg = <0x1860>;
        };
 
index 32f760e24898df9010b22b9efd21f400e5da5ab8..ea323f09dc78f83ecbf934ff894ef25f79f4a99e 100644 (file)
@@ -56,6 +56,7 @@
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
index 09f57b39e3ef37e7df1abd4321213bcca8626db5..73aae4f5e539dc3c6a9e68f642e0840e90e0f9e0 100644 (file)
@@ -29,6 +29,7 @@
 
                osc26m {
                        compatible = "fsl,imx-osc26m", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
        };
index 6279e0b4f7683106439c062209e3c9101f0ea7ad..137e010eab35bebd9cad861574713ce9288adc68 100644 (file)
@@ -48,6 +48,7 @@
 
                osc26m {
                        compatible = "fsl,imx-osc26m", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <26000000>;
                };
        };
index 0c75fe3deb351d5210a513e442e20e74ba66f397..9c89d1ca97c2ce771a587c3066f968be22b52005 100644 (file)
 
                ckil {
                        compatible = "fsl,imx-ckil", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                ckih1 {
                        compatible = "fsl,imx-ckih1", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <22579200>;
                };
 
                ckih2 {
                        compatible = "fsl,imx-ckih2", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
index 5f8216d08f6b5f4ff98e13df047ab9b27ee83706..150bb4e2f744374fd712895ce30786dc9fdb7b25 100644 (file)
 
                ckil {
                        compatible = "fsl,imx-ckil", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                ckih1 {
                        compatible = "fsl,imx-ckih1", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                ckih2 {
                        compatible = "fsl,imx-ckih2", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
index f6d3ac3e55872657601c8a1785c8dd1e65707632..d5d146a8b149cd14601cef1b26857002d27eec9b 100644 (file)
@@ -17,7 +17,8 @@
        compatible = "denx,imx53-m53evk", "fsl,imx53";
 
        memory {
-               reg = <0x70000000 0x20000000>;
+               reg = <0x70000000 0x20000000>,
+                     <0xb0000000 0x20000000>;
        };
 
        soc {
                irq-trigger = <0x1>;
 
                stmpe_touchscreen {
-                       compatible = "stmpe,ts";
+                       compatible = "st,stmpe-ts";
                        reg = <0>;
-                       ts,sample-time = <4>;
-                       ts,mod-12b = <1>;
-                       ts,ref-sel = <0>;
-                       ts,adc-freq = <1>;
-                       ts,ave-ctrl = <3>;
-                       ts,touch-det-delay = <3>;
-                       ts,settling = <4>;
-                       ts,fraction-z = <7>;
-                       ts,i-drive = <1>;
+                       st,sample-time = <4>;
+                       st,mod-12b = <1>;
+                       st,ref-sel = <0>;
+                       st,adc-freq = <1>;
+                       st,ave-ctrl = <3>;
+                       st,touch-det-delay = <3>;
+                       st,settling = <4>;
+                       st,fraction-z = <7>;
+                       st,i-drive = <1>;
                };
        };
 
index 7c8c129698929151512f282c5baad14c69f51309..a3431d7848709aac0463a27e4471c22dd17a4a46 100644 (file)
 &tve {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_vga_sync_1>;
-       i2c-ddc-bus = <&i2c3>;
+       ddc-i2c-bus = <&i2c3>;
        fsl,tve-mode = "vga";
        fsl,hsync-pin = <4>;
        fsl,vsync-pin = <6>;
index 3f825a6813dae47a2412aebbf42788ca27cf8a26..ede04fa4161f63aeb925e35267608a12a0c5b0cc 100644 (file)
@@ -14,7 +14,8 @@
 
 / {
        memory {
-               reg = <0x70000000 0x40000000>;
+               reg = <0x70000000 0x20000000>,
+                     <0xb0000000 0x20000000>;
        };
 
        display0: display@di0 {
index 0217dde3b36b474d19d22a6b7192fb2c4aeefd61..3b73e81dc3f0df58507a7a6a3ae0556f9abee7dd 100644 (file)
        soc {
                display: display@di0 {
                        compatible = "fsl,imx-parallel-display";
-                       crtcs = <&ipu 0>;
                        interface-pix-fmt = "rgb24";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_rgb24_vga1>;
                        status = "okay";
 
+                       port {
+                               display0_in: endpoint {
+                                       remote-endpoint = <&ipu_di0_disp0>;
+                               };
+                       };
+
                        display-timings {
                                VGA {
                                        clock-frequency = <25200000>;
        };
 };
 
+&ipu_di0_disp0 {
+       remote-endpoint = <&display0_in>;
+};
+
 &kpp {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_kpp>;
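The hunks above drop the legacy crtcs property in favour of the common of-graph linkage: each side of a video connection carries a port/endpoint node, and the two endpoints point at each other through remote-endpoint phandles. A stripped-down sketch of such a link, with illustrative labels:

	display {
		port {
			display_in: endpoint {
				remote-endpoint = <&di_out>;
			};
		};
	};

	ipu_di {
		port {
			di_out: endpoint {
				remote-endpoint = <&display_in>;
			};
		};
	};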
index b57ab57740f686a96200b9a51c63c7d982d82644..6a1bf4ff83d5516dac0016ebc2bdd21d05565631 100644 (file)
 
                ckil {
                        compatible = "fsl,imx-ckil", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                ckih1 {
                        compatible = "fsl,imx-ckih1", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <22579200>;
                };
 
                ckih2 {
                        compatible = "fsl,imx-ckih2", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,imx53-ipu";
-                       reg = <0x18000000 0x080000000>;
+                       reg = <0x18000000 0x08000000>;
                        interrupts = <11 10>;
                        clocks = <&clks IMX5_CLK_IPU_GATE>,
                                 <&clks IMX5_CLK_IPU_DI0_GATE>,
 
                                        port {
                                                lvds1_in: endpoint {
-                                                       remote-endpoint = <&ipu_di0_lvds0>;
+                                                       remote-endpoint = <&ipu_di1_lvds1>;
                                                };
                                        };
                                };
index a63bbb3d46bb43637ad2235bb3892c82508f1ac9..e4ae38fd02692a1d4a41876da4958519bb1465da 100644 (file)
        compatible = "dmo,imx6q-edmqmx6", "fsl,imx6q";
 
        aliases {
-               gpio7 = &stmpe_gpio;
+               gpio7 = &stmpe_gpio1;
+               gpio8 = &stmpe_gpio2;
+               stmpe-i2c0 = &stmpe1;
+               stmpe-i2c1 = &stmpe2;
        };
 
        memory {
                        regulator-always-on;
                };
 
-               reg_usb_otg_vbus: regulator@1 {
+               reg_usb_otg_switch: regulator@1 {
                        compatible = "regulator-fixed";
                        reg = <1>;
-                       regulator-name = "usb_otg_vbus";
+                       regulator-name = "usb_otg_switch";
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio7 12 0>;
+                       regulator-boot-on;
+                       regulator-always-on;
                };
 
                reg_usb_host1: regulator@2 {
 
                led-blue {
                        label = "blue";
-                       gpios = <&stmpe_gpio 8 GPIO_ACTIVE_HIGH>;
+                       gpios = <&stmpe_gpio1 8 GPIO_ACTIVE_HIGH>;
                        linux,default-trigger = "heartbeat";
                };
 
                led-green {
                        label = "green";
-                       gpios = <&stmpe_gpio 9 GPIO_ACTIVE_HIGH>;
+                       gpios = <&stmpe_gpio1 9 GPIO_ACTIVE_HIGH>;
                };
 
                led-pink {
                        label = "pink";
-                       gpios = <&stmpe_gpio 10 GPIO_ACTIVE_HIGH>;
+                       gpios = <&stmpe_gpio1 10 GPIO_ACTIVE_HIGH>;
                };
 
                led-red {
                        label = "red";
-                       gpios = <&stmpe_gpio 11 GPIO_ACTIVE_HIGH>;
+                       gpios = <&stmpe_gpio1 11 GPIO_ACTIVE_HIGH>;
                };
        };
 };
        clock-frequency = <100000>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_i2c2
-                    &pinctrl_stmpe>;
+                    &pinctrl_stmpe1
+                    &pinctrl_stmpe2>;
        status = "okay";
 
        pmic: pfuze100@08 {
                };
        };
 
-       stmpe: stmpe1601@40 {
+       stmpe1: stmpe1601@40 {
                compatible = "st,stmpe1601";
                reg = <0x40>;
                interrupts = <30 0>;
                interrupt-parent = <&gpio3>;
 
-               stmpe_gpio: stmpe_gpio {
+               stmpe_gpio1: stmpe_gpio {
+                       #gpio-cells = <2>;
+                       compatible = "st,stmpe-gpio";
+               };
+       };
+
+       stmpe2: stmpe1601@44 {
+               compatible = "st,stmpe1601";
+               reg = <0x44>;
+               interrupts = <2 0>;
+               interrupt-parent = <&gpio5>;
+
+               stmpe_gpio2: stmpe_gpio {
                        #gpio-cells = <2>;
                        compatible = "st,stmpe-gpio";
                };
                        >;
                };
 
-               pinctrl_stmpe: stmpegrp {
+               pinctrl_stmpe1: stmpe1grp {
                        fsl,pins = <MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x80000000>;
                };
 
+               pinctrl_stmpe2: stmpe2grp {
+                       fsl,pins = <MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x80000000>;
+               };
+
                pinctrl_uart1: uart1grp {
                        fsl,pins = <
                                MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA      0x1b0b1
 
                pinctrl_usbotg: usbotggrp {
                        fsl,pins = <
-                               MX6QDL_PAD_GPIO_1__USB_OTG_ID           0x17059
+                               MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID       0x17059
                        >;
                };
 
 &usbh1 {
        vbus-supply = <&reg_usb_host1>;
        disable-over-current;
+       dr_mode = "host";
        status = "okay";
 };
 
 &usbotg {
-       vbus-supply = <&reg_usb_otg_vbus>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usbotg>;
        disable-over-current;
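For reference, dr_mode comes from the generic USB controller binding and pins the controller to a single role; the accepted values are "host", "peripheral" and "otg". A board-level override is a one-liner against the controller label (label illustrative):

	&usb_host {
		dr_mode = "host";
		status = "okay";
	};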
index 902f9831048139318f80ffc661be963cdc799db2..e51bb3f0fd560ddec5372582549b8b3a8f922cb0 100644 (file)
 
 &ldb {
        status = "okay";
-       lvds-channel@0 {
-               crtcs = <&ipu1 0>, <&ipu1 1>, <&ipu2 0>, <&ipu2 1>;
-       };
 };
 
 &pcie {
index 8e99c9a9bc762ab79309641383323cf463bbbcc8..035d3a85c318b1f842d0d3664a2e0cd9d56ab798 100644 (file)
 
 &ldb {
        status = "okay";
-       lvds-channel@0 {
-               crtcs = <&ipu1 0>, <&ipu1 1>;
-       };
 };
 
 &pcie {
index a3cb2fff8f612183bdf6b311d707a1f5210c2bec..d16066608e21ae3716bc52a58597dc51f6ed9241 100644 (file)
                                /* GPIO16 -> AR8035 25MHz */
                                MX6QDL_PAD_GPIO_16__ENET_REF_CLK        0xc0000000
                                MX6QDL_PAD_RGMII_TXC__RGMII_TXC         0x80000000
-                               MX6QDL_PAD_RGMII_TD0__RGMII_TD0         0x1b0b0
-                               MX6QDL_PAD_RGMII_TD1__RGMII_TD1         0x1b0b0
-                               MX6QDL_PAD_RGMII_TD2__RGMII_TD2         0x1b0b0
-                               MX6QDL_PAD_RGMII_TD3__RGMII_TD3         0x1b0b0
-                               MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL   0x1b0b0
+                               MX6QDL_PAD_RGMII_TD0__RGMII_TD0         0x1b030
+                               MX6QDL_PAD_RGMII_TD1__RGMII_TD1         0x1b030
+                               MX6QDL_PAD_RGMII_TD2__RGMII_TD2         0x1b030
+                               MX6QDL_PAD_RGMII_TD3__RGMII_TD3         0x1b030
+                               MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL   0x1b030
                                /* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
                                MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK    0x0a0b1
                                /* AR8035 pin strapping: IO voltage: pull up */
-                               MX6QDL_PAD_RGMII_RXC__RGMII_RXC         0x1b0b0
+                               MX6QDL_PAD_RGMII_RXC__RGMII_RXC         0x1b030
                                /* AR8035 pin strapping: PHYADDR#0: pull down */
-                               MX6QDL_PAD_RGMII_RD0__RGMII_RD0         0x130b0
+                               MX6QDL_PAD_RGMII_RD0__RGMII_RD0         0x13030
                                /* AR8035 pin strapping: PHYADDR#1: pull down */
-                               MX6QDL_PAD_RGMII_RD1__RGMII_RD1         0x130b0
+                               MX6QDL_PAD_RGMII_RD1__RGMII_RD1         0x13030
                                /* AR8035 pin strapping: MODE#1: pull up */
-                               MX6QDL_PAD_RGMII_RD2__RGMII_RD2         0x1b0b0
+                               MX6QDL_PAD_RGMII_RD2__RGMII_RD2         0x1b030
                                /* AR8035 pin strapping: MODE#3: pull up */
-                               MX6QDL_PAD_RGMII_RD3__RGMII_RD3         0x1b0b0
+                               MX6QDL_PAD_RGMII_RD3__RGMII_RD3         0x1b030
                                /* AR8035 pin strapping: MODE#0: pull down */
-                               MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL   0x130b0
+                               MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL   0x13030
 
                                /*
                                 * As the RMII pins are also connected to RGMII
index 55cb926fa3f7ed4fbe043587e74d57af344c7bad..eca0971d4db1ae7885985e8ce966d6475b3f3991 100644 (file)
@@ -10,6 +10,8 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
 #include "skeleton.dtsi"
 
 / {
@@ -46,8 +48,6 @@
        intc: interrupt-controller@00a01000 {
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
-               #address-cells = <1>;
-               #size-cells = <1>;
                interrupt-controller;
                reg = <0x00a01000 0x1000>,
                      <0x00a00100 0x100>;
 
                ckil {
                        compatible = "fsl,imx-ckil", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                ckih1 {
                        compatible = "fsl,imx-ckih1", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
                                  0x82000000 0 0x01000000 0x01000000 0 0x00f00000>; /* non-prefetchable memory */
                        num-lanes = <1>;
                        interrupts = <0 123 IRQ_TYPE_LEVEL_HIGH>;
+                       #interrupt-cells = <1>;
+                       interrupt-map-mask = <0 0 0 0x7>;
+                       interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clks 189>, <&clks 187>, <&clks 206>, <&clks 144>;
                        clock-names = "pcie_ref_125m", "sata_ref_100m", "lvds_gate", "pcie_axi";
                        status = "disabled";
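The interrupt-map added to the PCIe node above routes the four legacy INTx lines. The lookup key is the three-cell PCI unit address plus the one-cell interrupt pin; the mask <0 0 0 0x7> keeps only the pin, so pins 1..4 (INTA..INTD) each land on their own GIC SPI. A stripped-down sketch with illustrative SPI numbers:

	#interrupt-cells = <1>;
	interrupt-map-mask = <0 0 0 0x7>;
	interrupt-map = <0 0 0 1 &intc GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
			<0 0 0 2 &intc GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
			<0 0 0 3 &intc GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
			<0 0 0 4 &intc GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;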
index 864d8dfb51ca525ebc04c0769073fdeda273835b..a8d9a93fab85fd5031eb8c164fc676823392e0b2 100644 (file)
                                MX6SL_PAD_ECSPI1_MISO__ECSPI1_MISO      0x100b1
                                MX6SL_PAD_ECSPI1_MOSI__ECSPI1_MOSI      0x100b1
                                MX6SL_PAD_ECSPI1_SCLK__ECSPI1_SCLK      0x100b1
+                               MX6SL_PAD_ECSPI1_SS0__GPIO4_IO11        0x80000000
                        >;
                };
 
index 3cb4941afeef9ab6cb121b4271d78c522ff771f8..d26b099260a35da021d85c9f20907294fcd27625 100644 (file)
@@ -68,8 +68,6 @@
        intc: interrupt-controller@00a01000 {
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
-               #address-cells = <1>;
-               #size-cells = <1>;
                interrupt-controller;
                reg = <0x00a01000 0x1000>,
                      <0x00a00100 0x100>;
 
                ckil {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                osc {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
index 40791053106bc9889b996544ef3de7e89440d626..6becedebaa4e946e9fc080096a771053d06854df 100644 (file)
@@ -75,7 +75,7 @@
                        m25p16@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "m25p16";
+                               compatible = "st,m25p16";
                                reg = <0>;
                                spi-max-frequency = <40000000>;
                                mode = <0>;
index 0e06fd3cee4dc40113e94420f767d4ffba7301c7..3b62aeeaa3a2fe1ff5b01eab47e7c25718b45647 100644 (file)
@@ -46,7 +46,7 @@
                        flash@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "mx25l4005a";
+                               compatible = "mxicy,mx25l4005a";
                                reg = <0>;
                                spi-max-frequency = <20000000>;
                                mode = <0>;
index ef3463e0ae1939be8bcffa3a7d0200c387a8dc5f..28b3ee369778f945379096fea755796ef6156cef 100644 (file)
@@ -43,7 +43,7 @@
                        m25p40@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "mx25l1606e";
+                               compatible = "mxicy,mx25l1606e";
                                reg = <0>;
                                spi-max-frequency = <50000000>;
                                mode = <0>;
index c9e82eff9bf2e73af3204e9be1f79dff2bc62410..6761ffa2c4ab7eb9e130dbef6a00e4a2faea1714 100644 (file)
@@ -48,7 +48,7 @@
                        status = "okay";
 
                        eeprom@50 {
-                               compatible = "at,24c04";
+                               compatible = "atmel,24c04";
                                pagesize = <16>;
                                reg = <0x50>;
                        };
index 2cb0dc529165dcd88cbbba8ae7df7da2647a041f..b939f4f52d16a7c0edb677a86558bacd94fec831 100644 (file)
                bootargs = "console=ttyS0,115200n8 earlyprintk";
        };
 
+       mbus {
+               pcie-controller {
+                       status = "okay";
+
+                       pcie@1,0 {
+                               status = "okay";
+                       };
+               };
+       };
+
        ocp@f1000000 {
                pinctrl@10000 {
                        pmx_usb_led: pmx-usb-led {
@@ -56,7 +66,7 @@
                        flash@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "mx25l12805d";
+                               compatible = "mxicy,mx25l12805d";
                                reg = <0>;
                                spi-max-frequency = <50000000>;
                                mode = <0>;
                ehci@50000 {
                        status = "okay";
                };
-
-               pcie-controller {
-                       status = "okay";
-
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
        };
 
        gpio-leds {
index 743152f31a815b0e970b7dab80e4ed8af94adb87..e6e5ec4fe6b9e005b9c37ced8fda18f31bb5fbe0 100644 (file)
@@ -32,7 +32,7 @@
                        flash@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "mx25l4005a";
+                               compatible = "mxicy,mx25l4005a";
                                reg = <0>;
                                spi-max-frequency = <20000000>;
                                mode = <0>;
@@ -50,7 +50,7 @@
                        status = "okay";
 
                        eeprom@50 {
-                               compatible = "at,24c04";
+                               compatible = "atmel,24c04";
                                pagesize = <16>;
                                reg = <0x50>;
                        };
index aa78c2d11fe738fc843f716bd1a6f98ff1054a5b..e2cc85cc3b87e805a113489d3d95cf912d682224 100644 (file)
@@ -4,6 +4,16 @@
 / {
        model = "ZyXEL NSA310";
 
+       mbus {
+               pcie-controller {
+                       status = "okay";
+
+                       pcie@1,0 {
+                               status = "okay";
+                       };
+               };
+       };
+
        ocp@f1000000 {
                pinctrl: pinctrl@10000 {
 
                        status = "okay";
                        nr-ports = <2>;
                };
-
-               pcie-controller {
-                       status = "okay";
-
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
        };
 
        gpio_poweroff {
index 03fa24cf334468ff66095883b64ac5b76ae6bc42..0a07af9d8e58d0c06938fe0284332f87a650503b 100644 (file)
                        status = "okay";
 
                        adt7476: adt7476a@2e {
-                               compatible = "adt7476";
+                               compatible = "adi,adt7476";
                                reg = <0x2e>;
                        };
                };
index a5e77945286776940aa38d363ad08bc081e85e3b..27ca6a79c48a473f15d082287e19a605ef98b651 100644 (file)
@@ -94,7 +94,7 @@
                        status = "okay";
 
                        lm85: lm85@2e {
-                               compatible = "lm85";
+                               compatible = "national,lm85";
                                reg = <0x2e>;
                        };
                };
index b88da9392c32dd93780ead4c80dd8b5e4bf6934a..0650beafc1de0ac4a7e60fbc759ed7726982c22a 100644 (file)
@@ -40,7 +40,7 @@
                        pinctrl-names = "default";
 
                        s35390a: s35390a@30 {
-                               compatible = "s35390a";
+                               compatible = "sii,s35390a";
                                reg = <0x30>;
                        };
                };
index b2f7cae0683959f7c75ba82b0a195762d7fd32f4..38520a2875146d565c8016f69cc4f324098ff1ee 100644 (file)
@@ -52,7 +52,7 @@
                        pinctrl-names = "default";
 
                        s24c02: s24c02@50 {
-                               compatible = "24c02";
+                               compatible = "atmel,24c02";
                                reg = <0x50>;
                        };
                };
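The I2C hunks above all apply the same rule: compatible strings should be "vendor,device", with the vendor taken from the documented vendor-prefixes list, rather than a bare part number. A minimal prefixed child node (address illustrative):

	sensor@2e {
		compatible = "adi,adt7476";
		reg = <0x2e>;
	};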
index 7d1c7677a18f18c3fb1cc00463420545a5b6cb78..0bd70d928c69ba7f7798d1492f6ce3844af9d6ad 100644 (file)
 
                i2c@11000 {
                        status = "okay";
-
-                       alc5621: alc5621@1a {
-                               compatible = "realtek,alc5621";
-                               reg = <0x1a>;
-                       };
                };
 
                serial@12000 {
index f577b7df9a29e4f5f4e74ca86aef4b4ba61ceb87..521c587acaee9f679ab6f9200c5f8be8eee240e9 100644 (file)
                compatible = "smsc,lan9221", "smsc,lan9115";
                bank-width = <2>;
                gpmc,mux-add-data;
-               gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <186>;
-               gpmc,cs-wr-off-ns = <186>;
-               gpmc,adv-on-ns = <12>;
-               gpmc,adv-rd-off-ns = <48>;
+               gpmc,cs-on-ns = <1>;
+               gpmc,cs-rd-off-ns = <180>;
+               gpmc,cs-wr-off-ns = <180>;
+               gpmc,adv-rd-off-ns = <18>;
                gpmc,adv-wr-off-ns = <48>;
                gpmc,oe-on-ns = <54>;
                gpmc,oe-off-ns = <168>;
                gpmc,we-off-ns = <168>;
                gpmc,rd-cycle-ns = <186>;
                gpmc,wr-cycle-ns = <186>;
-               gpmc,access-ns = <114>;
-               gpmc,page-burst-access-ns = <6>;
-               gpmc,bus-turnaround-ns = <12>;
-               gpmc,cycle2cycle-delay-ns = <18>;
-               gpmc,wr-data-mux-bus-ns = <90>;
-               gpmc,wr-access-ns = <186>;
+               gpmc,access-ns = <144>;
+               gpmc,page-burst-access-ns = <24>;
+               gpmc,bus-turnaround-ns = <90>;
+               gpmc,cycle2cycle-delay-ns = <90>;
                gpmc,cycle2cycle-samecsen;
                gpmc,cycle2cycle-diffcsen;
                vddvario-supply = <&vddvario>;
index 22f35ea142c199082afdd8ba626bd7c5e0b6cd54..8f8c07da4ac148d550ae45cd3501d4a992f4e7c8 100644 (file)
                        interrupts = <58>;
                };
 
-               mailbox: mailbox@48094000 {
-                       compatible = "ti,omap2-mailbox";
-                       ti,hwmods = "mailbox";
-                       reg = <0x48094000 0x200>;
-                       interrupts = <26>;
-               };
-
                intc: interrupt-controller@1 {
                        compatible = "ti,omap2-intc";
                        interrupt-controller;
index 85b1fb014c4314efe82eca9fcb7dfa7e482cb8d2..2d9979835f241f2153cd3d0b52c5c8b0b6c49559 100644 (file)
                        dma-names = "tx", "rx";
                };
 
+               mailbox: mailbox@48094000 {
+                       compatible = "ti,omap2-mailbox";
+                       reg = <0x48094000 0x200>;
+                       interrupts = <26>, <34>;
+                       interrupt-names = "dsp", "iva";
+                       ti,hwmods = "mailbox";
+               };
+
                timer1: timer@48028000 {
                        compatible = "ti,omap2420-timer";
                        reg = <0x48028000 0x400>;
index d09697dab55e80063a737361c65138822bbdf828..42d2c61c9e2d7dc1851054a45ec305820cc8a9f4 100644 (file)
                        dma-names = "tx", "rx";
                };
 
+               mailbox: mailbox@48094000 {
+                       compatible = "ti,omap2-mailbox";
+                       reg = <0x48094000 0x200>;
+                       interrupts = <26>;
+                       ti,hwmods = "mailbox";
+               };
+
                timer1: timer@49018000 {
                        compatible = "ti,omap2420-timer";
                        reg = <0x49018000 0x400>;
diff --git a/arch/arm/boot/dts/omap3-beagle-xm-ab.dts b/arch/arm/boot/dts/omap3-beagle-xm-ab.dts
new file mode 100644 (file)
index 0000000..7ac3bcf
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "omap3-beagle-xm.dts"
+
+/ {
+       /* HS USB Port 2 Power enable polarity was inverted starting with the xM rev C */
+       hsusb2_power: hsusb2_power_reg {
+               enable-active-high;
+       };
+};
index d00055809e31d79b9d1730c06efe02c7e2e6f747..25ba08331d8852e6701c96a4b9e8e54def19f0d8 100644 (file)
                        cpu0-supply = <&vcc>;
                };
        };
-
-       vddvario: regulator-vddvario {
-               compatible = "regulator-fixed";
-               regulator-name = "vddvario";
-               regulator-always-on;
-       };
-
-       vdd33a: regulator-vdd33a {
-               compatible = "regulator-fixed";
-               regulator-name = "vdd33a";
-               regulator-always-on;
-       };
 };
 
 &omap3_pmx_core {
 
        hsusb0_pins: pinmux_hsusb0_pins {
                pinctrl-single,pins = <
-                       OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_clk.hsusb0_clk */
-                       OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_stp.hsusb0_stp */
-                       OMAP3_CORE1_IOPAD(0x21a4, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_dir.hsusb0_dir */
-                       OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_nxt.hsusb0_nxt */
-                       OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data0.hsusb2_data0 */
-                       OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data1.hsusb0_data1 */
-                       OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data2.hsusb0_data2 */
-                       OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data3 */
-                       OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data4 */
-                       OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data5 */
-                       OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data6 */
-                       OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data7 */
+                       OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_clk.hsusb0_clk */
+                       OMAP3_CORE1_IOPAD(0x21a4, PIN_OUTPUT | MUX_MODE0)               /* hsusb0_stp.hsusb0_stp */
+                       OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_dir.hsusb0_dir */
+                       OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_nxt.hsusb0_nxt */
+                       OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data0.hsusb2_data0 */
+                       OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data1.hsusb0_data1 */
+                       OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data2.hsusb0_data2 */
+                       OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data3 */
+                       OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data4 */
+                       OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data5 */
+                       OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data6 */
+                       OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT_PULLDOWN | MUX_MODE0)       /* hsusb0_data7.hsusb0_data7 */
                >;
        };
 };
 
+#include "omap-gpmc-smsc911x.dtsi"
+
 &gpmc {
        ranges = <5 0 0x2c000000 0x01000000>;
 
-       smsc1: ethernet@5,0 {
+       smsc1: ethernet@gpmc {
                compatible = "smsc,lan9221", "smsc,lan9115";
                pinctrl-names = "default";
                pinctrl-0 = <&smsc1_pins>;
                interrupt-parent = <&gpio6>;
                interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
                reg = <5 0 0xff>;
-               bank-width = <2>;
-               gpmc,mux-add-data;
-               gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <186>;
-               gpmc,cs-wr-off-ns = <186>;
-               gpmc,adv-on-ns = <12>;
-               gpmc,adv-rd-off-ns = <48>;
-               gpmc,adv-wr-off-ns = <48>;
-               gpmc,oe-on-ns = <54>;
-               gpmc,oe-off-ns = <168>;
-               gpmc,we-on-ns = <54>;
-               gpmc,we-off-ns = <168>;
-               gpmc,rd-cycle-ns = <186>;
-               gpmc,wr-cycle-ns = <186>;
-               gpmc,access-ns = <114>;
-               gpmc,page-burst-access-ns = <6>;
-               gpmc,bus-turnaround-ns = <12>;
-               gpmc,cycle2cycle-delay-ns = <18>;
-               gpmc,wr-data-mux-bus-ns = <90>;
-               gpmc,wr-access-ns = <186>;
-               gpmc,cycle2cycle-samecsen;
-               gpmc,cycle2cycle-diffcsen;
-               vddvario-supply = <&vddvario>;
-               vdd33a-supply = <&vdd33a>;
-               reg-io-width = <4>;
-               smsc,save-mac-address;
        };
 };
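The change above is the usual .dtsi factoring pattern: the shared GPMC timings move into an included fragment, and the board file keeps only what is board-specific (pinctrl, chip-select, interrupt wiring), overriding the labelled node where needed. A sketch of the pattern, with illustrative file and label names:

	/* board.dts */
	#include "common-ethernet.dtsi"

	&ethernet_chip {
		pinctrl-names = "default";
		pinctrl-0 = <&eth_pins>;
		interrupt-parent = <&gpio6>;
		interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
	};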
 
index bf5a515a324752d8fd36e96fb288b07d3b2f31e1..da402f0fdab4861bf9b775b247cabaac192f8777 100644 (file)
                reg = <0 0 0>; /* CS0, offset 0 */
                nand-bus-width = <16>;
 
-               gpmc,device-nand;
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <44>;
index b97736d98a6427f087c11bd510909b1007f8c152..e2d163bf061975bff9ac5a6e83ecf6bf4ef98ed3 100644 (file)
                >;
        };
 
-       smsc911x_pins: pinmux_smsc911x_pins {
+       smsc9221_pins: pinmux_smsc9221_pins {
                pinctrl-single,pins = <
                        0x1a2 (PIN_INPUT | MUX_MODE4)           /* mcspi1_cs2.gpio_176 */
                >;
index 7abd64f6ae21465c9ac74b22563f8f828a5d684b..b22caaaf774ba710461fcabf395c121c0ccc0482 100644 (file)
@@ -10,7 +10,7 @@
  */
 
 #include "omap3-igep.dtsi"
-#include "omap-gpmc-smsc911x.dtsi"
+#include "omap-gpmc-smsc9221.dtsi"
 
 / {
        model = "IGEPv2 (TI OMAP AM/DM37x)";
 
        ethernet@gpmc {
                pinctrl-names = "default";
-               pinctrl-0 = <&smsc911x_pins>;
+               pinctrl-0 = <&smsc9221_pins>;
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
                interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
index 6369d9f43ca23e701197945ccf41483f3d7106b5..cc1dce6978f59323ba7579d66f6b29dd2a9b6d8b 100644 (file)
                /* no elm on omap3 */
 
                gpmc,mux-add-data = <0>;
-               gpmc,device-nand;
                gpmc,device-width = <2>;
                gpmc,wait-pin = <0>;
                gpmc,wait-monitoring-ns = <0>;
index 7909c51b05a5643563b4ed74405066e1a222a995..d59e3de1441e2f7dc5e0fb9b4bc29f49ee04cbc1 100644 (file)
@@ -2,20 +2,6 @@
  * Common support for CompuLab SB-T35 used on SBC-T3530, SBC-T3517 and SBC-T3730
  */
 
-/ {
-       vddvario_sb_t35: regulator-vddvario-sb-t35 {
-               compatible = "regulator-fixed";
-               regulator-name = "vddvario";
-               regulator-always-on;
-       };
-
-       vdd33a_sb_t35: regulator-vdd33a-sb-t35 {
-               compatible = "regulator-fixed";
-               regulator-name = "vdd33a";
-               regulator-always-on;
-       };
-};
-
 &omap3_pmx_core {
        smsc2_pins: pinmux_smsc2_pins {
                pinctrl-single,pins = <
                reg = <4 0 0xff>;
                bank-width = <2>;
                gpmc,mux-add-data;
-               gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <186>;
-               gpmc,cs-wr-off-ns = <186>;
-               gpmc,adv-on-ns = <12>;
-               gpmc,adv-rd-off-ns = <48>;
+               gpmc,cs-on-ns = <1>;
+               gpmc,cs-rd-off-ns = <180>;
+               gpmc,cs-wr-off-ns = <180>;
+               gpmc,adv-rd-off-ns = <18>;
                gpmc,adv-wr-off-ns = <48>;
                gpmc,oe-on-ns = <54>;
                gpmc,oe-off-ns = <168>;
                gpmc,we-off-ns = <168>;
                gpmc,rd-cycle-ns = <186>;
                gpmc,wr-cycle-ns = <186>;
-               gpmc,access-ns = <114>;
-               gpmc,page-burst-access-ns = <6>;
-               gpmc,bus-turnaround-ns = <12>;
-               gpmc,cycle2cycle-delay-ns = <18>;
-               gpmc,wr-data-mux-bus-ns = <90>;
-               gpmc,wr-access-ns = <186>;
+               gpmc,access-ns = <144>;
+               gpmc,page-burst-access-ns = <24>;
+               gpmc,bus-turnaround-ns = <90>;
+               gpmc,cycle2cycle-delay-ns = <90>;
                gpmc,cycle2cycle-samecsen;
                gpmc,cycle2cycle-diffcsen;
-               vddvario-supply = <&vddvario_sb_t35>;
-               vdd33a-supply = <&vdd33a_sb_t35>;
+               vddvario-supply = <&vddvario>;
+               vdd33a-supply = <&vdd33a>;
                reg-io-width = <4>;
                smsc,save-mac-address;
        };
index 024c9c6c682d7eb4b421a559c7c4b5f95e0511f9..42189b65d393d29d2cf467ddbc5df4eb0661a471 100644 (file)
@@ -8,6 +8,19 @@
 / {
        model = "CompuLab SBC-T3517 with CM-T3517";
        compatible = "compulab,omap3-sbc-t3517", "compulab,omap3-cm-t3517", "ti,am3517", "ti,omap3";
+
+       /* Only one GPMC smsc9220 on SBC-T3517; the CM-T3517 uses the am35x Ethernet */
+       vddvario: regulator-vddvario-sb-t35 {
+               compatible = "regulator-fixed";
+               regulator-name = "vddvario";
+               regulator-always-on;
+       };
+
+       vdd33a: regulator-vdd33a-sb-t35 {
+               compatible = "regulator-fixed";
+               regulator-name = "vdd33a";
+               regulator-always-on;
+       };
 };
 
 &omap3_pmx_core {
index 5e5790f631eba4b5b1085ac64a1c4d5a0f6062c9..4231191ade06acf8c7a02938a0fcfcbdeb318fbc 100644 (file)
@@ -61,7 +61,7 @@
                        ti,hwmods = "mpu";
                };
 
-               iva {
+               iva: iva {
                        compatible = "ti,iva2.2";
                        ti,hwmods = "iva";
 
@@ -74,7 +74,7 @@
        /*
         * XXX: Use a flat representation of the OMAP3 interconnect.
         * The real OMAP interconnect network is quite complex.
-        * Since that will not bring real advantage to represent that in DT for
+        * Since it will not bring real advantage to represent that in DT for
         * the moment, just use a fake OCP bus entry to represent the whole bus
         * hierarchy.
         */
index 27fcac874742894879bb978b8157f213a1c49eb2..649b5cd38b403102e9a6d5ddebf918a3e39ab73e 100644 (file)
@@ -72,7 +72,7 @@
        };
 
        /*
-        * The soc node represents the soc top level view. It is uses for IPs
+        * The soc node represents the soc top level view. It is used for IPs
         * that are not memory mapped in the MPU view or for the MPU itself.
         */
        soc {
@@ -96,7 +96,7 @@
        /*
         * XXX: Use a flat representation of the OMAP4 interconnect.
         * The real OMAP interconnect network is quite complex.
-        * Since that will not bring real advantage to represent that in DT for
+        * Since it will not bring real advantage to represent that in DT for
         * the moment, just use a fake OCP bus entry to represent the whole bus
         * hierarchy.
         */
index 6f3de22fb2660f20a61f2ec4438be6708ab40a6d..36b4312a5e0d82fb20fb6a7eb9b1d59942b0ec18 100644 (file)
@@ -93,7 +93,7 @@
        };
 
        /*
-        * The soc node represents the soc top level view. It is uses for IPs
+        * The soc node represents the soc top level view. It is used for IPs
         * that are not memory mapped in the MPU view or for the MPU itself.
         */
        soc {
        /*
         * XXX: Use a flat representation of the OMAP3 interconnect.
         * The real OMAP interconnect network is quite complex.
-        * Since that will not bring real advantage to represent that in DT for
+        * Since it will not bring real advantage to represent that in DT for
         * the moment, just use a fake OCP bus entry to represent the whole bus
         * hierarchy.
         */
                        status = "disabled";
                };
 
+               mailbox: mailbox@4a0f4000 {
+                       compatible = "ti,omap4-mailbox";
+                       reg = <0x4a0f4000 0x200>;
+                       interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+                       ti,hwmods = "mailbox";
+               };
+
                timer1: timer@4ae18000 {
                        compatible = "ti,omap5430-timer";
                        reg = <0x4ae18000 0x80>;
                                      <0x4a084c00 0x40>;
                                reg-names = "phy_rx", "phy_tx", "pll_ctrl";
                                ctrl-module = <&omap_control_usb3phy>;
+                               clocks = <&usb_phy_cm_clk32k>,
+                                        <&sys_clkin>,
+                                        <&usb_otg_ss_refclk960m>;
+                               clock-names =   "wkupclk",
+                                               "sysclk",
+                                               "refclk";
                                #phy-cells = <0>;
                        };
                };
index 8280884bfa596b95d447b11505b775783dc260a2..2551e9438d358a55e8e4edb7c494ea231ee46810 100644 (file)
@@ -28,7 +28,6 @@
        gic: interrupt-controller@c2800000 {
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
-               #address-cells = <1>;
                interrupt-controller;
                reg = <0xc2800000 0x1000>,
                      <0xc2000000 0x1000>;
index 6e99eb2df076d7f1c7cdfdac46733c968256c0dd..d01048ab3e777534e224eb9a9395ba0a83cd56b7 100644 (file)
        };
 
        sdhi0_pins: sd0 {
-               renesas,gpios = "sdhi0_data4", "sdhi0_ctrl";
+               renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
                renesas,function = "sdhi0";
        };
 
        sdhi2_pins: sd2 {
-               renesas,gpios = "sdhi2_data4", "sdhi2_ctrl";
+               renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
                renesas,function = "sdhi2";
        };
 
index bdd73e6657b27a76ee2d2f7c37abdced267b96ae..de1b6977c69a4b009d3e658b8d9790b373278d49 100644 (file)
        };
 
        sdhi0_pins: sd0 {
-               renesas,gpios = "sdhi0_data4", "sdhi0_ctrl";
+               renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
                renesas,function = "sdhi0";
        };
 
        sdhi1_pins: sd1 {
-               renesas,gpios = "sdhi1_data4", "sdhi1_ctrl";
+               renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
                renesas,function = "sdhi1";
        };
 
        sdhi2_pins: sd2 {
-               renesas,gpios = "sdhi2_data4", "sdhi2_ctrl";
+               renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
                renesas,function = "sdhi2";
        };
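The renames above match the sh-pfc pin controller binding, which selects whole pin groups by name: renesas,groups lists the group names and renesas,function names the function muxed onto them. A minimal sub-node, with illustrative group names:

	mmc0_pins: mmc0 {
		renesas,groups = "mmc0_data4", "mmc0_ctrl";
		renesas,function = "mmc0";
	};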
 
index bb36596ea20538ac9ac4d74f868c72fed40b5613..ed9a70af3e3f88ff59a266165b2655754e50dbda 100644 (file)
 
                        uart0 {
                                uart0_xfer: uart0-xfer {
-                                       rockchip,pins = <RK_GPIO1 0 RK_FUNC_1 &pcfg_pull_none>,
+                                       rockchip,pins = <RK_GPIO1 0 RK_FUNC_1 &pcfg_pull_up>,
                                                        <RK_GPIO1 1 RK_FUNC_1 &pcfg_pull_none>;
                                };
 
 
                        uart1 {
                                uart1_xfer: uart1-xfer {
-                                       rockchip,pins = <RK_GPIO1 4 RK_FUNC_1 &pcfg_pull_none>,
+                                       rockchip,pins = <RK_GPIO1 4 RK_FUNC_1 &pcfg_pull_up>,
                                                        <RK_GPIO1 5 RK_FUNC_1 &pcfg_pull_none>;
                                };
 
 
                        uart2 {
                                uart2_xfer: uart2-xfer {
-                                       rockchip,pins = <RK_GPIO1 8 RK_FUNC_1 &pcfg_pull_none>,
+                                       rockchip,pins = <RK_GPIO1 8 RK_FUNC_1 &pcfg_pull_up>,
                                                        <RK_GPIO1 9 RK_FUNC_1 &pcfg_pull_none>;
                                };
                                /* no rts / cts for uart2 */
 
                        uart3 {
                                uart3_xfer: uart3-xfer {
-                                       rockchip,pins = <RK_GPIO1 10 RK_FUNC_1 &pcfg_pull_none>,
+                                       rockchip,pins = <RK_GPIO1 10 RK_FUNC_1 &pcfg_pull_up>,
                                                        <RK_GPIO1 11 RK_FUNC_1 &pcfg_pull_none>;
                                };
 
index eabcfdbb403acc7ff40b409617a53c9831414c04..a106b0872910da874f1000f417a1aab1793237de 100644 (file)
@@ -13,7 +13,7 @@
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        model = "Atmel SAMA5D3 family SoC";
index b029fe7ef17a657946de4d2fe71168b4b03210d8..1b02208ea6ff2b70aab43ddd416b37ef0b9a2a1f 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        ahb {
index 382b04431f66b621e01a9f2fe7a9e488ac4c171b..02848453ca0cf5447de27aaca6b233611173da4e 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        aliases {
index a9fa75e4165205f9a1259f9a0821519faee54070..7a8d4c6115f72fdab533980a0f0d96eb6cb1469a 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <dt-bindings/pinctrl/at91.h>
 #include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
 
 / {
        aliases {
index b7bd3b9a67533933623ba5b5c118ae46c035f019..5ecf552e1c009faf2317793e2b52ab6f24fc5655 100644 (file)
@@ -34,7 +34,6 @@
        gic: interrupt-controller@f0001000 {
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
-               #address-cells = <1>;
                interrupt-controller;
                reg = <0xf0001000 0x1000>,
                      <0xf0000100 0x100>;
index 7f3baf51a3a9e933d3a47dce18d2ca102c104a9b..32dd55e5f4e6b8567a8a2d207b69311959c9f9a1 100644 (file)
@@ -18,6 +18,7 @@
        compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
        };
 
index f09fb10a3791a7e4fc238f4e47548fc7f86da705..81df870e5ee6791530b3902aab65ea6576bf47f8 100644 (file)
@@ -49,7 +49,7 @@
                        reg             = <0xfe61f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe610000 0x5000>;
 
                        PIO0: gpio@fe610000 {
                        reg             = <0xfee0f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfee00000 0x8000>;
 
                        PIO5: gpio@fee00000 {
                        reg             = <0xfe82f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe820000 0x8000>;
 
                        PIO13: gpio@fe820000 {
                        reg             = <0xfd6bf080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfd6b0000 0x3000>;
 
                        PIO100: gpio@fd6b0000 {
                        reg             = <0xfd33f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfd330000 0x5000>;
 
                        PIO103: gpio@fd330000 {
index aeea304086eb3b57c5682539643f0d6ca4540c0d..250d5ecc951ea0e3e5c7f071fb4e38b6312840d7 100644 (file)
@@ -53,7 +53,7 @@
                        reg             = <0xfe61f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe610000 0x6000>;
 
                        PIO0: gpio@fe610000 {
                        reg             = <0xfee0f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfee00000 0x10000>;
 
                        PIO5: gpio@fee00000 {
                        reg             = <0xfe82f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe820000 0x6000>;
 
                        PIO13: gpio@fe820000 {
                        reg             = <0xfd6bf080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfd6b0000 0x3000>;
 
                        PIO100: gpio@fd6b0000 {
                        reg             = <0xfd33f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges                  = <0 0xfd330000 0x5000>;
 
                        PIO103: gpio@fd330000 {
index 32efc105df834de3c3143938bf0cfdebbf5d1b55..aba1c8a3f3883320a50b31dcac05341568e0a335 100644 (file)
@@ -87,7 +87,7 @@
 
                pll4: clk@01c20018 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-pll1-clk";
+                       compatible = "allwinner,sun7i-a20-pll4-clk";
                        reg = <0x01c20018 0x4>;
                        clocks = <&osc24M>;
                        clock-output-names = "pll4";
                        clock-output-names = "pll6_sata", "pll6_other", "pll6";
                };
 
+               pll8: clk@01c20040 {
+                       #clock-cells = <0>;
+                       compatible = "allwinner,sun7i-a20-pll4-clk";
+                       reg = <0x01c20040 0x4>;
+                       clocks = <&osc24M>;
+                       clock-output-names = "pll8";
+               };
+
                cpu: cpu@01c20054 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-cpu-clk";
                        status = "disabled";
                };
 
-               i2c4: i2c@01c2bc00 {
+               i2c4: i2c@01c2c000 {
                        compatible = "allwinner,sun4i-i2c";
-                       reg = <0x01c2bc00 0x400>;
+                       reg = <0x01c2c000 0x400>;
                        interrupts = <0 89 4>;
                        clocks = <&apb1_gates 15>;
                        clock-frequency = <100000>;
index cf45a1a394835ecc64309e00f58f5b33a5855260..6d540a02514886d37e095a7e47909457db9ef0e5 100644 (file)
                status = "disabled";
        };
 
-       serial@0,70006400 {
-               compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
-               reg = <0x0 0x70006400 0x0 0x40>;
-               reg-shift = <2>;
-               interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&tegra_car TEGRA124_CLK_UARTE>;
-               resets = <&tegra_car 66>;
-               reset-names = "serial";
-               dmas = <&apbdma 20>, <&apbdma 20>;
-               dma-names = "rx", "tx";
-               status = "disabled";
-       };
-
        pwm@0,7000a000 {
                compatible = "nvidia,tegra124-pwm", "nvidia,tegra20-pwm";
                reg = <0x0 0x7000a000 0x0 0x100>;
index 7dd1d6ede5258e9b45c384bd2384428b0e8a7818..ded361075aab7a1504eadc460defc992d5402161 100644 (file)
        clocks {
                audio_ext {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24576000>;
                };
 
                enet_ext {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <50000000>;
                };
        };
index 8048733676693de212e505aeae15ae5ad020c01c..b8ce0aa7b1579064980edee427fa0cd41f7c710d 100644 (file)
 
                sxosc {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                fxosc {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
@@ -72,8 +74,6 @@
                        intc: interrupt-controller@40002000 {
                                compatible = "arm,cortex-a9-gic";
                                #interrupt-cells = <3>;
-                               #address-cells = <1>;
-                               #size-cells = <1>;
                                interrupt-controller;
                                reg = <0x40003000 0x1000>,
                                      <0x40002100 0x100>;
index 51d0e912c8f585b1acb51edc9f47fc4270a1a988..1929ad390d88feb0ec42bce29ad1b174823ed55b 100644 (file)
                        reg = <0xd8100000 0x10000>;
                        interrupts = <48>;
                };
+
+               ethernet@d8004000 {
+                       compatible = "via,vt8500-rhine";
+                       reg = <0xd8004000 0x100>;
+                       interrupts = <10>;
+               };
        };
 };
index 7525982262ac9896285031462e45b23b4020d9c7..b1c59a766a13381a693d897eb3349db4ac9d3c16 100644 (file)
                        reg = <0xd8100000 0x10000>;
                        interrupts = <48>;
                };
+
+               ethernet@d8004000 {
+                       compatible = "via,vt8500-rhine";
+                       reg = <0xd8004000 0x100>;
+                       interrupts = <10>;
+               };
        };
 };
index d98386dd2882500bd71ecf726d8ac9bb26b777a7..8fbccfbe75f33df7be79ea7be37c15b9f5bf2535 100644 (file)
                        bus-width = <4>;
                        sdon-inverted;
                };
+
+               ethernet@d8004000 {
+                       compatible = "via,vt8500-rhine";
+                       reg = <0xd8004000 0x100>;
+                       interrupts = <10>;
+               };
        };
 };
index 511180769af5c0fb31acd6beb58cb1031a417a1d..c1176abc34d92d0491eeeadf74a926ff7fc360ed 100644 (file)
@@ -24,6 +24,7 @@
                        device_type = "cpu";
                        reg = <0>;
                        clocks = <&clkc 3>;
+                       clock-latency = <1000>;
                        operating-points = <
                                /* kHz    uV */
                                666667  1000000
                interrupt-parent = <&intc>;
                ranges;
 
+               i2c0: zynq-i2c@e0004000 {
+                       compatible = "cdns,i2c-r1p10";
+                       status = "disabled";
+                       clocks = <&clkc 38>;
+                       interrupt-parent = <&intc>;
+                       interrupts = <0 25 4>;
+                       reg = <0xe0004000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
+               i2c1: zynq-i2c@e0005000 {
+                       compatible = "cdns,i2c-r1p10";
+                       status = "disabled";
+                       clocks = <&clkc 39>;
+                       interrupt-parent = <&intc>;
+                       interrupts = <0 48 4>;
+                       reg = <0xe0005000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
                intc: interrupt-controller@f8f01000 {
                        compatible = "arm,cortex-a9-gic";
                        #interrupt-cells = <3>;
index c913f77a21ebfbb84c4533eaa94d5bcd905e3edb..5e09cee33d4230773f8687fd3e187f22e49d0b77 100644 (file)
        phy-mode = "rgmii";
 };
 
+&i2c0 {
+       status = "okay";
+       clock-frequency = <400000>;
+
+       i2cswitch@74 {
+               compatible = "nxp,pca9548";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x74>;
+
+               i2c@0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0>;
+                       si570: clock-generator@5d {
+                               #clock-cells = <0>;
+                               compatible = "silabs,si570";
+                               temperature-stability = <50>;
+                               reg = <0x5d>;
+                               factory-fout = <156250000>;
+                               clock-frequency = <148500000>;
+                       };
+               };
+
+               i2c@2 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <2>;
+                       eeprom@54 {
+                               compatible = "atmel,24c08";
+                               reg = <0x54>;
+                       };
+               };
+
+               i2c@3 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <3>;
+                       gpio@21 {
+                               compatible = "ti,tca6416";
+                               reg = <0x21>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                       };
+               };
+
+               i2c@4 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <4>;
+                       rtc@51 {
+                               compatible = "nxp,pcf8563";
+                               reg = <0x51>;
+                       };
+               };
+
+               i2c@7 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <7>;
+                       hwmon@52 {
+                               compatible = "ti,ucd9248";
+                               reg = <52>;
+                       };
+                       hwmon@53 {
+                               compatible = "ti,ucd9248";
+                               reg = <53>;
+                       };
+                       hwmon@54 {
+                               compatible = "ti,ucd9248";
+                               reg = <54>;
+                       };
+               };
+       };
+};
+
 &sdhci0 {
        status = "okay";
 };
index 88f62c50382ec59e5f35cc7a1319cb41c1585f13..4cc9913078cd6427ab69d206a21ba8d44c5c431d 100644 (file)
        phy-mode = "rgmii";
 };
 
+&i2c0 {
+       status = "okay";
+       clock-frequency = <400000>;
+
+       i2cswitch@74 {
+               compatible = "nxp,pca9548";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x74>;
+
+               i2c@0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0>;
+                       si570: clock-generator@5d {
+                               #clock-cells = <0>;
+                               compatible = "silabs,si570";
+                               temperature-stability = <50>;
+                               reg = <0x5d>;
+                               factory-fout = <156250000>;
+                               clock-frequency = <148500000>;
+                       };
+               };
+
+               i2c@2 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <2>;
+                       eeprom@54 {
+                               compatible = "at,24c08";
+                               reg = <0x54>;
+                       };
+               };
+
+               i2c@3 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <3>;
+                       gpio@21 {
+                               compatible = "ti,tca6416";
+                               reg = <0x21>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                       };
+               };
+
+               i2c@4 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <4>;
+                       rtc@51 {
+                               compatible = "nxp,pcf8563";
+                               reg = <0x51>;
+                       };
+               };
+
+               i2c@7 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <7>;
+                       ucd90120@65 {
+                               compatible = "ti,ucd90120";
+                               reg = <0x65>;
+                       };
+               };
+       };
+};
+
 &sdhci0 {
        status = "okay";
 };
index 5774b6ea7ad55ab0e6c56a5239c475f00a9feb32..f01c0ee0c87ebd94debc320f5714bf247dfe5ab7 100644 (file)
@@ -797,10 +797,8 @@ static int __init bL_switcher_init(void)
 {
        int ret;
 
-       if (MAX_NR_CLUSTERS != 2) {
-               pr_err("%s: only dual cluster systems are supported\n", __func__);
-               return -EINVAL;
-       }
+       if (!mcpm_is_available())
+               return -ENODEV;
 
        cpu_notifier(bL_switcher_hotplug_callback, 0);
 
index 41bca32409fce81358c3b5c35bc081bcc28e7c76..5339009b3c0ce648df92244b5d9274e78273290b 100644 (file)
@@ -1423,55 +1423,38 @@ EXPORT_SYMBOL(edma_clear_event);
 
 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
 
-static int edma_of_read_u32_to_s16_array(const struct device_node *np,
-                                        const char *propname, s16 *out_values,
-                                        size_t sz)
+static int edma_xbar_event_map(struct device *dev, struct device_node *node,
+                              struct edma_soc_info *pdata, size_t sz)
 {
-       int ret;
-
-       ret = of_property_read_u16_array(np, propname, out_values, sz);
-       if (ret)
-               return ret;
-
-       /* Terminate it */
-       *out_values++ = -1;
-       *out_values++ = -1;
-
-       return 0;
-}
-
-static int edma_xbar_event_map(struct device *dev,
-                              struct device_node *node,
-                              struct edma_soc_info *pdata, int len)
-{
-       int ret, i;
+       const char pname[] = "ti,edma-xbar-event-map";
        struct resource res;
        void __iomem *xbar;
-       const s16 (*xbar_chans)[2];
+       s16 (*xbar_chans)[2];
+       size_t nelm = sz / sizeof(s16);
        u32 shift, offset, mux;
+       int ret, i;
 
-       xbar_chans = devm_kzalloc(dev,
-                                 len/sizeof(s16) + 2*sizeof(s16),
-                                 GFP_KERNEL);
+       xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
        if (!xbar_chans)
                return -ENOMEM;
 
        ret = of_address_to_resource(node, 1, &res);
        if (ret)
-               return -EIO;
+               return -ENOMEM;
 
        xbar = devm_ioremap(dev, res.start, resource_size(&res));
        if (!xbar)
                return -ENOMEM;
 
-       ret = edma_of_read_u32_to_s16_array(node,
-                                           "ti,edma-xbar-event-map",
-                                           (s16 *)xbar_chans,
-                                           len/sizeof(u32));
+       ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
        if (ret)
                return -EIO;
 
-       for (i = 0; xbar_chans[i][0] != -1; i++) {
+       /* Invalidate last entry for the other user of this mess */
+       nelm >>= 1;
+       xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;
+
+       for (i = 0; i < nelm; i++) {
                shift = (xbar_chans[i][1] & 0x03) << 3;
                offset = xbar_chans[i][1] & 0xfffffffc;
                mux = readl(xbar + offset);
@@ -1480,8 +1463,7 @@ static int edma_xbar_event_map(struct device *dev,
                writel(mux, (xbar + offset));
        }
 
-       pdata->xbar_chans = xbar_chans;
-
+       pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
        return 0;
 }
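
For illustration only (not part of the patch): a minimal standalone C sketch of the offset/shift arithmetic in the loop above, where each 32-bit crossbar register packs four 8-bit event-mux fields. The event number is hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned int xbar_evt = 14;                    /* hypothetical second column of an xbar-event-map pair */
	unsigned int shift  = (xbar_evt & 0x03) << 3;  /* byte lane inside the register: (14 & 3) << 3 = 16 */
	unsigned int offset = xbar_evt & 0xfffffffc;   /* 32-bit register offset: 14 & ~3 = 12 */

	printf("offset=0x%x shift=%u\n", offset, shift);
	return 0;
}
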
 
index 1e361abc29eb0e106492223348de051d1e4e9f3d..86fd60fefbc935a788b52bdf8701efebbe5d5f8d 100644 (file)
@@ -48,6 +48,11 @@ int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
        return 0;
 }
 
+bool mcpm_is_available(void)
+{
+       return (platform_ops) ? true : false;
+}
+
 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 {
        if (!platform_ops)
index a9667957b7578bac7b218ea9163e657ecb385f88..a4e8d017f25bae466d8b0a918c753196870e9d78 100644 (file)
@@ -226,7 +226,7 @@ CONFIG_USB_DWC3=m
 CONFIG_USB_TEST=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_OMAP_USB2=y
-CONFIG_OMAP_USB3=y
+CONFIG_TI_PIPE3=y
 CONFIG_AM335X_PHY_USB=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG=y
index b5df4a511b0acdfea6df30b7dd4b9753d2b00f9a..81ba78eaf54adb02840dbc2d41cc4af8acc7c08a 100644 (file)
@@ -37,7 +37,7 @@ CONFIG_SUN4I_EMAC=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_STMMAC_ETH=y
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_WLAN is not set
 CONFIG_SERIAL_8250=y
index fd81a1b99cce5a0971315fb17bfb7f1c87f67773..aaa95ab606a83a647bfbc0f88de6a892f67123d6 100644 (file)
@@ -11,6 +11,7 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_CFQ is not set
 # CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_U300=y
@@ -21,7 +22,6 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
 CONFIG_CPU_IDLE=y
-CONFIG_FPE_NWFPE=y
 # CONFIG_SUSPEND is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
@@ -64,8 +64,8 @@ CONFIG_TMPFS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
-CONFIG_DEBUG_INFO=y
index 65f77885c1674df038d6d92d17603e76e0058a46..d219d6a43238c6e354639500af4e5a9d56ff8714 100644 (file)
@@ -1,16 +1,16 @@
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_U8500=y
 CONFIG_MACH_HREFV60=y
 CONFIG_MACH_SNOWBALL=y
-CONFIG_MACH_UX500_DT=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_PREEMPT=y
@@ -34,16 +34,22 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_NETFILTER=y
 CONFIG_PHONET=y
-# CONFIG_WIRELESS is not set
+CONFIG_CFG80211=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
 CONFIG_CAIF=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_SENSORS_BH1780=y
 CONFIG_NETDEVICES=y
 CONFIG_SMSC911X=y
 CONFIG_SMSC_PHY=y
-# CONFIG_WLAN is not set
+CONFIG_CW1200=y
+CONFIG_CW1200_WLAN_SDIO=y
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
@@ -85,15 +91,12 @@ CONFIG_AB8500_USB=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_ARMMMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_LM3530=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_LP5521=y
-CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AB8500=y
@@ -103,6 +106,11 @@ CONFIG_STE_DMA40=y
 CONFIG_STAGING=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
 CONFIG_HSEM_U8500=y
+CONFIG_IIO=y
+CONFIG_IIO_ST_ACCEL_3AXIS=y
+CONFIG_IIO_ST_GYRO_3AXIS=y
+CONFIG_IIO_ST_MAGN_3AXIS=y
+CONFIG_IIO_ST_PRESS=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
@@ -110,8 +118,6 @@ CONFIG_EXT2_FS_SECURITY=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS=y
 CONFIG_VFAT_FS=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 # CONFIG_MISC_FILESYSTEMS is not set
index c651e3b26ec703b08cea0128eb76c23d9aa94d46..4764344367d4b3a14381aca696f01d335b75d50f 100644 (file)
@@ -222,22 +222,22 @@ static inline int cpu_is_xsc3(void)
 #endif
 
 /*
- * Marvell's PJ4 core is based on V7 version. It has some modification
- * for coprocessor setting. For this reason, we need a way to distinguish
- * it.
+ * Marvell's PJ4 and PJ4B cores are based on V7 version,
+ * but require a special sequence for enabling coprocessors.
+ * For this reason, we need a way to distinguish them.
  */
-#ifndef CONFIG_CPU_PJ4
-#define cpu_is_pj4()   0
-#else
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
 static inline int cpu_is_pj4(void)
 {
        unsigned int id;
 
        id = read_cpuid_id();
-       if ((id & 0xfffffff0) == 0x562f5840)
+       if ((id & 0xff0fff00) == 0x560f5800)
                return 1;
 
        return 0;
 }
+#else
+#define cpu_is_pj4()   0
 #endif
 #endif
index 191ada6e4d2db3393270ef9595e9cc083e441909..662c7bd061081b2fadfc0e790e719d14cc526d77 100644 (file)
                /* Select the best insn combination to perform the   */ \
                /* actual __m * __n / (__p << 64) operation.         */ \
                if (!__c) {                                             \
-                       asm (   "umull  %Q0, %R0, %1, %Q2\n\t"          \
+                       asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"         \
                                "mov    %Q0, #0"                        \
                                : "=&r" (__res)                         \
                                : "r" (__m), "r" (__n)                  \
index 608516ebabfe6111a651f3a5ca6e63c607046fab..a5ff410dcdb6a47a03a03214bcc0d66fdc33da94 100644 (file)
@@ -53,6 +53,13 @@ void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
  * CPU/cluster power operations API for higher subsystems to use.
  */
 
+/**
+ * mcpm_is_available - returns whether MCPM is initialized and available
+ *
+ * Returns true if an MCPM platform backend has been registered, false otherwise.
+ */
+bool mcpm_is_available(void);
+
 /**
  * mcpm_cpu_power_up - make given CPU in given cluster runable
  *
index 0baf7f0d939484264b089c772112657cb9f15c75..f1a0dace3efee423e7727e143550aae06f081fd5 100644 (file)
@@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
        }
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
        tlb_flush(tlb);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
        free_pages_and_swap_cache(tlb->pages, tlb->nr);
        tlb->nr = 0;
        if (tlb->pages == tlb->local)
                __tlb_alloc_page(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       tlb_flush_mmu_tlbonly(tlb);
+       tlb_flush_mmu_free(tlb);
+}
+
 static inline void
 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
index cf4f3e867395ef0261b16a4d721d8cff740c1de2..ded062f9b358038c05fa074816706c40619e6408 100644 (file)
@@ -77,7 +77,6 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
 }
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)     (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_pfn(v)          (PFN_DOWN(__pa(v)))
 #define virt_to_mfn(v)         (pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)         (__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
index fb5584d0cc050a6c55b30ff8342615a5a39a1c2f..ba94446c72d9127633de59545a3691390ecdfc5d 100644 (file)
 #define __NR_finit_module              (__NR_SYSCALL_BASE+379)
 #define __NR_sched_setattr             (__NR_SYSCALL_BASE+380)
 #define __NR_sched_getattr             (__NR_SYSCALL_BASE+381)
+#define __NR_renameat2                 (__NR_SYSCALL_BASE+382)
 
 /*
  * This may need to be greater than __NR_last_syscall+1 in order to
index a766bcbaf8adfbca3e4bb5ef4446bc5700454d7d..040619c32d68dfe4ce63726f44fcedcc9fe6b2cf 100644 (file)
@@ -79,6 +79,7 @@ obj-$(CONFIG_CPU_XSCALE)      += xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)         += xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)       += xscale-cp0.o
 obj-$(CONFIG_CPU_PJ4)          += pj4-cp0.o
+obj-$(CONFIG_CPU_PJ4B)         += pj4-cp0.o
 obj-$(CONFIG_IWMMXT)           += iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)      += perf_regs.o
 obj-$(CONFIG_HW_PERF_EVENTS)   += perf_event.o perf_event_cpu.o
index 166e945de832f22b603d6b0de2ca3eb92f2ec732..8f51bdcdacbbf6675933f38fb595adbdc825f4c2 100644 (file)
                CALL(sys_finit_module)
 /* 380 */      CALL(sys_sched_setattr)
                CALL(sys_sched_getattr)
+               CALL(sys_renameat2)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index f8c08839edf3053c3ee9ac884fab3f61e8ba84b3..591d6e4a64922cda2cd23531e32aa43d339c5dd3 100644 (file)
@@ -587,7 +587,7 @@ __fixup_pv_table:
        add     r6, r6, r3      @ adjust __pv_phys_pfn_offset address
        add     r7, r7, r3      @ adjust __pv_offset address
        mov     r0, r8, lsr #12 @ convert to PFN
-       str     r0, [r6, #LOW_OFFSET]   @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
+       str     r0, [r6]        @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
        strcc   ip, [r7, #HIGH_OFFSET]  @ save to __pv_offset high bits
        mov     r6, r3, lsr #24 @ constant for add/sub instructions
        teq     r3, r6, lsl #24 @ must be 16MiB aligned
index a08783823b32fdde6dd73d7022b042b64c321bca..2452dd1bef53b0eb719dcda0ce127c2f5ddaeec9 100644 (file)
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 
-#if defined(CONFIG_CPU_PJ4)
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
 #define PJ4(code...)           code
 #define XSC(code...)
-#else
+#elif defined(CONFIG_CPU_MOHAWK) || \
+       defined(CONFIG_CPU_XSC3) || \
+       defined(CONFIG_CPU_XSCALE)
 #define PJ4(code...)
 #define XSC(code...)           code
+#else
+#error "Unsupported iWMMXt architecture"
 #endif
 
 #define MMX_WR0                        (0x00)
index f0d180d8b29f4e22558a98fdf9366ebaf1c0c0cb..8cf0996aa1a8d795bfdb65add498aa1552829382 100644 (file)
@@ -184,3 +184,10 @@ void machine_kexec(struct kimage *image)
 
        soft_restart(reboot_entry_phys);
 }
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_ARM_LPAE
+       VMCOREINFO_CONFIG(ARM_LPAE);
+#endif
+}
index fc72086362842436381d0595c1afea648eb7b830..8153e36b24917e96c8fa69d18bd78e8b0c130c1a 100644 (file)
@@ -45,7 +45,7 @@ static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
        return NOTIFY_DONE;
 }
 
-static struct notifier_block iwmmxt_notifier_block = {
+static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
        .notifier_call  = iwmmxt_do,
 };
 
@@ -72,6 +72,33 @@ static void __init pj4_cp_access_write(u32 value)
                : "=r" (temp) : "r" (value));
 }
 
+static int __init pj4_get_iwmmxt_version(void)
+{
+       u32 cp_access, wcid;
+
+       cp_access = pj4_cp_access_read();
+       pj4_cp_access_write(cp_access | 0xf);
+
+       /* check if coprocessor 0 and 1 are available */
+       if ((pj4_cp_access_read() & 0xf) != 0xf) {
+               pj4_cp_access_write(cp_access);
+               return -ENODEV;
+       }
+
+       /* read iWMMXt coprocessor id register p1, c0 */
+       __asm__ __volatile__ ("mrc    p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
+
+       pj4_cp_access_write(cp_access);
+
+       /* iWMMXt v1 */
+       if ((wcid & 0xffffff00) == 0x56051000)
+               return 1;
+       /* iWMMXt v2 */
+       if ((wcid & 0xffffff00) == 0x56052000)
+               return 2;
+
+       return -EINVAL;
+}
 
 /*
  * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
@@ -79,17 +106,26 @@ static void __init pj4_cp_access_write(u32 value)
  */
 static int __init pj4_cp0_init(void)
 {
-       u32 cp_access;
+       u32 __maybe_unused cp_access;
+       int vers;
 
        if (!cpu_is_pj4())
                return 0;
 
+       vers = pj4_get_iwmmxt_version();
+       if (vers < 0)
+               return 0;
+
+#ifndef CONFIG_IWMMXT
+       pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
+#else
        cp_access = pj4_cp_access_read() & ~0xf;
        pj4_cp_access_write(cp_access);
 
-       printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n");
+       pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
        elf_hwcap |= HWCAP_IWMMXT;
        thread_register_notifier(&iwmmxt_notifier_block);
+#endif
 
        return 0;
 }
index 702bd329d9d0cd4f8b0912ca3a9694f942f1a568..e90a3148f38540c98c9f7a34ccce3f9ab7de7581 100644 (file)
@@ -203,9 +203,9 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
        int ret;
 
        switch (cmd) {
-       case F_GETLKP:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_GETLK:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
        case F_GETLK64:
        case F_SETLK64:
        case F_SETLKW64:
index 466bd299b1a8aad54949364d976d9c5430c2375e..4be5bb150bdddea694fbf71bffa6dd8e9b855177 100644 (file)
@@ -23,7 +23,7 @@ config KVM
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_MMIO
        select KVM_ARM_HOST
-       depends on ARM_VIRT_EXT && ARM_LPAE
+       depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
        ---help---
          Support hosting virtualized guest machines. You will also
          need to select one or more of the processor modules below.
index 80bb1e6c2c2906d0764ae5b696e72053c8faff9c..16f804938b8fea9fee56fa93991cf8c45cf141e5 100644 (file)
@@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
 #define kvm_pmd_huge(_x)       (pmd_huge(_x) || pmd_trans_huge(_x))
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
        if (boot_hyp_pgd) {
                unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
                unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-               kfree(boot_hyp_pgd);
+               free_pages((unsigned long)boot_hyp_pgd, pgd_order);
                boot_hyp_pgd = NULL;
        }
 
        if (hyp_pgd)
                unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
-       kfree(init_bounce_page);
+       free_page((unsigned long)init_bounce_page);
        init_bounce_page = NULL;
 
        mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -330,7 +332,7 @@ void free_hyp_pgds(void)
                for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
-               kfree(hyp_pgd);
+               free_pages((unsigned long)hyp_pgd, pgd_order);
                hyp_pgd = NULL;
        }
 
@@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
                size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
                phys_addr_t phys_base;
 
-               init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
                if (!init_bounce_page) {
                        kvm_err("Couldn't allocate HYP init bounce page\n");
                        err = -ENOMEM;
@@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
                         (unsigned long)phys_base);
        }
 
-       hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-       boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+       hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+       boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
        if (!hyp_pgd || !boot_hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
index 8b1b0a8700259961d04617ee064eb036679f8acd..a0282928e9c10bdbc67b385423b225f8e5315756 100644 (file)
@@ -1296,7 +1296,7 @@ static struct resource adc_resources[] = {
 };
 
 static struct platform_device at91_adc_device = {
-       .name           = "at91_adc",
+       .name           = "at91sam9260-adc",
        .id             = -1,
        .dev            = {
                                .platform_data          = &adc_data,
index 77b04c2edd783485d89f229a5c9c075bc68ab468..dab362c06487a856c9bcac67dd9248903c133fcd 100644 (file)
@@ -1204,7 +1204,7 @@ static struct resource adc_resources[] = {
 };
 
 static struct platform_device at91_adc_device = {
-       .name           = "at91_adc",
+       .name           = "at91sam9g45-adc",
        .id             = -1,
        .dev            = {
                                .platform_data  = &adc_data,
index b0e7f9d2c245ff093f1f08e11672e740f5170db0..2b4d6acfa34abdd67a6c6ed9becb44345dd17929 100644 (file)
@@ -208,8 +208,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
         * the "output_enable" bit as a gate, even though it's really just
         * enabling clock output.
         */
-       clk[lvds1_gate] = imx_clk_gate("lvds1_gate", "dummy", base + 0x160, 10);
-       clk[lvds2_gate] = imx_clk_gate("lvds2_gate", "dummy", base + 0x160, 11);
+       clk[lvds1_gate] = imx_clk_gate("lvds1_gate", "lvds1_sel", base + 0x160, 10);
+       clk[lvds2_gate] = imx_clk_gate("lvds2_gate", "lvds2_sel", base + 0x160, 11);
 
        /*                                name              parent_name        reg       idx */
        clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus",     base + 0x100, 0);
@@ -258,14 +258,14 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
        clk[ipu2_sel]         = imx_clk_mux("ipu2_sel",         base + 0x3c, 14, 2, ipu_sels,          ARRAY_SIZE(ipu_sels));
        clk[ldb_di0_sel]      = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9,  3, ldb_di_sels,      ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
        clk[ldb_di1_sel]      = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels,      ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
-       clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
-       clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
-       clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
-       clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
-       clk[ipu1_di0_sel]     = imx_clk_mux("ipu1_di0_sel",     base + 0x34, 0,  3, ipu1_di0_sels,     ARRAY_SIZE(ipu1_di0_sels));
-       clk[ipu1_di1_sel]     = imx_clk_mux("ipu1_di1_sel",     base + 0x34, 9,  3, ipu1_di1_sels,     ARRAY_SIZE(ipu1_di1_sels));
-       clk[ipu2_di0_sel]     = imx_clk_mux("ipu2_di0_sel",     base + 0x38, 0,  3, ipu2_di0_sels,     ARRAY_SIZE(ipu2_di0_sels));
-       clk[ipu2_di1_sel]     = imx_clk_mux("ipu2_di1_sel",     base + 0x38, 9,  3, ipu2_di1_sels,     ARRAY_SIZE(ipu2_di1_sels));
+       clk[ipu1_di0_pre_sel] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+       clk[ipu1_di1_pre_sel] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+       clk[ipu2_di0_pre_sel] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+       clk[ipu2_di1_pre_sel] = imx_clk_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+       clk[ipu1_di0_sel]     = imx_clk_mux_flags("ipu1_di0_sel",     base + 0x34, 0,  3, ipu1_di0_sels,     ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
+       clk[ipu1_di1_sel]     = imx_clk_mux_flags("ipu1_di1_sel",     base + 0x34, 9,  3, ipu1_di1_sels,     ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
+       clk[ipu2_di0_sel]     = imx_clk_mux_flags("ipu2_di0_sel",     base + 0x38, 0,  3, ipu2_di0_sels,     ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
+       clk[ipu2_di1_sel]     = imx_clk_mux_flags("ipu2_di1_sel",     base + 0x38, 9,  3, ipu2_di1_sels,     ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
        clk[hsi_tx_sel]       = imx_clk_mux("hsi_tx_sel",       base + 0x30, 28, 1, hsi_tx_sels,       ARRAY_SIZE(hsi_tx_sels));
        clk[pcie_axi_sel]     = imx_clk_mux("pcie_axi_sel",     base + 0x18, 10, 1, pcie_axi_sels,     ARRAY_SIZE(pcie_axi_sels));
        clk[ssi1_sel]         = imx_clk_fixup_mux("ssi1_sel",   base + 0x1c, 10, 2, ssi_sels,          ARRAY_SIZE(ssi_sels),          imx_cscmr1_fixup);
@@ -445,6 +445,15 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                clk_set_parent(clk[ldb_di1_sel], clk[pll5_video_div]);
        }
 
+       clk_set_parent(clk[ipu1_di0_pre_sel], clk[pll5_video_div]);
+       clk_set_parent(clk[ipu1_di1_pre_sel], clk[pll5_video_div]);
+       clk_set_parent(clk[ipu2_di0_pre_sel], clk[pll5_video_div]);
+       clk_set_parent(clk[ipu2_di1_pre_sel], clk[pll5_video_div]);
+       clk_set_parent(clk[ipu1_di0_sel], clk[ipu1_di0_pre]);
+       clk_set_parent(clk[ipu1_di1_sel], clk[ipu1_di1_pre]);
+       clk_set_parent(clk[ipu2_di0_sel], clk[ipu2_di0_pre]);
+       clk_set_parent(clk[ipu2_di1_sel], clk[ipu2_di1_pre]);
+
        /*
         * The gpmi needs 100MHz frequency in the EDO/Sync mode,
         * We can not get the 100MHz from the pll2_pfd0_352m.
index 43a90c8d68375594bb97d3a904021b534e217805..9cfebc5c7455b36b1123ae100efa513bd8fdbcdf 100644 (file)
@@ -48,7 +48,7 @@ static struct omap_dss_board_info rx51_dss_board_info = {
 
 static int __init rx51_video_init(void)
 {
-       if (!machine_is_nokia_rx51() && !of_machine_is_compatible("nokia,omap3-n900"))
+       if (!machine_is_nokia_rx51())
                return 0;
 
        if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) {
index 2649ce445845288725c011bd66147cc103018339..332af927f4d3460f5852b3878279986ef965b675 100644 (file)
@@ -209,7 +209,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw)
                if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
                    v == OMAP3XXX_EN_DPLL_FRBYPASS)
                        return 1;
-       } else if (soc_is_am33xx() || cpu_is_omap44xx()) {
+       } else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx()) {
                if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
                    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
                    v == OMAP4XXX_EN_DPLL_MNBYPASS)
@@ -255,7 +255,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
                if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
                    v == OMAP3XXX_EN_DPLL_FRBYPASS)
                        return __clk_get_rate(dd->clk_bypass);
-       } else if (soc_is_am33xx() || cpu_is_omap44xx()) {
+       } else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx()) {
                if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
                    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
                    v == OMAP4XXX_EN_DPLL_MNBYPASS)
index ab43755364f5a7c06ecfc367c0f055d65876dc54..9fe8c949305c3aff97626ca3edd5268696d57a8f 100644 (file)
@@ -501,7 +501,7 @@ static int gpmc_cs_delete_mem(int cs)
        int r;
 
        spin_lock(&gpmc_mem_lock);
-       r = release_resource(&gpmc_cs_mem[cs]);
+       r = release_resource(res);
        res->start = 0;
        res->end = 0;
        spin_unlock(&gpmc_mem_lock);
@@ -527,6 +527,14 @@ static int gpmc_cs_remap(int cs, u32 base)
                pr_err("%s: requested chip-select is disabled\n", __func__);
                return -ENODEV;
        }
+
+       /*
+        * Make sure we ignore any device offsets from the GPMC partition
+        * allocated for the chip select and that the new base conforms
+        * to the GPMC 16MB minimum granularity.
+        */
+       base &= ~(SZ_16M - 1);
+
        gpmc_cs_get_memconf(cs, &old_base, &size);
        if (base == old_base)
                return 0;
@@ -586,6 +594,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
 
 void gpmc_cs_free(int cs)
 {
+       struct resource *res = &gpmc_cs_mem[cs];
+
        spin_lock(&gpmc_mem_lock);
        if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
                printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
@@ -594,7 +604,8 @@ void gpmc_cs_free(int cs)
                return;
        }
        gpmc_cs_disable_mem(cs);
-       release_resource(&gpmc_cs_mem[cs]);
+       if (res->flags)
+               release_resource(res);
        gpmc_cs_set_reserved(cs, 0);
        spin_unlock(&gpmc_mem_lock);
 }
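
For illustration only (not part of the patch): a minimal arithmetic sketch of the 16MB rounding added to gpmc_cs_remap() above; the base address is hypothetical.

#include <stdio.h>

#define SZ_16M 0x01000000u

int main(void)
{
	unsigned int base    = 0x09000c00;             /* hypothetical device address inside a GPMC partition */
	unsigned int cs_base = base & ~(SZ_16M - 1);   /* chip-select base, rounded down to a 16MB boundary */

	printf("0x%08x -> 0x%08x\n", base, cs_base);   /* prints 0x09000c00 -> 0x09000000 */
	return 0;
}
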
index 75e92952c18efe3e597791e6798c2ee2ce5969a1..40c5d5f1451cd34b19ebed5e2d58f150f6f4a29d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Secondary CPU startup routine source file.
  *
- * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009-2014 Texas Instruments, Inc.
  *
  * Author:
  *      Santosh Shilimkar <santosh.shilimkar@ti.com>
  * code.  This routine also provides a holding flag into which
  * secondary core is held until we're ready for it to initialise.
  * The primary core will update this flag using a hardware
-+ * register AuxCoreBoot0.
+ * register AuxCoreBoot0.
  */
 ENTRY(omap5_secondary_startup)
+.arm
+THUMB( adr     r9, BSYM(wait)  )       @ CPU may be entered in ARM mode.
+THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
+THUMB( .thumb                  )       @ switch to Thumb now.
 wait:  ldr     r2, =AUX_CORE_BOOT0_PA  @ read from AuxCoreBoot0
        ldr     r0, [r2]
        mov     r0, r0, lsr #5
index 1f33f5db10d5a2dde0d90bd29f1eb6fce9b65995..66c60fe1104c9efabd02e99b8b5ca7755e9738ce 100644 (file)
@@ -2546,11 +2546,12 @@ static int __init _init(struct omap_hwmod *oh, void *data)
                return -EINVAL;
        }
 
-       if (np)
+       if (np) {
                if (of_find_property(np, "ti,no-reset-on-init", NULL))
                        oh->flags |= HWMOD_INIT_NO_RESET;
                if (of_find_property(np, "ti,no-idle-on-init", NULL))
                        oh->flags |= HWMOD_INIT_NO_IDLE;
+       }
 
        oh->_state = _HWMOD_STATE_INITIALIZED;
 
index a123ff0070bd65138394fa6248f611ef2cc10573..71ac7d5f338593e4e7f27ed8b4cb81465b28b7db 100644 (file)
@@ -1964,7 +1964,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
 static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
        .name           = "usb_host_hs",
        .class          = &omap3xxx_usb_host_hs_hwmod_class,
-       .clkdm_name     = "l3_init_clkdm",
+       .clkdm_name     = "usbhost_clkdm",
        .mpu_irqs       = omap3xxx_usb_host_hs_irqs,
        .main_clk       = "usbhost_48m_fck",
        .prcm = {
@@ -2047,7 +2047,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
 static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
        .name           = "usb_tll_hs",
        .class          = &omap3xxx_usb_tll_hs_hwmod_class,
-       .clkdm_name     = "l3_init_clkdm",
+       .clkdm_name     = "core_l4_clkdm",
        .mpu_irqs       = omap3xxx_usb_tll_hs_irqs,
        .main_clk       = "usbtll_fck",
        .prcm = {
index 1f3770a8a7286fd7650f76d46917408d0ff52b96..87099bb6de692771ce7d26a720f0bec5836d6beb 100644 (file)
@@ -330,10 +330,6 @@ void omap_sram_idle(void)
                        omap3_sram_restore_context();
                        omap2_sms_restore_context();
                }
-               if (core_next_state == PWRDM_POWER_OFF)
-                       omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
-                                              OMAP3430_GR_MOD,
-                                              OMAP3_PRM_VOLTCTRL_OFFSET);
        }
        omap3_intc_resume_idle();
 
index f565f9944af2ee45b795b180391853ffa8810a12..7548db2bfb8a7e595d7672c5cfbba3868651d689 100644 (file)
@@ -21,7 +21,7 @@ struct mv_sata_platform_data;
 #define ORION_MBUS_DEVBUS_BOOT_ATTR   0x0f
 #define ORION_MBUS_DEVBUS_TARGET(cs)  0x01
 #define ORION_MBUS_DEVBUS_ATTR(cs)    (~(1 << cs))
-#define ORION_MBUS_SRAM_TARGET        0x00
+#define ORION_MBUS_SRAM_TARGET        0x09
 #define ORION_MBUS_SRAM_ATTR          0x00
 
 /*
index 8bc02913517cd14a6e96f05295ff94f9fd250ee3..0e1bb46264f9c1bed329538a01088c27903a37ed 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/gpio.h>
 #include <linux/mfd/asic3.h>
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
 
 #define HX4700_ASIC3_GPIO_BASE PXA_NR_BUILTIN_GPIO
 #define HX4700_EGPIO_BASE      (HX4700_ASIC3_GPIO_BASE + ASIC3_NUM_GPIOS)
index dbfa5a26cfff85b1ffdac6afdcc7ab26d21c0177..072842f6491b8efb0bc0ac7e3d3b04e3d6ffaefe 100644 (file)
@@ -152,7 +152,7 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
 
        node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu");
        if (!node) {
-               pr_err("%s: could not find sram dt node\n", __func__);
+               pr_err("%s: could not find pmu dt node\n", __func__);
                return;
        }
 
index 2858f380beaefba938f6dbdf75ec81874af62168..486063db2a2ffd501ca67cf7d62f0e0750464010 100644 (file)
@@ -992,6 +992,7 @@ static struct asoc_simple_card_info fsi_wm8978_info = {
        .platform       = "sh_fsi2",
        .daifmt         = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM,
        .cpu_dai = {
+               .fmt    = SND_SOC_DAIFMT_IB_NF,
                .name   = "fsia-dai",
        },
        .codec_dai = {
index f0104bfe544e378c6a778d853fbd3fc822669087..18c7e0311aa679c60a634963a3f64ddf7484dffd 100644 (file)
@@ -588,14 +588,12 @@ static struct asoc_simple_card_info rsnd_card_info = {
        .card           = "SSI01-AK4643",
        .codec          = "ak4642-codec.2-0012",
        .platform       = "rcar_sound",
-       .daifmt         = SND_SOC_DAIFMT_LEFT_J,
+       .daifmt         = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM,
        .cpu_dai = {
                .name   = "rcar_sound",
-               .fmt    = SND_SOC_DAIFMT_CBS_CFS,
        },
        .codec_dai = {
                .name   = "ak4642-hifi",
-               .fmt    = SND_SOC_DAIFMT_CBM_CFM,
                .sysclk = 11289600,
        },
 };
index 2009a9bc63562af9d761c47f433a4b3d6a80f25d..9989b1b06ffd7dae363552e4d1b973f99f423f0a 100644 (file)
@@ -170,7 +170,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP010] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 10, 0), /* SSI2 */
        [MSTP009] = SH_CLK_MSTP32(&p_clk, MSTPCR0,  9, 0), /* SSI3 */
        [MSTP008] = SH_CLK_MSTP32(&p_clk, MSTPCR0,  8, 0), /* SRU */
-       [MSTP007] = SH_CLK_MSTP32(&p_clk, MSTPCR0,  7, 0), /* HSPI */
+       [MSTP007] = SH_CLK_MSTP32(&s_clk, MSTPCR0,  7, 0), /* HSPI */
 };
 
 static struct clk_lookup lookups[] = {
index 64790353951f0c8ca2d071d1c2ba199007fa18d3..26fda4ed4d51413301d148477cd4d18e2bef7884 100644 (file)
@@ -71,7 +71,7 @@ static void clockevent_set_mode(enum clock_event_mode mode,
 static int clockevent_next_event(unsigned long evt,
                                 struct clock_event_device *clk_event_dev);
 
-static void spear_clocksource_init(void)
+static void __init spear_clocksource_init(void)
 {
        u32 tick_rate;
        u16 val;
index 92d660f9610f4ca94092a81749e578372731939f..55b305d51669c576d7b85f6d9fa07f45739ab644 100644 (file)
@@ -70,7 +70,4 @@ config TEGRA_AHB
          which controls AHB bus master arbitration and some
          performance parameters (priority, prefetch size).
 
-config TEGRA_EMC_SCALING_ENABLE
-       bool "Enable scaling the memory frequency"
-
 endmenu
index e4dec9fcb084afe014d8beb09706f946f7b8e734..9c6029ba526fff85005d8195c311138be7074a94 100644 (file)
@@ -23,9 +23,7 @@
 #include "board.h"
 
 static struct rfkill_gpio_platform_data wifi_rfkill_platform_data = {
-       .name           = "wifi_rfkill",
-       .reset_gpio     = 25, /* PD1 */
-       .shutdown_gpio  = 85, /* PK5 */
+       .name   = "wifi_rfkill",
        .type   = RFKILL_TYPE_WLAN,
 };
 
index 788495d35cf9ea6d920a69a8fd6cc2a7b46fd7c0..30b993399ed7758062f1f458b05d3fee2c9cd7b8 100644 (file)
@@ -51,12 +51,14 @@ static int dcscb_allcpus_mask[2];
 static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
 {
        unsigned int rst_hold, cpumask = (1 << cpu);
-       unsigned int all_mask = dcscb_allcpus_mask[cluster];
+       unsigned int all_mask;
 
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (cpu >= 4 || cluster >= 2)
                return -EINVAL;
 
+       all_mask = dcscb_allcpus_mask[cluster];
+
        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
@@ -101,11 +103,12 @@ static void dcscb_power_down(void)
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        cpumask = (1 << cpu);
-       all_mask = dcscb_allcpus_mask[cluster];
 
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cpu >= 4 || cluster >= 2);
 
+       all_mask = dcscb_allcpus_mask[cluster];
+
        __mcpm_cpu_going_down(cpu, cluster);
 
        arch_spin_lock(&dcscb_lock);
index c26ef5b92ca78587ce35b0f597a9cea66f9d592a..2c2754e79cb37d3fbcd9aff04ca086e4ba6f5274 100644 (file)
@@ -392,7 +392,7 @@ static irqreturn_t ve_spc_irq_handler(int irq, void *data)
  *  +--------------------------+
  *  | 31      20 | 19        0 |
  *  +--------------------------+
- *  |   u_volt   |  freq(kHz)  |
+ *  |   m_volt   |  freq(kHz)  |
  *  +--------------------------+
  */
 #define MULT_FACTOR    20
@@ -414,7 +414,7 @@ static int ve_spc_populate_opps(uint32_t cluster)
                ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
                if (!ret) {
                        opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
-                       opps->u_volt = data >> VOLT_SHIFT;
+                       opps->u_volt = (data >> VOLT_SHIFT) * 1000;
                } else {
                        break;
                }
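
For illustration only (not part of the patch): a self-contained C sketch decoding one packed SCC OPP word per the layout comment above; the mask, shift and sample value are assumptions, and the * 1000 step mirrors the millivolt-to-microvolt fix.

#include <stdint.h>
#include <stdio.h>

#define FREQ_MASK   0x000fffffu   /* bits 19:0, frequency in 20 kHz units (assumed from the layout above) */
#define VOLT_SHIFT  20            /* bits 31:20, voltage in millivolts */
#define MULT_FACTOR 20

int main(void)
{
	uint32_t data = (900u << VOLT_SHIFT) | 50000u;              /* hypothetical: 900 mV, 50000 * 20 kHz */
	unsigned long freq_khz = (data & FREQ_MASK) * MULT_FACTOR;  /* 1000000 kHz = 1 GHz */
	unsigned long u_volt   = (data >> VOLT_SHIFT) * 1000;       /* 900000 uV */

	printf("%lu kHz @ %lu uV\n", freq_khz, u_volt);
	return 0;
}
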
index f5ad9ee70426b0f2a285cc463739a01173994875..5bf7c3c3b3018aa37a721a222714873bec8b92ff 100644 (file)
@@ -420,29 +420,29 @@ config CPU_32v3
        bool
        select CPU_USE_DOMAINS if MMU
        select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-       select TLS_REG_EMUL if SMP || !MMU
        select NEED_KUSER_HELPERS
+       select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4
        bool
        select CPU_USE_DOMAINS if MMU
        select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-       select TLS_REG_EMUL if SMP || !MMU
        select NEED_KUSER_HELPERS
+       select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4T
        bool
        select CPU_USE_DOMAINS if MMU
        select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-       select TLS_REG_EMUL if SMP || !MMU
        select NEED_KUSER_HELPERS
+       select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v5
        bool
        select CPU_USE_DOMAINS if MMU
        select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-       select TLS_REG_EMUL if SMP || !MMU
        select NEED_KUSER_HELPERS
+       select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v6
        bool
index f62aa0677e5c4b69918d1ab36e39fada230d3d59..6b00be1f971e15958cc40c369c88ca872f645aa6 100644 (file)
@@ -1963,8 +1963,8 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
        mapping->nr_bitmaps = 1;
        mapping->extensions = extensions;
        mapping->base = base;
-       mapping->size = bitmap_size << PAGE_SHIFT;
        mapping->bits = BITS_PER_BYTE * bitmap_size;
+       mapping->size = mapping->bits << PAGE_SHIFT;
 
        spin_lock_init(&mapping->lock);
 
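
For illustration only (not part of the patch): a rough numeric check of the size fix above, assuming 4 KiB pages; each bitmap bit represents one IOVA page, so the mapping window is bits << PAGE_SHIFT rather than bitmap bytes << PAGE_SHIFT.

#include <stdio.h>

#define PAGE_SHIFT    12
#define BITS_PER_BYTE 8

int main(void)
{
	unsigned long bitmap_size = 4096;                       /* hypothetical: one page of bitmap, in bytes */
	unsigned long bits     = BITS_PER_BYTE * bitmap_size;   /* 32768 IOVA pages representable */
	unsigned long old_size = bitmap_size << PAGE_SHIFT;     /* 16 MiB: understates the window */
	unsigned long new_size = bits << PAGE_SHIFT;            /* 128 MiB: what the bitmap actually covers */

	printf("old=%lu MiB new=%lu MiB\n", old_size >> 20, new_size >> 20);
	return 0;
}
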
index 6cac43bd1d86c63638993bafa8ba989fea52e91f..423f56dd40283c3f7615a5aec0c1d5422313d61a 100644 (file)
@@ -866,6 +866,8 @@ vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, ch
                vdp.sign = vfp_sign_negate(vdp.sign);
 
        vfp_double_unpack(&vdn, vfp_get_double(dd));
+       if (vdn.exponent == 0 && vdn.significand)
+               vfp_double_normalise_denormal(&vdn);
        if (negate & NEG_SUBTRACT)
                vdn.sign = vfp_sign_negate(vdn.sign);
 
index b252631b406bd22bcbe040ede3d3f4dfbea8e987..4f96c1617aaec257a68496464aacbafe1f8d7ab8 100644 (file)
@@ -915,6 +915,8 @@ vfp_single_multiply_accumulate(int sd, int sn, s32 m, u32 fpscr, u32 negate, cha
        v = vfp_get_float(sd);
        pr_debug("VFP: s%u = %08x\n", sd, v);
        vfp_single_unpack(&vsn, v);
+       if (vsn.exponent == 0 && vsn.significand)
+               vfp_single_normalise_denormal(&vsn);
        if (negate & NEG_SUBTRACT)
                vsn.sign = vfp_sign_negate(vsn.sign);
 
index e6e4d3749a6e9d1eef343ec1f502c265a17c8e6d..e759af5d70988ea27959db20c7ef510a621ad335 100644 (file)
@@ -323,8 +323,6 @@ menu "CPU Power Management"
 
 source "drivers/cpuidle/Kconfig"
 
-source "kernel/power/Kconfig"
-
 source "drivers/cpufreq/Kconfig"
 
 endmenu
index 93f4b2dd92484863e8015da4a622a0c17745de5a..f8c40a66e65ddb3d3a4c327379d0eec2b234ce50 100644 (file)
                              <0x0 0x1f21e000 0x0 0x1000>,
                              <0x0 0x1f217000 0x0 0x1000>;
                        interrupts = <0x0 0x86 0x4>;
+                       dma-coherent;
                        status = "disabled";
                        clocks = <&sata01clk 0>;
                        phys = <&phy1 0>;
                              <0x0 0x1f22e000 0x0 0x1000>,
                              <0x0 0x1f227000 0x0 0x1000>;
                        interrupts = <0x0 0x87 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sata23clk 0>;
                        phys = <&phy2 0>;
                              <0x0 0x1f23d000 0x0 0x1000>,
                              <0x0 0x1f23e000 0x0 0x1000>;
                        interrupts = <0x0 0x88 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sata45clk 0>;
                        phys = <&phy3 0>;
index e94f9458aa6faa3630d5d2b7cebf9e522b56901d..993bce527b8552d379c62b6082703b1438e80436 100644 (file)
@@ -138,6 +138,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __pa(x)                        __virt_to_phys((unsigned long)(x))
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+#define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys(x))
 
 /*
  *  virt_to_page(k)    convert a _valid_ virtual address to struct page *
index f600d400c07d2cb7e615a13bbe9d02fe3959714d..aff0292c8f4da75957ed0f3bee43ab1f804ae862 100644 (file)
@@ -22,6 +22,9 @@ typedef struct {
        void *vdso;
 } mm_context_t;
 
+#define INIT_MM_CONTEXT(name) \
+       .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+
 #define ASID(mm)       ((mm)->context.id & 0xffff)
 
 extern void paging_init(void);
index 72cadf52ca807f181261b1599b25944374de5544..80e2c08900d68c0e0345fe0d831e6d1070515064 100644 (file)
@@ -19,6 +19,7 @@
 #ifndef __ASM_TLB_H
 #define __ASM_TLB_H
 
+#define  __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry
 
 #include <asm-generic/tlb.h>
 
@@ -99,5 +100,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 }
 #endif
 
+static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
+                                               unsigned long address)
+{
+       tlb_add_flush(tlb, address);
+}
 
 #endif
index bb8eb8a78e67d2c7906f40aa0f4db2ef0c4ef5f5..c8d8fc17bd5a6bb6def9c878acae69fbb7bee359 100644 (file)
@@ -403,8 +403,9 @@ __SYSCALL(378, sys_kcmp)
 __SYSCALL(379, sys_finit_module)
 __SYSCALL(380, sys_sched_setattr)
 __SYSCALL(381, sys_sched_getattr)
+__SYSCALL(382, sys_renameat2)
 
-#define __NR_compat_syscalls           379
+#define __NR_compat_syscalls           383
 
 /*
  * Compat syscall numbers used by the AArch64 kernel.
index ed3955a95747286ebcb3f705c107dc1b3af90423..a7fb874b595edc0c095430792de9c8883590855f 100644 (file)
@@ -318,9 +318,6 @@ static int brk_handler(unsigned long addr, unsigned int esr,
        if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
                return 0;
 
-       pr_warn("unexpected brk exception at %lx, esr=0x%x\n",
-                       (long)instruction_pointer(regs), esr);
-
        if (!user_mode(regs))
                return -EFAULT;
 
index ffbbdde7aba10480c12b41d552d1fb41da6097df..2dc36d00addffad4a4bd10ef0a6b1bac21170a49 100644 (file)
@@ -143,10 +143,8 @@ static int __init setup_early_printk(char *buf)
        }
        /* no options parsing yet */
 
-       if (paddr) {
-               set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
-               early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
-       }
+       if (paddr)
+               early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr);
 
        printch = match->printch;
        early_console = &early_console_dev;
index 473e5dbf8f39a39e8eaa7a0740e54ee4d6bacb59..0f08dfd69ebc73ea99b7b7f6d68c4b2f320eb2d1 100644 (file)
@@ -97,11 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;
 
-       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-               affinity = cpu_online_mask;
+       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
                ret = true;
-       }
 
+       /*
+        * When using forced irq_set_affinity we must ensure that the CPU
+        * being offlined is not present in the affinity mask; otherwise it
+        * may be selected as the target CPU.
+        */
+       affinity = cpu_online_mask;
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
index 720853f70b6bab01a650e39548872472bcfff0b0..7ec784653b29fad2b1eba1c49a21e9e499a6c167 100644 (file)
@@ -393,11 +393,10 @@ void __init setup_arch(char **cmdline_p)
 
 static int __init arm64_device_init(void)
 {
-       of_clk_init(NULL);
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
        return 0;
 }
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
 
 static DEFINE_PER_CPU(struct cpu, cpu_data);
 
index 29c39d5d77e31983d49ff2754b0413cd5bd48ff8..6815987b50f822af8ff1e8a8c4f8605fc83a6a7e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/clocksource.h>
+#include <linux/clk-provider.h>
 
 #include <clocksource/arm_arch_timer.h>
 
@@ -65,6 +66,7 @@ void __init time_init(void)
 {
        u32 arch_timer_rate;
 
+       of_clk_init(NULL);
        clocksource_of_init();
 
        arch_timer_rate = arch_timer_get_rate();
index 0ba347e59f06a7dbfe3fe7dcc884f9435c791d6e..c851eb44dc505f8b250b7e1205b1a5ccb35afc8c 100644 (file)
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
 
 #include <asm/cacheflush.h>
 
@@ -305,17 +308,45 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
 };
 EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
+static int dma_bus_notifier(struct notifier_block *nb,
+                           unsigned long event, void *_dev)
+{
+       struct device *dev = _dev;
+
+       if (event != BUS_NOTIFY_ADD_DEVICE)
+               return NOTIFY_DONE;
+
+       if (of_property_read_bool(dev->of_node, "dma-coherent"))
+               set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block platform_bus_nb = {
+       .notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+       .notifier_call = dma_bus_notifier,
+};
+
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
 static int __init swiotlb_late_init(void)
 {
        size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
 
-       dma_ops = &coherent_swiotlb_dma_ops;
+       /*
+        * These must be registered before of_platform_populate().
+        */
+       bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+       bus_register_notifier(&amba_bustype, &amba_bus_nb);
+
+       dma_ops = &noncoherent_swiotlb_dma_ops;
 
        return swiotlb_late_init_with_default_size(swiotlb_size);
 }
-subsys_initcall(swiotlb_late_init);
+arch_initcall(swiotlb_late_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
index 5e9aec358306f0c13bdd0bb70758dde88be9e961..31eb959e9aa81d05f16269e3bb500a965cb7cd48 100644 (file)
@@ -51,7 +51,11 @@ int pmd_huge(pmd_t pmd)
 
 int pud_huge(pud_t pud)
 {
+#ifndef __PAGETABLE_PMD_FOLDED
        return !(pud_val(pud) & PUD_TABLE_BIT);
+#else
+       return 0;
+#endif
 }
 
 int pmd_huge_support(void)
index 6b7e89569a3a9ff8518e7c6ee856603f1e9fb93b..0a472c41a67fa9dc33e9c746c24a402d6f306289 100644 (file)
@@ -374,6 +374,9 @@ int kern_addr_valid(unsigned long addr)
        if (pmd_none(*pmd))
                return 0;
 
+       if (pmd_sect(*pmd))
+               return pfn_valid(pmd_pfn(*pmd));
+
        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
deleted file mode 100644 (file)
index 4e863da..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Memory barrier definitions for the Hexagon architecture
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef _ASM_BARRIER_H
-#define _ASM_BARRIER_H
-
-#define rmb()                          barrier()
-#define read_barrier_depends()         barrier()
-#define wmb()                          barrier()
-#define mb()                           barrier()
-#define smp_rmb()                      barrier()
-#define smp_read_barrier_depends()     barrier()
-#define smp_wmb()                      barrier()
-#define smp_mb()                       barrier()
-
-/*  Set a value and use a memory barrier.  Used by the scheduler somewhere.  */
-#define set_mb(var, value) \
-       do { var = value; mb(); } while (0)
-
-#endif /* _ASM_BARRIER_H */
index bc5efc7c3f3f8ead3608780ba5e2f5b9e212e20c..39d64e0df1de6dd62caf650fdb3ed5f969f280cc 100644 (file)
@@ -91,18 +91,9 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK    0x00000000ffffff00L
 #define RR_TO_RID(val)         ((val >> 8) & 0xffffff)
 
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
 static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-       unsigned long i;
-       unsigned int nr;
-
-       if (!tlb->need_flush)
-               return;
        tlb->need_flush = 0;
 
        if (tlb->fullmm) {
@@ -135,6 +126,14 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
                flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
        }
 
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+       unsigned long i;
+       unsigned int nr;
+
        /* lastly, release the freed pages */
        nr = tlb->nr;
 
@@ -144,6 +143,19 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
                free_page_and_swap_cache(tlb->pages[i]);
 }
 
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that were gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+       if (!tlb->need_flush)
+               return;
+       ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+       ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
        unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
@@ -206,6 +218,16 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
        return tlb->max - tlb->nr;
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+       ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+       ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
        ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
index ae763d8bf55acff94a8c611893e2a103f3156f33..fb13dc5e8f8c7c34079664761f6fab1bf802a1c7 100644 (file)
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    314 /* length of syscall table */
+#define NR_syscalls                    315 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 715e85f858de5ea34e7f581b38bc5d9a60ec1304..7de0a2d65da42a09b6d8b34f75f175c326b329e9 100644 (file)
 #define __NR_finit_module              1335
 #define __NR_sched_setattr             1336
 #define __NR_sched_getattr             1337
+#define __NR_renameat2                 1338
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index fa8d61a312a7ee818a300522d9f029c9534d78f6..ba3d03503e84fde7f714fda777acf254356b78e8 100644 (file)
@@ -1775,6 +1775,7 @@ sys_call_table:
        data8 sys_finit_module                  // 1335
        data8 sys_sched_setattr
        data8 sys_sched_getattr
+       data8 sys_renameat2
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
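
The ia64 hunks above reserve syscall number 1338 for renameat2 and add it to the syscall table (the other architectures below do the same with their own numbering). A minimal userspace sketch of exercising the new call through the raw syscall interface; it assumes a libc that already defines SYS_renameat2, and the RENAME_NOREPLACE fallback definition is an assumption for older headers.

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        #ifndef RENAME_NOREPLACE
        #define RENAME_NOREPLACE (1 << 0)       /* assumed to match linux/fs.h */
        #endif

        int main(void)
        {
                /* No glibc wrapper existed when this was merged, so go through syscall(). */
                long ret = syscall(SYS_renameat2, AT_FDCWD, "old.txt",
                                   AT_FDCWD, "new.txt", RENAME_NOREPLACE);
                if (ret == -1)
                        perror("renameat2");
                return ret == -1;
        }
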
index 9d38b73989eb597d677acd95ea53cf0ddb99b624..33afa56ad47aecd6620bc150eea042705cc85d3e 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            351
+#define NR_syscalls            352
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index b932dd470041c2c5130dfcc44448c80033525259..9cd82fbc7817f716d589368bda3c1d274687a607 100644 (file)
 #define __NR_finit_module      348
 #define __NR_sched_setattr     349
 #define __NR_sched_getattr     350
+#define __NR_renameat2         351
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index b6223dc41d82870953be64b35fc409c4b5634070..501e102127899c6afaa893bd99d4d555f5bab8ec 100644 (file)
@@ -371,4 +371,5 @@ ENTRY(sys_call_table)
        .long sys_finit_module
        .long sys_sched_setattr
        .long sys_sched_getattr         /* 350 */
+       .long sys_renameat2
 
index 5d6b4b407ddab29b677a7aa5328715127dfdad56..2d6f0de7732529212bf4a9256bb3720386e39e71 100644 (file)
@@ -15,6 +15,7 @@ static inline void wr_fence(void)
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
        barrier();
        *flushptr = 0;
+       barrier();
 }
 
 #else /* CONFIG_METAG_META21 */
@@ -35,6 +36,7 @@ static inline void wr_fence(void)
        *flushptr = 0;
        *flushptr = 0;
        *flushptr = 0;
+       barrier();
 }
 
 #endif /* !CONFIG_METAG_META21 */
@@ -68,6 +70,7 @@ static inline void fence(void)
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
        barrier();
        *flushptr = 0;
+       barrier();
 }
 #define smp_mb()        fence()
 #define smp_rmb()       fence()
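
The metag hunks add a compiler barrier after the volatile store that triggers the write fence, so the compiler cannot sink that store below, or hoist later accesses above, the fence point. A host-portable sketch of the pattern; fence_reg is a placeholder for the memory-mapped LINSYSEVENT register.

        #define barrier() __asm__ __volatile__("" : : : "memory")

        static int fence_reg;                           /* placeholder for the MMIO fence register */
        static volatile int *flushptr = &fence_reg;

        static inline void wr_fence_sketch(void)
        {
                barrier();      /* earlier accesses stay before the fence write */
                *flushptr = 0;  /* the write that triggers the hardware fence */
                barrier();      /* later accesses stay after it (the added barrier) */
        }
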
index f16477d1f571cb134a29e3afd13c480401f340ea..a8a37477c66e22a8e3c29ba2402ff6559a35bf27 100644 (file)
@@ -22,6 +22,8 @@
 /* Add an extra page of padding at the top of the stack for the guard page. */
 #define STACK_TOP      (TASK_SIZE - PAGE_SIZE)
 #define STACK_TOP_MAX  STACK_TOP
+/* Maximum virtual space for stack */
+#define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
index 84e09feb4d546fef7004746a8d5550bddb3f8d58..ab78be2b6eb0503f939cc85409d532a3b037b491 100644 (file)
@@ -4,11 +4,11 @@ include include/uapi/asm-generic/Kbuild.asm
 header-y += byteorder.h
 header-y += ech.h
 header-y += ptrace.h
-header-y += resource.h
 header-y += sigcontext.h
 header-y += siginfo.h
 header-y += swab.h
 header-y += unistd.h
 
 generic-y += mman.h
+generic-y += resource.h
 generic-y += setup.h
diff --git a/arch/metag/include/uapi/asm/resource.h b/arch/metag/include/uapi/asm/resource.h
deleted file mode 100644 (file)
index 526d23c..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _UAPI_METAG_RESOURCE_H
-#define _UAPI_METAG_RESOURCE_H
-
-#define _STK_LIM_MAX    (1 << 28)
-#include <asm-generic/resource.h>
-
-#endif /* _UAPI_METAG_RESOURCE_H */
index a8b5408dd3495f48a19a7ad35ddca3b5724d6005..da4cdb16844ebcca13adff525d5e486a4f9367a6 100644 (file)
@@ -168,6 +168,7 @@ static void nvram_read_alpha2(const char *prefix, const char *name,
 static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom,
                                        const char *prefix, bool fallback)
 {
+       nvram_read_u16(prefix, NULL, "devid", &sprom->dev_id, 0, fallback);
        nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback);
        nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback);
        nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback);
index c2bb4f896ce788cbba4b6c48cd18ef875e5bdfd6..3aa5b46b2d40d0c7549142072debd16e1667ad81 100644 (file)
@@ -635,7 +635,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
                cpumask_clear(&new_affinity);
                cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
        }
-       __irq_set_affinity_locked(data, &new_affinity);
+       irq_set_affinity_locked(data, &new_affinity, false);
 }
 
 static int octeon_irq_ciu_set_affinity(struct irq_data *data,
index 5abf4e894216ac4b683772f6b187e4e9337eb2d5..2a66e908f6a9d9a276cad042ac793f692630255e 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
+#include <asm/cpu-type.h>
 #include <asm/irq_regs.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
index f434b759e3b9aaa0f6eb9ab07f85fb783de4a6ad..ec606363b80677fd464b92c13d5a905871b21466 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
+#include <asm/cpu-type.h>
 #include <asm/irq_regs.h>
 #include <asm/ptrace.h>
 #include <asm/traps.h>
index 064ae7a76bdc204c28e0c8e92ccd63400fbbf403..ae73e42ac20b163331a77439a75270320a77dfa8 100644 (file)
@@ -6,4 +6,3 @@
 lib-y                  += init.o memory.o cmdline.o identify.o console.o
 
 lib-$(CONFIG_32BIT)    += locore.o
-lib-$(CONFIG_64BIT)    += call_o32.o
diff --git a/arch/mips/dec/prom/call_o32.S b/arch/mips/dec/prom/call_o32.S
deleted file mode 100644 (file)
index 8c84981..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- *     O32 interface for the 64 (or N32) ABI.
- *
- *     Copyright (C) 2002  Maciej W. Rozycki
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-/* Maximum number of arguments supported.  Must be even!  */
-#define O32_ARGC       32
-/* Number of static registers we save.  */
-#define O32_STATC      11
-/* Frame size for both of the above.  */
-#define O32_FRAMESZ    (4 * O32_ARGC + SZREG * O32_STATC)
-
-               .text
-
-/*
- * O32 function call dispatcher, for interfacing 32-bit ROM routines.
- *
- * The standard 64 (N32) calling sequence is supported, with a0
- * holding a function pointer, a1-a7 -- its first seven arguments
- * and the stack -- remaining ones (up to O32_ARGC, including a1-a7).
- * Static registers, gp and fp are preserved, v0 holds a result.
- * This code relies on the called o32 function for sp and ra
- * restoration and thus both this dispatcher and the current stack
- * have to be placed in a KSEGx (or KUSEG) address space.  Any
- * pointers passed have to point to addresses within one of these
- * spaces as well.
- */
-NESTED(call_o32, O32_FRAMESZ, ra)
-               REG_SUBU        sp,O32_FRAMESZ
-
-               REG_S           ra,O32_FRAMESZ-1*SZREG(sp)
-               REG_S           fp,O32_FRAMESZ-2*SZREG(sp)
-               REG_S           gp,O32_FRAMESZ-3*SZREG(sp)
-               REG_S           s7,O32_FRAMESZ-4*SZREG(sp)
-               REG_S           s6,O32_FRAMESZ-5*SZREG(sp)
-               REG_S           s5,O32_FRAMESZ-6*SZREG(sp)
-               REG_S           s4,O32_FRAMESZ-7*SZREG(sp)
-               REG_S           s3,O32_FRAMESZ-8*SZREG(sp)
-               REG_S           s2,O32_FRAMESZ-9*SZREG(sp)
-               REG_S           s1,O32_FRAMESZ-10*SZREG(sp)
-               REG_S           s0,O32_FRAMESZ-11*SZREG(sp)
-
-               move            jp,a0
-
-               sll             a0,a1,zero
-               sll             a1,a2,zero
-               sll             a2,a3,zero
-               sll             a3,a4,zero
-               sw              a5,0x10(sp)
-               sw              a6,0x14(sp)
-               sw              a7,0x18(sp)
-
-               PTR_LA          t0,O32_FRAMESZ(sp)
-               PTR_LA          t1,0x1c(sp)
-               li              t2,O32_ARGC-7
-1:
-               lw              t3,(t0)
-               REG_ADDU        t0,SZREG
-               sw              t3,(t1)
-               REG_SUBU        t2,1
-               REG_ADDU        t1,4
-               bnez            t2,1b
-
-               jalr            jp
-
-               REG_L           s0,O32_FRAMESZ-11*SZREG(sp)
-               REG_L           s1,O32_FRAMESZ-10*SZREG(sp)
-               REG_L           s2,O32_FRAMESZ-9*SZREG(sp)
-               REG_L           s3,O32_FRAMESZ-8*SZREG(sp)
-               REG_L           s4,O32_FRAMESZ-7*SZREG(sp)
-               REG_L           s5,O32_FRAMESZ-6*SZREG(sp)
-               REG_L           s6,O32_FRAMESZ-5*SZREG(sp)
-               REG_L           s7,O32_FRAMESZ-4*SZREG(sp)
-               REG_L           gp,O32_FRAMESZ-3*SZREG(sp)
-               REG_L           fp,O32_FRAMESZ-2*SZREG(sp)
-               REG_L           ra,O32_FRAMESZ-1*SZREG(sp)
-
-               REG_ADDU        sp,O32_FRAMESZ
-               jr              ra
-END(call_o32)
index b308b2a0613e210c1f7b068dfc6a7589f1ad2afb..4703fe4dbd9a7b6c192ce3e86d0c5bceada460f1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     O32 interface for the 64 (or N32) ABI.
  *
- *     Copyright (C) 2002  Maciej W. Rozycki
+ *     Copyright (C) 2002, 2014  Maciej W. Rozycki
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License
 #include <asm/asm.h>
 #include <asm/regdef.h>
 
+/* O32 register size.  */
+#define O32_SZREG      4
 /* Maximum number of arguments supported.  Must be even!  */
 #define O32_ARGC       32
-/* Number of static registers we save. */
+/* Number of static registers we save.  */
 #define O32_STATC      11
-/* Frame size for static register  */
-#define O32_FRAMESZ    (SZREG * O32_STATC)
-/* Frame size on new stack */
-#define O32_FRAMESZ_NEW (SZREG + 4 * O32_ARGC)
+/* Argument area frame size.  */
+#define O32_ARGSZ      (O32_SZREG * O32_ARGC)
+/* Static register save area frame size.  */
+#define O32_STATSZ     (SZREG * O32_STATC)
+/* Stack pointer register save area frame size.  */
+#define O32_SPSZ       SZREG
+/* Combined area frame size.  */
+#define O32_FRAMESZ    (O32_ARGSZ + O32_SPSZ + O32_STATSZ)
+/* Switched stack frame size.  */
+#define O32_NFRAMESZ   (O32_ARGSZ + O32_SPSZ)
 
                .text
 
 /*
  * O32 function call dispatcher, for interfacing 32-bit ROM routines.
  *
- * The standard 64 (N32) calling sequence is supported, with a0
- * holding a function pointer, a1 a new stack pointer, a2-a7 -- its
- * first six arguments and the stack -- remaining ones (up to O32_ARGC,
- * including a2-a7). Static registers, gp and fp are preserved, v0 holds
- * a result. This code relies on the called o32 function for sp and ra
- * restoration and this dispatcher has to be placed in a KSEGx (or KUSEG)
- * address space.  Any pointers passed have to point to addresses within
- * one of these spaces as well.
+ * The standard 64 (N32) calling sequence is supported, with a0 holding
+ * a function pointer, a1 a pointer to the new stack to call the
+ * function with or 0 if no stack switching is requested, a2-a7 -- the
+ * function call's first six arguments, and the stack -- the remaining
+ * arguments (up to O32_ARGC, including a2-a7).  Static registers, gp
+ * and fp are preserved, v0 holds the result.  This code relies on the
+ * called o32 function for sp and ra restoration and this dispatcher has
+ * to be placed in a KSEGx (or KUSEG) address space.  Any pointers
+ * passed have to point to addresses within one of these spaces as well.
  */
 NESTED(call_o32, O32_FRAMESZ, ra)
                REG_SUBU        sp,O32_FRAMESZ
@@ -51,32 +60,36 @@ NESTED(call_o32, O32_FRAMESZ, ra)
                REG_S           s0,O32_FRAMESZ-11*SZREG(sp)
 
                move            jp,a0
-               REG_SUBU        s0,a1,O32_FRAMESZ_NEW
-               REG_S           sp,O32_FRAMESZ_NEW-1*SZREG(s0)
+
+               move            fp,sp
+               beqz            a1,0f
+               REG_SUBU        fp,a1,O32_NFRAMESZ
+0:
+               REG_S           sp,O32_NFRAMESZ-1*SZREG(fp)
 
                sll             a0,a2,zero
                sll             a1,a3,zero
                sll             a2,a4,zero
                sll             a3,a5,zero
-               sw              a6,0x10(s0)
-               sw              a7,0x14(s0)
+               sw              a6,4*O32_SZREG(fp)
+               sw              a7,5*O32_SZREG(fp)
 
                PTR_LA          t0,O32_FRAMESZ(sp)
-               PTR_LA          t1,0x18(s0)
+               PTR_LA          t1,6*O32_SZREG(fp)
                li              t2,O32_ARGC-6
 1:
                lw              t3,(t0)
                REG_ADDU        t0,SZREG
                sw              t3,(t1)
                REG_SUBU        t2,1
-               REG_ADDU        t1,4
+               REG_ADDU        t1,O32_SZREG
                bnez            t2,1b
 
-               move            sp,s0
+               move            sp,fp
 
                jalr            jp
 
-               REG_L           sp,O32_FRAMESZ_NEW-1*SZREG(sp)
+               REG_L           sp,O32_NFRAMESZ-1*SZREG(sp)
 
                REG_L           s0,O32_FRAMESZ-11*SZREG(sp)
                REG_L           s1,O32_FRAMESZ-10*SZREG(sp)
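
The rewritten frame-size macros above split the dispatcher's stack layout into an o32 argument area, a saved stack pointer slot, and a static-register save area. A standalone C sketch that mirrors those macros and prints the resulting sizes, assuming a 64-bit build where SZREG is 8:

        #include <stdio.h>

        #define SZREG        8                  /* native (64-bit) register size */
        #define O32_SZREG    4                  /* o32 register size */
        #define O32_ARGC     32                 /* max arguments, must be even */
        #define O32_STATC    11                 /* static registers saved */

        #define O32_ARGSZ    (O32_SZREG * O32_ARGC)             /* argument area */
        #define O32_STATSZ   (SZREG * O32_STATC)                /* static register save area */
        #define O32_SPSZ     SZREG                              /* saved stack pointer slot */
        #define O32_FRAMESZ  (O32_ARGSZ + O32_SPSZ + O32_STATSZ)  /* caller-side frame */
        #define O32_NFRAMESZ (O32_ARGSZ + O32_SPSZ)               /* frame on a switched stack */

        int main(void)
        {
                printf("caller frame   : %d bytes\n", O32_FRAMESZ);     /* 224 with these sizes */
                printf("switched frame : %d bytes\n", O32_NFRAMESZ);    /* 136 with these sizes */
                return 0;
        }
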
index 2c2cb182af4edd8673dc54c00c39aded85a6ed30..6aa264b9856ac99b71b88d54fae19cea27d782de 100644 (file)
@@ -40,7 +40,8 @@
 
 #ifdef CONFIG_64BIT
 
-static u8 o32_stk[16384];
+/* O32 stack has to be 8-byte aligned. */
+static u64 o32_stk[4096];
 #define O32_STK          &o32_stk[sizeof(o32_stk)]
 
 #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
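
The o32_stk change above keeps the buffer at 16 KiB but declares it as u64 elements so the array is naturally 8-byte aligned, as the o32 stack requires. A small standalone C sketch of aligning a buffer via its element type; the reported element alignment is 8 on the 64-bit configurations this code targets.

        #include <stdio.h>

        static unsigned char      stk_bytes[16384];     /* right size, only 1-byte alignment guaranteed */
        static unsigned long long stk_words[4096];      /* same 16384 bytes, naturally 8-byte aligned */

        int main(void)
        {
                printf("sizeof: %zu vs %zu\n", sizeof(stk_bytes), sizeof(stk_words));
                printf("alignment: %zu vs %zu\n",
                       _Alignof(unsigned char), _Alignof(unsigned long long));
                return 0;
        }
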
index c0ead63138453c04d3b20918fbcfb1e1c02c5c3c..b59a2103b61a3f64efd75b29f034e8208c155012 100644 (file)
@@ -113,31 +113,31 @@ extern int (*__pmax_close)(int);
 #define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \
                                 __asm__(#fun " = call_o32")
 
-int __DEC_PROM_O32(_rex_bootinit, (int (*)(void)));
-int __DEC_PROM_O32(_rex_bootread, (int (*)(void)));
-int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), memmap *));
+int __DEC_PROM_O32(_rex_bootinit, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_bootread, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), void *, memmap *));
 unsigned long *__DEC_PROM_O32(_rex_slot_address,
-                            (unsigned long *(*)(int), int));
-void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void)));
-int __DEC_PROM_O32(_rex_getsysid, (int (*)(void)));
-void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void)));
-
-int __DEC_PROM_O32(_prom_getchar, (int (*)(void)));
-char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), char *));
-int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), char *, ...));
-
-
-#define rex_bootinit()         _rex_bootinit(__rex_bootinit)
-#define rex_bootread()         _rex_bootread(__rex_bootread)
-#define rex_getbitmap(x)       _rex_getbitmap(__rex_getbitmap, x)
-#define rex_slot_address(x)    _rex_slot_address(__rex_slot_address, x)
-#define rex_gettcinfo()                _rex_gettcinfo(__rex_gettcinfo)
-#define rex_getsysid()         _rex_getsysid(__rex_getsysid)
-#define rex_clear_cache()      _rex_clear_cache(__rex_clear_cache)
-
-#define prom_getchar()         _prom_getchar(__prom_getchar)
-#define prom_getenv(x)         _prom_getenv(__prom_getenv, x)
-#define prom_printf(x...)      _prom_printf(__prom_printf, x)
+                            (unsigned long *(*)(int), void *, int));
+void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void), void *));
+int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *));
+void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void), void *));
+
+int __DEC_PROM_O32(_prom_getchar, (int (*)(void), void *));
+char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), void *, char *));
+int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), void *, char *, ...));
+
+
+#define rex_bootinit()         _rex_bootinit(__rex_bootinit, NULL)
+#define rex_bootread()         _rex_bootread(__rex_bootread, NULL)
+#define rex_getbitmap(x)       _rex_getbitmap(__rex_getbitmap, NULL, x)
+#define rex_slot_address(x)    _rex_slot_address(__rex_slot_address, NULL, x)
+#define rex_gettcinfo()                _rex_gettcinfo(__rex_gettcinfo, NULL)
+#define rex_getsysid()         _rex_getsysid(__rex_getsysid, NULL)
+#define rex_clear_cache()      _rex_clear_cache(__rex_clear_cache, NULL)
+
+#define prom_getchar()         _prom_getchar(__prom_getchar, NULL)
+#define prom_getenv(x)         _prom_getenv(__prom_getenv, NULL, x)
+#define prom_printf(x...)      _prom_printf(__prom_printf, NULL, x)
 
 #else /* !CONFIG_64BIT */
 
diff --git a/arch/mips/include/asm/rm9k-ocd.h b/arch/mips/include/asm/rm9k-ocd.h
deleted file mode 100644 (file)
index b0b80d9..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  Copyright (C) 2004 by Basler Vision Technologies AG
- *  Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#if !defined(_ASM_RM9K_OCD_H)
-#define _ASM_RM9K_OCD_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
-
-extern volatile void __iomem * const ocd_base;
-extern volatile void __iomem * const titan_base;
-
-#define ocd_addr(__x__)                (ocd_base + (__x__))
-#define titan_addr(__x__)      (titan_base + (__x__))
-#define scram_addr(__x__)      (scram_base + (__x__))
-
-/* OCD register access */
-#define ocd_readl(__offs__) __raw_readl(ocd_addr(__offs__))
-#define ocd_readw(__offs__) __raw_readw(ocd_addr(__offs__))
-#define ocd_readb(__offs__) __raw_readb(ocd_addr(__offs__))
-#define ocd_writel(__val__, __offs__) \
-       __raw_writel((__val__), ocd_addr(__offs__))
-#define ocd_writew(__val__, __offs__) \
-       __raw_writew((__val__), ocd_addr(__offs__))
-#define ocd_writeb(__val__, __offs__) \
-       __raw_writeb((__val__), ocd_addr(__offs__))
-
-/* TITAN register access - 32 bit-wide only */
-#define titan_readl(__offs__) __raw_readl(titan_addr(__offs__))
-#define titan_writel(__val__, __offs__) \
-       __raw_writel((__val__), titan_addr(__offs__))
-
-/* Protect access to shared TITAN registers */
-extern spinlock_t titan_lock;
-extern int titan_irqflags;
-#define lock_titan_regs() spin_lock_irqsave(&titan_lock, titan_irqflags)
-#define unlock_titan_regs() spin_unlock_irqrestore(&titan_lock, titan_irqflags)
-
-#endif /* !defined(_ASM_RM9K_OCD_H) */
index c6e9cd2bca8dbf7de5512c9be8de0f2b92db7bd4..17960fe7a8ce4ef21b7a94cca10c7b5af61cec17 100644 (file)
@@ -133,6 +133,8 @@ static inline int syscall_get_arch(void)
 #ifdef CONFIG_64BIT
        if (!test_thread_flag(TIF_32BIT_REGS))
                arch |= __AUDIT_ARCH_64BIT;
+       if (test_thread_flag(TIF_32BIT_ADDR))
+               arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
 #endif
 #if defined(__LITTLE_ENDIAN)
        arch |=  __AUDIT_ARCH_LE;
index df6e775f3fef524e8d049c24433eff06dade0fd7..3125797f2a88a6bc3e44cdc215d416d604e7070d 100644 (file)
@@ -484,13 +484,13 @@ enum MIPS6e_i8_func {
  * Damn ...  bitfields depend on byteorder :-(
  */
 #ifdef __MIPSEB__
-#define BITFIELD_FIELD(field, more)                                    \
+#define __BITFIELD_FIELD(field, more)                                  \
        field;                                                          \
        more
 
 #elif defined(__MIPSEL__)
 
-#define BITFIELD_FIELD(field, more)                                    \
+#define __BITFIELD_FIELD(field, more)                                  \
        more                                                            \
        field;
 
@@ -499,112 +499,112 @@ enum MIPS6e_i8_func {
 #endif
 
 struct j_format {
-       BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
-       BITFIELD_FIELD(unsigned int target : 26,
+       __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
+       __BITFIELD_FIELD(unsigned int target : 26,
        ;))
 };
 
 struct i_format {                      /* signed immediate format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(signed int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(signed int simmediate : 16,
        ;))))
 };
 
 struct u_format {                      /* unsigned immediate format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int uimmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int uimmediate : 16,
        ;))))
 };
 
 struct c_format {                      /* Cache (>= R6000) format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int c_op : 3,
-       BITFIELD_FIELD(unsigned int cache : 2,
-       BITFIELD_FIELD(unsigned int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int c_op : 3,
+       __BITFIELD_FIELD(unsigned int cache : 2,
+       __BITFIELD_FIELD(unsigned int simmediate : 16,
        ;)))))
 };
 
 struct r_format {                      /* Register format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int re : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct p_format {              /* Performance counter format (R10000) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int re : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct f_format {                      /* FPU register format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int : 1,
-       BITFIELD_FIELD(unsigned int fmt : 4,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int re : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int : 1,
+       __BITFIELD_FIELD(unsigned int fmt : 4,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int re : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct ma_format {             /* FPU multiply and add format (MIPS IV) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fr : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 4,
-       BITFIELD_FIELD(unsigned int fmt : 2,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fr : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 4,
+       __BITFIELD_FIELD(unsigned int fmt : 2,
        ;)))))))
 };
 
 struct b_format {                      /* BREAK and SYSCALL */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int code : 20,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int code : 20,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))
 };
 
 struct ps_format {                     /* MIPS-3D / paired single format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct v_format {                              /* MDMX vector format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int sel : 4,
-       BITFIELD_FIELD(unsigned int fmt : 1,
-       BITFIELD_FIELD(unsigned int vt : 5,
-       BITFIELD_FIELD(unsigned int vs : 5,
-       BITFIELD_FIELD(unsigned int vd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int sel : 4,
+       __BITFIELD_FIELD(unsigned int fmt : 1,
+       __BITFIELD_FIELD(unsigned int vt : 5,
+       __BITFIELD_FIELD(unsigned int vs : 5,
+       __BITFIELD_FIELD(unsigned int vd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct spec3_format {   /* SPEC3 */
-       BITFIELD_FIELD(unsigned int opcode:6,
-       BITFIELD_FIELD(unsigned int rs:5,
-       BITFIELD_FIELD(unsigned int rt:5,
-       BITFIELD_FIELD(signed int simmediate:9,
-       BITFIELD_FIELD(unsigned int func:7,
+       __BITFIELD_FIELD(unsigned int opcode:6,
+       __BITFIELD_FIELD(unsigned int rs:5,
+       __BITFIELD_FIELD(unsigned int rt:5,
+       __BITFIELD_FIELD(signed int simmediate:9,
+       __BITFIELD_FIELD(unsigned int func:7,
        ;)))))
 };
 
@@ -616,141 +616,141 @@ struct spec3_format {   /* SPEC3 */
  *     if it is a MIPS32 instruction re-encoded for use in the microMIPS ASE.
  */
 struct fb_format {             /* FPU branch format (MIPS32) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int bc : 5,
-       BITFIELD_FIELD(unsigned int cc : 3,
-       BITFIELD_FIELD(unsigned int flag : 2,
-       BITFIELD_FIELD(signed int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int bc : 5,
+       __BITFIELD_FIELD(unsigned int cc : 3,
+       __BITFIELD_FIELD(unsigned int flag : 2,
+       __BITFIELD_FIELD(signed int simmediate : 16,
        ;)))))
 };
 
 struct fp0_format {            /* FPU multiply and add format (MIPS32) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fmt : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fmt : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp0_format {         /* FPU multiply and add format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int fmt : 3,
-       BITFIELD_FIELD(unsigned int op : 2,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int fmt : 3,
+       __BITFIELD_FIELD(unsigned int op : 2,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct fp1_format {            /* FPU mfc1 and cfc1 format (MIPS32) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int op : 5,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int op : 5,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp1_format {         /* FPU mfc1 and cfc1 format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fmt : 2,
-       BITFIELD_FIELD(unsigned int op : 8,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fmt : 2,
+       __BITFIELD_FIELD(unsigned int op : 8,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp2_format {         /* FPU movt and movf format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int cc : 3,
-       BITFIELD_FIELD(unsigned int zero : 2,
-       BITFIELD_FIELD(unsigned int fmt : 2,
-       BITFIELD_FIELD(unsigned int op : 3,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int cc : 3,
+       __BITFIELD_FIELD(unsigned int zero : 2,
+       __BITFIELD_FIELD(unsigned int fmt : 2,
+       __BITFIELD_FIELD(unsigned int op : 3,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))))
 };
 
 struct mm_fp3_format {         /* FPU abs and neg format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fmt : 3,
-       BITFIELD_FIELD(unsigned int op : 7,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fmt : 3,
+       __BITFIELD_FIELD(unsigned int op : 7,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp4_format {         /* FPU c.cond format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int cc : 3,
-       BITFIELD_FIELD(unsigned int fmt : 3,
-       BITFIELD_FIELD(unsigned int cond : 4,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int cc : 3,
+       __BITFIELD_FIELD(unsigned int fmt : 3,
+       __BITFIELD_FIELD(unsigned int cond : 4,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;)))))))
 };
 
 struct mm_fp5_format {         /* FPU lwxc1 and swxc1 format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int index : 5,
-       BITFIELD_FIELD(unsigned int base : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int op : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int index : 5,
+       __BITFIELD_FIELD(unsigned int base : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int op : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct fp6_format {            /* FPU madd and msub format (MIPS IV) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int fr : 5,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int fr : 5,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_fp6_format {         /* FPU madd and msub format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int ft : 5,
-       BITFIELD_FIELD(unsigned int fs : 5,
-       BITFIELD_FIELD(unsigned int fd : 5,
-       BITFIELD_FIELD(unsigned int fr : 5,
-       BITFIELD_FIELD(unsigned int func : 6,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int ft : 5,
+       __BITFIELD_FIELD(unsigned int fs : 5,
+       __BITFIELD_FIELD(unsigned int fd : 5,
+       __BITFIELD_FIELD(unsigned int fr : 5,
+       __BITFIELD_FIELD(unsigned int func : 6,
        ;))))))
 };
 
 struct mm_i_format {           /* Immediate format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(unsigned int rs : 5,
-       BITFIELD_FIELD(signed int simmediate : 16,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(unsigned int rs : 5,
+       __BITFIELD_FIELD(signed int simmediate : 16,
        ;))))
 };
 
 struct mm_m_format {           /* Multi-word load/store format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int base : 5,
-       BITFIELD_FIELD(unsigned int func : 4,
-       BITFIELD_FIELD(signed int simmediate : 12,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int base : 5,
+       __BITFIELD_FIELD(unsigned int func : 4,
+       __BITFIELD_FIELD(signed int simmediate : 12,
        ;)))))
 };
 
 struct mm_x_format {           /* Scaled indexed load format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int index : 5,
-       BITFIELD_FIELD(unsigned int base : 5,
-       BITFIELD_FIELD(unsigned int rd : 5,
-       BITFIELD_FIELD(unsigned int func : 11,
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int index : 5,
+       __BITFIELD_FIELD(unsigned int base : 5,
+       __BITFIELD_FIELD(unsigned int rd : 5,
+       __BITFIELD_FIELD(unsigned int func : 11,
        ;)))))
 };
 
@@ -758,51 +758,51 @@ struct mm_x_format {              /* Scaled indexed load format (microMIPS) */
  * microMIPS instruction formats (16-bit length)
  */
 struct mm_b0_format {          /* Unconditional branch format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(signed int simmediate : 10,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(signed int simmediate : 10,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;)))
 };
 
 struct mm_b1_format {          /* Conditional branch format (microMIPS) */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rs : 3,
-       BITFIELD_FIELD(signed int simmediate : 7,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 3,
+       __BITFIELD_FIELD(signed int simmediate : 7,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;))))
 };
 
 struct mm16_m_format {         /* Multi-word load/store format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int func : 4,
-       BITFIELD_FIELD(unsigned int rlist : 2,
-       BITFIELD_FIELD(unsigned int imm : 4,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int func : 4,
+       __BITFIELD_FIELD(unsigned int rlist : 2,
+       __BITFIELD_FIELD(unsigned int imm : 4,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;)))))
 };
 
 struct mm16_rb_format {                /* Signed immediate format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 3,
-       BITFIELD_FIELD(unsigned int base : 3,
-       BITFIELD_FIELD(signed int simmediate : 4,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 3,
+       __BITFIELD_FIELD(unsigned int base : 3,
+       __BITFIELD_FIELD(signed int simmediate : 4,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;)))))
 };
 
 struct mm16_r3_format {                /* Load from global pointer format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 3,
-       BITFIELD_FIELD(signed int simmediate : 7,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 3,
+       __BITFIELD_FIELD(signed int simmediate : 7,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;))))
 };
 
 struct mm16_r5_format {                /* Load/store from stack pointer format */
-       BITFIELD_FIELD(unsigned int opcode : 6,
-       BITFIELD_FIELD(unsigned int rt : 5,
-       BITFIELD_FIELD(signed int simmediate : 5,
-       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rt : 5,
+       __BITFIELD_FIELD(signed int simmediate : 5,
+       __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
        ;))))
 };
 
@@ -810,57 +810,57 @@ struct mm16_r5_format {           /* Load/store from stack pointer format */
  * MIPS16e instruction formats (16-bit length)
  */
 struct m16e_rr {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int rx : 3,
-       BITFIELD_FIELD(unsigned int nd : 1,
-       BITFIELD_FIELD(unsigned int l : 1,
-       BITFIELD_FIELD(unsigned int ra : 1,
-       BITFIELD_FIELD(unsigned int func : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int rx : 3,
+       __BITFIELD_FIELD(unsigned int nd : 1,
+       __BITFIELD_FIELD(unsigned int l : 1,
+       __BITFIELD_FIELD(unsigned int ra : 1,
+       __BITFIELD_FIELD(unsigned int func : 5,
        ;))))))
 };
 
 struct m16e_jal {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int x : 1,
-       BITFIELD_FIELD(unsigned int imm20_16 : 5,
-       BITFIELD_FIELD(signed int imm25_21 : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int x : 1,
+       __BITFIELD_FIELD(unsigned int imm20_16 : 5,
+       __BITFIELD_FIELD(signed int imm25_21 : 5,
        ;))))
 };
 
 struct m16e_i64 {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int func : 3,
-       BITFIELD_FIELD(unsigned int imm : 8,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int func : 3,
+       __BITFIELD_FIELD(unsigned int imm : 8,
        ;)))
 };
 
 struct m16e_ri64 {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int func : 3,
-       BITFIELD_FIELD(unsigned int ry : 3,
-       BITFIELD_FIELD(unsigned int imm : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int func : 3,
+       __BITFIELD_FIELD(unsigned int ry : 3,
+       __BITFIELD_FIELD(unsigned int imm : 5,
        ;))))
 };
 
 struct m16e_ri {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int rx : 3,
-       BITFIELD_FIELD(unsigned int imm : 8,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int rx : 3,
+       __BITFIELD_FIELD(unsigned int imm : 8,
        ;)))
 };
 
 struct m16e_rri {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int rx : 3,
-       BITFIELD_FIELD(unsigned int ry : 3,
-       BITFIELD_FIELD(unsigned int imm : 5,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int rx : 3,
+       __BITFIELD_FIELD(unsigned int ry : 3,
+       __BITFIELD_FIELD(unsigned int imm : 5,
        ;))))
 };
 
 struct m16e_i8 {
-       BITFIELD_FIELD(unsigned int opcode : 5,
-       BITFIELD_FIELD(unsigned int func : 3,
-       BITFIELD_FIELD(unsigned int imm : 8,
+       __BITFIELD_FIELD(unsigned int opcode : 5,
+       __BITFIELD_FIELD(unsigned int func : 3,
+       __BITFIELD_FIELD(unsigned int imm : 8,
        ;)))
 };
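
The rename from BITFIELD_FIELD to __BITFIELD_FIELD moves the endianness helper into the reserved double-underscore namespace; the macro itself declares bit-fields in opposite order on big- and little-endian targets so the named fields line up with the hardware encoding. A minimal standalone sketch of the mechanism, reduced to the j_format example:

        #include <stdio.h>

        #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
        #define __BITFIELD_FIELD(field, more)   field; more
        #else   /* little endian: emit the fields in reverse order */
        #define __BITFIELD_FIELD(field, more)   more field;
        #endif

        struct j_format {                       /* MIPS jump instruction format */
                __BITFIELD_FIELD(unsigned int opcode : 6,
                __BITFIELD_FIELD(unsigned int target : 26,
                ;))
        };

        int main(void)
        {
                struct j_format j = { .opcode = 2, .target = 0x123456 };

                printf("sizeof(struct j_format) = %zu\n", sizeof j);
                printf("opcode=%u target=0x%x\n", (unsigned)j.opcode, (unsigned)j.target);
                return 0;
        }
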
 
index d6e154a9e6a55ef98d964f71629a129f6fd04d27..2692abb28e3637db7705217de88239fb968b2935 100644 (file)
 #define __NR_finit_module              (__NR_Linux + 348)
 #define __NR_sched_setattr             (__NR_Linux + 349)
 #define __NR_sched_getattr             (__NR_Linux + 350)
+#define __NR_renameat2                 (__NR_Linux + 351)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            350
+#define __NR_Linux_syscalls            351
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_getdents64                        (__NR_Linux + 308)
 #define __NR_sched_setattr             (__NR_Linux + 309)
 #define __NR_sched_getattr             (__NR_Linux + 310)
+#define __NR_renameat2                 (__NR_Linux + 311)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            310
+#define __NR_Linux_syscalls            311
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_finit_module              (__NR_Linux + 312)
 #define __NR_sched_setattr             (__NR_Linux + 313)
 #define __NR_sched_getattr             (__NR_Linux + 314)
+#define __NR_renameat2                 (__NR_Linux + 315)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            314
+#define __NR_Linux_syscalls            315
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
index e40971b51d2f0bf47e3eb217f43c652c732e7a09..037a44d962f37e1b94251f13b054103e6cbb1ff5 100644 (file)
@@ -124,14 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        seq_printf(m, "kscratch registers\t: %d\n",
                      hweight8(cpu_data[n].kscratch_mask));
        seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
-       if (cpu_has_mipsmt) {
-               seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
-#if defined(CONFIG_MIPS_MT_SMTC)
-               seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
-#endif
-       }
-#endif
+
        sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
                      cpu_has_vce ? "%u" : "not available");
        seq_printf(m, fmt, 'D', vced_count);
index fdc70b40044265fb2107df186eb0e593a96b9571..3245474f19d5cdec5563a3841ecdcd3760f98e17 100644 (file)
@@ -577,3 +577,4 @@ EXPORT(sys_call_table)
        PTR     sys_finit_module
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr               /* 4350 */
+       PTR     sys_renameat2
index dd99c3285aeae75f65ae982de46a755ae953f140..be2fedd4ae33193937010b376e62c7d9e327022d 100644 (file)
@@ -430,4 +430,5 @@ EXPORT(sys_call_table)
        PTR     sys_getdents64
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr               /* 5310 */
+       PTR     sys_renameat2
        .size   sys_call_table,.-sys_call_table
index f68d2f4f009021de3ed784e9733aee5d3580d8e0..c1dbcda4b816844cc64821aa8d64a79d5c9a38f3 100644 (file)
@@ -423,4 +423,5 @@ EXPORT(sysn32_call_table)
        PTR     sys_finit_module
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr
+       PTR     sys_renameat2                   /* 6315 */
        .size   sysn32_call_table,.-sysn32_call_table
index 70f6acecd928896c54b586b3470c27eb703721be..f1343ccd7ed7e58d14563c4413ede6517ff60853 100644 (file)
@@ -556,4 +556,5 @@ EXPORT(sys32_call_table)
        PTR     sys_finit_module
        PTR     sys_sched_setattr
        PTR     sys_sched_getattr               /* 4350 */
+       PTR     sys_renameat2
        .size   sys32_call_table,.-sys32_call_table
index fac1f5b178ebe3c558ad369a9e1243340da04c80..143b8a37b5e41358f3faa814375869b058c9fee7 100644 (file)
@@ -8,6 +8,7 @@
        };
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 2e4825e483882b217046caf4a847a99c3db56afe..9901237563c58922d213b9af10b9ed3c504b18e5 100644 (file)
 #define UNIT(unit)  ((unit)*NBYTES)
 
 #define ADDC(sum,reg)                                          \
+       .set    push;                                           \
+       .set    noat;                                           \
        ADD     sum, reg;                                       \
        sltu    v1, sum, reg;                                   \
        ADD     sum, v1;                                        \
+       .set    pop
 
 #define ADDC32(sum,reg)                                                \
+       .set    push;                                           \
+       .set    noat;                                           \
        addu    sum, reg;                                       \
        sltu    v1, sum, reg;                                   \
        addu    sum, v1;                                        \
+       .set    pop
 
 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)   \
        LOAD    _t0, (offset + UNIT(0))(src);                   \
@@ -710,6 +716,8 @@ LEAF(csum_partial)
        ADDC(sum, t2)
 .Ldone\@:
        /* fold checksum */
+       .set    push
+       .set    noat
 #ifdef USE_DOUBLE
        dsll32  v1, sum, 0
        daddu   sum, v1
@@ -732,6 +740,7 @@ LEAF(csum_partial)
        or      sum, sum, t0
 1:
 #endif
+       .set    pop
        .set reorder
        ADDC32(sum, psum)
        jr      ra
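
The csum_partial.S hunks bracket the ADDC/ADDC32 macros and the final fold sequence with .set push/noat so the assembler knows the scratch register is in use there. For reference, a hedged C equivalent of what those sequences compute:

        /* Add with end-around carry, as the ADDC macro does via sltu. */
        static unsigned int addc_sketch(unsigned int sum, unsigned int val)
        {
                sum += val;
                sum += (sum < val);     /* 1 if the 32-bit add carried out */
                return sum;
        }

        /* Fold a 32-bit one's-complement accumulator into 16 bits (no final ~
         * here; csum_fold() applies the complement later). */
        static unsigned int csum_fold32_sketch(unsigned int sum)
        {
                sum = (sum & 0xffff) + (sum >> 16);     /* add high and low halves */
                sum = (sum & 0xffff) + (sum >> 16);     /* absorb the carry from the first add */
                return sum;
        }
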
index 44713af15a62bc60ebc53ffcbc9e73ad10d87380..705cfb7c1a74e0843e40567219024882ab932ac9 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1994 by Waldorf Electronics
  * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2007, 2014 Maciej W. Rozycki
  */
 #include <linux/module.h>
 #include <linux/param.h>
 #include <asm/compiler.h>
 #include <asm/war.h>
 
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+#define GCC_DADDI_IMM_ASM() "I"
+#else
+#define GCC_DADDI_IMM_ASM() "r"
+#endif
+
 void __delay(unsigned long loops)
 {
        __asm__ __volatile__ (
@@ -22,13 +28,13 @@ void __delay(unsigned long loops)
        "       .align  3                                       \n"
        "1:     bnez    %0, 1b                                  \n"
 #if BITS_PER_LONG == 32
-       "       subu    %0,                                   \n"
+       "       subu    %0, %1                                  \n"
 #else
-       "       dsubu   %0,                                   \n"
+       "       dsubu   %0, %1                                  \n"
 #endif
        "       .set    reorder                                 \n"
        : "=r" (loops)
-       : "0" (loops));
+       : GCC_DADDI_IMM_ASM() (1), "0" (loops));
 }
 EXPORT_SYMBOL(__delay);
 
index d3301cd1e9a51b4c387a7f8c3d4a46e2632761e2..3c32baf8b49447a591e6e552e634eafe5be8d09e 100644 (file)
@@ -35,7 +35,6 @@ LEAF(__strncpy_from_\func\()_asm)
        bnez            v0, .Lfault\@
 
 FEXPORT(__strncpy_from_\func\()_nocheck_asm)
-       .set            noreorder
        move            t0, zero
        move            v1, a1
 .ifeqs "\func","kernel"
@@ -45,21 +44,21 @@ FEXPORT(__strncpy_from_\func\()_nocheck_asm)
 .endif
        PTR_ADDIU       v1, 1
        R10KCBARRIER(0(ra))
+       sb              v0, (a0)
        beqz            v0, 2f
-        sb             v0, (a0)
        PTR_ADDIU       t0, 1
+       PTR_ADDIU       a0, 1
        bne             t0, a2, 1b
-        PTR_ADDIU      a0, 1
 2:     PTR_ADDU        v0, a1, t0
        xor             v0, a1
        bltz            v0, .Lfault\@
-        nop
+       move            v0, t0
        jr              ra                      # return n
-        move           v0, t0
        END(__strncpy_from_\func\()_asm)
 
-.Lfault\@: jr          ra
-         li            v0, -EFAULT
+.Lfault\@:
+       li              v0, -EFAULT
+       jr              ra
 
        .section        __ex_table,"a"
        PTR             1b, .Lfault\@
index 7397be226a06a2a7d0481fac7b2dd476d4fe6d3a..603d79a95f4778e40d5ec94b78f1efa9750cd214 100644 (file)
@@ -64,7 +64,6 @@ config LEMOTE_MACH3A
        bool "Lemote Loongson 3A family machines"
        select ARCH_SPARSEMEM_ENABLE
        select GENERIC_ISA_DMA_SUPPORT_BROKEN
-       select GENERIC_HARDIRQS_NO__DO_IRQ
        select BOOT_ELF32
        select BOARD_SCACHE
        select CSRC_R4K
index e1f427f4f5f3fed4985bb370054421d7d2f91cdc..67dd94ef28e60f4023b16f7da8fac716c558b3bf 100644 (file)
@@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put);
 
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
+       unsigned int rate_khz = rate / 1000;
        int ret = 0;
        int regval;
        int i;
@@ -111,10 +112,10 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
                if (loongson2_clockmod_table[i].frequency ==
                    CPUFREQ_ENTRY_INVALID)
                        continue;
-               if (rate == loongson2_clockmod_table[i].frequency)
+               if (rate_khz == loongson2_clockmod_table[i].frequency)
                        break;
        }
-       if (rate != loongson2_clockmod_table[i].frequency)
+       if (rate_khz != loongson2_clockmod_table[i].frequency)
                return -ENOTSUPP;
 
        clk->rate = rate;
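
The clk_set_rate() fix compares against loongson2_clockmod_table in the table's own unit: cpufreq frequency tables store kHz while the clk API passes Hz, so the rate is divided by 1000 before the lookup. A standalone sketch of the corrected lookup; the table contents below are invented for illustration.

        #include <stdio.h>

        #define ENTRY_INVALID 0xffffffffu       /* stands in for CPUFREQ_ENTRY_INVALID */

        /* Frequencies in kHz, as a cpufreq frequency table stores them (values invented). */
        static const unsigned int table_khz[] = { ENTRY_INVALID, 200000, 400000, 800000 };

        static int set_rate_sketch(unsigned long rate_hz)
        {
                unsigned long rate_khz = rate_hz / 1000;        /* the unit conversion the fix adds */
                size_t i;

                for (i = 0; i < sizeof(table_khz) / sizeof(table_khz[0]); i++) {
                        if (table_khz[i] == ENTRY_INVALID)
                                continue;
                        if (rate_khz == table_khz[i])
                                return 0;                       /* supported rate */
                }
                return -1;                                      /* -ENOTSUPP in the kernel */
        }

        int main(void)
        {
                printf("%d %d\n", set_rate_sketch(400000000), set_rate_sketch(123456789));
                return 0;
        }
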
index 30a494db99c2a0eb4d51aa64ca410de956801837..a5427c6e97574c6a1a0dbad628f7a75a3feed516 100644 (file)
 
 #define FASTPATH_SIZE  128
 
+EXPORT(tlbmiss_handler_setup_pgd_start)
 LEAF(tlbmiss_handler_setup_pgd)
-       .space          16 * 4
+1:     j       1b              /* Dummy, will be replaced. */
+       .space  64
 END(tlbmiss_handler_setup_pgd)
 EXPORT(tlbmiss_handler_setup_pgd_end)
 
index ee88367ab3addcda0e4aba412f524493dddd079a..f99ec587b151919bd90437687f08614c24aea32e 100644 (file)
@@ -1422,16 +1422,17 @@ static void build_r4000_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
+extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
+extern u32 tlbmiss_handler_setup_pgd_end[];
 
 static void build_setup_pgd(void)
 {
        const int a0 = 4;
        const int __maybe_unused a1 = 5;
        const int __maybe_unused a2 = 6;
-       u32 *p = tlbmiss_handler_setup_pgd;
+       u32 *p = tlbmiss_handler_setup_pgd_start;
        const int tlbmiss_handler_setup_pgd_size =
-               tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
+               tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
        long pgdc = (long)pgd_current;
 #endif
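
build_setup_pgd() now emits its generated code at tlbmiss_handler_setup_pgd_start and sizes the available buffer from the distance between the start and end markers. A small standalone sketch of sizing a code stub from boundary symbols; the arrays below stand in for the linker-visible markers.

        #include <stdio.h>

        static unsigned int stub_start[16];             /* like tlbmiss_handler_setup_pgd_start */
        #define stub_end (stub_start + 16)              /* like tlbmiss_handler_setup_pgd_end */

        int main(void)
        {
                /* Pointer difference counts u32 slots, i.e. MIPS instruction words. */
                long room = stub_end - stub_start;

                printf("room for %ld instructions (%ld bytes)\n",
                       room, room * (long)sizeof(stub_start[0]));
                return 0;
        }
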
index 35eb874ab7f11b25248e40714c2e477ec0f1d4a3..709f58132f5cba9d87adc37afbe84c38db40a5ac 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink MT7620A evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 322d7002595bda983b95f9bb77a17de24980c001..0a685db093d4dbd367228a1cb9e4d42bdf43e0e7 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink RT2880 evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x8000000 0x2000000>;
        };
 
index 0ac73ea281984909df89403d17fb82fd4e9f3ae4..ec9e9a03554140a4c11947b2ff0a7548cb4fb7fb 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink RT3052 evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 2fa6b330bf4f2fcb8d6f63f4b8df55cb1fe6b065..e8df21a5d10d9eb35a2dfc88af0d137d9c66156e 100644 (file)
@@ -7,6 +7,7 @@
        model = "Ralink RT3883 evaluation board";
 
        memory@0 {
+               device_type = "memory";
                reg = <0x0 0x2000000>;
        };
 
index 1faefed32749c93ff31a7d6237732d3dd6c55f26..108d48e652af4c802da1676b18252a5394d30295 100644 (file)
@@ -22,6 +22,7 @@ config PARISC
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_STRNCPY_FROM_USER
        select SYSCTL_ARCH_UNALIGN_ALLOW
+       select SYSCTL_EXCEPTION_TRACE
        select HAVE_MOD_ARCH_SPECIFIC
        select VIRT_TO_BUS
        select MODULES_USE_ELF_RELA
index 198a86feb5748fc595f7230941485af0bc84a76a..d951c9681ab316aa2be88cd65b4d18ae62f958cb 100644 (file)
 #define STACK_TOP      TASK_SIZE
 #define STACK_TOP_MAX  DEFAULT_TASK_SIZE
 
+/* Allow bigger stacks for 64-bit processes */
+#define STACK_SIZE_MAX (USER_WIDE_MODE                                 \
+                        ? (1 << 30)    /* 1 GB */                      \
+                        : (CONFIG_MAX_STACK_SIZE_MB*1024*1024))
+
 #endif
 
 #ifndef __ASSEMBLY__
index a580642555b6f0e7f087ade117ce062cb303d429..348356c99514f0cdfb8876b9f22c0464ab8e3734 100644 (file)
@@ -1,6 +1,8 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += resource.h
+
 header-y += bitsperlong.h
 header-y += byteorder.h
 header-y += errno.h
@@ -13,7 +15,6 @@ header-y += msgbuf.h
 header-y += pdc.h
 header-y += posix_types.h
 header-y += ptrace.h
-header-y += resource.h
 header-y += sembuf.h
 header-y += setup.h
 header-y += shmbuf.h
diff --git a/arch/parisc/include/uapi/asm/resource.h b/arch/parisc/include/uapi/asm/resource.h
deleted file mode 100644 (file)
index 8b06343..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_PARISC_RESOURCE_H
-#define _ASM_PARISC_RESOURCE_H
-
-#define _STK_LIM_MAX   10 * _STK_LIM
-#include <asm-generic/resource.h>
-
-#endif
index 265ae5190b0a70e4dc2539d9a661e1e3d2deda05..47e0e21d2272468bbc864e6e30ced36c18929534 100644 (file)
 #define __NR_sched_setattr     (__NR_Linux + 334)
 #define __NR_sched_getattr     (__NR_Linux + 335)
 #define __NR_utimes            (__NR_Linux + 336)
+#define __NR_renameat2         (__NR_Linux + 337)
 
-#define __NR_Linux_syscalls    (__NR_utimes + 1)
+#define __NR_Linux_syscalls    (__NR_renameat2 + 1)
 
 
 #define __IGNORE_select                /* newselect */
index 31ffa9b5532216620d9a6106d2d4b76e7501ee0b..e1ffea2f9a0b05ccda844969dcb7c519ab17077a 100644 (file)
@@ -72,10 +72,10 @@ static unsigned long mmap_upper_limit(void)
 {
        unsigned long stack_base;
 
-       /* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */
+       /* Limit stack size - see setup_arg_pages() in fs/exec.c */
        stack_base = rlimit_max(RLIMIT_STACK);
-       if (stack_base > (1 << 30))
-               stack_base = 1 << 30;
+       if (stack_base > STACK_SIZE_MAX)
+               stack_base = STACK_SIZE_MAX;
 
        return PAGE_ALIGN(STACK_TOP - stack_base);
 }
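
The parisc hunks above replace the hard-coded 1 GB cap on the stack rlimit with STACK_SIZE_MAX, which expands to 1 GB for wide (64-bit) processes and to CONFIG_MAX_STACK_SIZE_MB megabytes otherwise. As a stand-alone sketch of the same clamping arithmetic, where user_wide_mode, max_stack_size_mb and the sample values are illustrative stand-ins rather than kernel symbols:

/* Illustrative model of the clamping done in mmap_upper_limit() above.
 * All inputs are hypothetical; nothing here is a kernel symbol. */
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long stack_size_max(int user_wide_mode,
                                    unsigned long max_stack_size_mb)
{
        return user_wide_mode ? (1UL << 30)                 /* 1 GB */
                              : max_stack_size_mb * 1024 * 1024;
}

static unsigned long mmap_upper_limit(unsigned long stack_top,
                                      unsigned long stack_rlimit_max,
                                      int user_wide_mode,
                                      unsigned long max_stack_size_mb)
{
        unsigned long stack_base = stack_rlimit_max;
        unsigned long cap = stack_size_max(user_wide_mode, max_stack_size_mb);

        if (stack_base > cap)
                stack_base = cap;       /* limit the reserved stack area */

        return PAGE_ALIGN(stack_top - stack_base);
}

int main(void)
{
        /* A 64-bit process asking for an 8 GB stack is clamped to 1 GB. */
        printf("%#lx\n", mmap_upper_limit(0x40000000000UL, 8UL << 30, 1, 80));
        return 0;
}
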
index a63bb179f79a1fcd56a7bcf1adbe759f46587b71..83878601103701df4497913c89d5702897249a38 100644 (file)
@@ -589,10 +589,13 @@ cas_nocontend:
 # endif
 /* ENABLE_LWS_DEBUG */
 
+       rsm     PSW_SM_I, %r0                           /* Disable interrupts */
+       /* COW breaks can cause contention on UP systems */
        LDCW    0(%sr2,%r20), %r28                      /* Try to acquire the lock */
        cmpb,<>,n       %r0, %r28, cas_action           /* Did we get it? */
 cas_wouldblock:
        ldo     2(%r0), %r28                            /* 2nd case */
+       ssm     PSW_SM_I, %r0
        b       lws_exit                                /* Contended... */
        ldo     -EAGAIN(%r0), %r21                      /* Spin in userspace */
 
@@ -619,15 +622,17 @@ cas_action:
        stw     %r1, 4(%sr2,%r20)
 #endif
        /* The load and store could fail */
-1:     ldw     0(%sr3,%r26), %r28
+1:     ldw,ma  0(%sr3,%r26), %r28
        sub,<>  %r28, %r25, %r0
-2:     stw     %r24, 0(%sr3,%r26)
+2:     stw,ma  %r24, 0(%sr3,%r26)
        /* Free lock */
-       stw     %r20, 0(%sr2,%r20)
+       stw,ma  %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        /* Clear thread register indicator */
        stw     %r0, 4(%sr2,%r20)
 #endif
+       /* Enable interrupts */
+       ssm     PSW_SM_I, %r0
        /* Return to userspace, set no error */
        b       lws_exit
        copy    %r0, %r21
@@ -639,6 +644,7 @@ cas_action:
 #if ENABLE_LWS_DEBUG
        stw     %r0, 4(%sr2,%r20)
 #endif
+       ssm     PSW_SM_I, %r0
        b       lws_exit
        ldo     -EFAULT(%r0),%r21       /* set errno */
        nop
index 83ead0ea127d23cdcb10115a5b04d76f40e10612..c5fa7a697fba2a13b0779e59904ec7c1428e8ef4 100644 (file)
        ENTRY_SAME(sched_setattr)
        ENTRY_SAME(sched_getattr)       /* 335 */
        ENTRY_COMP(utimes)
+       ENTRY_SAME(renameat2)
 
        /* Nothing yet */
 
index 1cd1d0c83b6d7bd7a21d0a22c57e18f2ac27f65a..47ee620d15d27850ab8ebac1f739dfd3215dae9b 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/console.h>
 #include <linux/bug.h>
+#include <linux/ratelimit.h>
 
 #include <asm/assembly.h>
 #include <asm/uaccess.h>
@@ -42,9 +43,6 @@
 
 #include "../math-emu/math-emu.h"      /* for handle_fpe() */
 
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
-                         /*  dumped to the console via printk)          */
-
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 DEFINE_SPINLOCK(pa_dbit_lock);
 #endif
@@ -160,6 +158,17 @@ void show_regs(struct pt_regs *regs)
        }
 }
 
+static DEFINE_RATELIMIT_STATE(_hppa_rs,
+       DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+#define parisc_printk_ratelimited(critical, regs, fmt, ...)    {             \
+       if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
+               printk(fmt, ##__VA_ARGS__);                                   \
+               show_regs(regs);                                              \
+       }                                                                     \
+}
+
+
 static void do_show_stack(struct unwind_frame_info *info)
 {
        int i = 1;
@@ -229,12 +238,10 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
                if (err == 0)
                        return; /* STFU */
 
-               printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+               parisc_printk_ratelimited(1, regs,
+                       KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
-#ifdef PRINT_USER_FAULTS
-               /* XXX for debugging only */
-               show_regs(regs);
-#endif
+
                return;
        }
 
@@ -321,14 +328,11 @@ static void handle_break(struct pt_regs *regs)
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }
 
-#ifdef PRINT_USER_FAULTS
-       if (unlikely(iir != GDB_BREAK_INSN)) {
-               printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
+       if (unlikely(iir != GDB_BREAK_INSN))
+               parisc_printk_ratelimited(0, regs,
+                       KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);
-               show_regs(regs);
-       }
-#endif
 
        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
@@ -758,11 +762,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
        default:
                if (user_mode(regs)) {
-#ifdef PRINT_USER_FAULTS
-                       printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
-                           task_pid_nr(current), current->comm);
-                       show_regs(regs);
-#endif
+                       parisc_printk_ratelimited(0, regs, KERN_DEBUG
+                               "handle_interruption() pid=%d command='%s'\n",
+                               task_pid_nr(current), current->comm);
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
@@ -779,16 +781,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
        if (user_mode(regs)) {
            if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
-#ifdef PRINT_USER_FAULTS
-               if (fault_space == 0)
-                       printk(KERN_DEBUG "User Fault on Kernel Space ");
-               else
-                       printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
-                              code);
-               printk(KERN_CONT "pid=%d command='%s'\n",
-                      task_pid_nr(current), current->comm);
-               show_regs(regs);
-#endif
+               parisc_printk_ratelimited(0, regs, KERN_DEBUG
+                               "User fault %d on space 0x%08lx, pid=%d command='%s'\n",
+                               code, fault_space,
+                               task_pid_nr(current), current->comm);
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                si.si_code = SEGV_MAPERR;
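
The traps.c rework above drops the PRINT_USER_FAULTS compile-time switch and instead gates user-fault dumps on show_unhandled_signals plus a shared ratelimit state, so at most a burst of messages is printed per interval. A rough stand-alone model of that interval/burst throttling follows; the structure and constants are illustrative only, not the kernel's __ratelimit() implementation:

/* Sketch of an interval/burst rate limiter similar in spirit to the
 * kernel's __ratelimit(); everything here is illustrative. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit_state {
        time_t interval;   /* window length in seconds */
        int    burst;      /* messages allowed per window */
        time_t begin;      /* start of the current window */
        int    printed;    /* messages emitted in this window */
};

static bool ratelimit(struct ratelimit_state *rs)
{
        time_t now = time(NULL);

        if (rs->begin == 0 || now - rs->begin >= rs->interval) {
                rs->begin = now;        /* open a new window */
                rs->printed = 0;
        }
        if (rs->printed < rs->burst) {
                rs->printed++;
                return true;            /* caller may log */
        }
        return false;                   /* suppress this message */
}

int main(void)
{
        struct ratelimit_state rs = { .interval = 5, .burst = 10 };

        for (int i = 0; i < 100; i++)
                if (ratelimit(&rs))
                        printf("user fault %d logged\n", i);
        return 0;
}
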
index 747550762f3ca25acf6dabdfcc7d8aef9de31779..3ca9c1131cfe0d80b9b12fb5c0e599a3363942c0 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/traps.h>
 
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
-                        /*  dumped to the console via printk)          */
-
-
 /* Various important other fields */
 #define bit22set(x)            (x & 0x00000200)
 #define bits23_25set(x)                (x & 0x000001c0)
@@ -34,6 +30,8 @@
 
 DEFINE_PER_CPU(struct exception_data, exception_data);
 
+int show_unhandled_signals = 1;
+
 /*
  * parisc_acctyp(unsigned int inst) --
  *    Given a PA-RISC memory access instruction, determine if the
@@ -173,6 +171,32 @@ int fixup_exception(struct pt_regs *regs)
        return 0;
 }
 
+/*
+ * Print out info about fatal segfaults, if the show_unhandled_signals
+ * sysctl is set:
+ */
+static inline void
+show_signal_msg(struct pt_regs *regs, unsigned long code,
+               unsigned long address, struct task_struct *tsk,
+               struct vm_area_struct *vma)
+{
+       if (!unhandled_signal(tsk, SIGSEGV))
+               return;
+
+       if (!printk_ratelimit())
+               return;
+
+       pr_warn("\n");
+       pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
+           tsk->comm, code, address);
+       print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
+       if (vma)
+               pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+                               vma->vm_start, vma->vm_end);
+
+       show_regs(regs);
+}
+
 void do_page_fault(struct pt_regs *regs, unsigned long code,
                              unsigned long address)
 {
@@ -270,16 +294,8 @@ bad_area:
        if (user_mode(regs)) {
                struct siginfo si;
 
-#ifdef PRINT_USER_FAULTS
-               printk(KERN_DEBUG "\n");
-               printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
-                   task_pid_nr(tsk), tsk->comm, code, address);
-               if (vma) {
-                       printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
-                                       vma->vm_start, vma->vm_end);
-               }
-               show_regs(regs);
-#endif
+               show_signal_msg(regs, code, address, tsk, vma);
+
                switch (code) {
                case 15:        /* Data TLB miss fault/Data page fault */
                        /* send SIGSEGV when outside of vma */
index a28f02165e97032c8eda569e97b06a4dc81fb0f9..d367a0aece2aac8b067a6c7bf51c43ef3d488eda 100644 (file)
@@ -139,18 +139,18 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
  * edit the command line passed to vmlinux (by setting /chosen/bootargs).
  * The buffer is put in its own section so that tools may locate it more easily.
  */
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
        __attribute__((__section__("__builtin_cmdline")));
 
 static void prep_cmdline(void *chosen)
 {
        if (cmdline[0] == '\0')
-               getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+               getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
 
        printf("\n\rLinux/PowerPC load: %s", cmdline);
        /* If possible, edit the command line */
        if (console_ops.edit_cmdline)
-               console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE);
+               console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE);
        printf("\n\r");
 
        /* Put the command line back into the devtree for the kernel */
@@ -174,7 +174,7 @@ void start(void)
         * built-in command line wasn't set by an external tool */
        if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
                memmove(cmdline, loader_info.cmdline,
-                       min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1));
+                       min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1));
 
        if (console_ops.open && (console_ops.open() < 0))
                exit();
index b3218ce451bb9be8081dd77cc75f0a3114468be1..8aad3c55aeda2885e331b8c7ebd52436c219e74b 100644 (file)
@@ -15,7 +15,7 @@
 #include "types.h"
 #include "string.h"
 
-#define        COMMAND_LINE_SIZE       512
+#define        BOOT_COMMAND_LINE_SIZE  2048
 #define        MAX_PATH_LEN            256
 #define        MAX_PROP_LEN            256 /* What should this be? */
 
index 9954d98871d061dfc9abb8c33fefcbbe00d8d2ea..4ec2d86d3c50571a2a62f27c31f00739595ed219 100644 (file)
@@ -47,13 +47,13 @@ BSS_STACK(4096);
  * The buffer is put in its own section so that tools may locate it more easily.
  */
 
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
        __attribute__((__section__("__builtin_cmdline")));
 
 static void prep_cmdline(void *chosen)
 {
        if (cmdline[0] == '\0')
-               getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+               getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
        else
                setprop_str(chosen, "bootargs", cmdline);
 
index a2efdaa020b0f30b11a08b1352b1f9d7388ad3af..66ad7a74116f15dd803ef7e887b7988ac5efa61a 100644 (file)
@@ -41,14 +41,14 @@ struct opal_takeover_args {
  * size except the last one in the list to be as well.
  */
 struct opal_sg_entry {
-       void    *data;
-       long    length;
+       __be64 data;
+       __be64 length;
 };
 
-/* sg list */
+/* SG list */
 struct opal_sg_list {
-       unsigned long num_entries;
-       struct opal_sg_list *next;
+       __be64 length;
+       __be64 next;
        struct opal_sg_entry entry[];
 };
 
@@ -858,8 +858,8 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
                      uint32_t addr, __be32 *data, uint32_t sz);
 
-int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id);
-int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type);
+int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
+int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
 int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_send_ack_elog(uint64_t log_id);
 void opal_resend_pending_logs(void);
@@ -868,23 +868,24 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
 int64_t opal_dump_init(uint8_t dump_type);
-int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size);
-int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type);
+int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
+int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
 int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
 int64_t opal_dump_ack(uint32_t dump_id);
 int64_t opal_dump_resend_notification(void);
 
-int64_t opal_get_msg(uint64_t buffer, size_t size);
-int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
+int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
 int64_t opal_sync_host_reboot(void);
 int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
-               size_t length);
+               uint64_t length);
 int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
-               size_t length);
+               uint64_t length);
 int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
 
 /* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
+extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
+                                  int depth, void *data);
 extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
                                 const char *uname, int depth, void *data);
 
@@ -893,10 +894,6 @@ extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
 
 extern void hvc_opal_init_early(void);
 
-/* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
-                                  int depth, void *data);
-
 extern int opal_notifier_register(struct notifier_block *nb);
 extern int opal_notifier_unregister(struct notifier_block *nb);
 
@@ -906,9 +903,6 @@ extern void opal_notifier_enable(void);
 extern void opal_notifier_disable(void);
 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
 
-extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
-extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
-
 extern int __opal_async_get_token(void);
 extern int opal_async_get_token_interruptible(void);
 extern int __opal_async_release_token(int token);
@@ -916,8 +910,6 @@ extern int opal_async_release_token(int token);
 extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
 extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
 
-extern void hvc_opal_init_early(void);
-
 struct rtc_time;
 extern int opal_set_rtc_time(struct rtc_time *tm);
 extern void opal_get_rtc_time(struct rtc_time *tm);
@@ -937,6 +929,10 @@ extern int opal_resync_timebase(void);
 
 extern void opal_lpc_init(void);
 
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+                                            unsigned long vmalloc_size);
+void opal_free_sg_list(struct opal_sg_list *sg);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_H */
index 552df83f1a49627ddd1d49e1ba982c743a5bfefe..ae3fb68cb28e8df5cb53f078ccc5bcc923c8231c 100644 (file)
@@ -1 +1,6 @@
-#include <asm-generic/setup.h>
+#ifndef _UAPI_ASM_POWERPC_SETUP_H
+#define _UAPI_ASM_POWERPC_SETUP_H
+
+#define COMMAND_LINE_SIZE      2048
+
+#endif /* _UAPI_ASM_POWERPC_SETUP_H */
index 3bd77edd7610ce20267a880069972624eafed62e..450850a49dced7919c3c2d349c2d70aae7cea0ad 100644 (file)
@@ -120,6 +120,7 @@ EXPORT_SYMBOL(giveup_spe);
 EXPORT_SYMBOL(flush_instruction_cache);
 #endif
 EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(flush_icache_range);
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC32
index 2f3cdb01506de3d7791712ecd6ffeaf1fcd36352..658e89d2025b0b2dd65bb812d2c89d65867c1015 100644 (file)
@@ -705,7 +705,7 @@ static int __init rtas_flash_init(void)
        if (rtas_token("ibm,update-flash-64-and-reboot") ==
                       RTAS_UNKNOWN_SERVICE) {
                pr_info("rtas_flash: no firmware flash support\n");
-               return 1;
+               return -EINVAL;
        }
 
        rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
index 122a580f732246c02c5e31c301078cb725435919..7e711bdcc6da5adb399e8ac0a55097e76317fe51 100644 (file)
@@ -813,9 +813,6 @@ static void __init clocksource_init(void)
 static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
 {
-       /* Don't adjust the decrementer if some irq work is pending */
-       if (test_irq_work_pending())
-               return 0;
        __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
        set_dec(evt);
 
index ffbb871c2bd803827fa5a78658f29d2fa8a1dbd6..b031f932c0cc3dcc0c452c88f8ba2c3c88cf641d 100644 (file)
@@ -242,6 +242,12 @@ kvm_novcpu_exit:
  */
        .globl  kvm_start_guest
 kvm_start_guest:
+
+       /* Set runlatch bit the minute you wake up from nap */
+       mfspr   r1, SPRN_CTRLF
+       ori     r1, r1, 1
+       mtspr   SPRN_CTRLT, r1
+
        ld      r2,PACATOC(r13)
 
        li      r0,KVM_HWTHREAD_IN_KVM
@@ -309,6 +315,11 @@ kvm_no_guest:
        li      r0, KVM_HWTHREAD_IN_NAP
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
 kvm_do_nap:
+       /* Clear the runlatch bit before napping */
+       mfspr   r2, SPRN_CTRLF
+       clrrdi  r2, r2, 1
+       mtspr   SPRN_CTRLT, r2
+
        li      r3, LPCR_PECE0
        mfspr   r4, SPRN_LPCR
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -1999,8 +2010,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 
        /*
         * Take a nap until a decrementer or external or doorbell interrupt
-        * occurs, with PECE1, PECE0 and PECEDP set in LPCR
+        * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
+        * runlatch bit before napping.
         */
+       mfspr   r2, SPRN_CTRLF
+       clrrdi  r2, r2, 1
+       mtspr   SPRN_CTRLT, r2
+
        li      r0,1
        stb     r0,HSTATE_HWTHREAD_REQ(r13)
        mfspr   r5,SPRN_LPCR
index 3ea26c25590be1dabe4a057882f35b77f5dfe7c1..cf1d325eae8be814953650cf6b94fd349c0fdd12 100644 (file)
@@ -82,17 +82,14 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
-               /* Add AVAL part */
-               if (psize != apsize) {
-                       /*
-                        * MPSS, 64K base page size and 16MB parge page size
-                        * We don't need all the bits, but rest of the bits
-                        * must be ignored by the processor.
-                        * vpn cover upto 65 bits of va. (0...65) and we need
-                        * 58..64 bits of va.
-                        */
-                       va |= (vpn & 0xfe);
-               }
+               /*
+                * AVAL bits:
+                * We don't need all the bits, but rest of the bits
+                * must be ignored by the processor.
+                * vpn cover upto 65 bits of va. (0...65) and we need
+                * 58..64 bits of va.
+                */
+               va |= (vpn & 0xfe); /* AVAL */
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -133,17 +130,14 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
-               /* Add AVAL part */
-               if (psize != apsize) {
-                       /*
-                        * MPSS, 64K base page size and 16MB parge page size
-                        * We don't need all the bits, but rest of the bits
-                        * must be ignored by the processor.
-                        * vpn cover upto 65 bits of va. (0...65) and we need
-                        * 58..64 bits of va.
-                        */
-                       va |= (vpn & 0xfe);
-               }
+               /*
+                * AVAL bits:
+                * We don't need all the bits, but rest of the bits
+                * must be ignored by the processor.
+                * vpn cover upto 65 bits of va. (0...65) and we need
+                * 58..64 bits of va.
+                */
+               va |= (vpn & 0xfe);
                va |= 1; /* L */
                asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
                             : : "r"(va) : "memory");
index 297c9105141365e81316a888176fe80611b5b08a..e0766b82e1656721ff9e93586b47552414936973 100644 (file)
@@ -155,16 +155,28 @@ static ssize_t read_offset_data(void *dest, size_t dest_len,
        return copy_len;
 }
 
-static unsigned long h_get_24x7_catalog_page(char page[static 4096],
-                                            u32 version, u32 index)
+static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
+                                             unsigned long version,
+                                             unsigned long index)
 {
-       WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
+       pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
+                       phys_4096,
+                       version,
+                       index);
+       WARN_ON(!IS_ALIGNED(phys_4096, 4096));
        return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
-                       virt_to_phys(page),
+                       phys_4096,
                        version,
                        index);
 }
 
+static unsigned long h_get_24x7_catalog_page(char page[],
+                                            u64 version, u32 index)
+{
+       return h_get_24x7_catalog_page_(virt_to_phys(page),
+                                       version, index);
+}
+
 static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *bin_attr, char *buf,
                            loff_t offset, size_t count)
@@ -173,7 +185,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
        ssize_t ret = 0;
        size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
        loff_t page_offset = 0;
-       uint32_t catalog_version_num = 0;
+       uint64_t catalog_version_num = 0;
        void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
        struct hv_24x7_catalog_page_0 *page_0 = page;
        if (!page)
@@ -185,7 +197,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                goto e_free;
        }
 
-       catalog_version_num = be32_to_cpu(page_0->version);
+       catalog_version_num = be64_to_cpu(page_0->version);
        catalog_page_len = be32_to_cpu(page_0->length);
        catalog_len = catalog_page_len * 4096;
 
@@ -208,8 +220,9 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                                page, 4096, page_offset * 4096);
 e_free:
        if (hret)
-               pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
-                               catalog_version_num, page_offset, hret);
+               pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
+                      " rc=%ld\n",
+                      catalog_version_num, page_offset, hret);
        kfree(page);
 
        pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
@@ -243,7 +256,7 @@ e_free:                                                             \
 static DEVICE_ATTR_RO(_name)
 
 PAGE_0_ATTR(catalog_version, "%lld\n",
-               (unsigned long long)be32_to_cpu(page_0->version));
+               (unsigned long long)be64_to_cpu(page_0->version));
 PAGE_0_ATTR(catalog_len, "%lld\n",
                (unsigned long long)be32_to_cpu(page_0->length) * 4096);
 static BIN_ATTR_RO(catalog, 0/* real length varies */);
@@ -485,13 +498,13 @@ static int hv_24x7_init(void)
        struct hv_perf_caps caps;
 
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-               pr_info("not a virtualized system, not enabling\n");
+               pr_debug("not a virtualized system, not enabling\n");
                return -ENODEV;
        }
 
        hret = hv_perf_caps_get(&caps);
        if (hret) {
-               pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+               pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
                                hret);
                return -ENODEV;
        }
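
The hv-24x7 hunk above widens catalog_version_num to 64 bits and reads it with be64_to_cpu(), because the hypervisor stores the catalog version as a big-endian 64-bit field in page 0. For reference, a self-contained, byte-order-independent way to pull such a field out of a raw page looks like this (plain C, not the kernel's byteorder helpers, and the sample page contents are made up):

/* Illustrative big-endian 64-bit field read, equivalent in effect to
 * be64_to_cpu() applied to a field of a firmware-provided page. */
#include <stdint.h>
#include <stdio.h>

static uint64_t read_be64(const unsigned char *p)
{
        uint64_t v = 0;

        for (int i = 0; i < 8; i++)
                v = (v << 8) | p[i];    /* most significant byte first */
        return v;
}

int main(void)
{
        /* First 8 bytes of a hypothetical catalog page 0: version 3. */
        unsigned char page0[4096] = { 0, 0, 0, 0, 0, 0, 0, 3 };

        printf("catalog version %llu\n",
               (unsigned long long)read_be64(page0));
        return 0;
}
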
index 278ba7b9c2b525287f930445e71e04bd538e3236..c9d399a2df82e6727fa78b4de69fb867cc85552f 100644 (file)
@@ -78,7 +78,7 @@ static ssize_t kernel_version_show(struct device *dev,
        return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
 }
 
-DEVICE_ATTR_RO(kernel_version);
+static DEVICE_ATTR_RO(kernel_version);
 HV_CAPS_ATTR(version, "0x%x\n");
 HV_CAPS_ATTR(ga, "%d\n");
 HV_CAPS_ATTR(expanded, "%d\n");
@@ -273,13 +273,13 @@ static int hv_gpci_init(void)
        struct hv_perf_caps caps;
 
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-               pr_info("not a virtualized system, not enabling\n");
+               pr_debug("not a virtualized system, not enabling\n");
                return -ENODEV;
        }
 
        hret = hv_perf_caps_get(&caps);
        if (hret) {
-               pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+               pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
                                hret);
                return -ENODEV;
        }
index 253fefe3d1a0e76fd4ed3996711b6f1784725d6c..5b51079f3e3ba52f7ecb51c3fb27a5d7255ebb48 100644 (file)
@@ -549,7 +549,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
                ret = ioda_eeh_phb_reset(hose, option);
        } else {
                bus = eeh_pe_bus_get(pe);
-               if (pci_is_root_bus(bus))
+               if (pci_is_root_bus(bus) ||
+                   pci_is_root_bus(bus->parent))
                        ret = ioda_eeh_root_reset(hose, option);
                else
                        ret = ioda_eeh_bridge_reset(hose, bus->self, option);
index b9827b0d87e4cd69cdff9f51216ea9429c79fb41..788a1977b9a5203cc9a477be6f9a2a7b71cdd754 100644 (file)
@@ -209,89 +209,20 @@ static struct kobj_type dump_ktype = {
        .default_attrs = dump_default_attrs,
 };
 
-static void free_dump_sg_list(struct opal_sg_list *list)
-{
-       struct opal_sg_list *sg1;
-       while (list) {
-               sg1 = list->next;
-               kfree(list);
-               list = sg1;
-       }
-       list = NULL;
-}
-
-static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
-{
-       struct opal_sg_list *sg1, *list = NULL;
-       void *addr;
-       int64_t size;
-
-       addr = dump->buffer;
-       size = dump->size;
-
-       sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!sg1)
-               goto nomem;
-
-       list = sg1;
-       sg1->num_entries = 0;
-       while (size > 0) {
-               /* Translate virtual address to physical address */
-               sg1->entry[sg1->num_entries].data =
-                       (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
-               if (size > PAGE_SIZE)
-                       sg1->entry[sg1->num_entries].length = PAGE_SIZE;
-               else
-                       sg1->entry[sg1->num_entries].length = size;
-
-               sg1->num_entries++;
-               if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
-                       sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
-                       if (!sg1->next)
-                               goto nomem;
-
-                       sg1 = sg1->next;
-                       sg1->num_entries = 0;
-               }
-               addr += PAGE_SIZE;
-               size -= PAGE_SIZE;
-       }
-       return list;
-
-nomem:
-       pr_err("%s : Failed to allocate memory\n", __func__);
-       free_dump_sg_list(list);
-       return NULL;
-}
-
-static void sglist_to_phy_addr(struct opal_sg_list *list)
-{
-       struct opal_sg_list *sg, *next;
-
-       for (sg = list; sg; sg = next) {
-               next = sg->next;
-               /* Don't translate NULL pointer for last entry */
-               if (sg->next)
-                       sg->next = (struct opal_sg_list *)__pa(sg->next);
-               else
-                       sg->next = NULL;
-
-               /* Convert num_entries to length */
-               sg->num_entries =
-                       sg->num_entries * sizeof(struct opal_sg_entry) + 16;
-       }
-}
-
-static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
+static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
 {
+       __be32 id, size, type;
        int rc;
-       *type = 0xffffffff;
 
-       rc = opal_dump_info2(id, size, type);
+       type = cpu_to_be32(0xffffffff);
 
+       rc = opal_dump_info2(&id, &size, &type);
        if (rc == OPAL_PARAMETER)
-               rc = opal_dump_info(id, size);
+               rc = opal_dump_info(&id, &size);
+
+       *dump_id = be32_to_cpu(id);
+       *dump_size = be32_to_cpu(size);
+       *dump_type = be32_to_cpu(type);
 
        if (rc)
                pr_warn("%s: Failed to get dump info (%d)\n",
@@ -314,15 +245,12 @@ static int64_t dump_read_data(struct dump_obj *dump)
        }
 
        /* Generate SG list */
-       list = dump_data_to_sglist(dump);
+       list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
        if (!list) {
                rc = -ENOMEM;
                goto out;
        }
 
-       /* Translate sg list addr to real address */
-       sglist_to_phy_addr(list);
-
        /* First entry address */
        addr = __pa(list);
 
@@ -341,7 +269,7 @@ static int64_t dump_read_data(struct dump_obj *dump)
                        __func__, dump->id);
 
        /* Free SG list */
-       free_dump_sg_list(list);
+       opal_free_sg_list(list);
 
 out:
        return rc;
index ef7bc2a978627422d659d783ea21f0869975df53..10268c41d8302dd39ed73f4e0ad98dd6deb5e25f 100644 (file)
@@ -238,18 +238,25 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
 
 static void elog_work_fn(struct work_struct *work)
 {
-       size_t elog_size;
+       __be64 size;
+       __be64 id;
+       __be64 type;
+       uint64_t elog_size;
        uint64_t log_id;
        uint64_t elog_type;
        int rc;
        char name[2+16+1];
 
-       rc = opal_get_elog_size(&log_id, &elog_size, &elog_type);
+       rc = opal_get_elog_size(&id, &size, &type);
        if (rc != OPAL_SUCCESS) {
                pr_err("ELOG: Opal log read failed\n");
                return;
        }
 
+       elog_size = be64_to_cpu(size);
+       log_id = be64_to_cpu(id);
+       elog_type = be64_to_cpu(type);
+
        BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
        if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
index 714ef972406bcacf66a4a896283c6fb25963ca16..dc487ff0470401b0613b28958ff102dbcc5103e1 100644 (file)
@@ -79,9 +79,6 @@
 /* XXX: Assume candidate image size is <= 1GB */
 #define MAX_IMAGE_SIZE 0x40000000
 
-/* Flash sg list version */
-#define SG_LIST_VERSION (1UL)
-
 /* Image status */
 enum {
        IMAGE_INVALID,
@@ -131,11 +128,15 @@ static DEFINE_MUTEX(image_data_mutex);
  */
 static inline void opal_flash_validate(void)
 {
-       struct validate_flash_t *args_buf = &validate_flash_data;
+       long ret;
+       void *buf = validate_flash_data.buf;
+       __be32 size, result;
 
-       args_buf->status = opal_validate_flash(__pa(args_buf->buf),
-                                              &(args_buf->buf_size),
-                                              &(args_buf->result));
+       ret = opal_validate_flash(__pa(buf), &size, &result);
+
+       validate_flash_data.status = ret;
+       validate_flash_data.buf_size = be32_to_cpu(size);
+       validate_flash_data.result = be32_to_cpu(result);
 }
 
 /*
@@ -267,94 +268,12 @@ static ssize_t manage_store(struct kobject *kobj,
        return count;
 }
 
-/*
- * Free sg list
- */
-static void free_sg_list(struct opal_sg_list *list)
-{
-       struct opal_sg_list *sg1;
-       while (list) {
-               sg1 = list->next;
-               kfree(list);
-               list = sg1;
-       }
-       list = NULL;
-}
-
-/*
- * Build candidate image scatter gather list
- *
- * list format:
- *   -----------------------------------
- *  |  VER (8) | Entry length in bytes  |
- *   -----------------------------------
- *  |  Pointer to next entry            |
- *   -----------------------------------
- *  |  Address of memory area 1         |
- *   -----------------------------------
- *  |  Length of memory area 1          |
- *   -----------------------------------
- *  |   .........                       |
- *   -----------------------------------
- *  |   .........                       |
- *   -----------------------------------
- *  |  Address of memory area N         |
- *   -----------------------------------
- *  |  Length of memory area N          |
- *   -----------------------------------
- */
-static struct opal_sg_list *image_data_to_sglist(void)
-{
-       struct opal_sg_list *sg1, *list = NULL;
-       void *addr;
-       int size;
-
-       addr = image_data.data;
-       size = image_data.size;
-
-       sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!sg1)
-               return NULL;
-
-       list = sg1;
-       sg1->num_entries = 0;
-       while (size > 0) {
-               /* Translate virtual address to physical address */
-               sg1->entry[sg1->num_entries].data =
-                       (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
-               if (size > PAGE_SIZE)
-                       sg1->entry[sg1->num_entries].length = PAGE_SIZE;
-               else
-                       sg1->entry[sg1->num_entries].length = size;
-
-               sg1->num_entries++;
-               if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
-                       sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
-                       if (!sg1->next) {
-                               pr_err("%s : Failed to allocate memory\n",
-                                      __func__);
-                               goto nomem;
-                       }
-
-                       sg1 = sg1->next;
-                       sg1->num_entries = 0;
-               }
-               addr += PAGE_SIZE;
-               size -= PAGE_SIZE;
-       }
-       return list;
-nomem:
-       free_sg_list(list);
-       return NULL;
-}
-
 /*
  * OPAL update flash
  */
 static int opal_flash_update(int op)
 {
-       struct opal_sg_list *sg, *list, *next;
+       struct opal_sg_list *list;
        unsigned long addr;
        int64_t rc = OPAL_PARAMETER;
 
@@ -364,30 +283,13 @@ static int opal_flash_update(int op)
                goto flash;
        }
 
-       list = image_data_to_sglist();
+       list = opal_vmalloc_to_sg_list(image_data.data, image_data.size);
        if (!list)
                goto invalid_img;
 
        /* First entry address */
        addr = __pa(list);
 
-       /* Translate sg list address to absolute */
-       for (sg = list; sg; sg = next) {
-               next = sg->next;
-               /* Don't translate NULL pointer for last entry */
-               if (sg->next)
-                       sg->next = (struct opal_sg_list *)__pa(sg->next);
-               else
-                       sg->next = NULL;
-
-               /*
-                * Convert num_entries to version/length format
-                * to satisfy OPAL.
-                */
-               sg->num_entries = (SG_LIST_VERSION << 56) |
-                       (sg->num_entries * sizeof(struct opal_sg_entry) + 16);
-       }
-
        pr_alert("FLASH: Image is %u bytes\n", image_data.size);
        pr_alert("FLASH: Image update requested\n");
        pr_alert("FLASH: Image will be updated during system reboot\n");
index 6b614726baf2add5f95237647f128c5c7119e173..d202f9bc3683f5ad0072282173ddfb510b1aec9a 100644 (file)
@@ -39,10 +39,11 @@ struct param_attr {
        struct kobj_attribute kobj_attr;
 };
 
-static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
+static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
 {
        struct opal_msg msg;
-       int ret, token;
+       ssize_t ret;
+       int token;
 
        token = opal_async_get_token_interruptible();
        if (token < 0) {
@@ -59,7 +60,7 @@ static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
 
        ret = opal_async_wait_response(token, &msg);
        if (ret) {
-               pr_err("%s: Failed to wait for the async response, %d\n",
+               pr_err("%s: Failed to wait for the async response, %zd\n",
                                __func__, ret);
                goto out_token;
        }
@@ -111,7 +112,7 @@ static ssize_t sys_param_show(struct kobject *kobj,
 {
        struct param_attr *attr = container_of(kobj_attr, struct param_attr,
                        kobj_attr);
-       int ret;
+       ssize_t ret;
 
        mutex_lock(&opal_sysparam_mutex);
        ret = opal_get_sys_param(attr->param_id, attr->param_size,
@@ -121,9 +122,10 @@ static ssize_t sys_param_show(struct kobject *kobj,
 
        memcpy(buf, param_data_buf, attr->param_size);
 
+       ret = attr->param_size;
 out:
        mutex_unlock(&opal_sysparam_mutex);
-       return ret ? ret : attr->param_size;
+       return ret;
 }
 
 static ssize_t sys_param_store(struct kobject *kobj,
@@ -131,14 +133,20 @@ static ssize_t sys_param_store(struct kobject *kobj,
 {
        struct param_attr *attr = container_of(kobj_attr, struct param_attr,
                        kobj_attr);
-       int ret;
+       ssize_t ret;
+
+        /* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */
+        if (count > MAX_PARAM_DATA_LEN)
+                count = MAX_PARAM_DATA_LEN;
 
        mutex_lock(&opal_sysparam_mutex);
        memcpy(param_data_buf, buf, count);
        ret = opal_set_sys_param(attr->param_id, attr->param_size,
                        param_data_buf);
        mutex_unlock(&opal_sysparam_mutex);
-       return ret ? ret : count;
+       if (!ret)
+               ret = count;
+       return ret;
 }
 
 void __init opal_sys_param_init(void)
@@ -214,13 +222,13 @@ void __init opal_sys_param_init(void)
        }
 
        if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
-               pr_err("SYSPARAM: Missing propery param-len in the DT\n");
+               pr_err("SYSPARAM: Missing property param-len in the DT\n");
                goto out_free_perm;
        }
 
 
        if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
-               pr_err("SYSPARAM: Missing propery param-perm in the DT\n");
+               pr_err("SYSPARAM: Missing property param-perm in the DT\n");
                goto out_free_perm;
        }
 
@@ -233,6 +241,12 @@ void __init opal_sys_param_init(void)
 
        /* For each of the parameters, populate the parameter attributes */
        for (i = 0; i < count; i++) {
+               if (size[i] > MAX_PARAM_DATA_LEN) {
+                       pr_warn("SYSPARAM: Not creating parameter %d as size "
+                               "exceeds buffer length\n", i);
+                       continue;
+               }
+
                sysfs_attr_init(&attr[i].kobj_attr.attr);
                attr[i].param_id = id[i];
                attr[i].param_size = size[i];
index 49d2f00019e5d8092f7f32e9b3c6dfc53cceeee6..360ad80c754ce3c97ad9b9fead5ad4806e5f6666 100644 (file)
@@ -242,14 +242,14 @@ void opal_notifier_update_evt(uint64_t evt_mask,
 void opal_notifier_enable(void)
 {
        int64_t rc;
-       uint64_t evt = 0;
+       __be64 evt = 0;
 
        atomic_set(&opal_notifier_hold, 0);
 
        /* Process pending events */
        rc = opal_poll_events(&evt);
        if (rc == OPAL_SUCCESS && evt)
-               opal_do_notifier(evt);
+               opal_do_notifier(be64_to_cpu(evt));
 }
 
 void opal_notifier_disable(void)
@@ -529,7 +529,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
 
        opal_handle_interrupt(virq_to_hw(irq), &events);
 
-       opal_do_notifier(events);
+       opal_do_notifier(be64_to_cpu(events));
 
        return IRQ_HANDLED;
 }
@@ -638,3 +638,66 @@ void opal_shutdown(void)
 
 /* Export this so that test modules can use it */
 EXPORT_SYMBOL_GPL(opal_invalid_call);
+
+/* Convert a region of vmalloc memory to an opal sg list */
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+                                            unsigned long vmalloc_size)
+{
+       struct opal_sg_list *sg, *first = NULL;
+       unsigned long i = 0;
+
+       sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!sg)
+               goto nomem;
+
+       first = sg;
+
+       while (vmalloc_size > 0) {
+               uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
+               uint64_t length = min(vmalloc_size, PAGE_SIZE);
+
+               sg->entry[i].data = cpu_to_be64(data);
+               sg->entry[i].length = cpu_to_be64(length);
+               i++;
+
+               if (i >= SG_ENTRIES_PER_NODE) {
+                       struct opal_sg_list *next;
+
+                       next = kzalloc(PAGE_SIZE, GFP_KERNEL);
+                       if (!next)
+                               goto nomem;
+
+                       sg->length = cpu_to_be64(
+                                       i * sizeof(struct opal_sg_entry) + 16);
+                       i = 0;
+                       sg->next = cpu_to_be64(__pa(next));
+                       sg = next;
+               }
+
+               vmalloc_addr += length;
+               vmalloc_size -= length;
+       }
+
+       sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
+
+       return first;
+
+nomem:
+       pr_err("%s : Failed to allocate memory\n", __func__);
+       opal_free_sg_list(first);
+       return NULL;
+}
+
+void opal_free_sg_list(struct opal_sg_list *sg)
+{
+       while (sg) {
+               uint64_t next = be64_to_cpu(sg->next);
+
+               kfree(sg);
+
+               if (next)
+                       sg = __va(next);
+               else
+                       sg = NULL;
+       }
+}
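
The new opal_vmalloc_to_sg_list()/opal_free_sg_list() helpers above centralize what opal-dump.c and opal-flash.c used to open-code: walk the vmalloc'd buffer a page at a time, record each chunk's physical address and length as big-endian entries, and chain a fresh node whenever the current one is full. A userspace sketch of the same chunk-and-chain walk, with malloc standing in for kzalloc, a made-up node capacity, and no real physical-address translation or endian conversion, is:

/* Stand-alone model of the page-chunking walk in opal_vmalloc_to_sg_list().
 * Addresses are not translated and the capacity is an example value. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE            4096UL
#define SG_ENTRIES_PER_NODE  254      /* example capacity of one node */

struct sg_entry { uint64_t data; uint64_t length; };
struct sg_list  {
        uint64_t length;              /* bytes used in this node */
        struct sg_list *next;
        struct sg_entry entry[SG_ENTRIES_PER_NODE];
};

static struct sg_list *buffer_to_sg_list(char *addr, unsigned long size)
{
        struct sg_list *first = calloc(1, sizeof(*first)), *sg = first;
        unsigned long i = 0;

        if (!first)
                return NULL;

        while (size > 0) {
                unsigned long len = size > PAGE_SIZE ? PAGE_SIZE : size;

                sg->entry[i].data   = (uint64_t)(uintptr_t)addr; /* fake phys */
                sg->entry[i].length = len;
                i++;

                if (i >= SG_ENTRIES_PER_NODE) {
                        struct sg_list *next = calloc(1, sizeof(*next));

                        if (!next)
                                return NULL;    /* cleanup omitted for brevity */
                        sg->length = i * sizeof(struct sg_entry) + 16;
                        sg->next = next;        /* chain a new node */
                        sg = next;
                        i = 0;
                }
                addr += len;
                size -= len;
        }
        sg->length = i * sizeof(struct sg_entry) + 16;
        return first;
}

int main(void)
{
        char *buf = malloc(3 * 1024 * 1024);
        struct sg_list *list;

        if (!buf)
                return 1;
        list = buffer_to_sg_list(buf, 3 * 1024 * 1024);
        for (struct sg_list *sg = list; sg; sg = sg->next)
                printf("node with %lu payload bytes\n",
                       (unsigned long)sg->length);
        /* freeing of the chain and buffer omitted for brevity */
        return 0;
}
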
index 3b2b4fb3585b6b9fac45878041772285591d1d63..98824aa991731882cca87f806f464198f356ff26 100644 (file)
@@ -343,7 +343,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
                                pci_name(dev));
                        continue;
                }
-               pci_dev_get(dev);
                pdn->pcidev = dev;
                pdn->pe_number = pe->pe_number;
                pe->dma_weight += pnv_ioda_dma_weight(dev);
@@ -462,7 +461,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
 
        pe = &phb->ioda.pe_array[pdn->pe_number];
        WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
-       set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+       set_iommu_table_base(&pdev->dev, &pe->tce32_table);
 }
 
 static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
index 61cf8fa9c61b50489009b94c99c993fffaddde28..8723d32632f55b6eb49b25d55711140f60a95fe7 100644 (file)
@@ -162,18 +162,62 @@ static void pnv_shutdown(void)
 }
 
 #ifdef CONFIG_KEXEC
+static void pnv_kexec_wait_secondaries_down(void)
+{
+       int my_cpu, i, notified = -1;
+
+       my_cpu = get_cpu();
+
+       for_each_online_cpu(i) {
+               uint8_t status;
+               int64_t rc;
+
+               if (i == my_cpu)
+                       continue;
+
+               for (;;) {
+                       rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
+                                                  &status);
+                       if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
+                               break;
+                       barrier();
+                       if (i != notified) {
+                               printk(KERN_INFO "kexec: waiting for cpu %d "
+                                      "(physical %d) to enter OPAL\n",
+                                      i, paca[i].hw_cpu_id);
+                               notified = i;
+                       }
+               }
+       }
+}
+
 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
        xics_kexec_teardown_cpu(secondary);
 
-       /* Return secondary CPUs to firmware on OPAL v3 */
-       if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
+       /* On OPAL v3, we return all CPUs to firmware */
+
+       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+               return;
+
+       if (secondary) {
+               /* Return secondary CPUs to firmware on OPAL v3 */
                mb();
                get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
                mb();
 
                /* Return the CPU to OPAL */
                opal_return_cpu();
+       } else if (crash_shutdown) {
+               /*
+                * On crash, we don't wait for secondaries to go
+                * down as they might be unreachable or hung, so
+                * instead we just wait a bit and move on.
+                */
+               mdelay(1);
+       } else {
+               /* Primary waits for the secondaries to have reached OPAL */
+               pnv_kexec_wait_secondaries_down();
        }
 }
 #endif /* CONFIG_KEXEC */
index 908672bdcea6b2c77d75763a05d10f476e2b2d75..bf5fcd452168c6056492115c0232b40f25a2e407 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/cputhreads.h>
 #include <asm/xics.h>
 #include <asm/opal.h>
+#include <asm/runlatch.h>
 
 #include "powernv.h"
 
@@ -156,7 +157,9 @@ static void pnv_smp_cpu_kill_self(void)
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
        while (!generic_check_cpu_restart(cpu)) {
+               ppc64_runlatch_off();
                power7_nap();
+               ppc64_runlatch_on();
                if (!generic_check_cpu_restart(cpu)) {
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
                        /* We may be getting an IPI, so we re-enable
index 9b8e05078a63e73a2993cc89890793353a45f9ea..20d62975856fb7fa5795a566629bbb69aa7a3139 100644 (file)
@@ -88,13 +88,14 @@ void set_default_offline_state(int cpu)
 
 static void rtas_stop_self(void)
 {
-       struct rtas_args args = {
-               .token = cpu_to_be32(rtas_stop_self_token),
+       static struct rtas_args args = {
                .nargs = 0,
                .nret = 1,
                .rets = &args.args[0],
        };
 
+       args.token = cpu_to_be32(rtas_stop_self_token);
+
        local_irq_disable();
 
        BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
index 573b488fc48b8a9b79674806d27599993c96b362..7f75c94af822c40322d8a4a751e983ad0e2bcdae 100644 (file)
@@ -100,10 +100,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
 
        start_pfn = base >> PAGE_SHIFT;
 
-       if (!pfn_valid(start_pfn)) {
-               memblock_remove(base, memblock_size);
-               return 0;
-       }
+       lock_device_hotplug();
+
+       if (!pfn_valid(start_pfn))
+               goto out;
 
        block_sz = memory_block_size_bytes();
        sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
@@ -114,8 +114,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
                base += MIN_MEMORY_BLOCK_SIZE;
        }
 
+out:
        /* Update memory regions for memory remove */
        memblock_remove(base, memblock_size);
+       unlock_device_hotplug();
        return 0;
 }
 
index 228cf91b91c14bc4e865e89371595363aa7858cc..ffd1169ebaab8387c6a15831f5c660b1f90fa243 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/phy.h>
-#include <linux/phy_fixed.h>
 #include <linux/spi/spi.h>
 #include <linux/fsl_devices.h>
 #include <linux/fs_enet_pd.h>
@@ -178,37 +177,6 @@ u32 get_baudrate(void)
 EXPORT_SYMBOL(get_baudrate);
 #endif /* CONFIG_CPM2 */
 
-#ifdef CONFIG_FIXED_PHY
-static int __init of_add_fixed_phys(void)
-{
-       int ret;
-       struct device_node *np;
-       u32 *fixed_link;
-       struct fixed_phy_status status = {};
-
-       for_each_node_by_name(np, "ethernet") {
-               fixed_link  = (u32 *)of_get_property(np, "fixed-link", NULL);
-               if (!fixed_link)
-                       continue;
-
-               status.link = 1;
-               status.duplex = fixed_link[1];
-               status.speed = fixed_link[2];
-               status.pause = fixed_link[3];
-               status.asym_pause = fixed_link[4];
-
-               ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status);
-               if (ret) {
-                       of_node_put(np);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-arch_initcall(of_add_fixed_phys);
-#endif /* CONFIG_FIXED_PHY */
-
 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 static __be32 __iomem *rstcr;
 
index 64603a10b86313aace9d570494fcd86853357253..4914fd3f41eca710778ceaee742f29fee5137faf 100644 (file)
@@ -1058,7 +1058,7 @@ static int __init apm821xx_pciex_core_init(struct device_node *np)
        return 1;
 }
 
-static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
+static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
 {
        u32 val;
 
index cf3c0089bef253d7817a604f9a7f195a4ffd9bc0..23223cd63e54811d9ab85823df1e03303c88036e 100644 (file)
@@ -820,6 +820,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                else
                        memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
                spin_unlock(&ctrblk_lock);
+       } else {
+               if (!nbytes)
+                       memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
        }
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
index 0a5aac8a9412b64c4b2c43f9999e0f1e2f3815cb..7acb77f7ef1ada0183280c07e75f2c34579bac72 100644 (file)
@@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
                else
                        memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
                spin_unlock(&ctrblk_lock);
+       } else {
+               if (!nbytes)
+                       memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
        }
        /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
        if (nbytes) {
index 6e670f88d125d79fd2108575a6f6509e74985678..ebc2913f9ee0bd29f1a8594da675a279d38c8640 100644 (file)
@@ -22,8 +22,8 @@ struct ccwgroup_device {
 /* public: */
        unsigned int count;
        struct device   dev;
-       struct ccw_device *cdev[0];
        struct work_struct ungroup_work;
+       struct ccw_device *cdev[0];
 };
 
 /**
index c544b6f05d95e8e6fee2ef5b5f6f24d7acaf19d8..a25f09fbaf3634f4d71ebdf8fa84ca116a07fe4f 100644 (file)
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
        tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
        __tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
        tlb_table_flush(tlb);
 }
 
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       tlb_flush_mmu_tlbonly(tlb);
+       tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
                                  unsigned long start, unsigned long end)
 {
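
The s390 hunk above (mirrored by the asm-generic stubs that follow) splits tlb_flush_mmu() into a TLB-only phase and a page-freeing phase while keeping the combined wrapper, so callers can force just the TLB flush. Reduced to its shape, with placeholder phase bodies rather than real MMU code, the refactoring looks like:

/* Structural sketch of splitting one flush into two phases plus a
 * combined wrapper; the phase bodies are placeholders only. */
#include <stdio.h>

struct mmu_gather { int pending_pages; };

static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        (void)tlb;
        printf("flush TLB entries\n");          /* phase 1: TLB only */
}

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        printf("free %d batched pages\n", tlb->pending_pages);
        tlb->pending_pages = 0;                 /* phase 2: release pages */
}

static void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);             /* old behaviour == both phases */
        tlb_flush_mmu_free(tlb);
}

int main(void)
{
        struct mmu_gather tlb = { .pending_pages = 8 };

        tlb_flush_mmu_tlbonly(&tlb);    /* caller that only needs the TLB part */
        tlb_flush_mmu(&tlb);            /* full flush */
        return 0;
}
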
index 9c36dc398f9070afb4d6151d214623d1cbcf9f4a..e9f8fa9337fe1fae0d5d0ba9bf03a8426961a3c0 100644 (file)
@@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
        case BPF_S_LD_W_IND:
        case BPF_S_LD_H_IND:
        case BPF_S_LD_B_IND:
-       case BPF_S_LDX_B_MSH:
        case BPF_S_LD_IMM:
        case BPF_S_LD_MEM:
        case BPF_S_MISC_TXA:
@@ -812,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
                return NULL;
        memset(header, 0, sz);
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (bpfsize + sizeof(*header));
+       hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
        /* Insert random number of illegal instructions before BPF code
         * and make sure the first instruction starts at an even address.
         */
index 362192ed12fef1789d2c23d21c654f19632006a0..62f80d2a9df9f35c22b761b58cc7d8d747a689f4 100644 (file)
@@ -86,6 +86,14 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
        }
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 }
index bdbda1453aa9f168339896d6880e3e2acd44182f..04471dc64847269e815a26752ef029cd9206c154 100644 (file)
@@ -238,4 +238,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
        return csum_fold(csum_partial(buff, len, 0));
 }
 
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+       __asm__ __volatile__(
+               "addcc   %0, %1, %0\n"
+               "addx    %0, %%g0, %0"
+               : "=r" (csum)
+               : "r" (addend), "0" (csum));
+
+       return csum;
+}
+
 #endif /* !(__SPARC_CHECKSUM_H) */
index 019b9615e43c16b81230508109b2484d6313ee62..2ff81ae8f3afa17b0fd25a5a6786131a9e30bc1b 100644 (file)
@@ -164,4 +164,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
        return csum_fold(csum_partial(buff, len, 0));
 }
 
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+       __asm__ __volatile__(
+               "addcc   %0, %1, %0\n"
+               "addx    %0, %%g0, %0"
+               : "=r" (csum)
+               : "r" (addend), "0" (csum));
+
+       return csum;
+}
+
 #endif /* !(__SPARC64_CHECKSUM_H) */
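
The sparc32 and sparc64 csum_add() helpers added above perform a 32-bit add and fold the carry back in with addx against %g0, i.e. the end-around carry step of one's-complement arithmetic. An equivalent portable C version, handy for sanity-checking the asm on any host, might be:

/* Portable model of one's-complement csum_add(): a 32-bit add with the
 * carry folded back in (end-around carry). Illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
        uint64_t sum = (uint64_t)csum + addend;

        return (uint32_t)(sum + (sum >> 32));   /* add the carry bit back */
}

int main(void)
{
        /* 0xffffffff + 1 wraps to 0 with carry 1, giving 0x00000001. */
        printf("%#x\n", (unsigned int)csum_add(0xffffffffu, 1u));
        return 0;
}
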
index 0f9e94537eee78d9d41fffe82e32fc0098811ed2..1a49ffdf9da91056cb24357b6fdefea772658201 100644 (file)
@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define        TLBTEMP_BASE            _AC(0x0000000006000000,UL)
-#define        TSBMAP_BASE             _AC(0x0000000008000000,UL)
+#define        TSBMAP_8K_BASE          _AC(0x0000000008000000,UL)
+#define        TSBMAP_4M_BASE          _AC(0x0000000008400000,UL)
 #define MODULES_VADDR          _AC(0x0000000010000000,UL)
 #define MODULES_LEN            _AC(0x00000000e0000000,UL)
 #define MODULES_END            _AC(0x00000000f0000000,UL)
 
 #include <linux/sched.h>
 
+extern unsigned long sparc64_valid_addr_bitmap[];
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+static inline bool __kern_addr_valid(unsigned long paddr)
+{
+       if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
+               return false;
+       return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
+}
+
+static inline bool kern_addr_valid(unsigned long addr)
+{
+       unsigned long paddr = __pa(addr);
+
+       return __kern_addr_valid(paddr);
+}
+
 /* Entries per page directory level. */
 #define PTRS_PER_PTE   (1UL << (PAGE_SHIFT-3))
 #define PTRS_PER_PMD   (1UL << PMD_BITS)
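
__kern_addr_valid() is hoisted here, ahead of the page-table macros, so that the pmd_bad()/pud_bad() checks added later in this file can reuse it: an address passes only if it fits under MAX_PHYS_ADDRESS_BITS and its 4 MB chunk is set in sparc64_valid_addr_bitmap. A minimal user-space model of that kind of validity bitmap (the names and the 41-bit limit are made up for the example):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ILOG2_4MB       22              /* one bit per 4 MB granule */
    #define MAX_PHYS_BITS   41              /* example limit, not sparc64's */
    #define MAX_GRANULES    (1UL << (MAX_PHYS_BITS - ILOG2_4MB))

    static unsigned long valid_bitmap[MAX_GRANULES / (8 * sizeof(unsigned long))];

    static void mark_valid(uint64_t paddr)
    {
            uint64_t bit = paddr >> ILOG2_4MB;

            valid_bitmap[bit / (8 * sizeof(unsigned long))] |=
                    1UL << (bit % (8 * sizeof(unsigned long)));
    }

    static bool addr_valid(uint64_t paddr)
    {
            uint64_t bit;

            if (paddr >> MAX_PHYS_BITS)     /* beyond the supported range */
                    return false;
            bit = paddr >> ILOG2_4MB;
            return valid_bitmap[bit / (8 * sizeof(unsigned long))] &
                   (1UL << (bit % (8 * sizeof(unsigned long))));
    }

    int main(void)
    {
            mark_valid(0x40000000);         /* mark the 4 MB chunk at 1 GB */
            printf("%d %d\n", addr_valid(0x401fffff), addr_valid(0x80000000));
            return 0;
    }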
 /* Kernel has a separate 44bit address space. */
 #define FIRST_USER_ADDRESS     0
 
-#define pte_ERROR(e)   __builtin_trap()
-#define pmd_ERROR(e)   __builtin_trap()
-#define pgd_ERROR(e)   __builtin_trap()
+#define pmd_ERROR(e)                                                   \
+       pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",             \
+              __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
+#define pgd_ERROR(e)                                                   \
+       pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",             \
+              __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
 
 #endif /* !(__ASSEMBLY__) */
 
@@ -258,8 +280,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 {
        unsigned long mask, tmp;
 
-       /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
-        * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
+       /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
+        * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
         *
         * Even if we use negation tricks the result is still a 6
         * instruction sequence, so don't try to play fancy and just
@@ -289,10 +311,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
        "       .previous\n"
        : "=r" (mask), "=r" (tmp)
        : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
-              _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
+              _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
          "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
-              _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
+              _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 
        return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@@ -633,7 +655,7 @@ static inline unsigned long pmd_large(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
 
-       return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte);
+       return pte_val(pte) & _PAGE_PMD_HUGE;
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -719,20 +741,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
        return __pmd(pte_val(pte));
 }
 
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-       unsigned long mask;
-
-       if (tlb_type == hypervisor)
-               mask = _PAGE_PRESENT_4V;
-       else
-               mask = _PAGE_PRESENT_4U;
-
-       pmd_val(pmd) &= ~mask;
-
-       return pmd;
-}
-
 static inline pmd_t pmd_mksplitting(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
@@ -757,6 +765,20 @@ static inline int pmd_present(pmd_t pmd)
 
 #define pmd_none(pmd)                  (!pmd_val(pmd))
 
+/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
+ * very simple, it's just the physical address.  PTE tables are of
+ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
+ * the top bits outside of the range of any physical address size we
+ * support are clear as well.  We also validate the physical itself.
+ */
+#define pmd_bad(pmd)                   ((pmd_val(pmd) & ~PAGE_MASK) || \
+                                        !__kern_addr_valid(pmd_val(pmd)))
+
+#define pud_none(pud)                  (!pud_val(pud))
+
+#define pud_bad(pud)                   ((pud_val(pud) & ~PAGE_MASK) || \
+                                        !__kern_addr_valid(pud_val(pud)))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t pmd);
@@ -790,10 +812,7 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 #define pud_page_vaddr(pud)            \
        ((unsigned long) __va(pud_val(pud)))
 #define pud_page(pud)                  virt_to_page((void *)pud_page_vaddr(pud))
-#define pmd_bad(pmd)                   (0)
 #define pmd_clear(pmdp)                        (pmd_val(*(pmdp)) = 0UL)
-#define pud_none(pud)                  (!pud_val(pud))
-#define pud_bad(pud)                   (0)
 #define pud_present(pud)               (pud_val(pud) != 0U)
 #define pud_clear(pudp)                        (pud_val(*(pudp)) = 0UL)
 
@@ -893,6 +912,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                                 pmd_t *pmd);
 
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                           pmd_t *pmdp);
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                       pgtable_t pgtable);
@@ -919,18 +942,6 @@ extern unsigned long pte_file(pte_t);
 extern pte_t pgoff_to_pte(unsigned long);
 #define PTE_FILE_MAX_BITS      (64UL - PAGE_SHIFT - 1UL)
 
-extern unsigned long sparc64_valid_addr_bitmap[];
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-static inline bool kern_addr_valid(unsigned long addr)
-{
-       unsigned long paddr = __pa(addr);
-
-       if ((paddr >> 41UL) != 0UL)
-               return false;
-       return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
-}
-
 extern int page_in_phys_avail(unsigned long paddr);
 
 /*
index 2230f80d9fe326dc48576e83a721fcf106efeaf0..90916f955cac80153c59dc74a768c3fdef3fae56 100644 (file)
@@ -171,7 +171,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
        andcc           REG1, REG2, %g0;                \
        be,pt           %xcc, 700f;                     \
         sethi          %hi(4 * 1024 * 1024), REG2;     \
-       andn            REG1, REG2, REG1;               \
+       brgez,pn        REG1, FAIL_LABEL;               \
+        andn           REG1, REG2, REG1;               \
        and             VADDR, REG2, REG2;              \
        brlz,pt         REG1, PTE_LABEL;                \
         or             REG1, REG2, REG1;               \
index 26b706a1867dc6b9976b52e1e61ef8e54ce0df91..452f04fe8da698bb8b4620abd40ac6d4fbcd8393 100644 (file)
@@ -282,8 +282,8 @@ sun4v_chip_type:
        stx     %l2, [%l4 + 0x0]
        ldx     [%sp + 2047 + 128 + 0x50], %l3  ! physaddr low
        /* 4MB align */
-       srlx    %l3, 22, %l3
-       sllx    %l3, 22, %l3
+       srlx    %l3, ILOG2_4MB, %l3
+       sllx    %l3, ILOG2_4MB, %l3
        stx     %l3, [%l4 + 0x8]
 
        /* Leave service as-is, "call-method" */
index 542e96ac4d39948c165bd63eb9a43e25c0ba72e4..605d49204580585356a7fda6dede8657641fb7e1 100644 (file)
@@ -277,7 +277,7 @@ kvmap_dtlb_load:
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 kvmap_vmemmap:
        sub             %g4, %g5, %g5
-       srlx            %g5, 22, %g5
+       srlx            %g5, ILOG2_4MB, %g5
        sethi           %hi(vmemmap_table), %g1
        sllx            %g5, 3, %g5
        or              %g1, %lo(vmemmap_table), %g1
index 6479256fd5a4b650a2f5c3ca3cf5c501033dc9fb..3370945569162dd43ef1252f47c83cf6ad7d87d6 100644 (file)
@@ -68,27 +68,16 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
 
 static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 {
+       int this_cpu = smp_processor_id();
+
        if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
                       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
                return;
 
-       console_verbose();
-       bust_spinlocks(1);
-
-       printk(KERN_EMERG "%s", str);
-       printk(" on CPU%d, ip %08lx, registers:\n",
-              smp_processor_id(), regs->tpc);
-       show_regs(regs);
-       dump_stack();
-
-       bust_spinlocks(0);
-
        if (do_panic || panic_on_oops)
-               panic("Non maskable interrupt");
-
-       nmi_exit();
-       local_irq_enable();
-       do_exit(SIGBUS);
+               panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+       else
+               WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 }
 
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
index 9781048161ab8865a3ce203074d4067312989b95..745a3633ce148208554a71a38c829d6a8c78e025 100644 (file)
@@ -149,7 +149,7 @@ void cpu_panic(void)
 #define NUM_ROUNDS     64      /* magic value */
 #define NUM_ITERS      5       /* likewise */
 
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 static unsigned long go[SLAVE + 1];
 
 #define DEBUG_TICK_SYNC        0
@@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
        go[MASTER] = 0;
        membar_safe("#StoreLoad");
 
-       spin_lock_irqsave(&itc_sync_lock, flags);
+       raw_spin_lock_irqsave(&itc_sync_lock, flags);
        {
                for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                        while (!go[MASTER])
@@ -268,7 +268,7 @@ static void smp_synchronize_one_tick(int cpu)
                        membar_safe("#StoreLoad");
                }
        }
-       spin_unlock_irqrestore(&itc_sync_lock, flags);
+       raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
index f7c72b6efc27556cd2e2de7a74539b1ba21831ce..d066eb18650c1598f898f7a4314bdd7ed5b606a7 100644 (file)
@@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
 SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
 SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
 SIGN1(sys32_select, compat_sys_select, %o0)
-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
+SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
index a364000ca1aa8a495f7e8b9bf59882350f41c6de..7f41d40b7e6e8ccf89b5ce12a9422bbf4e84ac2e 100644 (file)
@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
                        size_t count)
 {
        unsigned long val, err;
-       int ret = sscanf(buf, "%ld", &val);
+       int ret = sscanf(buf, "%lu", &val);
 
        if (ret != 1)
                return -EINVAL;
index 3c1a7cb31579fd0fe55e68bced2dc11757734c35..35ab8b60d25609220c1b1d10c1495642ea3700b2 100644 (file)
@@ -166,17 +166,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
 unsigned long compute_effective_address(struct pt_regs *regs,
                                        unsigned int insn, unsigned int rd)
 {
+       int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
-       int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+       unsigned long addr;
 
        if (insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd, from_kernel);
-               return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+               addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd, from_kernel);
-               return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+               addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
        }
+
+       if (!from_kernel && test_thread_flag(TIF_32BIT))
+               addr &= 0xffffffff;
+
+       return addr;
 }
 
 /* This is just to make gcc think die_if_kernel does return... */
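
compute_effective_address() now truncates the computed address to 32 bits when the fault came from a 32-bit user task, so callers never see stale high bits from the 64-bit register arithmetic; kernel-mode faults keep the full value. A condensed illustration of the rule (hypothetical helper; the real register fetch is omitted):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Compute a load/store effective address from two source values and,
     * for 32-bit user tasks, drop the upper 32 bits.
     */
    static uint64_t effective_address(uint64_t rs1, uint64_t rs2_or_imm,
                                      bool from_kernel, bool task_32bit)
    {
            uint64_t addr = rs1 + rs2_or_imm;

            if (!from_kernel && task_32bit)
                    addr &= 0xffffffffULL;  /* 32-bit tasks use 32-bit addresses */

            return addr;
    }

    int main(void)
    {
            /* High bits are dropped for a 32-bit task, preserved for the kernel. */
            printf("%#llx\n", (unsigned long long)
                   effective_address(0xffffffff00001000ULL, 0x10, false, true));
            printf("%#llx\n", (unsigned long long)
                   effective_address(0xffffffff00001000ULL, 0x10, true, false));
            return 0;
    }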
index 2c20ad63ddbf2bbf8a4da5e751e49650d8be7060..30eee6e8a81b2d45797aab304914b10571573b1a 100644 (file)
@@ -236,6 +236,7 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
         */
        VISEntryHalf
 
+       membar          #Sync
        alignaddr       %o1, %g0, %g0
 
        add             %o1, (64 - 1), %o4
index 69bb818fdd798b8d50dff44a46a9941272972a92..4ced3fc66130c30b8870c21cea57611056046d50 100644 (file)
@@ -96,38 +96,51 @@ static unsigned int get_user_insn(unsigned long tpc)
        pte_t *ptep, pte;
        unsigned long pa;
        u32 insn = 0;
-       unsigned long pstate;
 
-       if (pgd_none(*pgdp))
-               goto outret;
+       if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
+               goto out;
        pudp = pud_offset(pgdp, tpc);
-       if (pud_none(*pudp))
-               goto outret;
-       pmdp = pmd_offset(pudp, tpc);
-       if (pmd_none(*pmdp))
-               goto outret;
+       if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
+               goto out;
 
        /* This disables preemption for us as well. */
-       __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-       __asm__ __volatile__("wrpr %0, %1, %%pstate"
-                               : : "r" (pstate), "i" (PSTATE_IE));
-       ptep = pte_offset_map(pmdp, tpc);
-       pte = *ptep;
-       if (!pte_present(pte))
-               goto out;
+       local_irq_disable();
+
+       pmdp = pmd_offset(pudp, tpc);
+       if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
+               goto out_irq_enable;
 
-       pa  = (pte_pfn(pte) << PAGE_SHIFT);
-       pa += (tpc & ~PAGE_MASK);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       if (pmd_trans_huge(*pmdp)) {
+               if (pmd_trans_splitting(*pmdp))
+                       goto out_irq_enable;
 
-       /* Use phys bypass so we don't pollute dtlb/dcache. */
-       __asm__ __volatile__("lduwa [%1] %2, %0"
-                            : "=r" (insn)
-                            : "r" (pa), "i" (ASI_PHYS_USE_EC));
+               pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
+               pa += tpc & ~HPAGE_MASK;
 
+               /* Use phys bypass so we don't pollute dtlb/dcache. */
+               __asm__ __volatile__("lduwa [%1] %2, %0"
+                                    : "=r" (insn)
+                                    : "r" (pa), "i" (ASI_PHYS_USE_EC));
+       } else
+#endif
+       {
+               ptep = pte_offset_map(pmdp, tpc);
+               pte = *ptep;
+               if (pte_present(pte)) {
+                       pa  = (pte_pfn(pte) << PAGE_SHIFT);
+                       pa += (tpc & ~PAGE_MASK);
+
+                       /* Use phys bypass so we don't pollute dtlb/dcache. */
+                       __asm__ __volatile__("lduwa [%1] %2, %0"
+                                            : "=r" (insn)
+                                            : "r" (pa), "i" (ASI_PHYS_USE_EC));
+               }
+               pte_unmap(ptep);
+       }
+out_irq_enable:
+       local_irq_enable();
 out:
-       pte_unmap(ptep);
-       __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
-outret:
        return insn;
 }
 
@@ -153,7 +166,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
 }
 
 static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
-                            unsigned int insn, int fault_code)
+                            unsigned long fault_addr, unsigned int insn,
+                            int fault_code)
 {
        unsigned long addr;
        siginfo_t info;
@@ -161,10 +175,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
        info.si_code = code;
        info.si_signo = sig;
        info.si_errno = 0;
-       if (fault_code & FAULT_CODE_ITLB)
+       if (fault_code & FAULT_CODE_ITLB) {
                addr = regs->tpc;
-       else
-               addr = compute_effective_address(regs, insn, 0);
+       } else {
+               /* If we were able to probe the faulting instruction, use it
+                * to compute a precise fault address.  Otherwise use the fault
+                * time provided address which may only have page granularity.
+                */
+               if (insn)
+                       addr = compute_effective_address(regs, insn, 0);
+               else
+                       addr = fault_addr;
+       }
        info.si_addr = (void __user *) addr;
        info.si_trapno = 0;
 
@@ -239,7 +261,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
                /* The si_code was set to make clear whether
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                 */
-               do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
+               do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
                return;
        }
 
@@ -259,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
        show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-                                                        unsigned long addr)
-{
-       static int times;
-
-       if (times++ < 10)
-               printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-                      "reports 64-bit fault address [%lx]\n",
-                      current->comm, current->pid, addr);
-       show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -300,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                                goto intr_or_no_mm;
                        }
                }
-               if (unlikely((address >> 32) != 0)) {
-                       bogus_32bit_fault_address(regs, address);
+               if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
-               }
        }
 
        if (regs->tstate & TSTATE_PRIV) {
@@ -525,7 +533,7 @@ do_sigbus:
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
-       do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
+       do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
 
        /* Kernel mode? Handle exceptions or die */
        if (regs->tstate & TSTATE_PRIV)
index c4d3da68b800db27e54761add314e4a882f4e4c6..1aed0432c64bab8344ad524ed28b835381588035 100644 (file)
@@ -73,7 +73,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
        struct page *head, *page, *tail;
        int refs;
 
-       if (!pmd_large(pmd))
+       if (!(pmd_val(pmd) & _PAGE_VALID))
                return 0;
 
        if (write && !pmd_write(pmd))
index eafbc65c9c47f63772162d384ef55fd549d1e149..ed3c969a5f4c897e802b02dcfce583acabcf3a14 100644 (file)
@@ -588,7 +588,7 @@ static void __init remap_kernel(void)
        int i, tlb_ent = sparc64_highest_locked_tlbent();
 
        tte_vaddr = (unsigned long) KERNBASE;
-       phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+       phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
        tte_data = kern_large_tte(phys_page);
 
        kern_locked_tte_data = tte_data;
@@ -1881,7 +1881,7 @@ void __init paging_init(void)
 
        BUILD_BUG_ON(NR_CPUS > 4096);
 
-       kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+       kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
        kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
        /* Invalidate both kernel TSBs.  */
@@ -1937,7 +1937,7 @@ void __init paging_init(void)
        shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
 
        real_end = (unsigned long)_end;
-       num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+       num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
        printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
               num_kernel_image_mappings);
 
@@ -2094,7 +2094,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
 
                                if (new_start <= old_start &&
                                    new_end >= (old_start + PAGE_SIZE)) {
-                                       set_bit(old_start >> 22, bitmap);
+                                       set_bit(old_start >> ILOG2_4MB, bitmap);
                                        goto do_next_page;
                                }
                        }
@@ -2143,7 +2143,7 @@ void __init mem_init(void)
        addr = PAGE_OFFSET + kern_base;
        last = PAGE_ALIGN(kern_size) + addr;
        while (addr < last) {
-               set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
+               set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
                addr += PAGE_SIZE;
        }
 
@@ -2267,7 +2267,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
                void *block;
 
                if (!(*vmem_pp & _PAGE_VALID)) {
-                       block = vmemmap_alloc_block(1UL << 22, node);
+                       block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
                        if (!block)
                                return -ENOMEM;
 
index b12cb5e72812140688d771ed0788b6a2cf2cfc16..b89aba217e3b5105c3fae5737cca2f279db4dc95 100644 (file)
@@ -134,7 +134,7 @@ no_cache_flush:
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
-                              pmd_t pmd, bool exec)
+                              pmd_t pmd)
 {
        unsigned long end;
        pte_t *pte;
@@ -142,8 +142,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
        pte = pte_offset_map(&pmd, vaddr);
        end = vaddr + HPAGE_SIZE;
        while (vaddr < end) {
-               if (pte_val(*pte) & _PAGE_VALID)
+               if (pte_val(*pte) & _PAGE_VALID) {
+                       bool exec = pte_exec(*pte);
+
                        tlb_batch_add_one(mm, vaddr, exec);
+               }
                pte++;
                vaddr += PAGE_SIZE;
        }
@@ -177,19 +180,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        }
 
        if (!pmd_none(orig)) {
-               pte_t orig_pte = __pte(pmd_val(orig));
-               bool exec = pte_exec(orig_pte);
-
                addr &= HPAGE_MASK;
                if (pmd_trans_huge(orig)) {
+                       pte_t orig_pte = __pte(pmd_val(orig));
+                       bool exec = pte_exec(orig_pte);
+
                        tlb_batch_add_one(mm, addr, exec);
                        tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
                } else {
-                       tlb_batch_pmd_scan(mm, addr, orig, exec);
+                       tlb_batch_pmd_scan(mm, addr, orig);
                }
        }
 }
 
+void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                    pmd_t *pmdp)
+{
+       pmd_t entry = *pmdp;
+
+       pmd_val(entry) &= ~_PAGE_VALID;
+
+       set_pmd_at(vma->vm_mm, address, pmdp, entry);
+       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+}
+
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
 {
index f5d506fdddad3dea459aa97308e9a8872f34ffe8..fe19b81acc091b4d994da81f5580fe4438664388 100644 (file)
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);
 
-       base = TSBMAP_BASE;
+       switch (tsb_idx) {
+       case MM_TSB_BASE:
+               base = TSBMAP_8K_BASE;
+               break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       case MM_TSB_HUGE:
+               base = TSBMAP_4M_BASE;
+               break;
+#endif
+       default:
+               BUG();
+       }
+
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
index 29b0301c18aab26f2a613d397da9856a8d1eec3b..16eb63fac57de1395bc70507c3616f64af68ab4f 100644 (file)
@@ -58,14 +58,26 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end);
 
+static inline void
+tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+       flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+}
+
+static inline void
+tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+       init_tlb_gather(tlb);
+}
+
 static inline void
 tlb_flush_mmu(struct mmu_gather *tlb)
 {
        if (!tlb->need_flush)
                return;
 
-       flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-       init_tlb_gather(tlb);
+       tlb_flush_mmu_tlbonly(tlb);
+       tlb_flush_mmu_free(tlb);
 }
 
 /* tlb_finish_mmu
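
As with the empty stubs added near the top of this diff, tlb_flush_mmu() is split into a flush-only half and a free-only half, with the combined function kept as a thin wrapper; the usual motivation for this kind of split is letting callers force just the TLB flush without also releasing the gathered pages. The shape of the split, reduced to a toy example (names hypothetical):

    #include <stdio.h>

    struct gather { int need_flush; int queued_pages; };

    static void flush_tlb_only(struct gather *g)
    {
            printf("flush TLB covering %d queued pages\n", g->queued_pages);
    }

    static void free_pages_only(struct gather *g)
    {
            printf("free %d queued pages\n", g->queued_pages);
            g->queued_pages = 0;
            g->need_flush = 0;
    }

    /* The legacy combined entry point simply chains the two halves. */
    static void flush_and_free(struct gather *g)
    {
            if (!g->need_flush)
                    return;
            flush_tlb_only(g);
            free_pages_only(g);
    }

    int main(void)
    {
            struct gather g = { .need_flush = 1, .queued_pages = 3 };

            flush_and_free(&g);
            return 0;
    }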
index 75298d3358e7f3d2c7ff4e2b2c5ed8b81a8dca6d..08eec0b691b061ade61d8f9df7258ea4bee49f42 100644 (file)
@@ -136,6 +136,7 @@ extern int os_ioctl_generic(int fd, unsigned int cmd, unsigned long arg);
 extern int os_get_ifname(int fd, char *namebuf);
 extern int os_set_slip(int fd);
 extern int os_mode_fd(int fd, int mode);
+extern int os_fsync_file(int fd);
 
 extern int os_seek_file(int fd, unsigned long long offset);
 extern int os_open_file(const char *file, struct openflags flags, int mode);
index f116db15d4028217767f2a7f51dc0bba38b151a9..30fdd5d0067b26c91fb8c831da5a1d4008c79e22 100644 (file)
@@ -103,6 +103,7 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
         */
        os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
        os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);
+       os_fsync_file(physmem_fd);
 
        bootmap_size = init_bootmem(pfn, pfn + delta);
        free_bootmem(__pa(reserve_end) + bootmap_size,
index 07a750197bb09d3b5ce59b631aed874d58603386..08d90fba952c3e1ba07e0fa488012aa26a6b473c 100644 (file)
@@ -237,6 +237,12 @@ void os_close_file(int fd)
 {
        close(fd);
 }
+int os_fsync_file(int fd)
+{
+       if (fsync(fd) < 0)
+           return -errno;
+       return 0;
+}
 
 int os_seek_file(int fd, unsigned long long offset)
 {
index e1704ff600ff9e677a98a4711d5c8b7b2ff8cf6a..df9191acd926cfb3b5a0c3582549105ac0729d5a 100644 (file)
@@ -151,6 +151,7 @@ int __init main(int argc, char **argv, char **envp)
 #endif
 
        do_uml_initcalls();
+       change_sig(SIGPIPE, 0);
        ret = linux_main(argc, argv);
 
        /*
index 3c4af77e51a2f9047ad603fe1032334111d08e9f..897e9ad0c108ed7ef70571628ed2fde039109508 100644 (file)
 #include <string.h>
 #include <sys/stat.h>
 #include <sys/mman.h>
-#include <sys/param.h>
+#include <sys/vfs.h>
+#include <linux/magic.h>
 #include <init.h>
 #include <os.h>
 
-/* Modified by which_tmpdir, which is called during early boot */
-static char *default_tmpdir = "/tmp";
-
-/*
- *  Modified when creating the physical memory file and when checking
- * the tmp filesystem for usability, both happening during early boot.
- */
+/* Set by make_tempfile() during early boot. */
 static char *tempdir = NULL;
 
-static void __init find_tempdir(void)
+/* Check if dir is on tmpfs. Return 0 if yes, -1 if no or error. */
+static int __init check_tmpfs(const char *dir)
 {
-       const char *dirs[] = { "TMP", "TEMP", "TMPDIR", NULL };
-       int i;
-       char *dir = NULL;
-
-       if (tempdir != NULL)
-               /* We've already been called */
-               return;
-       for (i = 0; dirs[i]; i++) {
-               dir = getenv(dirs[i]);
-               if ((dir != NULL) && (*dir != '\0'))
-                       break;
-       }
-       if ((dir == NULL) || (*dir == '\0'))
-               dir = default_tmpdir;
+       struct statfs st;
 
-       tempdir = malloc(strlen(dir) + 2);
-       if (tempdir == NULL) {
-               fprintf(stderr, "Failed to malloc tempdir, "
-                       "errno = %d\n", errno);
-               return;
-       }
-       strcpy(tempdir, dir);
-       strcat(tempdir, "/");
-}
-
-/*
- * Remove bytes from the front of the buffer and refill it so that if there's a
- * partial string that we care about, it will be completed, and we can recognize
- * it.
- */
-static int pop(int fd, char *buf, size_t size, size_t npop)
-{
-       ssize_t n;
-       size_t len = strlen(&buf[npop]);
-
-       memmove(buf, &buf[npop], len + 1);
-       n = read(fd, &buf[len], size - len - 1);
-       if (n < 0)
-               return -errno;
-
-       buf[len + n] = '\0';
-       return 1;
-}
-
-/*
- * This will return 1, with the first character in buf being the
- * character following the next instance of c in the file.  This will
- * read the file as needed.  If there's an error, -errno is returned;
- * if the end of the file is reached, 0 is returned.
- */
-static int next(int fd, char *buf, size_t size, char c)
-{
-       ssize_t n;
-       char *ptr;
-
-       while ((ptr = strchr(buf, c)) == NULL) {
-               n = read(fd, buf, size - 1);
-               if (n == 0)
-                       return 0;
-               else if (n < 0)
-                       return -errno;
-
-               buf[n] = '\0';
+       printf("Checking if %s is on tmpfs...", dir);
+       if (statfs(dir, &st) < 0) {
+               printf("%s\n", strerror(errno));
+       } else if (st.f_type != TMPFS_MAGIC) {
+               printf("no\n");
+       } else {
+               printf("OK\n");
+               return 0;
        }
-
-       return pop(fd, buf, size, ptr - buf + 1);
+       return -1;
 }
 
 /*
- * Decode an octal-escaped and space-terminated path of the form used by
- * /proc/mounts. May be used to decode a path in-place. "out" must be at least
- * as large as the input. The output is always null-terminated. "len" gets the
- * length of the output, excluding the trailing null. Returns 0 if a full path
- * was successfully decoded, otherwise an error.
+ * Choose the tempdir to use. We want something on tmpfs so that our memory is
+ * not subject to the host's vm.dirty_ratio. If a tempdir is specified in the
+ * environment, we use that even if it's not on tmpfs, but we warn the user.
+ * Otherwise, we try common tmpfs locations, and if no tmpfs directory is found
+ * then we fall back to /tmp.
  */
-static int decode_path(const char *in, char *out, size_t *len)
+static char * __init choose_tempdir(void)
 {
-       char *first = out;
-       int c;
+       static const char * const vars[] = {
+               "TMPDIR",
+               "TMP",
+               "TEMP",
+               NULL
+       };
+       static const char fallback_dir[] = "/tmp";
+       static const char * const tmpfs_dirs[] = {
+               "/dev/shm",
+               fallback_dir,
+               NULL
+       };
        int i;
-       int ret = -EINVAL;
-       while (1) {
-               switch (*in) {
-               case '\0':
-                       goto out;
-
-               case ' ':
-                       ret = 0;
-                       goto out;
-
-               case '\\':
-                       in++;
-                       c = 0;
-                       for (i = 0; i < 3; i++) {
-                               if (*in < '0' || *in > '7')
-                                       goto out;
-                               c = (c << 3) | (*in++ - '0');
-                       }
-                       *(unsigned char *)out++ = (unsigned char) c;
-                       break;
-
-               default:
-                       *out++ = *in++;
-                       break;
+       const char *dir;
+
+       printf("Checking environment variables for a tempdir...");
+       for (i = 0; vars[i]; i++) {
+               dir = getenv(vars[i]);
+               if ((dir != NULL) && (*dir != '\0')) {
+                       printf("%s\n", dir);
+                       if (check_tmpfs(dir) >= 0)
+                               goto done;
+                       else
+                               goto warn;
                }
        }
+       printf("none found\n");
 
-out:
-       *out = '\0';
-       *len = out - first;
-       return ret;
-}
-
-/*
- * Computes the length of s when encoded with three-digit octal escape sequences
- * for the characters in chars.
- */
-static size_t octal_encoded_length(const char *s, const char *chars)
-{
-       size_t len = strlen(s);
-       while ((s = strpbrk(s, chars)) != NULL) {
-               len += 3;
-               s++;
-       }
-
-       return len;
-}
-
-enum {
-       OUTCOME_NOTHING_MOUNTED,
-       OUTCOME_TMPFS_MOUNT,
-       OUTCOME_NON_TMPFS_MOUNT,
-};
-
-/* Read a line of /proc/mounts data looking for a tmpfs mount at "path". */
-static int read_mount(int fd, char *buf, size_t bufsize, const char *path,
-                     int *outcome)
-{
-       int found;
-       int match;
-       char *space;
-       size_t len;
-
-       enum {
-               MATCH_NONE,
-               MATCH_EXACT,
-               MATCH_PARENT,
-       };
-
-       found = next(fd, buf, bufsize, ' ');
-       if (found != 1)
-               return found;
-
-       /*
-        * If there's no following space in the buffer, then this path is
-        * truncated, so it can't be the one we're looking for.
-        */
-       space = strchr(buf, ' ');
-       if (space) {
-               match = MATCH_NONE;
-               if (!decode_path(buf, buf, &len)) {
-                       if (!strcmp(buf, path))
-                               match = MATCH_EXACT;
-                       else if (!strncmp(buf, path, len)
-                                && (path[len] == '/' || !strcmp(buf, "/")))
-                               match = MATCH_PARENT;
-               }
-
-               found = pop(fd, buf, bufsize, space - buf + 1);
-               if (found != 1)
-                       return found;
-
-               switch (match) {
-               case MATCH_EXACT:
-                       if (!strncmp(buf, "tmpfs", strlen("tmpfs")))
-                               *outcome = OUTCOME_TMPFS_MOUNT;
-                       else
-                               *outcome = OUTCOME_NON_TMPFS_MOUNT;
-                       break;
-
-               case MATCH_PARENT:
-                       /* This mount obscures any previous ones. */
-                       *outcome = OUTCOME_NOTHING_MOUNTED;
-                       break;
-               }
+       for (i = 0; tmpfs_dirs[i]; i++) {
+               dir = tmpfs_dirs[i];
+               if (check_tmpfs(dir) >= 0)
+                       goto done;
        }
 
-       return next(fd, buf, bufsize, '\n');
+       dir = fallback_dir;
+warn:
+       printf("Warning: tempdir %s is not on tmpfs\n", dir);
+done:
+       /* Make a copy since getenv results may not remain valid forever. */
+       return strdup(dir);
 }
 
-/* which_tmpdir is called only during early boot */
-static int checked_tmpdir = 0;
-
 /*
- * Look for a tmpfs mounted at /dev/shm.  I couldn't find a cleaner
- * way to do this than to parse /proc/mounts.  statfs will return the
- * same filesystem magic number and fs id for both /dev and /dev/shm
- * when they are both tmpfs, so you can't tell if they are different
- * filesystems.  Also, there seems to be no other way of finding the
- * mount point of a filesystem from within it.
- *
- * If a /dev/shm tmpfs entry is found, then we switch to using it.
- * Otherwise, we stay with the default /tmp.
+ * Create an unlinked tempfile in a suitable tempdir. template must be the
+ * basename part of the template with a leading '/'.
  */
-static void which_tmpdir(void)
+static int __init make_tempfile(const char *template)
 {
+       char *tempname;
        int fd;
-       int found;
-       int outcome;
-       char *path;
-       char *buf;
-       size_t bufsize;
 
-       if (checked_tmpdir)
-               return;
-
-       checked_tmpdir = 1;
-
-       printf("Checking for tmpfs mount on /dev/shm...");
-
-       path = realpath("/dev/shm", NULL);
-       if (!path) {
-               printf("failed to check real path, errno = %d\n", errno);
-               return;
-       }
-       printf("%s...", path);
-
-       /*
-        * The buffer needs to be able to fit the full octal-escaped path, a
-        * space, and a trailing null in order to successfully decode it.
-        */
-       bufsize = octal_encoded_length(path, " \t\n\\") + 2;
-
-       if (bufsize < 128)
-               bufsize = 128;
-
-       buf = malloc(bufsize);
-       if (!buf) {
-               printf("malloc failed, errno = %d\n", errno);
-               goto out;
-       }
-       buf[0] = '\0';
-
-       fd = open("/proc/mounts", O_RDONLY);
-       if (fd < 0) {
-               printf("failed to open /proc/mounts, errno = %d\n", errno);
-               goto out1;
-       }
-
-       outcome = OUTCOME_NOTHING_MOUNTED;
-       while (1) {
-               found = read_mount(fd, buf, bufsize, path, &outcome);
-               if (found != 1)
-                       break;
-       }
-
-       if (found < 0) {
-               printf("read returned errno %d\n", -found);
-       } else {
-               switch (outcome) {
-               case OUTCOME_TMPFS_MOUNT:
-                       printf("OK\n");
-                       default_tmpdir = "/dev/shm";
-                       break;
-
-               case OUTCOME_NON_TMPFS_MOUNT:
-                       printf("not tmpfs\n");
-                       break;
-
-               default:
-                       printf("nothing mounted on /dev/shm\n");
-                       break;
+       if (tempdir == NULL) {
+               tempdir = choose_tempdir();
+               if (tempdir == NULL) {
+                       fprintf(stderr, "Failed to choose tempdir: %s\n",
+                               strerror(errno));
+                       return -1;
                }
        }
 
-       close(fd);
-out1:
-       free(buf);
-out:
-       free(path);
-}
-
-static int __init make_tempfile(const char *template, char **out_tempname,
-                               int do_unlink)
-{
-       char *tempname;
-       int fd;
-
-       which_tmpdir();
-       tempname = malloc(MAXPATHLEN);
+       tempname = malloc(strlen(tempdir) + strlen(template) + 1);
        if (tempname == NULL)
                return -1;
 
-       find_tempdir();
-       if ((tempdir == NULL) || (strlen(tempdir) >= MAXPATHLEN))
-               goto out;
-
-       if (template[0] != '/')
-               strcpy(tempname, tempdir);
-       else
-               tempname[0] = '\0';
-       strncat(tempname, template, MAXPATHLEN-1-strlen(tempname));
+       strcpy(tempname, tempdir);
+       strcat(tempname, template);
        fd = mkstemp(tempname);
        if (fd < 0) {
                fprintf(stderr, "open - cannot create %s: %s\n", tempname,
                        strerror(errno));
                goto out;
        }
-       if (do_unlink && (unlink(tempname) < 0)) {
+       if (unlink(tempname) < 0) {
                perror("unlink");
                goto close;
        }
-       if (out_tempname) {
-               *out_tempname = tempname;
-       } else
-               free(tempname);
+       free(tempname);
        return fd;
 close:
        close(fd);
@@ -351,14 +131,14 @@ out:
        return -1;
 }
 
-#define TEMPNAME_TEMPLATE "vm_file-XXXXXX"
+#define TEMPNAME_TEMPLATE "/vm_file-XXXXXX"
 
 static int __init create_tmp_file(unsigned long long len)
 {
        int fd, err;
        char zero;
 
-       fd = make_tempfile(TEMPNAME_TEMPLATE, NULL, 1);
+       fd = make_tempfile(TEMPNAME_TEMPLATE);
        if (fd < 0)
                exit(1);
 
@@ -402,7 +182,6 @@ int __init create_mem_file(unsigned long long len)
        return fd;
 }
 
-
 void __init check_tmpexec(void)
 {
        void *addr;
@@ -410,14 +189,13 @@ void __init check_tmpexec(void)
 
        addr = mmap(NULL, UM_KERN_PAGE_SIZE,
                    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, fd, 0);
-       printf("Checking PROT_EXEC mmap in %s...",tempdir);
-       fflush(stdout);
+       printf("Checking PROT_EXEC mmap in %s...", tempdir);
        if (addr == MAP_FAILED) {
                err = errno;
-               perror("failed");
+               printf("%s\n", strerror(err));
                close(fd);
                if (err == EPERM)
-                       printf("%s must be not mounted noexec\n",tempdir);
+                       printf("%s must be not mounted noexec\n", tempdir);
                exit(1);
        }
        printf("OK\n");
index d1b7c377a234e900b0af97d7a784e5cfeedd64f0..33f71b01fd22e74d5e3d80b26ce4694689642064 100644 (file)
@@ -79,11 +79,14 @@ else
         UTS_MACHINE := x86_64
         CHECKFLAGS += -D__x86_64__ -m64
 
+        biarch := -m64
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64
 
         # Don't autogenerate traditional x87, MMX or SSE instructions
-        KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
+        KBUILD_CFLAGS += -mno-mmx -mno-sse
+        KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+        KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
 
        # Use -mpreferred-stack-boundary=3 if supported.
        KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
index abb9eba61b500192cd816dd9283fe8c8fb70b858..dbe8dd2fe247fb0632a79d5a20d1b4714729c47d 100644 (file)
@@ -71,7 +71,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
 
 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
 
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) . \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
 
 quiet_cmd_voffset = VOFFSET $@
       cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
@@ -80,7 +80,7 @@ targets += voffset.h
 $(obj)/voffset.h: vmlinux FORCE
        $(call if_changed,voffset)
 
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
index 17684615374b269f2d2eee0a826ab04d4eaf0f22..57ab74df7eeaa3eef4954b89f36d20b4c528afd7 100644 (file)
@@ -354,7 +354,7 @@ static void parse_elf(void *output)
        free(phdrs);
 }
 
-asmlinkage void *decompress_kernel(void *rmode, memptr heap,
+asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
                                  unsigned char *input_data,
                                  unsigned long input_len,
                                  unsigned char *output,
index e6fd8a026c7be574e28d49d321d552d53c72b7cb..cd00e17744914c5b2e1d7d16da887a2809ebb5b5 100644 (file)
@@ -184,8 +184,15 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
        asm("addl %2,%0\n\t"
            "adcl $0,%0"
            : "=r" (a)
-           : "0" (a), "r" (b));
+           : "0" (a), "rm" (b));
        return a;
 }
 
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+       return (__force __wsum)add32_with_carry((__force unsigned)csum,
+                                               (__force unsigned)addend);
+}
+
 #endif /* _ASM_X86_CHECKSUM_64_H */
index b18df579c0e99b09ff33261e692135338826de3a..36f7125945e3e241cdf2ac825124fd8d7883e0ba 100644 (file)
@@ -63,6 +63,7 @@
 /* hpet memory map physical address */
 extern unsigned long hpet_address;
 extern unsigned long force_hpet_address;
+extern int boot_hpet_disable;
 extern u8 hpet_blockid;
 extern int hpet_force_user;
 extern u8 hpet_msi_disable;
index a8091216963b006145baa000e905edbdececae63..68c05398bba9b449a1324d54b584ce52d52aa8d1 100644 (file)
@@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
 {
+       ptep_clear_flush(vma, addr, ptep);
 }
 
 static inline int huge_pte_none(pte_t pte)
index c827ace3121bc0f7ff3dc9d4cc74024207adc68c..fcf2b3ae1bf0208d9e6b153dc81c6b0ed60f8352 100644 (file)
 #define MSR_IA32_MISC_ENABLE_MWAIT_BIT                 18
 #define MSR_IA32_MISC_ENABLE_MWAIT                     (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
 #define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT           22
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID               (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT);
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID               (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT          23
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE              (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
 #define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT            34
index 3a2ae4c88948dcadb5b9afbfc2e10ef502202930..31368207837c2fbcd93f73d94ccf9c2d572d5cf5 100644 (file)
@@ -31,7 +31,7 @@ static char temp_stack[4096];
  *
  * Wrapper around acpi_enter_sleep_state() to be called by assmebly.
  */
-acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+acpi_status asmlinkage __visible x86_acpi_enter_sleep_state(u8 state)
 {
        return acpi_enter_sleep_state(state);
 }
index 6ad4658de7056e02f104b505f35910587ec712f3..992060e09897dd2b068f8d8ebf6f02fa15fa2845 100644 (file)
@@ -2189,7 +2189,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
        cfg->move_in_progress = 0;
 }
 
-asmlinkage void smp_irq_move_cleanup_interrupt(void)
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 {
        unsigned vector, me;
 
@@ -3425,6 +3425,11 @@ int get_nr_irqs_gsi(void)
        return nr_irqs_gsi;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from)
+{
+       return from < nr_irqs_gsi ? nr_irqs_gsi : from;
+}
+
 int __init arch_probe_nr_irqs(void)
 {
        int nr;
index d921b7ee659525e7d040ff5ea5c6a569239a64a7..36a1bb6d1ee0d431752a170714aa6a43576ffada 100644 (file)
@@ -429,14 +429,14 @@ static inline void __smp_thermal_interrupt(void)
        smp_thermal_vector();
 }
 
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
 {
        entering_irq();
        __smp_thermal_interrupt();
        exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
 {
        entering_irq();
        trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
index fe6b1c86645b668758331b78d93ab8c549ab09c2..7245980186eea047e643af5010e1509bfc991632 100644 (file)
@@ -24,14 +24,14 @@ static inline void __smp_threshold_interrupt(void)
        mce_threshold_vector();
 }
 
-asmlinkage void smp_threshold_interrupt(void)
+asmlinkage __visible void smp_threshold_interrupt(void)
 {
        entering_irq();
        __smp_threshold_interrupt();
        exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_threshold_interrupt(void)
+asmlinkage __visible void smp_trace_threshold_interrupt(void)
 {
        entering_irq();
        trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
index aa333d9668866f808955209f8cd71737d447eafc..adb02aa62af5e310ff51ca720d1163b247489ccb 100644 (file)
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-       FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
 };
index 7c87424d4140ee488eab90f8c4828a893a7744f6..619f7699487aa1ec60f5a2687bede8e19d0a2c7a 100644 (file)
@@ -543,7 +543,8 @@ static int rapl_cpu_prepare(int cpu)
        if (phys_id < 0)
                return -1;
 
-       if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+       /* protect rdmsrl() to handle virtualization */
+       if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
                return -1;
 
        pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
index 384df5105fbc9883626ec5482151babd43ce482a..136ac74dee823005cea04ea9600a0c62cdf5685c 100644 (file)
@@ -27,6 +27,7 @@
 static int __init x86_rdrand_setup(char *s)
 {
        setup_clear_cpu_cap(X86_FEATURE_RDRAND);
+       setup_clear_cpu_cap(X86_FEATURE_RDSEED);
        return 1;
 }
 __setup("nordrand", x86_rdrand_setup);
index 6e2537c3219060b31a9344c5df0dd90d8d3afbd2..6cda0baeac9d7810dadbfadc9b9cb2b228c68c5e 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/dma.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
+#include <asm/hpet.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/irq_remapping.h>
@@ -530,6 +531,15 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
        }
 }
 
+static void __init force_disable_hpet(int num, int slot, int func)
+{
+#ifdef CONFIG_HPET_TIMER
+       boot_hpet_disable = 1;
+       pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
+#endif
+}
+
+
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
 #define QFLAG_DONE             (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -567,6 +577,12 @@ static struct chipset early_qrk[] __initdata = {
          PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
        { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
          QFLAG_APPLY_ONCE, intel_graphics_stolen },
+       /*
+        * HPET on current version of Baytrail platform has accuracy
+        * problems, disable it for now:
+        */
+       { PCI_VENDOR_ID_INTEL, 0x0f00,
+               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
        {}
 };
 
index c61a14a4a3109f92ee63bb4df2ba470540c1d266..d6c1b983699576523aacb5b8066d48311eb94a67 100644 (file)
@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
        reserve_ebda_region();
 }
 
-asmlinkage void __init i386_start_kernel(void)
+asmlinkage __visible void __init i386_start_kernel(void)
 {
        sanitize_boot_params(&boot_params);
 
index 85126ccbdf6b1e957c231a73378cc1ffe78092e8..068054f4bf20b75e124d4776ee9ffa889a6e1a0c 100644 (file)
@@ -137,7 +137,7 @@ static void __init copy_bootdata(char *real_mode_data)
        }
 }
 
-asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
+asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 {
        int i;
 
index 8d80ae0116039b6c71945737befc4f88dea097df..4177bfbc80b0d44400c563fa46affdbe79bc25d9 100644 (file)
@@ -88,7 +88,7 @@ static inline void hpet_clear_mapping(void)
 /*
  * HPET command line enable / disable
  */
-static int boot_hpet_disable;
+int boot_hpet_disable;
 int hpet_force_user;
 static int hpet_verbose;
 
index af1d14a9ebdae1ac2fddace4c15271babd1613a3..dcbbaa165bdeed61dd2b504a13ca05ced99737c7 100644 (file)
@@ -20,6 +20,8 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
+int sysctl_ldt16 = 0;
+
 #ifdef CONFIG_SMP
 static void flush_ldt(void *current_mm)
 {
@@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
         * IRET leaking the high bits of the kernel stack address.
         */
 #ifdef CONFIG_X86_64
-       if (!ldt_info.seg_32bit) {
+       if (!ldt_info.seg_32bit && !sysctl_ldt16) {
                error = -EINVAL;
                goto out_unlock;
        }
index 9c0280f93d05dbf9b644abd040b472b9c69d4f55..898d077617a99ab7c6ef055b06f409c8222a4249 100644 (file)
@@ -52,7 +52,7 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, old_rsp);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
index 3399d3a997303322a9b5dc425080dd3553f98594..52b1157c53eb7f275b22d4a74def7daced321a80 100644 (file)
@@ -191,6 +191,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
 
+       /* Certec */
+       {       /* Handle problems with rebooting on Certec BPC600 */
+               .callback = set_pci_reboot,
+               .ident = "Certec BPC600",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Certec"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"),
+               },
+       },
+
        /* Dell */
        {       /* Handle problems with rebooting on Dell DXP061 */
                .callback = set_bios_reboot,
index 7c3a5a61f2e46384c22abdf761afb4611d1efae6..be8e1bde07aa47ff373f0245e0f4b7d6d2edcfd5 100644 (file)
@@ -168,7 +168,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-asmlinkage void smp_reboot_interrupt(void)
+asmlinkage __visible void smp_reboot_interrupt(void)
 {
        ack_APIC_irq();
        irq_enter();
index 57409f6b8c623e38c1a60dfa1029bb9d194c99e6..f73b5d435bdca59ff7c12c157a36997773fc2e07 100644 (file)
@@ -357,7 +357,7 @@ exit:
  * for scheduling or signal handling. The actual stack switch is done in
  * entry.S
  */
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
        struct pt_regs *regs = eregs;
        /* Did already sync */
@@ -601,11 +601,11 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 #endif
 }
 
-asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
 {
 }
 
-asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
 {
 }
 
index f6584a90aba346566d38b6df763a9b0669fd733f..b99b9ad8540c525f79468d6fce0682681478c008 100644 (file)
@@ -26,6 +26,9 @@
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
+/* Flag below is initialized once during vSMP PCI initialization. */
+static int irq_routing_comply = 1;
+
 #if defined CONFIG_PCI && defined CONFIG_PARAVIRT
 /*
  * Interrupt control on vSMPowered systems:
@@ -33,7 +36,7 @@
  * and vice versa.
  */
 
-asmlinkage unsigned long vsmp_save_fl(void)
+asmlinkage __visible unsigned long vsmp_save_fl(void)
 {
        unsigned long flags = native_save_fl();
 
@@ -53,7 +56,7 @@ __visible void vsmp_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-asmlinkage void vsmp_irq_disable(void)
+asmlinkage __visible void vsmp_irq_disable(void)
 {
        unsigned long flags = native_save_fl();
 
@@ -61,7 +64,7 @@ asmlinkage void vsmp_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-asmlinkage void vsmp_irq_enable(void)
+asmlinkage __visible void vsmp_irq_enable(void)
 {
        unsigned long flags = native_save_fl();
 
@@ -101,6 +104,10 @@ static void __init set_vsmp_pv_ops(void)
 #ifdef CONFIG_SMP
        if (cap & ctl & BIT(8)) {
                ctl &= ~BIT(8);
+
+               /* Interrupt routing set to ignore */
+               irq_routing_comply = 0;
+
 #ifdef CONFIG_PROC_FS
                /* Don't let users change irq affinity via procfs */
                no_irq_affinity = 1;
@@ -218,7 +225,9 @@ static void vsmp_apic_post_init(void)
 {
        /* need to update phys_pkg_id */
        apic->phys_pkg_id = apicid_phys_pkg_id;
-       apic->vector_allocation_domain = fill_vector_allocation_domain;
+
+       if (!irq_routing_comply)
+               apic->vector_allocation_domain = fill_vector_allocation_domain;
 }
 
 void __init vsmp_init(void)
index f9c6e56e14b5f31229b75721db9975dac5fe58c9..9531fbb123ba2223257f48dd724b59a45289389c 100644 (file)
@@ -43,7 +43,7 @@ void update_vsyscall(struct timekeeper *tk)
        vdata->monotonic_time_sec       = tk->xtime_sec
                                        + tk->wall_to_monotonic.tv_sec;
        vdata->monotonic_time_snsec     = tk->xtime_nsec
-                                       + (tk->wall_to_monotonic.tv_nsec
+                                       + ((u64)tk->wall_to_monotonic.tv_nsec
                                                << tk->shift);
        while (vdata->monotonic_time_snsec >=
                                        (((u64)NSEC_PER_SEC) << tk->shift)) {
index 1f68c5831924d15dd741032cde2fafc46aae50ab..33e8c028842fb4b0b59bc269a973b195a104cdf8 100644 (file)
@@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
                                [number##_HIGH] = VMCS12_OFFSET(name)+4
 
 
-static const unsigned long shadow_read_only_fields[] = {
+static unsigned long shadow_read_only_fields[] = {
        /*
         * We do NOT shadow fields that are modified when L0
         * traps and emulates any vmx instruction (e.g. VMPTRLD,
@@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = {
        GUEST_LINEAR_ADDRESS,
        GUEST_PHYSICAL_ADDRESS
 };
-static const int max_shadow_read_only_fields =
+static int max_shadow_read_only_fields =
        ARRAY_SIZE(shadow_read_only_fields);
 
-static const unsigned long shadow_read_write_fields[] = {
+static unsigned long shadow_read_write_fields[] = {
        GUEST_RIP,
        GUEST_RSP,
        GUEST_CR0,
@@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = {
        HOST_FS_SELECTOR,
        HOST_GS_SELECTOR
 };
-static const int max_shadow_read_write_fields =
+static int max_shadow_read_write_fields =
        ARRAY_SIZE(shadow_read_write_fields);
 
 static const unsigned short vmcs_field_to_offset_table[] = {
@@ -3009,6 +3009,41 @@ static void free_kvm_area(void)
        }
 }
 
+static void init_vmcs_shadow_fields(void)
+{
+       int i, j;
+
+       /* No checks for read only fields yet */
+
+       for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+               switch (shadow_read_write_fields[i]) {
+               case GUEST_BNDCFGS:
+                       if (!vmx_mpx_supported())
+                               continue;
+                       break;
+               default:
+                       break;
+               }
+
+               if (j < i)
+                       shadow_read_write_fields[j] =
+                               shadow_read_write_fields[i];
+               j++;
+       }
+       max_shadow_read_write_fields = j;
+
+       /* shadowed fields that the guest can access without a vmexit */
+       for (i = 0; i < max_shadow_read_write_fields; i++) {
+               clear_bit(shadow_read_write_fields[i],
+                         vmx_vmwrite_bitmap);
+               clear_bit(shadow_read_write_fields[i],
+                         vmx_vmread_bitmap);
+       }
+       for (i = 0; i < max_shadow_read_only_fields; i++)
+               clear_bit(shadow_read_only_fields[i],
+                         vmx_vmread_bitmap);
+}
+
 static __init int alloc_kvm_area(void)
 {
        int cpu;
@@ -3039,6 +3074,8 @@ static __init int hardware_setup(void)
                enable_vpid = 0;
        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
+       if (enable_shadow_vmcs)
+               init_vmcs_shadow_fields();
 
        if (!cpu_has_vmx_ept() ||
            !cpu_has_vmx_ept_4levels()) {
@@ -8803,14 +8840,6 @@ static int __init vmx_init(void)
 
        memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
        memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
-       /* shadowed read/write fields */
-       for (i = 0; i < max_shadow_read_write_fields; i++) {
-               clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
-               clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
-       }
-       /* shadowed read only fields */
-       for (i = 0; i < max_shadow_read_only_fields; i++)
-               clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
 
        /*
         * Allow direct access to the PC debug port (it is often used for I/O
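
init_vmcs_shadow_fields() added above filters the (now non-const) shadow field tables in place and only then programs the vmread/vmwrite bitmaps, replacing the unconditional clearing loops removed from vmx_init(). A stripped-down sketch of the same in-place compaction pattern, shown on a plain int array with a stand-in predicate:

#include <stdio.h>

static int supported(int field)
{
	return field % 2 == 0;	/* stand-in for e.g. vmx_mpx_supported() */
}

int main(void)
{
	int fields[] = { 1, 2, 3, 4, 5, 6 };
	int n = sizeof(fields) / sizeof(fields[0]);
	int i, j;

	for (i = j = 0; i < n; i++) {
		if (!supported(fields[i]))
			continue;	/* drop unsupported entries */
		if (j < i)
			fields[j] = fields[i];
		j++;
	}
	n = j;	/* like max_shadow_read_write_fields = j */

	for (i = 0; i < n; i++)
		printf("%d ", fields[i]);	/* prints: 2 4 6 */
	printf("\n");
	return 0;
}
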
index 8b8fc0b792baeddf76a1b0ee3dac3a6a9b5cd0de..b6c0bacca9bdfb86b77a115353e7b848b8313c5b 100644 (file)
@@ -280,7 +280,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-asmlinkage void kvm_spurious_fault(void)
+asmlinkage __visible void kvm_spurious_fault(void)
 {
        /* Fault while not rebooting.  We want the trace. */
        BUG();
index ad1fb5f53925e8634fac38da497128fed1904c55..aae94132bc24dd42b548d3dc18214b668195c06f 100644 (file)
@@ -233,13 +233,13 @@ static void lguest_end_context_switch(struct task_struct *next)
  * flags word contains all kind of stuff, but in practice Linux only cares
  * about the interrupt flag.  Our "save_flags()" just returns that.
  */
-asmlinkage unsigned long lguest_save_fl(void)
+asmlinkage __visible unsigned long lguest_save_fl(void)
 {
        return lguest_data.irq_enabled;
 }
 
 /* Interrupts go off... */
-asmlinkage void lguest_irq_disable(void)
+asmlinkage __visible void lguest_irq_disable(void)
 {
        lguest_data.irq_enabled = 0;
 }
index db9db446b71a66fe5bd59de47232e8fa69e8c96e..43623739c7cf315038f908d9623e601d8069c35b 100644 (file)
@@ -76,7 +76,7 @@ static inline int __flip_bit(u32 msr, u8 bit, bool set)
        if (m1.q == m.q)
                return 0;
 
-       err = msr_write(msr, &m);
+       err = msr_write(msr, &m1);
        if (err)
                return err;
 
index a5449089cd9fef6e58a03174c5fe5a34cd48983a..9e6545f269e548e7cff7f4ee51876c440a7b6706 100644 (file)
@@ -302,7 +302,7 @@ static struct {
              0x242  in div_Xsig.S
  */
 
-asmlinkage void FPU_exception(int n)
+asmlinkage __visible void FPU_exception(int n)
 {
        int i, int_type;
 
@@ -492,7 +492,7 @@ int real_2op_NaN(FPU_REG const *b, u_char tagb,
 
 /* Invalid arith operation on Valid registers */
 /* Returns < 0 if the exception is unmasked */
-asmlinkage int arith_invalid(int deststnr)
+asmlinkage __visible int arith_invalid(int deststnr)
 {
 
        EXCEPTION(EX_Invalid);
@@ -507,7 +507,7 @@ asmlinkage int arith_invalid(int deststnr)
 }
 
 /* Divide a finite number by zero */
-asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
+asmlinkage __visible int FPU_divide_by_zero(int deststnr, u_char sign)
 {
        FPU_REG *dest = &st(deststnr);
        int tag = TAG_Valid;
@@ -539,7 +539,7 @@ int set_precision_flag(int flags)
 }
 
 /* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_up(void)
+asmlinkage __visible void set_precision_flag_up(void)
 {
        if (control_word & CW_Precision)
                partial_status |= (SW_Precision | SW_C1);       /* The masked response */
@@ -548,7 +548,7 @@ asmlinkage void set_precision_flag_up(void)
 }
 
 /* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_down(void)
+asmlinkage __visible void set_precision_flag_down(void)
 {
        if (control_word & CW_Precision) {      /* The masked response */
                partial_status &= ~SW_C1;
@@ -557,7 +557,7 @@ asmlinkage void set_precision_flag_down(void)
                EXCEPTION(EX_Precision);
 }
 
-asmlinkage int denormal_operand(void)
+asmlinkage __visible int denormal_operand(void)
 {
        if (control_word & CW_Denormal) {       /* The masked response */
                partial_status |= SW_Denorm_Op;
@@ -568,7 +568,7 @@ asmlinkage int denormal_operand(void)
        }
 }
 
-asmlinkage int arith_overflow(FPU_REG *dest)
+asmlinkage __visible int arith_overflow(FPU_REG *dest)
 {
        int tag = TAG_Valid;
 
@@ -596,7 +596,7 @@ asmlinkage int arith_overflow(FPU_REG *dest)
 
 }
 
-asmlinkage int arith_underflow(FPU_REG *dest)
+asmlinkage __visible int arith_underflow(FPU_REG *dest)
 {
        int tag = TAG_Valid;
 
index 01495755701bd3d068db95df291ef71096d9cf33..6440221ced0d4925d3fee4a0c11b424a6b696f2d 100644 (file)
 
 /*
  * Calling convention :
- * rdi : skb pointer
+ * rbx : skb pointer (callee saved)
  * esi : offset of byte(s) to fetch in skb (can be scratched)
- * r : copy of skb->data
+ * r10 : copy of skb->data
  * r9d : hlen = skb->len - skb->data_len
  */
-#define SKBDATA        %r8
+#define SKBDATA        %r10
 #define SKF_MAX_NEG_OFF    $(-0x200000) /* SKF_LL_OFF from filter.h */
+#define MAX_BPF_STACK (512 /* from filter.h */ + \
+       32 /* space for rbx,r13,r14,r15 */ + \
+       8 /* space for skb_copy_bits */)
 
 sk_load_word:
        .globl  sk_load_word
@@ -68,53 +71,31 @@ sk_load_byte_positive_offset:
        movzbl  (SKBDATA,%rsi),%eax
        ret
 
-/**
- * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
- *
- * Implements BPF_S_LDX_B_MSH : ldxb  4*([offset]&0xf)
- * Must preserve A accumulator (%eax)
- * Inputs : %esi is the offset value
- */
-sk_load_byte_msh:
-       .globl  sk_load_byte_msh
-       test    %esi,%esi
-       js      bpf_slow_path_byte_msh_neg
-
-sk_load_byte_msh_positive_offset:
-       .globl  sk_load_byte_msh_positive_offset
-       cmp     %esi,%r9d      /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
-       jle     bpf_slow_path_byte_msh
-       movzbl  (SKBDATA,%rsi),%ebx
-       and     $15,%bl
-       shl     $2,%bl
-       ret
-
 /* rsi contains offset and can be scratched */
 #define bpf_slow_path_common(LEN)              \
-       push    %rdi;    /* save skb */         \
+       mov     %rbx, %rdi; /* arg1 == skb */   \
        push    %r9;                            \
        push    SKBDATA;                        \
 /* rsi already has offset */                   \
        mov     $LEN,%ecx;      /* len */       \
-       lea     -12(%rbp),%rdx;                 \
+       lea     - MAX_BPF_STACK + 32(%rbp),%rdx;                        \
        call    skb_copy_bits;                  \
        test    %eax,%eax;                      \
        pop     SKBDATA;                        \
-       pop     %r9;                            \
-       pop     %rdi
+       pop     %r9;
 
 
 bpf_slow_path_word:
        bpf_slow_path_common(4)
        js      bpf_error
-       mov     -12(%rbp),%eax
+       mov     - MAX_BPF_STACK + 32(%rbp),%eax
        bswap   %eax
        ret
 
 bpf_slow_path_half:
        bpf_slow_path_common(2)
        js      bpf_error
-       mov     -12(%rbp),%ax
+       mov     - MAX_BPF_STACK + 32(%rbp),%ax
        rol     $8,%ax
        movzwl  %ax,%eax
        ret
@@ -122,21 +103,11 @@ bpf_slow_path_half:
 bpf_slow_path_byte:
        bpf_slow_path_common(1)
        js      bpf_error
-       movzbl  -12(%rbp),%eax
-       ret
-
-bpf_slow_path_byte_msh:
-       xchg    %eax,%ebx /* dont lose A , X is about to be scratched */
-       bpf_slow_path_common(1)
-       js      bpf_error
-       movzbl  -12(%rbp),%eax
-       and     $15,%al
-       shl     $2,%al
-       xchg    %eax,%ebx
+       movzbl  - MAX_BPF_STACK + 32(%rbp),%eax
        ret
 
 #define sk_negative_common(SIZE)                               \
-       push    %rdi;   /* save skb */                          \
+       mov     %rbx, %rdi; /* arg1 == skb */                   \
        push    %r9;                                            \
        push    SKBDATA;                                        \
 /* rsi already has offset */                                   \
@@ -145,10 +116,8 @@ bpf_slow_path_byte_msh:
        test    %rax,%rax;                                      \
        pop     SKBDATA;                                        \
        pop     %r9;                                            \
-       pop     %rdi;                                           \
        jz      bpf_error
 
-
 bpf_slow_path_word_neg:
        cmp     SKF_MAX_NEG_OFF, %esi   /* test range */
        jl      bpf_error       /* offset lower -> error  */
@@ -179,22 +148,12 @@ sk_load_byte_negative_offset:
        movzbl  (%rax), %eax
        ret
 
-bpf_slow_path_byte_msh_neg:
-       cmp     SKF_MAX_NEG_OFF, %esi
-       jl      bpf_error
-sk_load_byte_msh_negative_offset:
-       .globl  sk_load_byte_msh_negative_offset
-       xchg    %eax,%ebx /* dont lose A , X is about to be scratched */
-       sk_negative_common(1)
-       movzbl  (%rax),%eax
-       and     $15,%al
-       shl     $2,%al
-       xchg    %eax,%ebx
-       ret
-
 bpf_error:
 # force a return 0 from jit handler
-       xor             %eax,%eax
-       mov             -8(%rbp),%rbx
+       xor     %eax,%eax
+       mov     - MAX_BPF_STACK(%rbp),%rbx
+       mov     - MAX_BPF_STACK + 8(%rbp),%r13
+       mov     - MAX_BPF_STACK + 16(%rbp),%r14
+       mov     - MAX_BPF_STACK + 24(%rbp),%r15
        leaveq
        ret
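
The rewritten helpers above no longer save and restore %rdi around skb_copy_bits(): the skb pointer now lives in callee-saved %rbx, and every spill slot is addressed relative to the fixed frame the JIT prologue sets up (see bpf_jit_comp.c below). A small standalone program, assuming the 512-byte internal BPF stack from filter.h, that reprints the %rbp-relative offsets the assembly uses:

#include <stdio.h>

#define BPF_INTERNAL_STACK	512	/* MAX_BPF_STACK in filter.h (assumed) */
#define SAVED_REGS		32	/* rbx, r13, r14, r15 */
#define COPY_BUF		8	/* scratch buffer for skb_copy_bits() */
#define FRAME			(BPF_INTERNAL_STACK + SAVED_REGS + COPY_BUF)

int main(void)
{
	printf("frame size (sub rsp, N)        : %d\n", FRAME);
	printf("saved rbx                      : rbp-%d\n", FRAME);
	printf("saved r13                      : rbp-%d\n", FRAME - 8);
	printf("saved r14                      : rbp-%d\n", FRAME - 16);
	printf("saved r15                      : rbp-%d\n", FRAME - 24);
	printf("skb_copy_bits() scratch buffer : rbp-%d\n", FRAME - 32);
	return 0;
}
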
index dc017735bb91b7b2ec61f333b091c63accdb921b..080f3f071bb05d46a79a6daa3f848073014b4928 100644 (file)
@@ -1,6 +1,7 @@
 /* bpf_jit_comp.c : BPF JIT compiler
  *
  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
+ * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 
-/*
- * Conventions :
- *  EAX : BPF A accumulator
- *  EBX : BPF X accumulator
- *  RDI : pointer to skb   (first argument given to JIT function)
- *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
- *  ECX,EDX,ESI : scratch registers
- *  r9d : skb->len - skb->data_len (headlen)
- *  r8  : skb->data
- * -8(RBP) : saved RBX value
- * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
- */
 int bpf_jit_enable __read_mostly;
 
 /*
  * assembly code in arch/x86/net/bpf_jit.S
  */
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
-extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
+extern u8 sk_load_byte_positive_offset[];
 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
-extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
+extern u8 sk_load_byte_negative_offset[];
 
 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
@@ -56,30 +45,44 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 #define EMIT2(b1, b2)          EMIT((b1) + ((b2) << 8), 2)
 #define EMIT3(b1, b2, b3)      EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
-#define EMIT1_off32(b1, off)   do { EMIT1(b1); EMIT(off, 4);} while (0)
-
-#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
-#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
+#define EMIT1_off32(b1, off) \
+       do {EMIT1(b1); EMIT(off, 4); } while (0)
+#define EMIT2_off32(b1, b2, off) \
+       do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
+#define EMIT3_off32(b1, b2, b3, off) \
+       do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
+#define EMIT4_off32(b1, b2, b3, b4, off) \
+       do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
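
The EMIT*() helpers append raw opcode bytes to the image; the new *_off32 variants tack a 32-bit little-endian immediate onto the end. A standalone sketch (little-endian host assumed, as on x86) that reproduces the bytes of the frame-setup instruction emitted later by EMIT3_off32(0x48, 0x81, 0xEC, stacksize), i.e. "sub rsp, 552":

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint8_t image[16];
static int cnt;

static void emit(uint32_t bytes, int len)
{
	memcpy(&image[cnt], &bytes, len);	/* low-order bytes first on LE */
	cnt += len;
}

int main(void)
{
	int32_t stacksize = 512 + 32 + 8;	/* MAX_BPF_STACK + saved regs + scratch */
	int i;

	emit(0x48 | (0x81 << 8) | (0xEC << 16), 3);	/* EMIT3(0x48, 0x81, 0xEC) */
	emit((uint32_t)stacksize, 4);			/* EMIT(off, 4) */

	for (i = 0; i < cnt; i++)
		printf("%02x ", image[i]);		/* 48 81 ec 28 02 00 00 */
	printf("\n");
	return 0;
}
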
 
 static inline bool is_imm8(int value)
 {
        return value <= 127 && value >= -128;
 }
 
-static inline bool is_near(int offset)
+static inline bool is_simm32(s64 value)
 {
-       return offset <= 127 && offset >= -128;
+       return value == (s64) (s32) value;
 }
 
-#define EMIT_JMP(offset)                                               \
-do {                                                                   \
-       if (offset) {                                                   \
-               if (is_near(offset))                                    \
-                       EMIT2(0xeb, offset); /* jmp .+off8 */           \
-               else                                                    \
-                       EMIT1_off32(0xe9, offset); /* jmp .+off32 */    \
-       }                                                               \
-} while (0)
+/* mov A, X */
+#define EMIT_mov(A, X) \
+       do {if (A != X) \
+               EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \
+       } while (0)
+
+static int bpf_size_to_x86_bytes(int bpf_size)
+{
+       if (bpf_size == BPF_W)
+               return 4;
+       else if (bpf_size == BPF_H)
+               return 2;
+       else if (bpf_size == BPF_B)
+               return 1;
+       else if (bpf_size == BPF_DW)
+               return 4; /* imm32 */
+       else
+               return 0;
+}
 
 /* list of x86 cond jumps opcodes (. + s8)
  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
@@ -90,27 +93,8 @@ do {                                                                 \
 #define X86_JNE 0x75
 #define X86_JBE 0x76
 #define X86_JA  0x77
-
-#define EMIT_COND_JMP(op, offset)                              \
-do {                                                           \
-       if (is_near(offset))                                    \
-               EMIT2(op, offset); /* jxx .+off8 */             \
-       else {                                                  \
-               EMIT2(0x0f, op + 0x10);                         \
-               EMIT(offset, 4); /* jxx .+off32 */              \
-       }                                                       \
-} while (0)
-
-#define COND_SEL(CODE, TOP, FOP)       \
-       case CODE:                      \
-               t_op = TOP;             \
-               f_op = FOP;             \
-               goto cond_branch
-
-
-#define SEEN_DATAREF 1 /* might call external helpers */
-#define SEEN_XREG    2 /* ebx is used */
-#define SEEN_MEM     4 /* use mem[] for temporary storage */
+#define X86_JGE 0x7D
+#define X86_JG  0x7F
 
 static inline void bpf_flush_icache(void *start, void *end)
 {
@@ -125,26 +109,6 @@ static inline void bpf_flush_icache(void *start, void *end)
 #define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
-/* Helper to find the offset of pkt_type in sk_buff
- * We want to make sure its still a 3bit field starting at a byte boundary.
- */
-#define PKT_TYPE_MAX 7
-static int pkt_type_offset(void)
-{
-       struct sk_buff skb_probe = {
-               .pkt_type = ~0,
-       };
-       char *ct = (char *)&skb_probe;
-       unsigned int off;
-
-       for (off = 0; off < sizeof(struct sk_buff); off++) {
-               if (ct[off] == PKT_TYPE_MAX)
-                       return off;
-       }
-       pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
-       return -1;
-}
-
 struct bpf_binary_header {
        unsigned int    pages;
        /* Note : for security reasons, bpf code will follow a randomly
@@ -171,590 +135,778 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
        memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
 
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (proglen + sizeof(*header));
+       hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 
        /* insert a random number of int3 instructions before BPF code */
        *image_ptr = &header->image[prandom_u32() % hole];
        return header;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+/* pick a register outside of BPF range for JIT internal work */
+#define AUX_REG (MAX_BPF_REG + 1)
+
+/* the following table maps BPF registers to x64 registers.
+ * x64 register r12 is unused, since if used as base address register
+ * in load/store instructions, it always needs an extra byte of encoding
+ */
+static const int reg2hex[] = {
+       [BPF_REG_0] = 0,  /* rax */
+       [BPF_REG_1] = 7,  /* rdi */
+       [BPF_REG_2] = 6,  /* rsi */
+       [BPF_REG_3] = 2,  /* rdx */
+       [BPF_REG_4] = 1,  /* rcx */
+       [BPF_REG_5] = 0,  /* r8 */
+       [BPF_REG_6] = 3,  /* rbx callee saved */
+       [BPF_REG_7] = 5,  /* r13 callee saved */
+       [BPF_REG_8] = 6,  /* r14 callee saved */
+       [BPF_REG_9] = 7,  /* r15 callee saved */
+       [BPF_REG_FP] = 5, /* rbp readonly */
+       [AUX_REG] = 3,    /* r11 temp register */
+};
+
+/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
+ * which need extra byte of encoding.
+ * rax,rcx,...,rbp have simpler encoding
+ */
+static inline bool is_ereg(u32 reg)
 {
-       u8 temp[64];
-       u8 *prog;
-       unsigned int proglen, oldproglen = 0;
-       int ilen, i;
-       int t_offset, f_offset;
-       u8 t_op, f_op, seen = 0, pass;
-       u8 *image = NULL;
-       struct bpf_binary_header *header = NULL;
-       u8 *func;
-       int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
-       unsigned int cleanup_addr; /* epilogue code offset */
-       unsigned int *addrs;
-       const struct sock_filter *filter = fp->insns;
-       int flen = fp->len;
+       if (reg == BPF_REG_5 || reg == AUX_REG ||
+           (reg >= BPF_REG_7 && reg <= BPF_REG_9))
+               return true;
+       else
+               return false;
+}
 
-       if (!bpf_jit_enable)
-               return;
+/* add modifiers if 'reg' maps to x64 registers r8..r15 */
+static inline u8 add_1mod(u8 byte, u32 reg)
+{
+       if (is_ereg(reg))
+               byte |= 1;
+       return byte;
+}
 
-       addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
-       if (addrs == NULL)
-               return;
+static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
+{
+       if (is_ereg(r1))
+               byte |= 1;
+       if (is_ereg(r2))
+               byte |= 4;
+       return byte;
+}
 
-       /* Before first pass, make a rough estimation of addrs[]
-        * each bpf instruction is translated to less than 64 bytes
+/* encode dest register 'a_reg' into x64 opcode 'byte' */
+static inline u8 add_1reg(u8 byte, u32 a_reg)
+{
+       return byte + reg2hex[a_reg];
+}
+
+/* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */
+static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg)
+{
+       return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3);
+}
+
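
Together, reg2hex[] and the add_*mod()/add_*reg() helpers above are most of the instruction encoding the JIT needs: a REX prefix whose W/R/B bits come from the operand registers, followed by the opcode and a ModRM byte. A standalone sketch that recomputes, using copies of these helpers, the three bytes emitted for BPF_ALU64 | BPF_ADD | BPF_X with dst = BPF_REG_6 (rbx) and src = BPF_REG_7 (r13), i.e. "add rbx,r13":

#include <stdio.h>

enum { R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, RFP, RAUX };

static const int reg2hex[] = {		/* same mapping as above */
	[R0] = 0, [R1] = 7, [R2] = 6, [R3] = 2, [R4] = 1, [R5] = 0,
	[R6] = 3, [R7] = 5, [R8] = 6, [R9] = 7, [RFP] = 5, [RAUX] = 3,
};

static int is_ereg(int reg)
{
	return reg == R5 || reg == RAUX || (reg >= R7 && reg <= R9);
}

static unsigned char add_2mod(unsigned char byte, int r1, int r2)
{
	if (is_ereg(r1))
		byte |= 1;	/* REX.B: extends ModRM.rm (the destination) */
	if (is_ereg(r2))
		byte |= 4;	/* REX.R: extends ModRM.reg (the source) */
	return byte;
}

static unsigned char add_2reg(unsigned char byte, int a_reg, int x_reg)
{
	return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3);
}

int main(void)
{
	/* REX.W prefix, ADD r/m64,r64 opcode, ModRM: prints "4c 01 eb" */
	printf("%02x %02x %02x\n",
	       add_2mod(0x48, R6, R7), 0x01, add_2reg(0xC0, R6, R7));
	return 0;
}
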
+struct jit_context {
+       unsigned int cleanup_addr; /* epilogue code offset */
+       bool seen_ld_abs;
+};
+
+static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
+                 int oldproglen, struct jit_context *ctx)
+{
+       struct sock_filter_int *insn = bpf_prog->insnsi;
+       int insn_cnt = bpf_prog->len;
+       u8 temp[64];
+       int i;
+       int proglen = 0;
+       u8 *prog = temp;
+       int stacksize = MAX_BPF_STACK +
+               32 /* space for rbx, r13, r14, r15 */ +
+               8 /* space for skb_copy_bits() buffer */;
+
+       EMIT1(0x55); /* push rbp */
+       EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
+
+       /* sub rsp, stacksize */
+       EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
+
+       /* all classic BPF filters use R6(rbx), so save it */
+
+       /* mov qword ptr [rbp-X],rbx */
+       EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
+
+       /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
+        * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
+        * R8(r14). R9(r15) spill could be made conditional, but there is only
+        * one 'bpf_error' return path out of helper functions inside bpf_jit.S
+        * The overhead of extra spill is negligible for any filter other
+        * than synthetic ones. Therefore not worth adding complexity.
         */
-       for (proglen = 0, i = 0; i < flen; i++) {
-               proglen += 64;
-               addrs[i] = proglen;
+
+       /* mov qword ptr [rbp-X],r13 */
+       EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
+       /* mov qword ptr [rbp-X],r14 */
+       EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
+       /* mov qword ptr [rbp-X],r15 */
+       EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
+
+       /* clear A and X registers */
+       EMIT2(0x31, 0xc0); /* xor eax, eax */
+       EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
+
+       if (ctx->seen_ld_abs) {
+               /* r9d : skb->len - skb->data_len (headlen)
+                * r10 : skb->data
+                */
+               if (is_imm8(offsetof(struct sk_buff, len)))
+                       /* mov %r9d, off8(%rdi) */
+                       EMIT4(0x44, 0x8b, 0x4f,
+                             offsetof(struct sk_buff, len));
+               else
+                       /* mov %r9d, off32(%rdi) */
+                       EMIT3_off32(0x44, 0x8b, 0x8f,
+                                   offsetof(struct sk_buff, len));
+
+               if (is_imm8(offsetof(struct sk_buff, data_len)))
+                       /* sub %r9d, off8(%rdi) */
+                       EMIT4(0x44, 0x2b, 0x4f,
+                             offsetof(struct sk_buff, data_len));
+               else
+                       EMIT3_off32(0x44, 0x2b, 0x8f,
+                                   offsetof(struct sk_buff, data_len));
+
+               if (is_imm8(offsetof(struct sk_buff, data)))
+                       /* mov %r10, off8(%rdi) */
+                       EMIT4(0x4c, 0x8b, 0x57,
+                             offsetof(struct sk_buff, data));
+               else
+                       /* mov %r10, off32(%rdi) */
+                       EMIT3_off32(0x4c, 0x8b, 0x97,
+                                   offsetof(struct sk_buff, data));
        }
-       cleanup_addr = proglen; /* epilogue address */
 
-       for (pass = 0; pass < 10; pass++) {
-               u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
-               /* no prologue/epilogue for trivial filters (RET something) */
-               proglen = 0;
-               prog = temp;
+       for (i = 0; i < insn_cnt; i++, insn++) {
+               const s32 K = insn->imm;
+               u32 a_reg = insn->a_reg;
+               u32 x_reg = insn->x_reg;
+               u8 b1 = 0, b2 = 0, b3 = 0;
+               s64 jmp_offset;
+               u8 jmp_cond;
+               int ilen;
+               u8 *func;
+
+               switch (insn->code) {
+                       /* ALU */
+               case BPF_ALU | BPF_ADD | BPF_X:
+               case BPF_ALU | BPF_SUB | BPF_X:
+               case BPF_ALU | BPF_AND | BPF_X:
+               case BPF_ALU | BPF_OR | BPF_X:
+               case BPF_ALU | BPF_XOR | BPF_X:
+               case BPF_ALU64 | BPF_ADD | BPF_X:
+               case BPF_ALU64 | BPF_SUB | BPF_X:
+               case BPF_ALU64 | BPF_AND | BPF_X:
+               case BPF_ALU64 | BPF_OR | BPF_X:
+               case BPF_ALU64 | BPF_XOR | BPF_X:
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_ADD: b2 = 0x01; break;
+                       case BPF_SUB: b2 = 0x29; break;
+                       case BPF_AND: b2 = 0x21; break;
+                       case BPF_OR: b2 = 0x09; break;
+                       case BPF_XOR: b2 = 0x31; break;
+                       }
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_2mod(0x48, a_reg, x_reg));
+                       else if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT1(add_2mod(0x40, a_reg, x_reg));
+                       EMIT2(b2, add_2reg(0xC0, a_reg, x_reg));
+                       break;
 
-               if (seen_or_pass0) {
-                       EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
-                       EMIT4(0x48, 0x83, 0xec, 96);    /* subq  $96,%rsp       */
-                       /* note : must save %rbx in case bpf_error is hit */
-                       if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
-                               EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
-                       if (seen_or_pass0 & SEEN_XREG)
-                               CLEAR_X(); /* make sure we dont leek kernel memory */
-
-                       /*
-                        * If this filter needs to access skb data,
-                        * loads r9 and r8 with :
-                        *  r9 = skb->len - skb->data_len
-                        *  r8 = skb->data
+                       /* mov A, X */
+               case BPF_ALU64 | BPF_MOV | BPF_X:
+                       EMIT_mov(a_reg, x_reg);
+                       break;
+
+                       /* mov32 A, X */
+               case BPF_ALU | BPF_MOV | BPF_X:
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT1(add_2mod(0x40, a_reg, x_reg));
+                       EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg));
+                       break;
+
+                       /* neg A */
+               case BPF_ALU | BPF_NEG:
+               case BPF_ALU64 | BPF_NEG:
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_1mod(0x48, a_reg));
+                       else if (is_ereg(a_reg))
+                               EMIT1(add_1mod(0x40, a_reg));
+                       EMIT2(0xF7, add_1reg(0xD8, a_reg));
+                       break;
+
+               case BPF_ALU | BPF_ADD | BPF_K:
+               case BPF_ALU | BPF_SUB | BPF_K:
+               case BPF_ALU | BPF_AND | BPF_K:
+               case BPF_ALU | BPF_OR | BPF_K:
+               case BPF_ALU | BPF_XOR | BPF_K:
+               case BPF_ALU64 | BPF_ADD | BPF_K:
+               case BPF_ALU64 | BPF_SUB | BPF_K:
+               case BPF_ALU64 | BPF_AND | BPF_K:
+               case BPF_ALU64 | BPF_OR | BPF_K:
+               case BPF_ALU64 | BPF_XOR | BPF_K:
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_1mod(0x48, a_reg));
+                       else if (is_ereg(a_reg))
+                               EMIT1(add_1mod(0x40, a_reg));
+
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_ADD: b3 = 0xC0; break;
+                       case BPF_SUB: b3 = 0xE8; break;
+                       case BPF_AND: b3 = 0xE0; break;
+                       case BPF_OR: b3 = 0xC8; break;
+                       case BPF_XOR: b3 = 0xF0; break;
+                       }
+
+                       if (is_imm8(K))
+                               EMIT3(0x83, add_1reg(b3, a_reg), K);
+                       else
+                               EMIT2_off32(0x81, add_1reg(b3, a_reg), K);
+                       break;
+
+               case BPF_ALU64 | BPF_MOV | BPF_K:
+                       /* optimization: if imm32 is positive,
+                        * use 'mov eax, imm32' (which zero-extends imm32)
+                        * to save 2 bytes
                         */
-                       if (seen_or_pass0 & SEEN_DATAREF) {
-                               if (offsetof(struct sk_buff, len) <= 127)
-                                       /* mov    off8(%rdi),%r9d */
-                                       EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
-                               else {
-                                       /* mov    off32(%rdi),%r9d */
-                                       EMIT3(0x44, 0x8b, 0x8f);
-                                       EMIT(offsetof(struct sk_buff, len), 4);
-                               }
-                               if (is_imm8(offsetof(struct sk_buff, data_len)))
-                                       /* sub    off8(%rdi),%r9d */
-                                       EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
-                               else {
-                                       EMIT3(0x44, 0x2b, 0x8f);
-                                       EMIT(offsetof(struct sk_buff, data_len), 4);
-                               }
+                       if (K < 0) {
+                               /* 'mov rax, imm32' sign extends imm32 */
+                               b1 = add_1mod(0x48, a_reg);
+                               b2 = 0xC7;
+                               b3 = 0xC0;
+                               EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K);
+                               break;
+                       }
 
-                               if (is_imm8(offsetof(struct sk_buff, data)))
-                                       /* mov off8(%rdi),%r8 */
-                                       EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
-                               else {
-                                       /* mov off32(%rdi),%r8 */
-                                       EMIT3(0x4c, 0x8b, 0x87);
-                                       EMIT(offsetof(struct sk_buff, data), 4);
-                               }
+               case BPF_ALU | BPF_MOV | BPF_K:
+                       /* mov %eax, imm32 */
+                       if (is_ereg(a_reg))
+                               EMIT1(add_1mod(0x40, a_reg));
+                       EMIT1_off32(add_1reg(0xB8, a_reg), K);
+                       break;
+
+                       /* A %= X, A /= X, A %= K, A /= K */
+               case BPF_ALU | BPF_MOD | BPF_X:
+               case BPF_ALU | BPF_DIV | BPF_X:
+               case BPF_ALU | BPF_MOD | BPF_K:
+               case BPF_ALU | BPF_DIV | BPF_K:
+               case BPF_ALU64 | BPF_MOD | BPF_X:
+               case BPF_ALU64 | BPF_DIV | BPF_X:
+               case BPF_ALU64 | BPF_MOD | BPF_K:
+               case BPF_ALU64 | BPF_DIV | BPF_K:
+                       EMIT1(0x50); /* push rax */
+                       EMIT1(0x52); /* push rdx */
+
+                       if (BPF_SRC(insn->code) == BPF_X)
+                               /* mov r11, X */
+                               EMIT_mov(AUX_REG, x_reg);
+                       else
+                               /* mov r11, K */
+                               EMIT3_off32(0x49, 0xC7, 0xC3, K);
+
+                       /* mov rax, A */
+                       EMIT_mov(BPF_REG_0, a_reg);
+
+                       /* xor edx, edx
+                        * equivalent to 'xor rdx, rdx', but one byte less
+                        */
+                       EMIT2(0x31, 0xd2);
+
+                       if (BPF_SRC(insn->code) == BPF_X) {
+                               /* if (X == 0) return 0 */
+
+                               /* cmp r11, 0 */
+                               EMIT4(0x49, 0x83, 0xFB, 0x00);
+
+                               /* jne .+9 (skip over pop, pop, xor and jmp) */
+                               EMIT2(X86_JNE, 1 + 1 + 2 + 5);
+                               EMIT1(0x5A); /* pop rdx */
+                               EMIT1(0x58); /* pop rax */
+                               EMIT2(0x31, 0xc0); /* xor eax, eax */
+
+                               /* jmp cleanup_addr
+                                * addrs[i] - 11, because there are 11 bytes
+                                * after this insn: div, mov, pop, pop, mov
+                                */
+                               jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
+                               EMIT1_off32(0xE9, jmp_offset);
                        }
-               }
 
-               switch (filter[0].code) {
-               case BPF_S_RET_K:
-               case BPF_S_LD_W_LEN:
-               case BPF_S_ANC_PROTOCOL:
-               case BPF_S_ANC_IFINDEX:
-               case BPF_S_ANC_MARK:
-               case BPF_S_ANC_RXHASH:
-               case BPF_S_ANC_CPU:
-               case BPF_S_ANC_VLAN_TAG:
-               case BPF_S_ANC_VLAN_TAG_PRESENT:
-               case BPF_S_ANC_QUEUE:
-               case BPF_S_ANC_PKTTYPE:
-               case BPF_S_LD_W_ABS:
-               case BPF_S_LD_H_ABS:
-               case BPF_S_LD_B_ABS:
-                       /* first instruction sets A register (or is RET 'constant') */
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               /* div r11 */
+                               EMIT3(0x49, 0xF7, 0xF3);
+                       else
+                               /* div r11d */
+                               EMIT3(0x41, 0xF7, 0xF3);
+
+                       if (BPF_OP(insn->code) == BPF_MOD)
+                               /* mov r11, rdx */
+                               EMIT3(0x49, 0x89, 0xD3);
+                       else
+                               /* mov r11, rax */
+                               EMIT3(0x49, 0x89, 0xC3);
+
+                       EMIT1(0x5A); /* pop rdx */
+                       EMIT1(0x58); /* pop rax */
+
+                       /* mov A, r11 */
+                       EMIT_mov(a_reg, AUX_REG);
                        break;
-               default:
-                       /* make sure we dont leak kernel information to user */
-                       CLEAR_A(); /* A = 0 */
-               }
 
-               for (i = 0; i < flen; i++) {
-                       unsigned int K = filter[i].k;
+               case BPF_ALU | BPF_MUL | BPF_K:
+               case BPF_ALU | BPF_MUL | BPF_X:
+               case BPF_ALU64 | BPF_MUL | BPF_K:
+               case BPF_ALU64 | BPF_MUL | BPF_X:
+                       EMIT1(0x50); /* push rax */
+                       EMIT1(0x52); /* push rdx */
+
+                       /* mov r11, A */
+                       EMIT_mov(AUX_REG, a_reg);
+
+                       if (BPF_SRC(insn->code) == BPF_X)
+                               /* mov rax, X */
+                               EMIT_mov(BPF_REG_0, x_reg);
+                       else
+                               /* mov rax, K */
+                               EMIT3_off32(0x48, 0xC7, 0xC0, K);
+
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_1mod(0x48, AUX_REG));
+                       else if (is_ereg(AUX_REG))
+                               EMIT1(add_1mod(0x40, AUX_REG));
+                       /* mul(q) r11 */
+                       EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
+
+                       /* mov r11, rax */
+                       EMIT_mov(AUX_REG, BPF_REG_0);
+
+                       EMIT1(0x5A); /* pop rdx */
+                       EMIT1(0x58); /* pop rax */
+
+                       /* mov A, r11 */
+                       EMIT_mov(a_reg, AUX_REG);
+                       break;
 
-                       switch (filter[i].code) {
-                       case BPF_S_ALU_ADD_X: /* A += X; */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x01, 0xd8);              /* add %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_ADD_K: /* A += K; */
-                               if (!K)
-                                       break;
-                               if (is_imm8(K))
-                                       EMIT3(0x83, 0xc0, K);   /* add imm8,%eax */
-                               else
-                                       EMIT1_off32(0x05, K);   /* add imm32,%eax */
-                               break;
-                       case BPF_S_ALU_SUB_X: /* A -= X; */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x29, 0xd8);              /* sub    %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_SUB_K: /* A -= K */
-                               if (!K)
-                                       break;
-                               if (is_imm8(K))
-                                       EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
-                               else
-                                       EMIT1_off32(0x2d, K); /* sub imm32,%eax */
-                               break;
-                       case BPF_S_ALU_MUL_X: /* A *= X; */
-                               seen |= SEEN_XREG;
-                               EMIT3(0x0f, 0xaf, 0xc3);        /* imul %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_MUL_K: /* A *= K */
-                               if (is_imm8(K))
-                                       EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
-                               else {
-                                       EMIT2(0x69, 0xc0);              /* imul imm32,%eax */
-                                       EMIT(K, 4);
-                               }
-                               break;
-                       case BPF_S_ALU_DIV_X: /* A /= X; */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
-                               if (pc_ret0 > 0) {
-                                       /* addrs[pc_ret0 - 1] is start address of target
-                                        * (addrs[i] - 4) is the address following this jmp
-                                        * ("xor %edx,%edx; div %ebx" being 4 bytes long)
-                                        */
-                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
-                                                               (addrs[i] - 4));
-                               } else {
-                                       EMIT_COND_JMP(X86_JNE, 2 + 5);
-                                       CLEAR_A();
-                                       EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
-                               }
-                               EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
-                               break;
-                       case BPF_S_ALU_MOD_X: /* A %= X; */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
-                               if (pc_ret0 > 0) {
-                                       /* addrs[pc_ret0 - 1] is start address of target
-                                        * (addrs[i] - 6) is the address following this jmp
-                                        * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
-                                        */
-                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
-                                                               (addrs[i] - 6));
-                               } else {
-                                       EMIT_COND_JMP(X86_JNE, 2 + 5);
-                                       CLEAR_A();
-                                       EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
-                               }
-                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
-                               EMIT2(0xf7, 0xf3);      /* div %ebx */
-                               EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
-                               break;
-                       case BPF_S_ALU_MOD_K: /* A %= K; */
-                               if (K == 1) {
-                                       CLEAR_A();
-                                       break;
-                               }
-                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
-                               EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
-                               EMIT2(0xf7, 0xf1);      /* div %ecx */
-                               EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
-                               break;
-                       case BPF_S_ALU_DIV_K: /* A /= K */
-                               if (K == 1)
-                                       break;
-                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
-                               EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
-                               EMIT2(0xf7, 0xf1);      /* div %ecx */
-                               break;
-                       case BPF_S_ALU_AND_X:
-                               seen |= SEEN_XREG;
-                               EMIT2(0x21, 0xd8);              /* and %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_AND_K:
-                               if (K >= 0xFFFFFF00) {
-                                       EMIT2(0x24, K & 0xFF); /* and imm8,%al */
-                               } else if (K >= 0xFFFF0000) {
-                                       EMIT2(0x66, 0x25);      /* and imm16,%ax */
-                                       EMIT(K, 2);
-                               } else {
-                                       EMIT1_off32(0x25, K);   /* and imm32,%eax */
-                               }
-                               break;
-                       case BPF_S_ALU_OR_X:
-                               seen |= SEEN_XREG;
-                               EMIT2(0x09, 0xd8);              /* or %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_OR_K:
-                               if (is_imm8(K))
-                                       EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
-                               else
-                                       EMIT1_off32(0x0d, K);   /* or imm32,%eax */
-                               break;
-                       case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
-                       case BPF_S_ALU_XOR_X:
-                               seen |= SEEN_XREG;
-                               EMIT2(0x31, 0xd8);              /* xor %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_XOR_K: /* A ^= K; */
-                               if (K == 0)
-                                       break;
-                               if (is_imm8(K))
-                                       EMIT3(0x83, 0xf0, K);   /* xor imm8,%eax */
-                               else
-                                       EMIT1_off32(0x35, K);   /* xor imm32,%eax */
-                               break;
-                       case BPF_S_ALU_LSH_X: /* A <<= X; */
-                               seen |= SEEN_XREG;
-                               EMIT4(0x89, 0xd9, 0xd3, 0xe0);  /* mov %ebx,%ecx; shl %cl,%eax */
-                               break;
-                       case BPF_S_ALU_LSH_K:
-                               if (K == 0)
-                                       break;
-                               else if (K == 1)
-                                       EMIT2(0xd1, 0xe0); /* shl %eax */
-                               else
-                                       EMIT3(0xc1, 0xe0, K);
-                               break;
-                       case BPF_S_ALU_RSH_X: /* A >>= X; */
-                               seen |= SEEN_XREG;
-                               EMIT4(0x89, 0xd9, 0xd3, 0xe8);  /* mov %ebx,%ecx; shr %cl,%eax */
-                               break;
-                       case BPF_S_ALU_RSH_K: /* A >>= K; */
-                               if (K == 0)
-                                       break;
-                               else if (K == 1)
-                                       EMIT2(0xd1, 0xe8); /* shr %eax */
-                               else
-                                       EMIT3(0xc1, 0xe8, K);
-                               break;
-                       case BPF_S_ALU_NEG:
-                               EMIT2(0xf7, 0xd8);              /* neg %eax */
-                               break;
-                       case BPF_S_RET_K:
-                               if (!K) {
-                                       if (pc_ret0 == -1)
-                                               pc_ret0 = i;
-                                       CLEAR_A();
-                               } else {
-                                       EMIT1_off32(0xb8, K);   /* mov $imm32,%eax */
-                               }
-                               /* fallinto */
-                       case BPF_S_RET_A:
-                               if (seen_or_pass0) {
-                                       if (i != flen - 1) {
-                                               EMIT_JMP(cleanup_addr - addrs[i]);
-                                               break;
-                                       }
-                                       if (seen_or_pass0 & SEEN_XREG)
-                                               EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
-                                       EMIT1(0xc9);            /* leaveq */
-                               }
-                               EMIT1(0xc3);            /* ret */
-                               break;
-                       case BPF_S_MISC_TAX: /* X = A */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x89, 0xc3);      /* mov    %eax,%ebx */
-                               break;
-                       case BPF_S_MISC_TXA: /* A = X */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x89, 0xd8);      /* mov    %ebx,%eax */
-                               break;
-                       case BPF_S_LD_IMM: /* A = K */
-                               if (!K)
-                                       CLEAR_A();
-                               else
-                                       EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
-                               break;
-                       case BPF_S_LDX_IMM: /* X = K */
-                               seen |= SEEN_XREG;
-                               if (!K)
-                                       CLEAR_X();
+                       /* shifts */
+               case BPF_ALU | BPF_LSH | BPF_K:
+               case BPF_ALU | BPF_RSH | BPF_K:
+               case BPF_ALU | BPF_ARSH | BPF_K:
+               case BPF_ALU64 | BPF_LSH | BPF_K:
+               case BPF_ALU64 | BPF_RSH | BPF_K:
+               case BPF_ALU64 | BPF_ARSH | BPF_K:
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_1mod(0x48, a_reg));
+                       else if (is_ereg(a_reg))
+                               EMIT1(add_1mod(0x40, a_reg));
+
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_LSH: b3 = 0xE0; break;
+                       case BPF_RSH: b3 = 0xE8; break;
+                       case BPF_ARSH: b3 = 0xF8; break;
+                       }
+                       EMIT3(0xC1, add_1reg(b3, a_reg), K);
+                       break;
+
+               case BPF_ALU | BPF_END | BPF_FROM_BE:
+                       switch (K) {
+                       case 16:
+                               /* emit 'ror %ax, 8' to swap lower 2 bytes */
+                               EMIT1(0x66);
+                               if (is_ereg(a_reg))
+                                       EMIT1(0x41);
+                               EMIT3(0xC1, add_1reg(0xC8, a_reg), 8);
+                               break;
+                       case 32:
+                               /* emit 'bswap eax' to swap lower 4 bytes */
+                               if (is_ereg(a_reg))
+                                       EMIT2(0x41, 0x0F);
                                else
-                                       EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
-                               break;
-                       case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
-                               seen |= SEEN_MEM;
-                               EMIT3(0x8b, 0x45, 0xf0 - K*4);
-                               break;
-                       case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
-                               seen |= SEEN_XREG | SEEN_MEM;
-                               EMIT3(0x8b, 0x5d, 0xf0 - K*4);
-                               break;
-                       case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
-                               seen |= SEEN_MEM;
-                               EMIT3(0x89, 0x45, 0xf0 - K*4);
-                               break;
-                       case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
-                               seen |= SEEN_XREG | SEEN_MEM;
-                               EMIT3(0x89, 0x5d, 0xf0 - K*4);
-                               break;
-                       case BPF_S_LD_W_LEN: /* A = skb->len; */
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
-                               if (is_imm8(offsetof(struct sk_buff, len)))
-                                       /* mov    off8(%rdi),%eax */
-                                       EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
-                               else {
-                                       EMIT2(0x8b, 0x87);
-                                       EMIT(offsetof(struct sk_buff, len), 4);
-                               }
-                               break;
-                       case BPF_S_LDX_W_LEN: /* X = skb->len; */
-                               seen |= SEEN_XREG;
-                               if (is_imm8(offsetof(struct sk_buff, len)))
-                                       /* mov off8(%rdi),%ebx */
-                                       EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
-                               else {
-                                       EMIT2(0x8b, 0x9f);
-                                       EMIT(offsetof(struct sk_buff, len), 4);
-                               }
-                               break;
-                       case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
-                               if (is_imm8(offsetof(struct sk_buff, protocol))) {
-                                       /* movzwl off8(%rdi),%eax */
-                                       EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
-                               } else {
-                                       EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
-                                       EMIT(offsetof(struct sk_buff, protocol), 4);
-                               }
-                               EMIT2(0x86, 0xc4); /* ntohs() : xchg   %al,%ah */
-                               break;
-                       case BPF_S_ANC_IFINDEX:
-                               if (is_imm8(offsetof(struct sk_buff, dev))) {
-                                       /* movq off8(%rdi),%rax */
-                                       EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
-                               } else {
-                                       EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
-                                       EMIT(offsetof(struct sk_buff, dev), 4);
-                               }
-                               EMIT3(0x48, 0x85, 0xc0);        /* test %rax,%rax */
-                               EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
-                               EMIT2(0x8b, 0x80);      /* mov off32(%rax),%eax */
-                               EMIT(offsetof(struct net_device, ifindex), 4);
+                                       EMIT1(0x0F);
+                               EMIT1(add_1reg(0xC8, a_reg));
                                break;
-                       case BPF_S_ANC_MARK:
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
-                               if (is_imm8(offsetof(struct sk_buff, mark))) {
-                                       /* mov off8(%rdi),%eax */
-                                       EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
-                               } else {
-                                       EMIT2(0x8b, 0x87);
-                                       EMIT(offsetof(struct sk_buff, mark), 4);
-                               }
-                               break;
-                       case BPF_S_ANC_RXHASH:
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
-                               if (is_imm8(offsetof(struct sk_buff, hash))) {
-                                       /* mov off8(%rdi),%eax */
-                                       EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
-                               } else {
-                                       EMIT2(0x8b, 0x87);
-                                       EMIT(offsetof(struct sk_buff, hash), 4);
-                               }
-                               break;
-                       case BPF_S_ANC_QUEUE:
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
-                               if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
-                                       /* movzwl off8(%rdi),%eax */
-                                       EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
-                               } else {
-                                       EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
-                                       EMIT(offsetof(struct sk_buff, queue_mapping), 4);
-                               }
-                               break;
-                       case BPF_S_ANC_CPU:
-#ifdef CONFIG_SMP
-                               EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
-                               EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
-#else
-                               CLEAR_A();
-#endif
-                               break;
-                       case BPF_S_ANC_VLAN_TAG:
-                       case BPF_S_ANC_VLAN_TAG_PRESENT:
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-                               if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
-                                       /* movzwl off8(%rdi),%eax */
-                                       EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
-                               } else {
-                                       EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
-                                       EMIT(offsetof(struct sk_buff, vlan_tci), 4);
-                               }
-                               BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
-                               if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
-                                       EMIT3(0x80, 0xe4, 0xef); /* and    $0xef,%ah */
-                               } else {
-                                       EMIT3(0xc1, 0xe8, 0x0c); /* shr    $0xc,%eax */
-                                       EMIT3(0x83, 0xe0, 0x01); /* and    $0x1,%eax */
-                               }
-                               break;
-                       case BPF_S_ANC_PKTTYPE:
-                       {
-                               int off = pkt_type_offset();
-
-                               if (off < 0)
-                                       goto out;
-                               if (is_imm8(off)) {
-                                       /* movzbl off8(%rdi),%eax */
-                                       EMIT4(0x0f, 0xb6, 0x47, off);
-                               } else {
-                                       /* movbl off32(%rdi),%eax */
-                                       EMIT3(0x0f, 0xb6, 0x87);
-                                       EMIT(off, 4);
-                               }
-                               EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and    $0x7,%eax */
+                       case 64:
+                               /* emit 'bswap rax' to swap 8 bytes */
+                               EMIT3(add_1mod(0x48, a_reg), 0x0F,
+                                     add_1reg(0xC8, a_reg));
                                break;
                        }
-                       case BPF_S_LD_W_ABS:
-                               func = CHOOSE_LOAD_FUNC(K, sk_load_word);
-common_load:                   seen |= SEEN_DATAREF;
-                               t_offset = func - (image + addrs[i]);
-                               EMIT1_off32(0xbe, K); /* mov imm32,%esi */
-                               EMIT1_off32(0xe8, t_offset); /* call */
-                               break;
-                       case BPF_S_LD_H_ABS:
-                               func = CHOOSE_LOAD_FUNC(K, sk_load_half);
-                               goto common_load;
-                       case BPF_S_LD_B_ABS:
-                               func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
-                               goto common_load;
-                       case BPF_S_LDX_B_MSH:
-                               func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
-                               seen |= SEEN_DATAREF | SEEN_XREG;
-                               t_offset = func - (image + addrs[i]);
-                               EMIT1_off32(0xbe, K);   /* mov imm32,%esi */
-                               EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
-                               break;
-                       case BPF_S_LD_W_IND:
-                               func = sk_load_word;
-common_load_ind:               seen |= SEEN_DATAREF | SEEN_XREG;
-                               t_offset = func - (image + addrs[i]);
-                               if (K) {
-                                       if (is_imm8(K)) {
-                                               EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
-                                       } else {
-                                               EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
-                                               EMIT(K, 4);
-                                       }
-                               } else {
-                                       EMIT2(0x89,0xde); /* mov %ebx,%esi */
-                               }
-                               EMIT1_off32(0xe8, t_offset);    /* call sk_load_xxx_ind */
-                               break;
-                       case BPF_S_LD_H_IND:
-                               func = sk_load_half;
-                               goto common_load_ind;
-                       case BPF_S_LD_B_IND:
-                               func = sk_load_byte;
-                               goto common_load_ind;
-                       case BPF_S_JMP_JA:
-                               t_offset = addrs[i + K] - addrs[i];
-                               EMIT_JMP(t_offset);
-                               break;
-                       COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
-                       COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
-                       COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
-                       COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
-                       COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
-                       COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
-                       COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
-                       COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
-
-cond_branch:                   f_offset = addrs[i + filter[i].jf] - addrs[i];
-                               t_offset = addrs[i + filter[i].jt] - addrs[i];
-
-                               /* same targets, can avoid doing the test :) */
-                               if (filter[i].jt == filter[i].jf) {
-                                       EMIT_JMP(t_offset);
-                                       break;
-                               }
+                       break;
+
+               case BPF_ALU | BPF_END | BPF_FROM_LE:
+                       break;
 
-                               switch (filter[i].code) {
-                               case BPF_S_JMP_JGT_X:
-                               case BPF_S_JMP_JGE_X:
-                               case BPF_S_JMP_JEQ_X:
-                                       seen |= SEEN_XREG;
-                                       EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
-                                       break;
-                               case BPF_S_JMP_JSET_X:
-                                       seen |= SEEN_XREG;
-                                       EMIT2(0x85, 0xd8); /* test %ebx,%eax */
-                                       break;
-                               case BPF_S_JMP_JEQ_K:
-                                       if (K == 0) {
-                                               EMIT2(0x85, 0xc0); /* test   %eax,%eax */
-                                               break;
-                                       }
-                               case BPF_S_JMP_JGT_K:
-                               case BPF_S_JMP_JGE_K:
-                                       if (K <= 127)
-                                               EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
+                       /* ST: *(u8*)(a_reg + off) = imm */
+               case BPF_ST | BPF_MEM | BPF_B:
+                       if (is_ereg(a_reg))
+                               EMIT2(0x41, 0xC6);
+                       else
+                               EMIT1(0xC6);
+                       goto st;
+               case BPF_ST | BPF_MEM | BPF_H:
+                       if (is_ereg(a_reg))
+                               EMIT3(0x66, 0x41, 0xC7);
+                       else
+                               EMIT2(0x66, 0xC7);
+                       goto st;
+               case BPF_ST | BPF_MEM | BPF_W:
+                       if (is_ereg(a_reg))
+                               EMIT2(0x41, 0xC7);
+                       else
+                               EMIT1(0xC7);
+                       goto st;
+               case BPF_ST | BPF_MEM | BPF_DW:
+                       EMIT2(add_1mod(0x48, a_reg), 0xC7);
+
+st:                    if (is_imm8(insn->off))
+                               EMIT2(add_1reg(0x40, a_reg), insn->off);
+                       else
+                               EMIT1_off32(add_1reg(0x80, a_reg), insn->off);
+
+                       EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
+                       break;
+
+                       /* STX: *(u8*)(a_reg + off) = x_reg */
+               case BPF_STX | BPF_MEM | BPF_B:
+                       /* emit 'mov byte ptr [rax + off], al' */
+                       if (is_ereg(a_reg) || is_ereg(x_reg) ||
+                           /* have to add extra byte for x86 SIL, DIL regs */
+                           x_reg == BPF_REG_1 || x_reg == BPF_REG_2)
+                               EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88);
+                       else
+                               EMIT1(0x88);
+                       goto stx;
+               case BPF_STX | BPF_MEM | BPF_H:
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89);
+                       else
+                               EMIT2(0x66, 0x89);
+                       goto stx;
+               case BPF_STX | BPF_MEM | BPF_W:
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89);
+                       else
+                               EMIT1(0x89);
+                       goto stx;
+               case BPF_STX | BPF_MEM | BPF_DW:
+                       EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89);
+stx:                   if (is_imm8(insn->off))
+                               EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+                       else
+                               EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+                                           insn->off);
+                       break;
+
+                       /* LDX: a_reg = *(u8*)(x_reg + off) */
+               case BPF_LDX | BPF_MEM | BPF_B:
+                       /* emit 'movzx rax, byte ptr [rax + off]' */
+                       EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6);
+                       goto ldx;
+               case BPF_LDX | BPF_MEM | BPF_H:
+                       /* emit 'movzx rax, word ptr [rax + off]' */
+                       EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7);
+                       goto ldx;
+               case BPF_LDX | BPF_MEM | BPF_W:
+                       /* emit 'mov eax, dword ptr [rax+0x14]' */
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B);
+                       else
+                               EMIT1(0x8B);
+                       goto ldx;
+               case BPF_LDX | BPF_MEM | BPF_DW:
+                       /* emit 'mov rax, qword ptr [rax+0x14]' */
+                       EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B);
+ldx:                   /* If insn->off == 0 we could save one extra byte, but
+                        * the special case of x86 r13, which always needs an
+                        * offset, is not worth the hassle.
+                        */
+                       if (is_imm8(insn->off))
+                               EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off);
+                       else
+                               EMIT1_off32(add_2reg(0x80, x_reg, a_reg),
+                                           insn->off);
+                       break;
+
+                       /* STX XADD: lock *(u32*)(a_reg + off) += x_reg */
+               case BPF_STX | BPF_XADD | BPF_W:
+                       /* emit 'lock add dword ptr [rax + off], eax' */
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01);
+                       else
+                               EMIT2(0xF0, 0x01);
+                       goto xadd;
+               case BPF_STX | BPF_XADD | BPF_DW:
+                       EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01);
+xadd:                  if (is_imm8(insn->off))
+                               EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+                       else
+                               EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+                                           insn->off);
+                       break;
+
+                       /* call */
+               case BPF_JMP | BPF_CALL:
+                       func = (u8 *) __bpf_call_base + K;
+                       jmp_offset = func - (image + addrs[i]);
+                       if (ctx->seen_ld_abs) {
+                               EMIT2(0x41, 0x52); /* push %r10 */
+                               EMIT2(0x41, 0x51); /* push %r9 */
+                               /* need to adjust jmp offset, since
+                                * pop %r9, pop %r10 take 4 bytes after call insn
+                                */
+                               jmp_offset += 4;
+                       }
+                       if (!K || !is_simm32(jmp_offset)) {
+                               pr_err("unsupported bpf func %d addr %p image %p\n",
+                                      K, func, image);
+                               return -EINVAL;
+                       }
+                       EMIT1_off32(0xE8, jmp_offset);
+                       if (ctx->seen_ld_abs) {
+                               EMIT2(0x41, 0x59); /* pop %r9 */
+                               EMIT2(0x41, 0x5A); /* pop %r10 */
+                       }
+                       break;
+
+                       /* cond jump */
+               case BPF_JMP | BPF_JEQ | BPF_X:
+               case BPF_JMP | BPF_JNE | BPF_X:
+               case BPF_JMP | BPF_JGT | BPF_X:
+               case BPF_JMP | BPF_JGE | BPF_X:
+               case BPF_JMP | BPF_JSGT | BPF_X:
+               case BPF_JMP | BPF_JSGE | BPF_X:
+                       /* cmp a_reg, x_reg */
+                       EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39,
+                             add_2reg(0xC0, a_reg, x_reg));
+                       goto emit_cond_jmp;
+
+               case BPF_JMP | BPF_JSET | BPF_X:
+                       /* test a_reg, x_reg */
+                       EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85,
+                             add_2reg(0xC0, a_reg, x_reg));
+                       goto emit_cond_jmp;
+
+               case BPF_JMP | BPF_JSET | BPF_K:
+                       /* test a_reg, imm32 */
+                       EMIT1(add_1mod(0x48, a_reg));
+                       EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K);
+                       goto emit_cond_jmp;
+
+               case BPF_JMP | BPF_JEQ | BPF_K:
+               case BPF_JMP | BPF_JNE | BPF_K:
+               case BPF_JMP | BPF_JGT | BPF_K:
+               case BPF_JMP | BPF_JGE | BPF_K:
+               case BPF_JMP | BPF_JSGT | BPF_K:
+               case BPF_JMP | BPF_JSGE | BPF_K:
+                       /* cmp a_reg, imm8/32 */
+                       EMIT1(add_1mod(0x48, a_reg));
+
+                       if (is_imm8(K))
+                               EMIT3(0x83, add_1reg(0xF8, a_reg), K);
+                       else
+                               EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K);
+
+emit_cond_jmp:         /* convert BPF opcode to x86 */
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_JEQ:
+                               jmp_cond = X86_JE;
+                               break;
+                       case BPF_JSET:
+                       case BPF_JNE:
+                               jmp_cond = X86_JNE;
+                               break;
+                       case BPF_JGT:
+                               /* GT is unsigned '>', JA in x86 */
+                               jmp_cond = X86_JA;
+                               break;
+                       case BPF_JGE:
+                               /* GE is unsigned '>=', JAE in x86 */
+                               jmp_cond = X86_JAE;
+                               break;
+                       case BPF_JSGT:
+                               /* signed '>', GT in x86 */
+                               jmp_cond = X86_JG;
+                               break;
+                       case BPF_JSGE:
+                               /* signed '>=', GE in x86 */
+                               jmp_cond = X86_JGE;
+                               break;
+                       default: /* to silence gcc warning */
+                               return -EFAULT;
+                       }
+                       jmp_offset = addrs[i + insn->off] - addrs[i];
+                       if (is_imm8(jmp_offset)) {
+                               EMIT2(jmp_cond, jmp_offset);
+                       } else if (is_simm32(jmp_offset)) {
+                               EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+                       } else {
+                               pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+                               return -EFAULT;
+                       }
+
+                       break;
+
+               case BPF_JMP | BPF_JA:
+                       jmp_offset = addrs[i + insn->off] - addrs[i];
+                       if (!jmp_offset)
+                               /* optimize out nop jumps */
+                               break;
+emit_jmp:
+                       if (is_imm8(jmp_offset)) {
+                               EMIT2(0xEB, jmp_offset);
+                       } else if (is_simm32(jmp_offset)) {
+                               EMIT1_off32(0xE9, jmp_offset);
+                       } else {
+                               pr_err("jmp gen bug %llx\n", jmp_offset);
+                               return -EFAULT;
+                       }
+                       break;
+
+               case BPF_LD | BPF_IND | BPF_W:
+                       func = sk_load_word;
+                       goto common_load;
+               case BPF_LD | BPF_ABS | BPF_W:
+                       func = CHOOSE_LOAD_FUNC(K, sk_load_word);
+common_load:           ctx->seen_ld_abs = true;
+                       jmp_offset = func - (image + addrs[i]);
+                       if (!func || !is_simm32(jmp_offset)) {
+                               pr_err("unsupported bpf func %d addr %p image %p\n",
+                                      K, func, image);
+                               return -EINVAL;
+                       }
+                       if (BPF_MODE(insn->code) == BPF_ABS) {
+                               /* mov %esi, imm32 */
+                               EMIT1_off32(0xBE, K);
+                       } else {
+                               /* mov %rsi, x_reg */
+                               EMIT_mov(BPF_REG_2, x_reg);
+                               if (K) {
+                                       if (is_imm8(K))
+                                               /* add %esi, imm8 */
+                                               EMIT3(0x83, 0xC6, K);
                                        else
-                                               EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
-                                       break;
-                               case BPF_S_JMP_JSET_K:
-                                       if (K <= 0xFF)
-                                               EMIT2(0xa8, K); /* test imm8,%al */
-                                       else if (!(K & 0xFFFF00FF))
-                                               EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
-                                       else if (K <= 0xFFFF) {
-                                               EMIT2(0x66, 0xa9); /* test imm16,%ax */
-                                               EMIT(K, 2);
-                                       } else {
-                                               EMIT1_off32(0xa9, K); /* test imm32,%eax */
-                                       }
-                                       break;
+                                               /* add %esi, imm32 */
+                                               EMIT2_off32(0x81, 0xC6, K);
                                }
-                               if (filter[i].jt != 0) {
-                                       if (filter[i].jf && f_offset)
-                                               t_offset += is_near(f_offset) ? 2 : 5;
-                                       EMIT_COND_JMP(t_op, t_offset);
-                                       if (filter[i].jf)
-                                               EMIT_JMP(f_offset);
-                                       break;
-                               }
-                               EMIT_COND_JMP(f_op, f_offset);
-                               break;
-                       default:
-                               /* hmm, too complex filter, give up with jit compiler */
-                               goto out;
                        }
-                       ilen = prog - temp;
-                       if (image) {
-                               if (unlikely(proglen + ilen > oldproglen)) {
-                                       pr_err("bpb_jit_compile fatal error\n");
-                                       kfree(addrs);
-                                       module_free(NULL, header);
-                                       return;
-                               }
-                               memcpy(image + proglen, temp, ilen);
+                       /* The skb pointer is in R6 (%rbx); it will be copied
+                        * into %rdi if a skb_copy_bits() call is necessary.
+                        * The sk_load_* helpers also use %r10 and %r9d.
+                        * See bpf_jit.S.
+                        */
+                       EMIT1_off32(0xE8, jmp_offset); /* call */
+                       break;
+
+               case BPF_LD | BPF_IND | BPF_H:
+                       func = sk_load_half;
+                       goto common_load;
+               case BPF_LD | BPF_ABS | BPF_H:
+                       func = CHOOSE_LOAD_FUNC(K, sk_load_half);
+                       goto common_load;
+               case BPF_LD | BPF_IND | BPF_B:
+                       func = sk_load_byte;
+                       goto common_load;
+               case BPF_LD | BPF_ABS | BPF_B:
+                       func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
+                       goto common_load;
+
+               case BPF_JMP | BPF_EXIT:
+                       if (i != insn_cnt - 1) {
+                               jmp_offset = ctx->cleanup_addr - addrs[i];
+                               goto emit_jmp;
                        }
-                       proglen += ilen;
-                       addrs[i] = proglen;
-                       prog = temp;
+                       /* update cleanup_addr */
+                       ctx->cleanup_addr = proglen;
+                       /* mov rbx, qword ptr [rbp-X] */
+                       EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
+                       /* mov r13, qword ptr [rbp-X] */
+                       EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
+                       /* mov r14, qword ptr [rbp-X] */
+                       EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
+                       /* mov r15, qword ptr [rbp-X] */
+                       EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
+
+                       EMIT1(0xC9); /* leave */
+                       EMIT1(0xC3); /* ret */
+                       break;
+
+               default:
+                       /* By design the x64 JIT supports all BPF instructions.
+                        * This error is only seen if a new instruction was
+                        * added to the interpreter but not to the JIT, or if
+                        * there is junk in sk_filter.
+                        */
+                       pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
+                       return -EINVAL;
                }
-               /* last bpf instruction is always a RET :
-                * use it to give the cleanup instruction(s) addr
-                */
-               cleanup_addr = proglen - 1; /* ret */
-               if (seen_or_pass0)
-                       cleanup_addr -= 1; /* leaveq */
-               if (seen_or_pass0 & SEEN_XREG)
-                       cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */
 
+               ilen = prog - temp;
+               if (image) {
+                       if (unlikely(proglen + ilen > oldproglen)) {
+                               pr_err("bpf_jit_compile fatal error\n");
+                               return -EFAULT;
+                       }
+                       memcpy(image + proglen, temp, ilen);
+               }
+               proglen += ilen;
+               addrs[i] = proglen;
+               prog = temp;
+       }
+       return proglen;
+}
+
+void bpf_jit_compile(struct sk_filter *prog)
+{
+}
+
+void bpf_int_jit_compile(struct sk_filter *prog)
+{
+       struct bpf_binary_header *header = NULL;
+       int proglen, oldproglen = 0;
+       struct jit_context ctx = {};
+       u8 *image = NULL;
+       int *addrs;
+       int pass;
+       int i;
+
+       if (!bpf_jit_enable)
+               return;
+
+       if (!prog || !prog->len)
+               return;
+
+       addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
+       if (!addrs)
+               return;
+
+       /* Before the first pass, make a rough estimate of addrs[]:
+        * each BPF instruction is translated to fewer than 64 bytes.
+        */
+       for (proglen = 0, i = 0; i < prog->len; i++) {
+               proglen += 64;
+               addrs[i] = proglen;
+       }
+       ctx.cleanup_addr = proglen;
+
+       for (pass = 0; pass < 10; pass++) {
+               proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+               if (proglen <= 0) {
+                       image = NULL;
+                       if (header)
+                               module_free(NULL, header);
+                       goto out;
+               }
                if (image) {
                        if (proglen != oldproglen)
-                               pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
+                               pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
+                                      proglen, oldproglen);
                        break;
                }
                if (proglen == oldproglen) {
@@ -766,17 +918,16 @@ cond_branch:                      f_offset = addrs[i + filter[i].jf] - addrs[i];
        }
 
        if (bpf_jit_enable > 1)
-               bpf_jit_dump(flen, proglen, pass, image);
+               bpf_jit_dump(prog->len, proglen, 0, image);
 
        if (image) {
                bpf_flush_icache(header, image + proglen);
                set_memory_ro((unsigned long)header, header->pages);
-               fp->bpf_func = (void *)image;
-               fp->jited = 1;
+               prog->bpf_func = (void *)image;
+               prog->jited = 1;
        }
 out:
        kfree(addrs);
-       return;
 }
 
 static void bpf_jit_free_deferred(struct work_struct *work)
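A note on the helpers used throughout the new emitter above: the displacement/immediate range checks is_imm8() and is_simm32() are defined earlier in bpf_jit_comp.c and are not part of these hunks. A minimal sketch of what they are assumed to look like, for readers following the EMIT*() selection logic:

        /* Sketch only: range checks assumed by the encoding choices above. */
        static bool is_imm8(int value)
        {
                return value <= 127 && value >= -128;   /* fits a signed 8-bit disp/imm */
        }

        static bool is_simm32(s64 value)
        {
                return value == (s64)(s32)value;        /* fits a signed 32-bit disp/imm */
        }

Whenever insn->off or jmp_offset passes is_imm8(), the shorter disp8/rel8 encoding is emitted; otherwise the 32-bit form is used, and anything outside is_simm32() is reported as a JIT bug.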
index 81b506d5befd46e5a494d6cbef7aa71edceca7f1..524142117296898237466e9630da1dadd8bbe1fd 100644 (file)
 
 static const struct font_desc *font;
 static u32 efi_x, efi_y;
+static void *efi_fb;
+static bool early_efi_keep;
 
-static __init void early_efi_clear_scanline(unsigned int y)
+/*
+ * EFI earlyprintk needs early_ioremap() to map the framebuffer. But
+ * early_ioremap() is not usable for earlyprintk=efi,keep; ioremap() should
+ * be used instead. ioremap() becomes available after paging_init(), which is
+ * earlier than the initcall callbacks, hence this early initcall,
+ * early_efi_map_fb(), maps the whole EFI framebuffer.
+ */
+static __init int early_efi_map_fb(void)
 {
-       unsigned long base, *dst;
-       u16 len;
+       unsigned long base, size;
+
+       if (!early_efi_keep)
+               return 0;
 
        base = boot_params.screen_info.lfb_base;
-       len = boot_params.screen_info.lfb_linelength;
+       size = boot_params.screen_info.lfb_size;
+       efi_fb = ioremap(base, size);
+
+       return efi_fb ? 0 : -ENOMEM;
+}
+early_initcall(early_efi_map_fb);
+
+/*
+ * early_efi_map() maps the EFI framebuffer region [start, start + len - 1].
+ * In the earlyprintk=efi,keep case the whole framebuffer is already mapped,
+ * so just return the offset efi_fb + start.
+ */
+static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
+{
+       unsigned long base;
+
+       base = boot_params.screen_info.lfb_base;
+
+       if (efi_fb)
+               return (efi_fb + start);
+       else
+               return early_ioremap(base + start, len);
+}
 
-       dst = early_ioremap(base + y*len, len);
+static __init_refok void early_efi_unmap(void *addr, unsigned long len)
+{
+       if (!efi_fb)
+               early_iounmap(addr, len);
+}
+
+static void early_efi_clear_scanline(unsigned int y)
+{
+       unsigned long *dst;
+       u16 len;
+
+       len = boot_params.screen_info.lfb_linelength;
+       dst = early_efi_map(y*len, len);
        if (!dst)
                return;
 
        memset(dst, 0, len);
-       early_iounmap(dst, len);
+       early_efi_unmap(dst, len);
 }
 
-static __init void early_efi_scroll_up(void)
+static void early_efi_scroll_up(void)
 {
-       unsigned long base, *dst, *src;
+       unsigned long *dst, *src;
        u16 len;
        u32 i, height;
 
-       base = boot_params.screen_info.lfb_base;
        len = boot_params.screen_info.lfb_linelength;
        height = boot_params.screen_info.lfb_height;
 
        for (i = 0; i < height - font->height; i++) {
-               dst = early_ioremap(base + i*len, len);
+               dst = early_efi_map(i*len, len);
                if (!dst)
                        return;
 
-               src = early_ioremap(base + (i + font->height) * len, len);
+               src = early_efi_map((i + font->height) * len, len);
                if (!src) {
-                       early_iounmap(dst, len);
+                       early_efi_unmap(dst, len);
                        return;
                }
 
                memmove(dst, src, len);
 
-               early_iounmap(src, len);
-               early_iounmap(dst, len);
+               early_efi_unmap(src, len);
+               early_efi_unmap(dst, len);
        }
 }
 
@@ -79,16 +123,14 @@ static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h)
        }
 }
 
-static __init void
+static void
 early_efi_write(struct console *con, const char *str, unsigned int num)
 {
        struct screen_info *si;
-       unsigned long base;
        unsigned int len;
        const char *s;
        void *dst;
 
-       base = boot_params.screen_info.lfb_base;
        si = &boot_params.screen_info;
        len = si->lfb_linelength;
 
@@ -109,7 +151,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
                for (h = 0; h < font->height; h++) {
                        unsigned int n, x;
 
-                       dst = early_ioremap(base + (efi_y + h) * len, len);
+                       dst = early_efi_map((efi_y + h) * len, len);
                        if (!dst)
                                return;
 
@@ -123,7 +165,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
                                s++;
                        }
 
-                       early_iounmap(dst, len);
+                       early_efi_unmap(dst, len);
                }
 
                num -= count;
@@ -179,6 +221,9 @@ static __init int early_efi_setup(struct console *con, char *options)
        for (i = 0; i < (yres - efi_y) / font->height; i++)
                early_efi_scroll_up();
 
+       /* early_console_register will unset CON_BOOT in case ,keep */
+       if (!(con->flags & CON_BOOT))
+               early_efi_keep = true;
        return 0;
 }
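For context, the ",keep" behaviour that the new early_efi_keep flag tracks is selected on the kernel command line; assuming the usual earlyprintk syntax, booting with

        earlyprintk=efi,keep

keeps the console registered past early boot (early_console_register clears CON_BOOT), which is why the whole framebuffer has to be mapped once with ioremap() from the early initcall instead of line by line with early_ioremap().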
 
index ff0174dda810fd026bfde830747d52f48ffbdb0d..a9acde72d4ed9844b62c0ce87a5d441f21004ced 100644 (file)
@@ -75,7 +75,7 @@ static int xo1_power_state_enter(suspend_state_t pm_state)
        return 0;
 }
 
-asmlinkage int xo1_do_sleep(u8 sleep_state)
+asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
 {
        void *pgd_addr = __va(read_cr3());
 
index 304fca20d96ee3e1540d7045a9e0e25118c240b0..35e2bb6c0f372d1ff6f91a6078efa339b25efd78 100644 (file)
@@ -23,7 +23,7 @@
 extern __visible const void __nosave_begin, __nosave_end;
 
 /* Defined in hibernate_asm_64.S */
-extern asmlinkage int restore_image(void);
+extern asmlinkage __visible int restore_image(void);
 
 /*
  * Address to jump to in the last phase of restore in order to get to the image
index 2e263f367b139c30da34b9e346007e206132955e..9df017ab2285a015a111cc384a0f28d6e488bbf8 100644 (file)
@@ -9,12 +9,9 @@ SECTIONS
 #ifdef BUILD_VDSO32
 #include <asm/vdso32.h>
 
-       .hpet_sect : {
-               hpet_page = . - VDSO_OFFSET(VDSO_HPET_PAGE);
-       } :text :hpet_sect
+       hpet_page = . - VDSO_OFFSET(VDSO_HPET_PAGE);
 
-       .vvar_sect : {
-               vvar = . - VDSO_OFFSET(VDSO_VVAR_PAGE);
+       vvar = . - VDSO_OFFSET(VDSO_VVAR_PAGE);
 
        /* Place all vvars at the offsets in asm/vvar.h. */
 #define EMIT_VVAR(name, offset) vvar_ ## name = vvar + offset;
@@ -22,7 +19,6 @@ SECTIONS
 #include <asm/vvar.h>
 #undef __VVAR_KERNEL_LDS
 #undef EMIT_VVAR
-       } :text :vvar_sect
 #endif
        . = SIZEOF_HEADERS;
 
@@ -61,7 +57,12 @@ SECTIONS
         */
        . = ALIGN(0x100);
 
-       .text           : { *(.text*) }                 :text   =0x90909090
+       .text           : { *(.text*) }                 :text   =0x90909090,
+
+       /*
+        * The comma above works around a bug in gold:
+        * https://sourceware.org/bugzilla/show_bug.cgi?id=16804
+        */
 
        /DISCARD/ : {
                *(.discard)
@@ -84,8 +85,4 @@ PHDRS
        dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
        note            PT_NOTE         FLAGS(4);               /* PF_R */
        eh_frame_hdr    PT_GNU_EH_FRAME;
-#ifdef BUILD_VDSO32
-       vvar_sect       PT_NULL         FLAGS(4);               /* PF_R */
-       hpet_sect       PT_NULL         FLAGS(4);               /* PF_R */
-#endif
 }
index 00348980a3a64a49180be23bda3517d314c6bf81..e1f220e3ca6899af1d542ce7d22903e961754c7c 100644 (file)
@@ -39,6 +39,7 @@
 #ifdef CONFIG_X86_64
 #define vdso_enabled                   sysctl_vsyscall32
 #define arch_setup_additional_pages    syscall32_setup_pages
+extern int sysctl_ldt16;
 #endif
 
 /*
@@ -249,6 +250,13 @@ static struct ctl_table abi_table2[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "ldt16",
+               .data           = &sysctl_ldt16,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        {}
 };
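The new entry extends the existing abi_table2[] table, which is registered under the "abi" sysctl directory, so (assuming the standard /proc/sys layout) the knob should surface as /proc/sys/abi/ldt16 and be writable with the usual sysctl/echo idiom, e.g. "echo 1 > /proc/sys/abi/ldt16". The backing sysctl_ldt16 integer is only declared extern here; it is defined outside this hunk.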
 
index 201d09a7c46bbae56a21d15e56e222d67163f5ff..c34bfc4bbe7faeaa9fac33d23331688ecdd85078 100644 (file)
@@ -1515,7 +1515,7 @@ static void __init xen_pvh_early_guest_init(void)
 }
 
 /* First C function to be called on Xen boot */
-asmlinkage void __init xen_start_kernel(void)
+asmlinkage __visible void __init xen_start_kernel(void)
 {
        struct physdev_set_iopl set_iopl;
        int rc;
index 08f763de26fe4132d7e6dcf0a7b50a660af76319..a1207cb6472a90ce9e57deeb1bd63eb8ebd74c4d 100644 (file)
@@ -23,7 +23,7 @@ void xen_force_evtchn_callback(void)
        (void)HYPERVISOR_xen_version(0, NULL);
 }
 
-asmlinkage unsigned long xen_save_fl(void)
+asmlinkage __visible unsigned long xen_save_fl(void)
 {
        struct vcpu_info *vcpu;
        unsigned long flags;
@@ -63,7 +63,7 @@ __visible void xen_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
-asmlinkage void xen_irq_disable(void)
+asmlinkage __visible void xen_irq_disable(void)
 {
        /* There's a one instruction preempt window here.  We need to
           make sure we're don't switch CPUs between getting the vcpu
@@ -74,7 +74,7 @@ asmlinkage void xen_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-asmlinkage void xen_irq_enable(void)
+asmlinkage __visible void xen_irq_enable(void)
 {
        struct vcpu_info *vcpu;
 
index 02d6d29a63c13716168c0a68b4bdfe55c71ef9b0..3a617af60d465196bb894cebdc4042ccd4e4a92f 100644 (file)
@@ -14,6 +14,7 @@ config XTENSA
        select GENERIC_PCI_IOMAP
        select ARCH_WANT_IPC_PARSE_VERSION
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
        select IRQ_DOMAIN
        select HAVE_OPROFILE
@@ -189,6 +190,24 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
 
          If in doubt, say Y.
 
+config HIGHMEM
+       bool "High Memory Support"
+       help
+         Linux can use the full amount of RAM in the system by
+         default. However, the default MMUv2 setup only maps the
+         lowermost 128 MB of memory linearly to the areas starting
+         at 0xd0000000 (cached) and 0xd8000000 (uncached).
+         When there is more than 128 MB of memory in the system, not
+         all of it can be "permanently mapped" by the kernel.
+         The physical memory that's not permanently mapped is called
+         "high memory".
+
+         If you are compiling a kernel which will never run on a
+         machine with more than 128 MB total physical RAM, answer
+         N here.
+
+         If unsure, say Y.
+
 endmenu
 
 config XTENSA_CALIBRATE_CCOUNT
@@ -224,7 +243,6 @@ choice
 
 config XTENSA_PLATFORM_ISS
        bool "ISS"
-       depends on TTY
        select XTENSA_CALIBRATE_CCOUNT
        select SERIAL_CONSOLE
        help
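A hedged usage note for the HIGHMEM option added above: enabling it is only a configuration switch, e.g. a defconfig fragment such as

        CONFIG_HIGHMEM=y

combined with a /memory node in the board device tree that describes the RAM actually fitted; the MMUv2 linear map still covers only the lowermost 128 MB, and everything above it goes through the kmap machinery added later in this series.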
diff --git a/arch/xtensa/boot/dts/kc705.dts b/arch/xtensa/boot/dts/kc705.dts
new file mode 100644 (file)
index 0000000..742a347
--- /dev/null
@@ -0,0 +1,11 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-128m.dtsi"
+
+/ {
+       compatible = "cdns,xtensa-kc705";
+       memory@0 {
+               device_type = "memory";
+               reg = <0x00000000 0x08000000>;
+       };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
new file mode 100644 (file)
index 0000000..d3a88e0
--- /dev/null
@@ -0,0 +1,28 @@
+/ {
+       soc {
+               flash: flash@00000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x00000000 0x08000000>;
+                       bank-width = <2>;
+                       device-width = <2>;
+                       partition@0x0 {
+                               label = "data";
+                               reg = <0x00000000 0x06000000>;
+                       };
+                       partition@0x6000000 {
+                               label = "boot loader area";
+                               reg = <0x06000000 0x00800000>;
+                       };
+                       partition@0x6800000 {
+                               label = "kernel image";
+                               reg = <0x06800000 0x017e0000>;
+                       };
+                       partition@0x7fe0000 {
+                               label = "boot environment";
+                               reg = <0x07fe0000 0x00020000>;
+                       };
+               };
+        };
+};
index e5703c7beeb6dad04d2006929984b0fd8a7c3289..1d97203c18e7f787b5696b4468e4901c9b88fcbb 100644 (file)
@@ -1,26 +1,28 @@
 / {
-       flash: flash@f8000000 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "cfi-flash";
-               reg = <0xf8000000 0x01000000>;
-               bank-width = <2>;
-               device-width = <2>;
-               partition@0x0 {
-                       label = "boot loader area";
-                       reg = <0x00000000 0x00400000>;
+       soc {
+               flash: flash@08000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x08000000 0x01000000>;
+                       bank-width = <2>;
+                       device-width = <2>;
+                       partition@0x0 {
+                               label = "boot loader area";
+                               reg = <0x00000000 0x00400000>;
+                       };
+                       partition@0x400000 {
+                               label = "kernel image";
+                               reg = <0x00400000 0x00600000>;
+                       };
+                       partition@0xa00000 {
+                               label = "data";
+                               reg = <0x00a00000 0x005e0000>;
+                       };
+                       partition@0xfe0000 {
+                               label = "boot environment";
+                               reg = <0x00fe0000 0x00020000>;
+                       };
                };
-               partition@0x400000 {
-                       label = "kernel image";
-                       reg = <0x00400000 0x00600000>;
-               };
-               partition@0xa00000 {
-                       label = "data";
-                       reg = <0x00a00000 0x005e0000>;
-               };
-               partition@0xfe0000 {
-                       label = "boot environment";
-                       reg = <0x00fe0000 0x00020000>;
-               };
-        };
+       };
 };
index 6f9c10d6b689a9696d17296025a21a1167322fbc..d1c621ca8be10cba5565fa9ce1e7b82cabaef973 100644 (file)
@@ -1,18 +1,20 @@
 / {
-       flash: flash@f8000000 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "cfi-flash";
-               reg = <0xf8000000 0x00400000>;
-               bank-width = <2>;
-               device-width = <2>;
-               partition@0x0 {
-                       label = "boot loader area";
-                       reg = <0x00000000 0x003f0000>;
+       soc {
+               flash: flash@08000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x08000000 0x00400000>;
+                       bank-width = <2>;
+                       device-width = <2>;
+                       partition@0x0 {
+                               label = "boot loader area";
+                               reg = <0x00000000 0x003f0000>;
+                       };
+                       partition@0x3f0000 {
+                               label = "boot environment";
+                               reg = <0x003f0000 0x00010000>;
+                       };
                };
-               partition@0x3f0000 {
-                       label = "boot environment";
-                       reg = <0x003f0000 0x00010000>;
-               };
-        };
+       };
 };
index e7370b11348e8d06c113d420704664740dbdec9b..dec9178840f695f0bcdd1de3cd5b17339fce8627 100644 (file)
                };
        };
 
-       serial0: serial@fd050020 {
-               device_type = "serial";
-               compatible = "ns16550a";
-               no-loopback-test;
-               reg = <0xfd050020 0x20>;
-               reg-shift = <2>;
-               interrupts = <0 1>; /* external irq 0 */
-               clocks = <&osc>;
-       };
+       soc {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "simple-bus";
+               ranges = <0x00000000 0xf0000000 0x10000000>;
 
-       enet0: ethoc@fd030000 {
-               compatible = "opencores,ethoc";
-               reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
-               interrupts = <1 1>; /* external irq 1 */
-               local-mac-address = [00 50 c2 13 6f 00];
-               clocks = <&osc>;
+               serial0: serial@0d050020 {
+                       device_type = "serial";
+                       compatible = "ns16550a";
+                       no-loopback-test;
+                       reg = <0x0d050020 0x20>;
+                       reg-shift = <2>;
+                       interrupts = <0 1>; /* external irq 0 */
+                       clocks = <&osc>;
+               };
+
+               enet0: ethoc@0d030000 {
+                       compatible = "opencores,ethoc";
+                       reg = <0x0d030000 0x4000 0x0d800000 0x4000>;
+                       interrupts = <1 1>; /* external irq 1 */
+                       local-mac-address = [00 50 c2 13 6f 00];
+                       clocks = <&osc>;
+               };
        };
 };
index 23392c5630ce9939b04bae3ff6de4fd67417852a..892aab399ac873c885953e24430a3bc741aca6ed 100644 (file)
@@ -37,23 +37,14 @@ typedef struct bp_tag {
        unsigned long data[0];  /* data */
 } bp_tag_t;
 
-typedef struct meminfo {
+struct bp_meminfo {
        unsigned long type;
        unsigned long start;
        unsigned long end;
-} meminfo_t;
-
-#define SYSMEM_BANKS_MAX 5
+};
 
 #define MEMORY_TYPE_CONVENTIONAL       0x1000
 #define MEMORY_TYPE_NONE               0x2000
 
-typedef struct sysmem_info {
-       int nr_banks;
-       meminfo_t bank[SYSMEM_BANKS_MAX];
-} sysmem_info_t;
-
-extern sysmem_info_t sysmem;
-
 #endif
 #endif
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
new file mode 100644 (file)
index 0000000..9f6c33d
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of the consistent memory region backwards.
+ * This also lets us do fail-safe vmalloc(): we can
+ * guarantee that these special addresses and vmalloc()-ed
+ * addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use fixmap_set(idx, phys) to associate
+ * physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+       /* reserved pte's for temporary kernel mappings */
+       FIX_KMAP_BEGIN,
+       FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+#endif
+       __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP     (VMALLOC_START - PAGE_SIZE)
+#define FIXADDR_SIZE   (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START  ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#include <asm-generic/fixmap.h>
+
+#define kmap_get_fixmap_pte(vaddr) \
+       pte_offset_kernel( \
+               pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
+               (vaddr) \
+       )
+
+#endif
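The index-to-address conversion itself comes from the asm-generic header included above; a minimal sketch of the relationship that FIXADDR_TOP and the enum establish (assuming the generic definition, not code from this patch):

        /* Sketch only: each fixmap slot sits one page below the previous one,
         * growing downward from just under VMALLOC_START.
         */
        #define __fix_to_virt(idx)      (FIXADDR_TOP - ((idx) << PAGE_SHIFT))

        static __always_inline unsigned long fix_to_virt(const unsigned int idx)
        {
                BUILD_BUG_ON(idx >= __end_of_fixed_addresses);  /* compile-time bound */
                return __fix_to_virt(idx);
        }

so FIX_KMAP_BEGIN + n resolves to an address just below VMALLOC_START, which is what the xtensa kmap_atomic() added later in this patch relies on via __fix_to_virt().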
index 80be15124697d8e85a23859a466e97f178936010..2653ef5d55f1c9ed92d35d50f91136732334ec1b 100644 (file)
@@ -6,11 +6,54 @@
  * this archive for more details.
  *
  * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
  */
 
 #ifndef _XTENSA_HIGHMEM_H
 #define _XTENSA_HIGHMEM_H
 
-extern void flush_cache_kmaps(void);
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
+
+#define PKMAP_BASE             (FIXADDR_START - PMD_SIZE)
+#define LAST_PKMAP             PTRS_PER_PTE
+#define LAST_PKMAP_MASK                (LAST_PKMAP - 1)
+#define PKMAP_NR(virt)         (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)         (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot              PAGE_KERNEL
+
+extern pte_t *pkmap_page_table;
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+
+static inline void *kmap(struct page *page)
+{
+       BUG_ON(in_interrupt());
+       if (!PageHighMem(page))
+               return page_address(page);
+       return kmap_high(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+       BUG_ON(in_interrupt());
+       if (!PageHighMem(page))
+               return;
+       kunmap_high(page);
+}
+
+static inline void flush_cache_kmaps(void)
+{
+       flush_cache_all();
+}
+
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
+void kmap_init(void);
 
 #endif
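As a quick illustration of the API this header now provides, here is a hedged sketch of a typical caller (the helper name is hypothetical, not part of the patch):

        /* Sketch only: clear a page that may live in high memory. */
        static void clear_possibly_high_page(struct page *page)
        {
                void *vaddr = kmap(page);       /* page_address() shortcut for lowmem */

                memset(vaddr, 0, PAGE_SIZE);
                kunmap(page);                   /* no-op for lowmem pages */
        }

kmap()/kunmap() may sleep and are therefore only usable from process context; the kmap_atomic()/__kunmap_atomic() pair declared above is what the cache-flush paths in this patch use instead.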
index 216446295ada686ccb4b454319b2b45e85d96720..4b0ca35a93b1a731bf0ce2c1db32f9fabb890fef 100644 (file)
@@ -310,6 +310,10 @@ set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
        update_pte(ptep, pteval);
 }
 
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+       update_pte(ptep, pteval);
+}
 
 static inline void
 set_pmd(pmd_t *pmdp, pmd_t pmdval)
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
new file mode 100644 (file)
index 0000000..c015c5c
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * sysmem-related prototypes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_SYSMEM_H
+#define _XTENSA_SYSMEM_H
+
+#define SYSMEM_BANKS_MAX 31
+
+struct meminfo {
+       unsigned long start;
+       unsigned long end;
+};
+
+/*
+ * Bank array is sorted by .start.
+ * Banks don't overlap and there's at least one page gap
+ * between adjacent bank entries.
+ */
+struct sysmem_info {
+       int nr_banks;
+       struct meminfo bank[SYSMEM_BANKS_MAX];
+};
+
+extern struct sysmem_info sysmem;
+
+int add_sysmem_bank(unsigned long start, unsigned long end);
+int mem_reserve(unsigned long, unsigned long, int);
+void bootmem_init(void);
+void zones_init(void);
+
+#endif /* _XTENSA_SYSMEM_H */
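Given the sorted, non-overlapping invariant documented above, callers can walk the bank array linearly; a hedged sketch of such a walk (hypothetical helper, not part of the patch):

        /* Sketch only: total amount of memory described by the sysmem banks. */
        static unsigned long sysmem_total_bytes(void)
        {
                unsigned long total = 0;
                int i;

                for (i = 0; i < sysmem.nr_banks; i++)
                        total += sysmem.bank[i].end - sysmem.bank[i].start;
                return total;
        }

add_sysmem_bank() and mem_reserve() are expected to preserve that invariant as banks are added or carved up, which is what the setup.c changes later in this patch rely on.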
index fc34274ce41bc81b3ddaa167fe887179f04c8ad7..06875feb27c28ebb870820706dc286cd9740f1ce 100644 (file)
@@ -36,6 +36,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma,
                unsigned long page);
 void local_flush_tlb_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_SMP
 
@@ -44,12 +45,7 @@ void flush_tlb_mm(struct mm_struct *);
 void flush_tlb_page(struct vm_area_struct *, unsigned long);
 void flush_tlb_range(struct vm_area_struct *, unsigned long,
                unsigned long);
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-               unsigned long end)
-{
-       flush_tlb_all();
-}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #else /* !CONFIG_SMP */
 
@@ -58,7 +54,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 #define flush_tlb_page(vma, page)         local_flush_tlb_page(vma, page)
 #define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, \
                                                                 end)
-#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+                                                                       end)
 
 #endif /* CONFIG_SMP */
 
index 84fe931bb60e1f012417d202d002813b12cf68aa..9757bb74e53296f66372dd08506d94163e8801cc 100644 (file)
@@ -50,6 +50,7 @@
 #include <asm/param.h>
 #include <asm/traps.h>
 #include <asm/smp.h>
+#include <asm/sysmem.h>
 
 #include <platform/hardware.h>
 
@@ -88,12 +89,6 @@ static char __initdata command_line[COMMAND_LINE_SIZE];
 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
 #endif
 
-sysmem_info_t __initdata sysmem;
-
-extern int mem_reserve(unsigned long, unsigned long, int);
-extern void bootmem_init(void);
-extern void zones_init(void);
-
 /*
  * Boot parameter parsing.
  *
@@ -113,31 +108,14 @@ typedef struct tagtable {
 
 /* parse current tag */
 
-static int __init add_sysmem_bank(unsigned long type, unsigned long start,
-               unsigned long end)
-{
-       if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
-               printk(KERN_WARNING
-                               "Ignoring memory bank 0x%08lx size %ldKB\n",
-                               start, end - start);
-               return -EINVAL;
-       }
-       sysmem.bank[sysmem.nr_banks].type  = type;
-       sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(start);
-       sysmem.bank[sysmem.nr_banks].end   = end & PAGE_MASK;
-       sysmem.nr_banks++;
-
-       return 0;
-}
-
 static int __init parse_tag_mem(const bp_tag_t *tag)
 {
-       meminfo_t *mi = (meminfo_t *)(tag->data);
+       struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
 
        if (mi->type != MEMORY_TYPE_CONVENTIONAL)
                return -1;
 
-       return add_sysmem_bank(mi->type, mi->start, mi->end);
+       return add_sysmem_bank(mi->start, mi->end);
 }
 
 __tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -146,8 +124,8 @@ __tagtable(BP_TAG_MEMORY, parse_tag_mem);
 
 static int __init parse_tag_initrd(const bp_tag_t* tag)
 {
-       meminfo_t* mi;
-       mi = (meminfo_t*)(tag->data);
+       struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
        initrd_start = (unsigned long)__va(mi->start);
        initrd_end = (unsigned long)__va(mi->end);
 
@@ -255,7 +233,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
                return;
 
        size &= PAGE_MASK;
-       add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size);
+       add_sysmem_bank(base, base + size);
 }
 
 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -292,8 +270,6 @@ device_initcall(xtensa_device_probe);
 
 void __init init_arch(bp_tag_t *bp_start)
 {
-       sysmem.nr_banks = 0;
-
        /* Parse boot parameters */
 
        if (bp_start)
@@ -304,10 +280,9 @@ void __init init_arch(bp_tag_t *bp_start)
 #endif
 
        if (sysmem.nr_banks == 0) {
-               sysmem.nr_banks = 1;
-               sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
-               sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
-                                    + PLATFORM_DEFAULT_MEM_SIZE;
+               add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
+                               PLATFORM_DEFAULT_MEM_START +
+                               PLATFORM_DEFAULT_MEM_SIZE);
        }
 
 #ifdef CONFIG_CMDLINE_BOOL
@@ -487,7 +462,7 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start < initrd_end) {
                initrd_is_mapped = mem_reserve(__pa(initrd_start),
-                                              __pa(initrd_end), 0);
+                                              __pa(initrd_end), 0) == 0;
                initrd_below_start_ok = 1;
        } else {
                initrd_start = 0;
@@ -532,6 +507,7 @@ void __init setup_arch(char **cmdline_p)
                    __pa(&_Level6InterruptVector_text_end), 0);
 #endif
 
+       parse_early_param();
        bootmem_init();
 
        unflatten_and_copy_device_tree();
index aa8bd8717927185bd5b422316885ddaa98d889f7..40b5a3771fb063fb02ffaa7fe07a426a3a684677 100644 (file)
@@ -496,6 +496,21 @@ void flush_tlb_range(struct vm_area_struct *vma,
        on_each_cpu(ipi_flush_tlb_range, &fd, 1);
 }
 
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+       struct flush_data *fd = arg;
+       local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       struct flush_data fd = {
+               .addr1 = start,
+               .addr2 = end,
+       };
+       on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
 /* Cache flush functions */
 
 static void ipi_flush_cache_all(void *arg)
index 80b33ed51f31174fd41a53bebd957c140517d8f9..4d2872fd9bb5ebf89bb15127841e5ae28e8d9b58 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/in6.h>
 
 #include <asm/uaccess.h>
+#include <asm/cacheflush.h>
 #include <asm/checksum.h>
 #include <asm/dma.h>
 #include <asm/io.h>
@@ -105,6 +106,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
  * Architecture-specific symbols
  */
 EXPORT_SYMBOL(__xtensa_copy_user);
+EXPORT_SYMBOL(__invalidate_icache_range);
 
 /*
  * Kernel hacking ...
@@ -127,3 +129,8 @@ EXPORT_SYMBOL(common_exception_return);
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(_mcount);
 #endif
+
+EXPORT_SYMBOL(__invalidate_dcache_range);
+#if XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif
index f0b646d2f843feb5945601dbe65966a4a71cfec9..f54f78e24d7b5e72733f23da806272a4cba6b882 100644 (file)
@@ -4,3 +4,4 @@
 
 obj-y                  := init.o cache.o misc.o
 obj-$(CONFIG_MMU)      += fault.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM)  += highmem.o
index ba4c47f291b17843047a410549b09cb59ba52967..63cbb867dadd64d8907176f1bd60420f8a41217a 100644 (file)
  *
  */
 
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
+#error "HIGHMEM is not supported on cores with aliasing cache."
+#endif
+
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
 
 /*
@@ -179,10 +183,11 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 #else
        if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
            && (vma->vm_flags & VM_EXEC) != 0) {
-               unsigned long paddr = (unsigned long) page_address(page);
+               unsigned long paddr = (unsigned long)kmap_atomic(page);
                __flush_dcache_page(paddr);
                __invalidate_icache_page(paddr);
                set_bit(PG_arch_1, &page->flags);
+               kunmap_atomic((void *)paddr);
        }
 #endif
 }
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
new file mode 100644 (file)
index 0000000..17a8c0d
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+void *kmap_atomic(struct page *page)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+       int type;
+
+       pagefault_disable();
+       if (!PageHighMem(page))
+               return page_address(page);
+
+       type = kmap_atomic_idx_push();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+       set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+
+       return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+       int idx, type;
+
+       if (kvaddr >= (void *)FIXADDR_START &&
+           kvaddr < (void *)FIXADDR_TOP) {
+               type = kmap_atomic_idx();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+               /*
+                * Force other mappings to Oops if they try to access this
+                * pte without first remapping it.  Keeping stale mappings
+                * around is also a bad idea, in case the page changes
+                * cacheability attributes or becomes a protected page in a
+                * hypervisor.
+                */
+               pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+               local_flush_tlb_kernel_range((unsigned long)kvaddr,
+                                            (unsigned long)kvaddr + PAGE_SIZE);
+
+               kmap_atomic_idx_pop();
+       }
+
+       pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+void __init kmap_init(void)
+{
+       unsigned long kmap_vstart;
+
+       /* cache the first kmap pte */
+       kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+       kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
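Callers consume the pair above through the generic kunmap_atomic() wrapper from <linux/highmem.h>, which ends up in __kunmap_atomic(). As an illustration only (the helper below is invented, not part of the diff), clearing a possibly-highmem page with these primitives looks roughly like the generic clear_highpage() helper:

	#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
	#include <linux/mm.h>		/* struct page, PAGE_SIZE */
	#include <linux/string.h>	/* memset() */

	static void zero_page_atomic(struct page *page)
	{
		/* Lowmem pages return their linear address; highmem pages get a
		 * temporary per-CPU fixmap slot with pagefaults disabled. */
		void *vaddr = kmap_atomic(page);

		memset(vaddr, 0, PAGE_SIZE);

		/* Clears the fixmap pte, flushes the kernel TLB for that page
		 * and re-enables pagefaults. */
		kunmap_atomic(vaddr);
	}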
index aff108df92d3a301e8ba0ccaf7e13a5fb26c9038..4224256bb215f17c52d91662f186ecb250dee361 100644 (file)
@@ -8,6 +8,7 @@
  * for more details.
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
  *
  * Chris Zankel        <chris@zankel.net>
  * Joe Taylor  <joe@tensilica.com, joetylr@yahoo.com>
@@ -19,6 +20,7 @@
 #include <linux/errno.h>
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/swap.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <asm/bootparam.h>
 #include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/sysmem.h>
+
+struct sysmem_info sysmem __initdata;
+
+static void __init sysmem_dump(void)
+{
+       unsigned i;
+
+       pr_debug("Sysmem:\n");
+       for (i = 0; i < sysmem.nr_banks; ++i)
+               pr_debug("  0x%08lx - 0x%08lx (%ldK)\n",
+                        sysmem.bank[i].start, sysmem.bank[i].end,
+                        (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
+}
+
+/*
+ * Find the bank with the largest .start such that bank.start <= start
+ */
+static inline struct meminfo * __init find_bank(unsigned long start)
+{
+       unsigned i;
+       struct meminfo *it = NULL;
+
+       for (i = 0; i < sysmem.nr_banks; ++i)
+               if (sysmem.bank[i].start <= start)
+                       it = sysmem.bank + i;
+               else
+                       break;
+       return it;
+}
+
+/*
+ * Move all memory banks starting at 'from' to a new place at 'to' and
+ * adjust nr_banks accordingly. Both 'from' and 'to' must point inside the
+ * sysmem.bank array.
+ *
+ * Returns: 0 (success), -ENOMEM (not enough space left in the sysmem.bank array).
+ */
+static int __init move_banks(struct meminfo *to, struct meminfo *from)
+{
+       unsigned n = sysmem.nr_banks - (from - sysmem.bank);
+
+       if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
+               return -ENOMEM;
+       if (to != from)
+               memmove(to, from, n * sizeof(struct meminfo));
+       sysmem.nr_banks += to - from;
+       return 0;
+}
+
+/*
+ * Add a new bank to sysmem. The resulting sysmem is the union of the
+ * original sysmem and the new bank.
+ *
+ * Returns: 0 (success), < 0 (error)
+ */
+int __init add_sysmem_bank(unsigned long start, unsigned long end)
+{
+       unsigned i;
+       struct meminfo *it = NULL;
+       unsigned long sz;
+       unsigned long bank_sz = 0;
+
+       if (start == end ||
+           (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
+               pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
+                       start, end - start);
+               return -EINVAL;
+       }
+
+       start = PAGE_ALIGN(start);
+       end &= PAGE_MASK;
+       sz = end - start;
+
+       it = find_bank(start);
+
+       if (it)
+               bank_sz = it->end - it->start;
+
+       if (it && bank_sz >= start - it->start) {
+               if (end - it->start > bank_sz)
+                       it->end = end;
+               else
+                       return 0;
+       } else {
+               if (!it)
+                       it = sysmem.bank;
+               else
+                       ++it;
+
+               if (it - sysmem.bank < sysmem.nr_banks &&
+                   it->start - start <= sz) {
+                       it->start = start;
+                       if (it->end - it->start < sz)
+                               it->end = end;
+                       else
+                               return 0;
+               } else {
+                       if (move_banks(it + 1, it) < 0) {
+                               pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
+                                       start, end - start);
+                               return -EINVAL;
+                       }
+                       it->start = start;
+                       it->end = end;
+                       return 0;
+               }
+       }
+       sz = it->end - it->start;
+       for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
+               if (sysmem.bank[i].start - it->start <= sz) {
+                       if (sz < sysmem.bank[i].end - it->start)
+                               it->end = sysmem.bank[i].end;
+               } else {
+                       break;
+               }
+
+       move_banks(it + 1, sysmem.bank + i);
+       return 0;
+}
 
 /*
  * mem_reserve(start, end, must_exist)
  *
  * Reserve some memory from the memory pool.
+ * If must_exist is set and a part of the region being reserved does not
+ * exist, the memory map is not altered.
  *
  * Parameters:
 *  start      Start of region,
 *  end        End of region,
 *  must_exist Must exist in memory pool.
  *
  * Returns:
- *  0 (memory area couldn't be mapped)
- * -1 (success)
+ *  0 (success)
+ *  < 0 (error)
  */
 
 int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
 {
-       int i;
-
-       if (start == end)
-               return 0;
+       struct meminfo *it;
+       struct meminfo *rm = NULL;
+       unsigned long sz;
+       unsigned long bank_sz = 0;
 
        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);
+       sz = end - start;
+       if (!sz)
+               return -EINVAL;
 
-       for (i = 0; i < sysmem.nr_banks; i++)
-               if (start < sysmem.bank[i].end
-                   && end >= sysmem.bank[i].start)
-                       break;
+       it = find_bank(start);
+
+       if (it)
+               bank_sz = it->end - it->start;
 
-       if (i == sysmem.nr_banks) {
-               if (must_exist)
-                       printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
-                               "not in any region!\n", start, end);
-               return 0;
+       if ((!it || end - it->start > bank_sz) && must_exist) {
+               pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
+                       start, end);
+               return -EINVAL;
        }
 
-       if (start > sysmem.bank[i].start) {
-               if (end < sysmem.bank[i].end) {
-                       /* split entry */
-                       if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
-                               panic("meminfo overflow\n");
-                       sysmem.bank[sysmem.nr_banks].start = end;
-                       sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
-                       sysmem.nr_banks++;
+       if (it && start - it->start < bank_sz) {
+               if (start == it->start) {
+                       if (end - it->start < bank_sz) {
+                               it->start = end;
+                               return 0;
+                       } else {
+                               rm = it;
+                       }
+               } else {
+                       it->end = start;
+                       if (end - it->start < bank_sz)
+                               return add_sysmem_bank(end,
+                                                      it->start + bank_sz);
+                       ++it;
                }
-               sysmem.bank[i].end = start;
+       }
 
-       } else if (end < sysmem.bank[i].end) {
-               sysmem.bank[i].start = end;
+       if (!it)
+               it = sysmem.bank;
 
-       } else {
-               /* remove entry */
-               sysmem.nr_banks--;
-               sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
-               sysmem.bank[i].end   = sysmem.bank[sysmem.nr_banks].end;
+       for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
+               if (it->end - start <= sz) {
+                       if (!rm)
+                               rm = it;
+               } else {
+                       if (it->start - start < sz)
+                               it->start = end;
+                       break;
+               }
        }
-       return -1;
+
+       if (rm)
+               move_banks(rm, it);
+
+       return 0;
 }
 
 
@@ -99,6 +239,7 @@ void __init bootmem_init(void)
        unsigned long bootmap_start, bootmap_size;
        int i;
 
+       sysmem_dump();
        max_low_pfn = max_pfn = 0;
        min_low_pfn = ~0;
 
@@ -156,19 +297,13 @@ void __init bootmem_init(void)
 
 void __init zones_init(void)
 {
-       unsigned long zones_size[MAX_NR_ZONES];
-       int i;
-
        /* All pages are DMA-able, so we put them all in the DMA zone. */
-
-       zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
-       for (i = 1; i < MAX_NR_ZONES; i++)
-               zones_size[i] = 0;
-
+       unsigned long zones_size[MAX_NR_ZONES] = {
+               [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
 #ifdef CONFIG_HIGHMEM
-       zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+               [ZONE_HIGHMEM] = max_pfn - max_low_pfn,
 #endif
-
+       };
        free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
@@ -178,16 +313,38 @@ void __init zones_init(void)
 
 void __init mem_init(void)
 {
-       max_mapnr = max_low_pfn - ARCH_PFN_OFFSET;
-       high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
 #ifdef CONFIG_HIGHMEM
-#error HIGHGMEM not implemented in init.c
+       unsigned long tmp;
+
+       reset_all_zones_managed_pages();
+       for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
+               free_highmem_page(pfn_to_page(tmp));
 #endif
 
+       max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+       high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
+
        free_all_bootmem();
 
        mem_init_print_info(NULL);
+       pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_HIGHMEM
+               "    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+               "    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+#endif
+               "    vmalloc : 0x%08x - 0x%08x  (%5u MB)\n"
+               "    lowmem  : 0x%08x - 0x%08lx  (%5lu MB)\n",
+#ifdef CONFIG_HIGHMEM
+               PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+               (LAST_PKMAP*PAGE_SIZE) >> 10,
+               FIXADDR_START, FIXADDR_TOP,
+               (FIXADDR_TOP - FIXADDR_START) >> 10,
+#endif
+               VMALLOC_START, VMALLOC_END,
+               (VMALLOC_END - VMALLOC_START) >> 20,
+               PAGE_OFFSET, PAGE_OFFSET +
+               (max_low_pfn - min_low_pfn) * PAGE_SIZE,
+               ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -204,3 +361,53 @@ void free_initmem(void)
 {
        free_initmem_default(-1);
 }
+
+static void __init parse_memmap_one(char *p)
+{
+       char *oldp;
+       unsigned long start_at, mem_size;
+
+       if (!p)
+               return;
+
+       oldp = p;
+       mem_size = memparse(p, &p);
+       if (p == oldp)
+               return;
+
+       switch (*p) {
+       case '@':
+               start_at = memparse(p + 1, &p);
+               add_sysmem_bank(start_at, start_at + mem_size);
+               break;
+
+       case '$':
+               start_at = memparse(p + 1, &p);
+               mem_reserve(start_at, start_at + mem_size, 0);
+               break;
+
+       case 0:
+               mem_reserve(mem_size, 0, 0);
+               break;
+
+       default:
+               pr_warn("Unrecognized memmap syntax: %s\n", p);
+               break;
+       }
+}
+
+static int __init parse_memmap_opt(char *str)
+{
+       while (str) {
+               char *k = strchr(str, ',');
+
+               if (k)
+                       *k++ = 0;
+
+               parse_memmap_one(str);
+               str = k;
+       }
+
+       return 0;
+}
+early_param("memmap", parse_memmap_opt);
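The memmap= option wired up here follows the usual size@start / size$start convention: an '@' range is handed to add_sysmem_bank(), a '$' range to mem_reserve(), and a bare size with neither suffix goes through mem_reserve(mem_size, 0, 0), which in effect trims the memory map down to that size. Purely as an illustration (the sizes and addresses below are made up, not taken from the patch):

	memmap=96M@0x00000000	add a 96 MB bank at physical address 0
	memmap=16M$0x06000000	reserve (punch out) 16 MB at 0x06000000
	memmap=64M		cap the usable memory map at roughly 64 MB

Several ranges can be chained with commas, e.g. memmap=96M@0,16M$0x06000000, since parse_memmap_opt() splits on ',' before handing each piece to parse_memmap_one().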
index 861203e958da828deb140122752e95b47ddbf35f..3429b483d9f85cd2495e01c8c0a11d05bc22e16c 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Extracted from init.c
  */
+#include <linux/bootmem.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <asm/initialize_mmu.h>
 #include <asm/io.h>
 
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr)
+{
+       pgd_t *pgd = pgd_offset_k(vaddr);
+       pmd_t *pmd = pmd_offset(pgd, vaddr);
+
+       if (pmd_none(*pmd)) {
+               unsigned i;
+               pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+
+               for (i = 0; i < 1024; i++)
+                       pte_clear(NULL, 0, pte + i);
+
+               set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
+               BUG_ON(pte != pte_offset_kernel(pmd, 0));
+               pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
+                        __func__, vaddr, pmd, pte);
+               return pte;
+       } else {
+               return pte_offset_kernel(pmd, 0);
+       }
+}
+
+static void __init fixedrange_init(void)
+{
+       BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
+       init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+}
+#endif
+
 void __init paging_init(void)
 {
        memset(swapper_pg_dir, 0, PAGE_SIZE);
+#ifdef CONFIG_HIGHMEM
+       fixedrange_init();
+       pkmap_page_table = init_pmd(PKMAP_BASE);
+       kmap_init();
+#endif
 }
 
 /*
index ade623826788b387f150cfae5a90821f07f6b028..5ece856c5725c7cc72d0a0175bf9229330fabec5 100644 (file)
@@ -149,6 +149,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
        local_irq_restore(flags);
 }
 
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+           end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+               start &= PAGE_MASK;
+               while (start < end) {
+                       invalidate_itlb_mapping(start);
+                       invalidate_dtlb_mapping(start);
+                       start += PAGE_SIZE;
+               }
+       } else {
+               local_flush_tlb_all();
+       }
+}
+
 #ifdef CONFIG_DEBUG_TLB_SANITY
 
 static unsigned get_pte_for_vaddr(unsigned vaddr)
index d2369b799c5077f7b9240135cba96d74acfcc7cf..b3e89291cfbafcb35a1eb07f7f584c35ef7f2d81 100644 (file)
@@ -4,6 +4,7 @@
 # "prom monitor" library routines under Linux.
 #
 
-obj-y                  = console.o setup.o
+obj-y                  = setup.o
+obj-$(CONFIG_TTY)      += console.o
 obj-$(CONFIG_NET)      += network.o
 obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
index f9bc8796629089a540c892109a46a9fe19f68568..b90555cb80890135fee12f7cc3ec361127106836 100644 (file)
@@ -92,18 +92,8 @@ void __init platform_setup(char** cmdline)
 
 /* early initialization */
 
-extern sysmem_info_t __initdata sysmem;
-
-void platform_init(bp_tag_t* first)
+void __init platform_init(bp_tag_t *first)
 {
-       /* Set default memory block if not provided by the bootloader. */
-
-       if (sysmem.nr_banks == 0) {
-               sysmem.nr_banks = 1;
-               sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
-               sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
-                                    + PLATFORM_DEFAULT_MEM_SIZE;
-       }
 }
 
 /* Heartbeat. Let the LED blink. */
index e4a4145926f629787ce0647f3036a98298c8f055..1039fb9ff5f5f998628884dedab422c9b405a36c 100644 (file)
@@ -451,7 +451,20 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
        struct blkcg_gq *blkg;
        int i;
 
-       mutex_lock(&blkcg_pol_mutex);
+       /*
+        * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
+        * which ends up putting cgroup's internal cgroup_tree_mutex under
+        * it; however, cgroup_tree_mutex is nested above cgroup file
+        * active protection and grabbing blkcg_pol_mutex from a cgroup
+        * file operation creates a possible circular dependency.  cgroup
+        * internal locking is planned to go through further simplification
+        * and this issue should go away soon.  For now, let's trylock
+        * blkcg_pol_mutex and restart the write on failure.
+        *
+        * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
+        */
+       if (!mutex_trylock(&blkcg_pol_mutex))
+               return restart_syscall();
        spin_lock_irq(&blkcg->lock);
 
        /*
index 1512e41cd93d74a4e7ab3fde6809e64468f797a8..43665d0d0905ddddf018fe68655c1ff7685b0b9e 100644 (file)
@@ -466,7 +466,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        type -= CRYPTO_MSG_BASE;
        link = &crypto_dispatch[type];
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
index d05d81b19b50c229b2aa100bf01f8ac2818c0c1c..7183b6af5dac2b72fc53edc7044d743eac57ed45 100644 (file)
@@ -119,7 +119,7 @@ obj-$(CONFIG_SGI_SN)                += sn/
 obj-y                          += firmware/
 obj-$(CONFIG_CRYPTO)           += crypto/
 obj-$(CONFIG_SUPERH)           += sh/
-obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)     += sh/
+obj-$(CONFIG_ARCH_SHMOBILE)    += sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 obj-y                          += clocksource/
 endif
index ab686b31010034083ba3b4784ba288fb93b3f554..a34a22841002495713a74f960dea482ecee8dc11 100644 (file)
@@ -47,6 +47,23 @@ config ACPI_SLEEP
        depends on SUSPEND || HIBERNATION
        default y
 
+config ACPI_PROCFS_POWER
+       bool "Deprecated power /proc/acpi directories"
+       depends on PROC_FS
+       help
+         For backwards compatibility, this option allows
+         deprecated power /proc/acpi/ directories to exist, even when
+         they have been replaced by functions in /sys.
+         The deprecated directories (and their replacements) include:
+         /proc/acpi/battery/* (/sys/class/power_supply/*)
+         /proc/acpi/ac_adapter/* (/sys/class/power_supply/*)
+         This option has no effect on /proc/acpi/ directories
+         and functions that do not yet exist in /sys.
+         This option, together with the proc directories, will be
+         deleted in the future.
+
+         Say N to delete the power /proc/acpi/ directories that have moved to /sys/.
+
 config ACPI_EC_DEBUGFS
        tristate "EC read/write access through /sys/kernel/debug/ec"
        default n
index 0331f91d56e663d63a7268d1a77bb3ae1080f715..bce34afadcd05d250831f098ca533565750345e6 100644 (file)
@@ -47,6 +47,7 @@ acpi-y                                += sysfs.o
 acpi-$(CONFIG_X86)             += acpi_cmos_rtc.o
 acpi-$(CONFIG_DEBUG_FS)                += debugfs.o
 acpi-$(CONFIG_ACPI_NUMA)       += numa.o
+acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y                         += video_detect.o
 endif
index 2c01c1da29ce39f637136a12a512f2a473333233..c67f6f5ad61107b7ded8069a53894763e91ab892 100644 (file)
@@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
+static int acpi_ac_add(struct acpi_device *device);
+static int acpi_ac_remove(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
+
+static const struct acpi_device_id ac_device_ids[] = {
+       {"ACPI0003", 0},
+       {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_ac_resume(struct device *dev);
+#endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+
 static int ac_sleep_before_get_state_ms;
 
+static struct acpi_driver acpi_ac_driver = {
+       .name = "ac",
+       .class = ACPI_AC_CLASS,
+       .ids = ac_device_ids,
+       .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+       .ops = {
+               .add = acpi_ac_add,
+               .remove = acpi_ac_remove,
+               .notify = acpi_ac_notify,
+               },
+       .drv.pm = &acpi_ac_pm,
+};
+
 struct acpi_ac {
        struct power_supply charger;
-       struct platform_device *pdev;
+       struct acpi_device *device;
        unsigned long long state;
        struct notifier_block battery_nb;
 };
@@ -69,10 +97,12 @@ struct acpi_ac {
 
 static int acpi_ac_get_state(struct acpi_ac *ac)
 {
-       acpi_status status;
-       acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
+       acpi_status status = AE_OK;
+
+       if (!ac)
+               return -EINVAL;
 
-       status = acpi_evaluate_integer(handle, "_PSR", NULL,
+       status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
                                       &ac->state);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
@@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = {
                                    Driver Model
    -------------------------------------------------------------------------- */
 
-static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
 {
-       struct acpi_ac *ac = data;
-       struct acpi_device *adev;
+       struct acpi_ac *ac = acpi_driver_data(device);
 
        if (!ac)
                return;
@@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
                        msleep(ac_sleep_before_get_state_ms);
 
                acpi_ac_get_state(ac);
-               adev = ACPI_COMPANION(&ac->pdev->dev);
-               acpi_bus_generate_netlink_event(adev->pnp.device_class,
-                                               dev_name(&ac->pdev->dev),
-                                               event, (u32) ac->state);
-               acpi_notifier_call_chain(adev, event, (u32) ac->state);
+               acpi_bus_generate_netlink_event(device->pnp.device_class,
+                                                 dev_name(&device->dev), event,
+                                                 (u32) ac->state);
+               acpi_notifier_call_chain(device, event, (u32) ac->state);
                kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
        }
 
@@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = {
        {},
 };
 
-static int acpi_ac_probe(struct platform_device *pdev)
+static int acpi_ac_add(struct acpi_device *device)
 {
        int result = 0;
        struct acpi_ac *ac = NULL;
-       struct acpi_device *adev;
 
-       if (!pdev)
-               return -EINVAL;
 
-       adev = ACPI_COMPANION(&pdev->dev);
-       if (!adev)
-               return -ENODEV;
+       if (!device)
+               return -EINVAL;
 
        ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
        if (!ac)
                return -ENOMEM;
 
-       strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
-       strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
-       ac->pdev = pdev;
-       platform_set_drvdata(pdev, ac);
+       ac->device = device;
+       strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
+       strcpy(acpi_device_class(device), ACPI_AC_CLASS);
+       device->driver_data = ac;
 
        result = acpi_ac_get_state(ac);
        if (result)
                goto end;
 
-       ac->charger.name = acpi_device_bid(adev);
+       ac->charger.name = acpi_device_bid(device);
        ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
        ac->charger.get_property = get_ac_property;
-       result = power_supply_register(&pdev->dev, &ac->charger);
+       result = power_supply_register(&ac->device->dev, &ac->charger);
        if (result)
                goto end;
 
-       result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
-                       ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
-       if (result) {
-               power_supply_unregister(&ac->charger);
-               goto end;
-       }
        printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
-              acpi_device_name(adev), acpi_device_bid(adev),
+              acpi_device_name(device), acpi_device_bid(device),
               ac->state ? "on-line" : "off-line");
 
        ac->battery_nb.notifier_call = acpi_ac_battery_notify;
@@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev)
        if (!dev)
                return -EINVAL;
 
-       ac = platform_get_drvdata(to_platform_device(dev));
+       ac = acpi_driver_data(to_acpi_device(dev));
        if (!ac)
                return -EINVAL;
 
@@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev)
 #else
 #define acpi_ac_resume NULL
 #endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
 
-static int acpi_ac_remove(struct platform_device *pdev)
+static int acpi_ac_remove(struct acpi_device *device)
 {
-       struct acpi_ac *ac;
+       struct acpi_ac *ac = NULL;
+
 
-       if (!pdev)
+       if (!device || !acpi_driver_data(device))
                return -EINVAL;
 
-       acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
-                       ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
+       ac = acpi_driver_data(device);
 
-       ac = platform_get_drvdata(pdev);
        if (ac->charger.dev)
                power_supply_unregister(&ac->charger);
        unregister_acpi_notifier(&ac->battery_nb);
@@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct acpi_device_id acpi_ac_match[] = {
-       { "ACPI0003", 0 },
-       { }
-};
-MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
-
-static struct platform_driver acpi_ac_driver = {
-       .probe          = acpi_ac_probe,
-       .remove         = acpi_ac_remove,
-       .driver         = {
-               .name   = "acpi-ac",
-               .owner  = THIS_MODULE,
-               .pm     = &acpi_ac_pm_ops,
-               .acpi_match_table = ACPI_PTR(acpi_ac_match),
-       },
-};
-
 static int __init acpi_ac_init(void)
 {
        int result;
@@ -316,7 +315,7 @@ static int __init acpi_ac_init(void)
        if (acpi_disabled)
                return -ENODEV;
 
-       result = platform_driver_register(&acpi_ac_driver);
+       result = acpi_bus_register_driver(&acpi_ac_driver);
        if (result < 0)
                return -ENODEV;
 
@@ -325,7 +324,7 @@ static int __init acpi_ac_init(void)
 
 static void __exit acpi_ac_exit(void)
 {
-       platform_driver_unregister(&acpi_ac_driver);
+       acpi_bus_unregister_driver(&acpi_ac_driver);
 }
 module_init(acpi_ac_init);
 module_exit(acpi_ac_exit);
index dbfe49e5fd63cc179559b2c5caee57d27324c012..1d4950388fa13b9fc6050fbaf419ae68c796f24d 100644 (file)
@@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform");
 static const struct acpi_device_id acpi_platform_device_ids[] = {
 
        { "PNP0D40" },
-       { "ACPI0003" },
        { "VPC2004" },
        { "BCM4752" },
 
index c29c2c3ec0ad8ffc2c6427393593dbf147899d1b..52c81c49cc7d8396fa5b36a4d24b757f934ced46 100644 (file)
@@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        acpi_status status;
        int ret;
 
+       if (pr->apic_id == -1)
+               return -ENODEV;
+
        status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
                return -ENODEV;
@@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
        }
 
        apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
-       if (apic_id < 0) {
+       if (apic_id < 0)
                acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
-               return -ENODEV;
-       }
        pr->apic_id = apic_id;
 
        cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
@@ -404,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device,
                goto err;
 
        pr->dev = dev;
-       dev->offline = pr->flags.need_hotplug_init;
 
        /* Trigger the processor driver's .probe() if present. */
        if (device_attach(dev) >= 0)
index 49bbc71fad54efd709ba9b7e6a610b5f8a30a7a3..a08a448068dd99981be3eeb9643e0255b20ae8df 100644 (file)
@@ -141,9 +141,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
  * address. Although ACPICA adheres to the ACPI specification which
  * requires the use of the corresponding 64-bit address if it is non-zero,
  * some machines have been found to have a corrupted non-zero 64-bit
- * address. Default is FALSE, do not favor the 32-bit addresses.
+ * address. Default is TRUE, favor the 32-bit addresses.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
 
 /*
  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
index 68d97441432cca3140d2151a50af3384b034464a..12878e1982f77d5f69fcad7d266f7ac922f2a12d 100644 (file)
 #include "accommon.h"
 #include "acdispat.h"
 #include "acinterp.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exfield")
 
+/* Local prototypes */
+static u32
+acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_get_serial_access_length
+ *
+ * PARAMETERS:  accessor_type   - The type of the protocol indicated by region
+ *                                field access attributes
+ *              access_length   - The access length of the region field
+ *
+ * RETURN:      Decoded access length
+ *
+ * DESCRIPTION: This routine returns the length of the generic_serial_bus
+ *              protocol data, in bytes
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length)
+{
+       u32 length;
+
+       switch (accessor_type) {
+       case AML_FIELD_ATTRIB_QUICK:
+
+               length = 0;
+               break;
+
+       case AML_FIELD_ATTRIB_SEND_RCV:
+       case AML_FIELD_ATTRIB_BYTE:
+
+               length = 1;
+               break;
+
+       case AML_FIELD_ATTRIB_WORD:
+       case AML_FIELD_ATTRIB_WORD_CALL:
+
+               length = 2;
+               break;
+
+       case AML_FIELD_ATTRIB_MULTIBYTE:
+       case AML_FIELD_ATTRIB_RAW_BYTES:
+       case AML_FIELD_ATTRIB_RAW_PROCESS:
+
+               length = access_length;
+               break;
+
+       case AML_FIELD_ATTRIB_BLOCK:
+       case AML_FIELD_ATTRIB_BLOCK_CALL:
+       default:
+
+               length = ACPI_GSBUS_BUFFER_SIZE;
+               break;
+       }
+
+       return (length);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ex_read_data_from_field
@@ -63,8 +124,9 @@ ACPI_MODULE_NAME("exfield")
  *              Buffer, depending on the size of the field.
  *
  ******************************************************************************/
+
 acpi_status
-acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
+acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
                             union acpi_operand_object *obj_desc,
                             union acpi_operand_object **ret_buffer_desc)
 {
@@ -73,6 +135,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
        acpi_size length;
        void *buffer;
        u32 function;
+       u16 accessor_type;
 
        ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
 
@@ -116,9 +179,22 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
                            ACPI_READ | (obj_desc->field.attribute << 16);
                } else if (obj_desc->field.region_obj->region.space_id ==
                           ACPI_ADR_SPACE_GSBUS) {
-                       length = ACPI_GSBUS_BUFFER_SIZE;
-                       function =
-                           ACPI_READ | (obj_desc->field.attribute << 16);
+                       accessor_type = obj_desc->field.attribute;
+                       length = acpi_ex_get_serial_access_length(accessor_type,
+                                                                 obj_desc->
+                                                                 field.
+                                                                 access_length);
+
+                       /*
+                        * Add additional 2 bytes for modeled generic_serial_bus data buffer:
+                        * typedef struct {
+                        *     BYTE Status; // Byte 0 of the data buffer
+                        *     BYTE Length; // Byte 1 of the data buffer
+                        *     BYTE[x-1] Data; // Bytes 2-x of the arbitrary length data buffer,
+                        * }
+                        */
+                       length += 2;
+                       function = ACPI_READ | (accessor_type << 16);
                } else {        /* IPMI */
 
                        length = ACPI_IPMI_BUFFER_SIZE;
@@ -231,6 +307,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
        void *buffer;
        union acpi_operand_object *buffer_desc;
        u32 function;
+       u16 accessor_type;
 
        ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
 
@@ -284,9 +361,22 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
                            ACPI_WRITE | (obj_desc->field.attribute << 16);
                } else if (obj_desc->field.region_obj->region.space_id ==
                           ACPI_ADR_SPACE_GSBUS) {
-                       length = ACPI_GSBUS_BUFFER_SIZE;
-                       function =
-                           ACPI_WRITE | (obj_desc->field.attribute << 16);
+                       accessor_type = obj_desc->field.attribute;
+                       length = acpi_ex_get_serial_access_length(accessor_type,
+                                                                 obj_desc->
+                                                                 field.
+                                                                 access_length);
+
+                       /*
+                        * Add additional 2 bytes for modeled generic_serial_bus data buffer:
+                        *     BYTE Status; // Byte 0 of the data buffer
+                        *     BYTE Length; // Byte 1 of the data buffer
+                        *     BYTE[x-1] Data; // Bytes 2-x of the arbitrary length data buffer,
+                        *     BYTE[x-1]Data; // Bytes 2-x of the arbitrary length data buffer,
+                        * }
+                        */
+                       length += 2;
+                       function = ACPI_WRITE | (accessor_type << 16);
                } else {        /* IPMI */
 
                        length = ACPI_IPMI_BUFFER_SIZE;
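To make the added header bytes concrete: for an AttribWord access, acpi_ex_get_serial_access_length() returns 2 (AML_FIELD_ATTRIB_WORD above), and the two extra bytes give a 4-byte transfer buffer. A sketch of that layout only (the struct and field names are invented for illustration; the code above works on a raw buffer):

	struct gsbus_word_buffer {
		u8 status;	/* byte 0: transaction status */
		u8 length;	/* byte 1: number of valid data bytes */
		u8 data[2];	/* bytes 2-3: payload of the AttribWord access */
	};			/* total length = 2 + 2 = 4 bytes */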
index a4702eee91a820d131960754c14da6aa62f50939..9fb85f38de90e3b073635ef2b854895b13e7b5b8 100644 (file)
@@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
        u32 table_count;
        struct acpi_table_header *table;
        acpi_physical_address address;
+       acpi_physical_address rsdt_address;
        u32 length;
        u8 *table_entry;
        acpi_status status;
@@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
                 * as per the ACPI specification.
                 */
                address = (acpi_physical_address) rsdp->xsdt_physical_address;
+               rsdt_address =
+                   (acpi_physical_address) rsdp->rsdt_physical_address;
                table_entry_size = ACPI_XSDT_ENTRY_SIZE;
        } else {
                /* Root table is an RSDT (32-bit physical addresses) */
 
                address = (acpi_physical_address) rsdp->rsdt_physical_address;
+               rsdt_address = address;
                table_entry_size = ACPI_RSDT_ENTRY_SIZE;
        }
 
@@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
 
                        /* Fall back to the RSDT */
 
-                       address =
-                           (acpi_physical_address) rsdp->rsdt_physical_address;
+                       address = rsdt_address;
                        table_entry_size = ACPI_RSDT_ENTRY_SIZE;
                }
        }
index 9a2c63b2005038476e5a5e77360fd836aad3f25f..6e7b2a12860d31ac533c730e97d080daaa6f1385 100644 (file)
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#endif
+
 #include <linux/acpi.h>
 #include <linux/power_supply.h>
 
@@ -64,6 +70,19 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_battery_dir(void);
+extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
+
+enum acpi_battery_files {
+       info_tag = 0,
+       state_tag,
+       alarm_tag,
+       ACPI_BATTERY_NUMFILES,
+};
+
+#endif
+
 static const struct acpi_device_id battery_device_ids[] = {
        {"PNP0C0A", 0},
        {"", 0},
@@ -299,6 +318,14 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+inline char *acpi_battery_units(struct acpi_battery *battery)
+{
+       return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
+               "mA" : "mW";
+}
+#endif
+
 /* --------------------------------------------------------------------------
                                Battery Management
    -------------------------------------------------------------------------- */
@@ -716,6 +743,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
        sysfs_add_battery(battery);
 }
 
+/* --------------------------------------------------------------------------
+                              FS Interface (/proc)
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static struct proc_dir_entry *acpi_battery_dir;
+
+static int acpi_battery_print_info(struct seq_file *seq, int result)
+{
+       struct acpi_battery *battery = seq->private;
+
+       if (result)
+               goto end;
+
+       seq_printf(seq, "present:                 %s\n",
+                  acpi_battery_present(battery) ? "yes" : "no");
+       if (!acpi_battery_present(battery))
+               goto end;
+       if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "design capacity:         unknown\n");
+       else
+               seq_printf(seq, "design capacity:         %d %sh\n",
+                          battery->design_capacity,
+                          acpi_battery_units(battery));
+
+       if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "last full capacity:      unknown\n");
+       else
+               seq_printf(seq, "last full capacity:      %d %sh\n",
+                          battery->full_charge_capacity,
+                          acpi_battery_units(battery));
+
+       seq_printf(seq, "battery technology:      %srechargeable\n",
+                  (!battery->technology)?"non-":"");
+
+       if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "design voltage:          unknown\n");
+       else
+               seq_printf(seq, "design voltage:          %d mV\n",
+                          battery->design_voltage);
+       seq_printf(seq, "design capacity warning: %d %sh\n",
+                  battery->design_capacity_warning,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "design capacity low:     %d %sh\n",
+                  battery->design_capacity_low,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "cycle count:             %i\n", battery->cycle_count);
+       seq_printf(seq, "capacity granularity 1:  %d %sh\n",
+                  battery->capacity_granularity_1,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "capacity granularity 2:  %d %sh\n",
+                  battery->capacity_granularity_2,
+                  acpi_battery_units(battery));
+       seq_printf(seq, "model number:            %s\n", battery->model_number);
+       seq_printf(seq, "serial number:           %s\n", battery->serial_number);
+       seq_printf(seq, "battery type:            %s\n", battery->type);
+       seq_printf(seq, "OEM info:                %s\n", battery->oem_info);
+      end:
+       if (result)
+               seq_printf(seq, "ERROR: Unable to read battery info\n");
+       return result;
+}
+
+static int acpi_battery_print_state(struct seq_file *seq, int result)
+{
+       struct acpi_battery *battery = seq->private;
+
+       if (result)
+               goto end;
+
+       seq_printf(seq, "present:                 %s\n",
+                  acpi_battery_present(battery) ? "yes" : "no");
+       if (!acpi_battery_present(battery))
+               goto end;
+
+       seq_printf(seq, "capacity state:          %s\n",
+                       (battery->state & 0x04) ? "critical" : "ok");
+       if ((battery->state & 0x01) && (battery->state & 0x02))
+               seq_printf(seq,
+                          "charging state:          charging/discharging\n");
+       else if (battery->state & 0x01)
+               seq_printf(seq, "charging state:          discharging\n");
+       else if (battery->state & 0x02)
+               seq_printf(seq, "charging state:          charging\n");
+       else
+               seq_printf(seq, "charging state:          charged\n");
+
+       if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "present rate:            unknown\n");
+       else
+               seq_printf(seq, "present rate:            %d %s\n",
+                          battery->rate_now, acpi_battery_units(battery));
+
+       if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "remaining capacity:      unknown\n");
+       else
+               seq_printf(seq, "remaining capacity:      %d %sh\n",
+                          battery->capacity_now, acpi_battery_units(battery));
+       if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
+               seq_printf(seq, "present voltage:         unknown\n");
+       else
+               seq_printf(seq, "present voltage:         %d mV\n",
+                          battery->voltage_now);
+      end:
+       if (result)
+               seq_printf(seq, "ERROR: Unable to read battery state\n");
+
+       return result;
+}
+
+static int acpi_battery_print_alarm(struct seq_file *seq, int result)
+{
+       struct acpi_battery *battery = seq->private;
+
+       if (result)
+               goto end;
+
+       if (!acpi_battery_present(battery)) {
+               seq_printf(seq, "present:                 no\n");
+               goto end;
+       }
+       seq_printf(seq, "alarm:                   ");
+       if (!battery->alarm)
+               seq_printf(seq, "unsupported\n");
+       else
+               seq_printf(seq, "%u %sh\n", battery->alarm,
+                               acpi_battery_units(battery));
+      end:
+       if (result)
+               seq_printf(seq, "ERROR: Unable to read battery alarm\n");
+       return result;
+}
+
+static ssize_t acpi_battery_write_alarm(struct file *file,
+                                       const char __user * buffer,
+                                       size_t count, loff_t * ppos)
+{
+       int result = 0;
+       char alarm_string[12] = { '\0' };
+       struct seq_file *m = file->private_data;
+       struct acpi_battery *battery = m->private;
+
+       if (!battery || (count > sizeof(alarm_string) - 1))
+               return -EINVAL;
+       if (!acpi_battery_present(battery)) {
+               result = -ENODEV;
+               goto end;
+       }
+       if (copy_from_user(alarm_string, buffer, count)) {
+               result = -EFAULT;
+               goto end;
+       }
+       alarm_string[count] = '\0';
+       battery->alarm = simple_strtol(alarm_string, NULL, 0);
+       result = acpi_battery_set_alarm(battery);
+      end:
+       if (!result)
+               return count;
+       return result;
+}
+
+typedef int(*print_func)(struct seq_file *seq, int result);
+
+static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
+       acpi_battery_print_info,
+       acpi_battery_print_state,
+       acpi_battery_print_alarm,
+};
+
+static int acpi_battery_read(int fid, struct seq_file *seq)
+{
+       struct acpi_battery *battery = seq->private;
+       int result = acpi_battery_update(battery);
+       return acpi_print_funcs[fid](seq, result);
+}
+
+#define DECLARE_FILE_FUNCTIONS(_name) \
+static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
+{ \
+       return acpi_battery_read(_name##_tag, seq); \
+} \
+static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
+{ \
+       return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
+}
+
+DECLARE_FILE_FUNCTIONS(info);
+DECLARE_FILE_FUNCTIONS(state);
+DECLARE_FILE_FUNCTIONS(alarm);
+
+#undef DECLARE_FILE_FUNCTIONS
+
+#define FILE_DESCRIPTION_RO(_name) \
+       { \
+       .name = __stringify(_name), \
+       .mode = S_IRUGO, \
+       .ops = { \
+               .open = acpi_battery_##_name##_open_fs, \
+               .read = seq_read, \
+               .llseek = seq_lseek, \
+               .release = single_release, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
+#define FILE_DESCRIPTION_RW(_name) \
+       { \
+       .name = __stringify(_name), \
+       .mode = S_IFREG | S_IRUGO | S_IWUSR, \
+       .ops = { \
+               .open = acpi_battery_##_name##_open_fs, \
+               .read = seq_read, \
+               .llseek = seq_lseek, \
+               .write = acpi_battery_write_##_name, \
+               .release = single_release, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
+static const struct battery_file {
+       struct file_operations ops;
+       umode_t mode;
+       const char *name;
+} acpi_battery_file[] = {
+       FILE_DESCRIPTION_RO(info),
+       FILE_DESCRIPTION_RO(state),
+       FILE_DESCRIPTION_RW(alarm),
+};
+
+#undef FILE_DESCRIPTION_RO
+#undef FILE_DESCRIPTION_RW
+
+static int acpi_battery_add_fs(struct acpi_device *device)
+{
+       struct proc_dir_entry *entry = NULL;
+       int i;
+
+       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
+                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+       if (!acpi_device_dir(device)) {
+               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+                                                    acpi_battery_dir);
+               if (!acpi_device_dir(device))
+                       return -ENODEV;
+       }
+
+       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
+               entry = proc_create_data(acpi_battery_file[i].name,
+                                        acpi_battery_file[i].mode,
+                                        acpi_device_dir(device),
+                                        &acpi_battery_file[i].ops,
+                                        acpi_driver_data(device));
+               if (!entry)
+                       return -ENODEV;
+       }
+       return 0;
+}
+
+static void acpi_battery_remove_fs(struct acpi_device *device)
+{
+       int i;
+       if (!acpi_device_dir(device))
+               return;
+       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
+               remove_proc_entry(acpi_battery_file[i].name,
+                                 acpi_device_dir(device));
+
+       remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
+       acpi_device_dir(device) = NULL;
+}
+
+#endif
+
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -790,6 +1090,15 @@ static int acpi_battery_add(struct acpi_device *device)
        result = acpi_battery_update(battery);
        if (result)
                goto fail;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       result = acpi_battery_add_fs(device);
+#endif
+       if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_battery_remove_fs(device);
+#endif
+               goto fail;
+       }
 
        printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
                ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -816,6 +1125,9 @@ static int acpi_battery_remove(struct acpi_device *device)
                return -EINVAL;
        battery = acpi_driver_data(device);
        unregister_pm_notifier(&battery->pm_nb);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_battery_remove_fs(device);
+#endif
        sysfs_remove_battery(battery);
        mutex_destroy(&battery->lock);
        mutex_destroy(&battery->sysfs_lock);
@@ -866,7 +1178,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 
        if (dmi_check_system(bat_dmi_table))
                battery_bix_broken_package = 1;
-       acpi_bus_register_driver(&acpi_battery_driver);
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_battery_dir = acpi_lock_battery_dir();
+       if (!acpi_battery_dir)
+               return;
+#endif
+       if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
+               return;
+       }
+       return;
 }
 
 static int __init acpi_battery_init(void)
@@ -878,6 +1202,9 @@ static int __init acpi_battery_init(void)
 static void __exit acpi_battery_exit(void)
 {
        acpi_bus_unregister_driver(&acpi_battery_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
 }
 
 module_init(acpi_battery_init);
index afec4526c48aa04e2921a199396e8fb2ea7489be..3d8413d02a975f0643275a247524a0c9d7569341 100644 (file)
@@ -314,6 +314,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
                },
        },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Dell Inspiron 7737",
+       .matches = {
+                   DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                   DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
@@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
                },
        },
+       /*
+        * Without this quirk this EEE PC exports a non-working WMI
+        * interface; with it the machine exports the working "good old"
+        * eeepc_laptop interface, fixing both brightness control and
+        * rfkill.
+        */
+       {
+       .callback = dmi_enable_osi_linux,
+       .ident = "Asus EEE PC 1015PX",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
+               },
+       },
        {}
 };
 
index e7e5844c87d0c8de87379ae7ea6eef8ad91cb79f..cf925c4f36b70ee173ad2ad5a688416cfdb31734 100644 (file)
@@ -380,9 +380,8 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
                break;
 
        default:
-               acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
-               ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
-               goto err;
+               acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
+               break;
        }
 
        adev = acpi_bus_get_acpi_device(handle);
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
new file mode 100644 (file)
index 0000000..6c9ee68
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define PREFIX "ACPI: "
+
+ACPI_MODULE_NAME("cm_sbs");
+#define ACPI_AC_CLASS          "ac_adapter"
+#define ACPI_BATTERY_CLASS     "battery"
+#define _COMPONENT             ACPI_SBS_COMPONENT
+static struct proc_dir_entry *acpi_ac_dir;
+static struct proc_dir_entry *acpi_battery_dir;
+
+static DEFINE_MUTEX(cm_sbs_mutex);
+
+static int lock_ac_dir_cnt;
+static int lock_battery_dir_cnt;
+
+struct proc_dir_entry *acpi_lock_ac_dir(void)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (!acpi_ac_dir)
+               acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
+       if (acpi_ac_dir) {
+               lock_ac_dir_cnt++;
+       } else {
+               printk(KERN_ERR PREFIX
+                                 "Cannot create %s\n", ACPI_AC_CLASS);
+       }
+       mutex_unlock(&cm_sbs_mutex);
+       return acpi_ac_dir;
+}
+EXPORT_SYMBOL(acpi_lock_ac_dir);
+
+void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (acpi_ac_dir_param)
+               lock_ac_dir_cnt--;
+       if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
+               remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
+               acpi_ac_dir = NULL;
+       }
+       mutex_unlock(&cm_sbs_mutex);
+}
+EXPORT_SYMBOL(acpi_unlock_ac_dir);
+
+struct proc_dir_entry *acpi_lock_battery_dir(void)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (!acpi_battery_dir) {
+               acpi_battery_dir =
+                   proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
+       }
+       if (acpi_battery_dir) {
+               lock_battery_dir_cnt++;
+       } else {
+               printk(KERN_ERR PREFIX
+                                 "Cannot create %s\n", ACPI_BATTERY_CLASS);
+       }
+       mutex_unlock(&cm_sbs_mutex);
+       return acpi_battery_dir;
+}
+EXPORT_SYMBOL(acpi_lock_battery_dir);
+
+void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
+{
+       mutex_lock(&cm_sbs_mutex);
+       if (acpi_battery_dir_param)
+               lock_battery_dir_cnt--;
+       if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
+           && acpi_battery_dir) {
+               remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
+               acpi_battery_dir = NULL;
+       }
+       mutex_unlock(&cm_sbs_mutex);
+       return;
+}
+EXPORT_SYMBOL(acpi_unlock_battery_dir);
index d7d32c28829b17834507bf8683f2c2a5c77d0a0d..ad11ba4a412dedc893ae4bdbb9230b3583bbf00b 100644 (file)
@@ -206,13 +206,13 @@ unlock:
        spin_unlock_irqrestore(&ec->lock, flags);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
 
 static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 {
        if (state & ACPI_EC_FLAG_SCI) {
                if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-                       return acpi_ec_sync_query(ec);
+                       return acpi_ec_sync_query(ec, NULL);
        }
        return 0;
 }
@@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
-
 /*
- * Clears stale _Q events that might have accumulated in the EC.
+ * Process _Q events that might have accumulated in the EC.
  * Run with locked ec mutex.
  */
 static void acpi_ec_clear(struct acpi_ec *ec)
@@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
        u8 value = 0;
 
        for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
-               status = acpi_ec_query_unlocked(ec, &value);
+               status = acpi_ec_sync_query(ec, &value);
                if (status || !value)
                        break;
        }
@@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt)
        kfree(handler);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
 {
        u8 value = 0;
        int status;
        struct acpi_ec_query_handler *handler, *copy;
-       if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+       status = acpi_ec_query_unlocked(ec, &value);
+       if (data)
+               *data = value;
+       if (status)
                return status;
+
        list_for_each_entry(handler, &ec->list, node) {
                if (value == handler->query_bit) {
                        /* have custom handler for this bit */
@@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
        if (!ec)
                return;
        mutex_lock(&ec->mutex);
-       acpi_ec_sync_query(ec);
+       acpi_ec_sync_query(ec, NULL);
        mutex_unlock(&ec->mutex);
 }
 
index 8b6990e417ec870d7c77e42c994b1e2245b3b88e..f8bc5a755dda411963e097cc41c27bec0752da71 100644 (file)
@@ -457,10 +457,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
        },
        {
         .callback = video_set_use_native_backlight,
-        .ident = "ThinkPad T430s",
+        .ident = "ThinkPad T430 and T430s",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
                },
        },
        {
@@ -472,7 +472,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                },
        },
        {
-       .callback = video_set_use_native_backlight,
+        .callback = video_set_use_native_backlight,
        .ident = "ThinkPad X1 Carbon",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -500,7 +500,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
         .ident = "Dell Inspiron 7520",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-               DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
                },
        },
        {
@@ -511,6 +511,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
                },
        },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "Acer Aspire 5742G",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
+               },
+       },
        {
         .callback = video_set_use_native_backlight,
         .ident = "Acer Aspire V5-431",
index 20e03a7eb8b431f692e534f6a3d895a2c2cd9476..0033fafc470be5f7be9b84da74d689143abb5357 100644 (file)
@@ -116,7 +116,7 @@ config AHCI_ST
 
 config AHCI_IMX
        tristate "Freescale i.MX AHCI SATA support"
-       depends on MFD_SYSCON
+       depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST)
        help
          This option enables support for the Freescale i.MX SoC's
          onboard AHCI SATA.
@@ -134,8 +134,7 @@ config AHCI_SUNXI
 
 config AHCI_XGENE
        tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
-       depends on ARM64 || COMPILE_TEST
-       select PHY_XGENE
+       depends on PHY_XGENE
        help
         This option enables support for APM X-Gene SoC SATA host controller.
 
@@ -816,7 +815,7 @@ config PATA_AT32
 
 config PATA_AT91
        tristate "PATA support for AT91SAM9260"
-       depends on ARM && ARCH_AT91
+       depends on ARM && SOC_AT91SAM9
        help
          This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
 
index 5a0bf8ed649b8cf9266530ef309aec89a54f99ef..60707814a84b19e2581d6309c2a17d4823d1e015 100644 (file)
@@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev)
        return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
 }
 
+static bool ahci_broken_devslp(struct pci_dev *pdev)
+{
+       /* device with broken DEVSLP but still showing SDS capability */
+       static const struct pci_device_id ids[] = {
+               { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
+               {}
+       };
+
+       return pci_match_id(ids, pdev);
+}
+
 #ifdef CONFIG_ATA_ACPI
 static void ahci_gtf_filter_workaround(struct ata_host *host)
 {
@@ -1164,9 +1175,9 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 #endif
 
 static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
-                        struct ahci_host_priv *hpriv)
+                               struct ahci_host_priv *hpriv)
 {
-       int nvec;
+       int rc, nvec;
 
        if (hpriv->flags & AHCI_HFLAG_NO_MSI)
                goto intx;
@@ -1183,12 +1194,19 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
        if (nvec < n_ports)
                goto single_msi;
 
-       nvec = pci_enable_msi_range(pdev, nvec, nvec);
-       if (nvec == -ENOSPC)
+       rc = pci_enable_msi_exact(pdev, nvec);
+       if (rc == -ENOSPC)
                goto single_msi;
-       else if (nvec < 0)
+       else if (rc < 0)
                goto intx;
 
+       /* fallback to single MSI mode if the controller enforced MRSM mode */
+       if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
+               pci_disable_msi(pdev);
+               printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
+               goto single_msi;
+       }
+
        return nvec;
 
 single_msi:
@@ -1232,18 +1250,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
                return rc;
 
        for (i = 0; i < host->n_ports; i++) {
-               const char* desc;
                struct ahci_port_priv *pp = host->ports[i]->private_data;
 
-               /* pp is NULL for dummy ports */
-               if (pp)
-                       desc = pp->irq_desc;
-               else
-                       desc = dev_driver_string(host->dev);
+               /* Do not receive interrupts sent by dummy ports */
+               if (!pp) {
+                       disable_irq(irq + i);
+                       continue;
+               }
 
-               rc = devm_request_threaded_irq(host->dev,
-                       irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
-                       desc, host->ports[i]);
+               rc = devm_request_threaded_irq(host->dev, irq + i,
+                                              ahci_hw_interrupt,
+                                              ahci_thread_fn, IRQF_SHARED,
+                                              pp->irq_desc, host->ports[i]);
                if (rc)
                        goto out_free_irqs;
        }
@@ -1357,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
 
+       /* must set flag prior to save config in order to take effect */
+       if (ahci_broken_devslp(pdev))
+               hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+
        /* save initial config */
        ahci_pci_save_initial_config(pdev, hpriv);
 
index 51af275b3388541baad3f7bf021a098de9da9bf0..af63c75c20011e10d76be66edb27d595cb47978c 100644 (file)
@@ -94,6 +94,7 @@ enum {
        /* HOST_CTL bits */
        HOST_RESET              = (1 << 0),  /* reset controller; self-clear */
        HOST_IRQ_EN             = (1 << 1),  /* global IRQ enable */
+       HOST_MRSM               = (1 << 2),  /* MSI Revert to Single Message */
        HOST_AHCI_EN            = (1 << 31), /* AHCI enabled */
 
        /* HOST_CAP bits */
@@ -235,6 +236,7 @@ enum {
                                                        port start (wait until
                                                        error-handling stage) */
        AHCI_HFLAG_MULTI_MSI            = (1 << 16), /* multiple PCI MSIs */
+       AHCI_HFLAG_NO_DEVSLP            = (1 << 17), /* no device sleep */
 
        /* ap->flags bits */
 
index 497c7abe1c7df5ef79ccd246828b68a1251c5201..8befeb69eeb1133afc62daa43b1fb41af584994e 100644 (file)
 #include "ahci.h"
 
 enum {
-       PORT_PHY_CTL = 0x178,                   /* Port0 PHY Control */
-       PORT_PHY_CTL_PDDQ_LOC = 0x100000,       /* PORT_PHY_CTL bits */
-       HOST_TIMER1MS = 0xe0,                   /* Timer 1-ms */
+       /* Timer 1-ms Register */
+       IMX_TIMER1MS                            = 0x00e0,
+       /* Port0 PHY Control Register */
+       IMX_P0PHYCR                             = 0x0178,
+       IMX_P0PHYCR_TEST_PDDQ                   = 1 << 20,
+       IMX_P0PHYCR_CR_READ                     = 1 << 19,
+       IMX_P0PHYCR_CR_WRITE                    = 1 << 18,
+       IMX_P0PHYCR_CR_CAP_DATA                 = 1 << 17,
+       IMX_P0PHYCR_CR_CAP_ADDR                 = 1 << 16,
+       /* Port0 PHY Status Register */
+       IMX_P0PHYSR                             = 0x017c,
+       IMX_P0PHYSR_CR_ACK                      = 1 << 18,
+       IMX_P0PHYSR_CR_DATA_OUT                 = 0xffff << 0,
+       /* Lane0 Output Status Register */
+       IMX_LANE0_OUT_STAT                      = 0x2003,
+       IMX_LANE0_OUT_STAT_RX_PLL_STATE         = 1 << 1,
+       /* Clock Reset Register */
+       IMX_CLOCK_RESET                         = 0x7f3f,
+       IMX_CLOCK_RESET_RESET                   = 1 << 0,
 };
 
 enum ahci_imx_type {
@@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support
 
 static void ahci_imx_host_stop(struct ata_host *host);
 
+static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
+{
+       int timeout = 10;
+       u32 crval;
+       u32 srval;
+
+       /* Assert or deassert the bit */
+       crval = readl(mmio + IMX_P0PHYCR);
+       if (assert)
+               crval |= bit;
+       else
+               crval &= ~bit;
+       writel(crval, mmio + IMX_P0PHYCR);
+
+       /* Wait for the cr_ack signal */
+       do {
+               srval = readl(mmio + IMX_P0PHYSR);
+               if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
+                       break;
+               usleep_range(100, 200);
+       } while (--timeout);
+
+       return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
+{
+       u32 crval = addr;
+       int ret;
+
+       /* Supply the address on cr_data_in */
+       writel(crval, mmio + IMX_P0PHYCR);
+
+       /* Assert the cr_cap_addr signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
+       if (ret)
+               return ret;
+
+       /* Deassert cr_cap_addr */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int imx_phy_reg_write(u16 val, void __iomem *mmio)
+{
+       u32 crval = val;
+       int ret;
+
+       /* Supply the data on cr_data_in */
+       writel(crval, mmio + IMX_P0PHYCR);
+
+       /* Assert the cr_cap_data signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
+       if (ret)
+               return ret;
+
+       /* Deassert cr_cap_data */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
+       if (ret)
+               return ret;
+
+       if (val & IMX_CLOCK_RESET_RESET) {
+               /*
+                * In case we're resetting the phy, it's unable to acknowledge,
+                * so we return immediately here.
+                */
+               crval |= IMX_P0PHYCR_CR_WRITE;
+               writel(crval, mmio + IMX_P0PHYCR);
+               goto out;
+       }
+
+       /* Assert the cr_write signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
+       if (ret)
+               return ret;
+
+       /* Deassert cr_write */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
+       if (ret)
+               return ret;
+
+out:
+       return 0;
+}
+
+static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
+{
+       int ret;
+
+       /* Assert the cr_read signal */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
+       if (ret)
+               return ret;
+
+       /* Capture the data from cr_data_out[] */
+       *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
+
+       /* Deassert cr_read */
+       ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
+{
+       void __iomem *mmio = hpriv->mmio;
+       int timeout = 10;
+       u16 val;
+       int ret;
+
+       /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
+       ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
+       if (ret)
+               return ret;
+       ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
+       if (ret)
+               return ret;
+
+       /* Wait for PHY RX_PLL to be stable */
+       do {
+               usleep_range(100, 200);
+               ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
+               if (ret)
+                       return ret;
+               ret = imx_phy_reg_read(&val, mmio);
+               if (ret)
+                       return ret;
+               if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
+                       break;
+       } while (--timeout);
+
+       return timeout ? 0 : -ETIMEDOUT;
+}
+
 static int imx_sata_enable(struct ahci_host_priv *hpriv)
 {
        struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+       struct device *dev = &imxpriv->ahci_pdev->dev;
        int ret;
 
        if (imxpriv->no_device)
@@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
                regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+
+               usleep_range(100, 200);
+
+               ret = imx_sata_phy_reset(hpriv);
+               if (ret) {
+                       dev_err(dev, "failed to reset phy: %d\n", ret);
+                       goto disable_regulator;
+               }
        }
 
        usleep_range(1000, 2000);
@@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap)
         * without full reset once the pddq mode is enabled making it
         * impossible to use as part of libata LPM.
         */
-       reg_val = readl(mmio + PORT_PHY_CTL);
-       writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+       reg_val = readl(mmio + IMX_P0PHYCR);
+       writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
        imx_sata_disable(hpriv);
        imxpriv->no_device = true;
 }
@@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
        if (!imxpriv)
                return -ENOMEM;
 
+       imxpriv->ahci_pdev = pdev;
        imxpriv->no_device = false;
        imxpriv->first_time = true;
        imxpriv->type = (enum ahci_imx_type)of_id->data;
@@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
 
        /*
         * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
-        * and IP vendor specific register HOST_TIMER1MS.
+        * and IP vendor specific register IMX_TIMER1MS.
         * Configure CAP_SSS (support staggered spin-up).
         * Implement the port0.
         * Get the ahb clock rate, and configure the TIMER1MS register.
@@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
        }
 
        reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
-       writel(reg_val, hpriv->mmio + HOST_TIMER1MS);
+       writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
 
        ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
        if (ret)
index 6bd4f660b4e15966ca2c351b4501c0521491de32..b9861453fc8148612a740418f5fee3088d7b65a1 100644 (file)
@@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev,
                cap &= ~HOST_CAP_SNTF;
        }
 
+       if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
+               dev_info(dev,
+                        "controller can't do DEVSLP, turning off\n");
+               cap2 &= ~HOST_CAP2_SDS;
+               cap2 &= ~HOST_CAP2_SADM;
+       }
+
        if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
                dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
                cap |= HOST_CAP_FBS;
index c19734d96d7e6a029a1adf9667ce5a897708b58c..ea83828bfea94b41eae6947cfbbbaa8dfbc15b5a 100644 (file)
@@ -4224,8 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
        /* devices that don't properly handle queued TRIM commands */
-       { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
-       { "Crucial_CT???M500SSD*",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Micron_M500*",               "MU0[1-4]*",    ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Crucial_CT???M500SSD*",      "MU0[1-4]*",    ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Micron_M550*",               NULL,           ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Crucial_CT???M550SSD*",      NULL,           ATA_HORKAGE_NO_NCQ_TRIM, },
 
        /*
         * Some WD SATA-I drives spin up and down erratically when the link
@@ -4792,21 +4794,26 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 {
        struct ata_queued_cmd *qc = NULL;
-       unsigned int i;
+       unsigned int i, tag;
 
        /* no command while frozen */
        if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
                return NULL;
 
-       /* the last tag is reserved for internal command. */
-       for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
-               if (!test_and_set_bit(i, &ap->qc_allocated)) {
-                       qc = __ata_qc_from_tag(ap, i);
+       for (i = 0; i < ATA_MAX_QUEUE; i++) {
+               tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+
+               /* the last tag is reserved for internal command. */
+               if (tag == ATA_TAG_INTERNAL)
+                       continue;
+
+               if (!test_and_set_bit(tag, &ap->qc_allocated)) {
+                       qc = __ata_qc_from_tag(ap, tag);
+                       qc->tag = tag;
+                       ap->last_tag = tag;
                        break;
                }
-
-       if (qc)
-               qc->tag = i;
+       }
 
        return qc;
 }
@@ -6307,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq,
 static void ata_port_detach(struct ata_port *ap)
 {
        unsigned long flags;
+       struct ata_link *link;
+       struct ata_device *dev;
 
        if (!ap->ops->error_handler)
                goto skip_eh;
@@ -6326,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap)
        cancel_delayed_work_sync(&ap->hotplug_task);
 
  skip_eh:
+       /* clean up zpodd on port removal */
+       ata_for_each_link(link, ap, HOST_FIRST) {
+               ata_for_each_dev(dev, link, ALL) {
+                       if (zpodd_dev_enabled(dev))
+                               zpodd_exit(dev);
+               }
+       }
        if (ap->pmp_link) {
                int i;
                for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
index 6fac524c2f500381ac8d76b2a94bf4d88314f1e7..4edb1a81f63f68e3f37680b7334a3d7b81788f96 100644 (file)
@@ -898,9 +898,12 @@ static int arasan_cf_probe(struct platform_device *pdev)
 
        cf_card_detect(acdev, 0);
 
-       return ata_host_activate(host, acdev->irq, irq_handler, 0,
-                       &arasan_cf_sht);
+       ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
+                               &arasan_cf_sht);
+       if (!ret)
+               return 0;
 
+       cf_exit(acdev);
 free_clk:
        clk_put(acdev->clk);
        return ret;
index e9c87274a781551d4496ac81b213855dd766ae41..8a66f23af4c40bd0ffd9e01776c02a16b26f0584 100644 (file)
@@ -407,12 +407,13 @@ static int pata_at91_probe(struct platform_device *pdev)
 
        host->private_data = info;
 
-       return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
-                       gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
-                       irq_flags, &pata_at91_sht);
+       ret = ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
+                               gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
+                               irq_flags, &pata_at91_sht);
+       if (ret)
+               goto err_put;
 
-       if (!ret)
-               return 0;
+       return 0;
 
 err_put:
        clk_put(info->mck);
index a79566d056666f0d0449785856b679492d71456b..0610e78c8a2a8334cfa8b6d585285606af3613bc 100644 (file)
@@ -594,9 +594,13 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, host);
 
-       return ata_host_activate(host, info->irq,
-                       info->irq ? pata_s3c_irq : NULL,
-                       0, &pata_s3c_sht);
+       ret = ata_host_activate(host, info->irq,
+                               info->irq ? pata_s3c_irq : NULL,
+                               0, &pata_s3c_sht);
+       if (ret)
+               goto stop_clk;
+
+       return 0;
 
 stop_clk:
        clk_disable(info->clk);
index 1bdf104e90bb7f1924acf51f78caa39817d78efb..b621f56a36be5850b1abc4d1619b665fbce7ccf3 100644 (file)
@@ -2551,12 +2551,12 @@ done:
                timeout = 5 * 1000;
                while (atomic_read(&vc->scq->used) > 0) {
                        timeout = msleep_interruptible(timeout);
-                       if (!timeout)
+                       if (!timeout) {
+                               pr_warn("%s: SCQ drain timeout: %u used\n",
+                                       card->name, atomic_read(&vc->scq->used));
                                break;
+                       }
                }
-               if (!timeout)
-                       printk("%s: SCQ drain timeout: %u used\n",
-                              card->name, atomic_read(&vc->scq->used));
 
                writel(TCMDQ_HALT | vc->index, SAR_REG_TCMDQ);
                clear_scd(card, vc->scq, vc->class);
index 8986b9f22781fa667cf41a37b5889e8ac3be0a36..62ec61e8f84ac90d7c4e433ccc4ead67ff96fbd3 100644 (file)
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
 static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
 /**
  * deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
  * This functions moves all devices from the pending list to the active
  * list and schedules the deferred probe workqueue to process them.  It
  * should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
  */
 static void driver_deferred_probe_trigger(void)
 {
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
         * into the active list so they can be retried by the workqueue
         */
        mutex_lock(&deferred_probe_mutex);
+       atomic_inc(&deferred_trigger_count);
        list_splice_tail_init(&deferred_probe_pending_list,
                              &deferred_probe_active_list);
        mutex_unlock(&deferred_probe_mutex);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
 static int really_probe(struct device *dev, struct device_driver *drv)
 {
        int ret = 0;
+       int local_trigger_count = atomic_read(&deferred_trigger_count);
 
        atomic_inc(&probe_count);
        pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
                /* Driver requested deferred probing */
                dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
                driver_deferred_probe_add(dev);
+               /* Did a trigger occur while probing? Need to re-trigger if yes */
+               if (local_trigger_count != atomic_read(&deferred_trigger_count))
+                       driver_deferred_probe_trigger();
        } else if (ret != -ENODEV && ret != -ENXIO) {
                /* driver matched but the probe failed */
                printk(KERN_WARNING
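
The kernel-doc comment in the hunk above explains the deferred-probe race in prose. A minimal user-space sketch of the same snapshot-and-retrigger idea follows; the names (trigger_count, retrigger_deferred_probes, probe_one) are hypothetical stand-ins, not driver-core symbols. The point is only the ordering: take the snapshot before probing, and fire the trigger again if the count moved while the probe was deferring.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int trigger_count;        /* bumped whenever any probe succeeds */

static void retrigger_deferred_probes(void)
{
        atomic_fetch_add(&trigger_count, 1);
        printf("re-running deferred probes\n");
}

static void probe_one(bool (*probe)(void))
{
        int snapshot = atomic_load(&trigger_count);

        if (probe()) {
                /* Bound successfully: give deferred devices another chance. */
                retrigger_deferred_probes();
                return;
        }
        /*
         * Deferred: if some other probe succeeded while this one was running,
         * its trigger fired before this device joined the pending list, so
         * trigger deferred processing again to avoid getting stuck.
         */
        if (snapshot != atomic_load(&trigger_count))
                retrigger_deferred_probes();
}
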
index e714709704e4578ccc3703ca30108e596461ad28..5b47210889e038d72f7a172062b3c4ddd2daa07d 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/dma-mapping.h>
@@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
                return -ENXIO;
        return dev->archdata.irqs[num];
 #else
-       struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+       struct resource *r;
+       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
+               return of_irq_get(dev->dev.of_node, num);
+
+       r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
        return r ? r->start : -ENXIO;
 #endif
index 8f5565bf34cda31504e526ccc3d79d4e7fe20fd2..fa9bb742df6e0becfa8bca52576f17b5bdafe2bf 100644 (file)
@@ -3067,7 +3067,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
        int ret;
 
        while (ptr) {
-               ret = copy_to_user(param, ptr, sizeof(*ptr));
+               struct floppy_raw_cmd cmd = *ptr;
+               cmd.next = NULL;
+               cmd.kernel_data = NULL;
+               ret = copy_to_user(param, &cmd, sizeof(cmd));
                if (ret)
                        return -EFAULT;
                param += sizeof(struct floppy_raw_cmd);
@@ -3121,10 +3124,11 @@ loop:
                return -ENOMEM;
        *rcmd = ptr;
        ret = copy_from_user(ptr, param, sizeof(*ptr));
-       if (ret)
-               return -EFAULT;
        ptr->next = NULL;
        ptr->buffer_length = 0;
+       ptr->kernel_data = NULL;
+       if (ret)
+               return -EFAULT;
        param += sizeof(struct floppy_raw_cmd);
        if (ptr->cmd_count > 33)
                        /* the command may now also take up the space
@@ -3140,7 +3144,6 @@ loop:
        for (i = 0; i < 16; i++)
                ptr->reply[i] = 0;
        ptr->resultcode = 0;
-       ptr->kernel_data = NULL;
 
        if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
                if (ptr->length <= 0)
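
The floppy hunks above apply a common infoleak fix: copy the structure to the stack, blank the kernel-only pointers, and only then copy it toward user space. A stripped-down sketch of that pattern follows; struct raw_cmd, export_raw_cmd and the copy_fn callback (standing in for copy_to_user) are hypothetical.

#include <stddef.h>

/* Hypothetical structure: two kernel-internal pointers plus exportable data. */
struct raw_cmd {
        struct raw_cmd *next;           /* kernel-internal chaining pointer */
        void *kernel_data;              /* kernel-internal buffer pointer   */
        unsigned char cmd[16];          /* contents safe to show user space */
};

/* copy_fn stands in for copy_to_user(); returns the number of bytes NOT copied. */
static unsigned long export_raw_cmd(const struct raw_cmd *src, void *user_dst,
                                    unsigned long (*copy_fn)(void *to,
                                                             const void *from,
                                                             size_t n))
{
        struct raw_cmd tmp = *src;      /* work on a stack copy ...            */

        tmp.next = NULL;                /* ... and scrub the kernel pointers   */
        tmp.kernel_data = NULL;         /* before anything reaches user space. */

        return copy_fn(user_dst, &tmp, sizeof(tmp));
}
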
index be571fef185da6a597fcdac3d15093ed8e47fb5b..a83b57e57b6370572d53325638355a0d94ce24bf 100644 (file)
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x3004) },
        { USB_DEVICE(0x04CA, 0x3005) },
        { USB_DEVICE(0x04CA, 0x3006) },
+       { USB_DEVICE(0x04CA, 0x3007) },
        { USB_DEVICE(0x04CA, 0x3008) },
        { USB_DEVICE(0x04CA, 0x300b) },
        { USB_DEVICE(0x0930, 0x0219) },
@@ -131,6 +132,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
index 7399303d7d9978447ff722d823c1dcece408029a..dc79f88f8717f478c8d8ab6d0a7d45849f772ac1 100644 (file)
@@ -59,6 +59,8 @@ struct btmrvl_device {
 };
 
 struct btmrvl_adapter {
+       void *hw_regs_buf;
+       u8 *hw_regs;
        u32 int_count;
        struct sk_buff_head tx_queue;
        u8 psmode;
@@ -140,7 +142,7 @@ void btmrvl_interrupt(struct btmrvl_private *priv);
 bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
 int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
 
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd);
 int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
 int btmrvl_enable_ps(struct btmrvl_private *priv);
 int btmrvl_prepare_command(struct btmrvl_private *priv);
index 2c4997ce248484703a1b859c5e518396fcdbfa64..e9dbddb0b8f1efb1f15ede65f50d80390ca370c3 100644 (file)
@@ -24,6 +24,7 @@
 #include <net/bluetooth/hci_core.h>
 
 #include "btmrvl_drv.h"
+#include "btmrvl_sdio.h"
 
 #define VERSION "1.0"
 
@@ -201,7 +202,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
        return 0;
 }
 
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
 {
        int ret;
 
@@ -337,10 +338,25 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
 
 static void btmrvl_init_adapter(struct btmrvl_private *priv)
 {
+       int buf_size;
+
        skb_queue_head_init(&priv->adapter->tx_queue);
 
        priv->adapter->ps_state = PS_AWAKE;
 
+       buf_size = ALIGN_SZ(SDIO_BLOCK_SIZE, BTSDIO_DMA_ALIGN);
+       priv->adapter->hw_regs_buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!priv->adapter->hw_regs_buf) {
+               priv->adapter->hw_regs = NULL;
+               BT_ERR("Unable to allocate buffer for hw_regs.");
+       } else {
+               priv->adapter->hw_regs =
+                       (u8 *)ALIGN_ADDR(priv->adapter->hw_regs_buf,
+                                        BTSDIO_DMA_ALIGN);
+               BT_DBG("hw_regs_buf=%p hw_regs=%p",
+                      priv->adapter->hw_regs_buf, priv->adapter->hw_regs);
+       }
+
        init_waitqueue_head(&priv->adapter->cmd_wait_q);
 }
 
@@ -348,6 +364,7 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
 {
        skb_queue_purge(&priv->adapter->tx_queue);
 
+       kfree(priv->adapter->hw_regs_buf);
        kfree(priv->adapter);
 
        priv->adapter = NULL;
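
btmrvl_init_adapter() above over-allocates hw_regs_buf and rounds the pointer up to a DMA-friendly boundary through the driver's ALIGN_SZ/ALIGN_ADDR macros. The arithmetic behind that is the usual align-up trick; the helper below is a hypothetical, self-contained illustration, not the driver's macros.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Allocate "size" usable bytes whose start is aligned to "align" (a power of
 * two). Returns the aligned pointer; *raw receives the pointer to pass to
 * free() later. Hypothetical helper mirroring the over-allocate-then-align-up
 * idea used for hw_regs_buf/hw_regs above.
 */
static void *alloc_aligned(size_t size, uintptr_t align, void **raw)
{
        unsigned char *p = malloc(size + align - 1);    /* worst-case padding */

        *raw = p;
        if (!p)
                return NULL;
        /* Round the address up to the next multiple of "align". */
        return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
        void *raw;
        void *buf = alloc_aligned(256, 64, &raw);

        printf("aligned to 64 bytes: %d\n", buf && (uintptr_t)buf % 64 == 0);
        free(raw);
        return 0;
}
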
index 1b52c9f5230d324d0476a2d7dd3f308e7fd723d8..9dedca516ff50567a278fb9a511dbcdfd7a1980c 100644 (file)
@@ -64,6 +64,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
        .io_port_0 = 0x00,
        .io_port_1 = 0x01,
        .io_port_2 = 0x02,
+       .int_read_to_clear = false,
 };
 static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
        .cfg = 0x00,
@@ -80,6 +81,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
        .io_port_0 = 0x78,
        .io_port_1 = 0x79,
        .io_port_2 = 0x7a,
+       .int_read_to_clear = false,
 };
 
 static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
@@ -97,6 +99,9 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
        .io_port_0 = 0xd8,
        .io_port_1 = 0xd9,
        .io_port_2 = 0xda,
+       .int_read_to_clear = true,
+       .host_int_rsr = 0x01,
+       .card_misc_cfg = 0xcc,
 };
 
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
@@ -667,46 +672,78 @@ static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv)
        return 0;
 }
 
-static void btmrvl_sdio_interrupt(struct sdio_func *func)
+static int btmrvl_sdio_read_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
 {
-       struct btmrvl_private *priv;
-       struct btmrvl_sdio_card *card;
-       ulong flags;
-       u8 ireg = 0;
+       struct btmrvl_adapter *adapter = card->priv->adapter;
        int ret;
 
-       card = sdio_get_drvdata(func);
-       if (!card || !card->priv) {
-               BT_ERR("sbi_interrupt(%p) card or priv is "
-                               "NULL, card=%p\n", func, card);
-               return;
+       ret = sdio_readsb(card->func, adapter->hw_regs, 0, SDIO_BLOCK_SIZE);
+       if (ret) {
+               BT_ERR("sdio_readsb: read int hw_regs failed: %d", ret);
+               return ret;
        }
 
-       priv = card->priv;
+       *ireg = adapter->hw_regs[card->reg->host_intstatus];
+       BT_DBG("hw_regs[%#x]=%#x", card->reg->host_intstatus, *ireg);
+
+       return 0;
+}
 
-       ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
+static int btmrvl_sdio_write_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
+{
+       int ret;
+
+       *ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
        if (ret) {
-               BT_ERR("sdio_readb: read int status register failed");
-               return;
+               BT_ERR("sdio_readb: read int status failed: %d", ret);
+               return ret;
        }
 
-       if (ireg != 0) {
+       if (*ireg) {
                /*
                 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
                 * Clear the interrupt status register and re-enable the
                 * interrupt.
                 */
-               BT_DBG("ireg = 0x%x", ireg);
+               BT_DBG("int_status = 0x%x", *ireg);
 
-               sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS |
-                                       UP_LD_HOST_INT_STATUS),
-                               card->reg->host_intstatus, &ret);
+               sdio_writeb(card->func, ~(*ireg) & (DN_LD_HOST_INT_STATUS |
+                                                   UP_LD_HOST_INT_STATUS),
+                           card->reg->host_intstatus, &ret);
                if (ret) {
-                       BT_ERR("sdio_writeb: clear int status register failed");
-                       return;
+                       BT_ERR("sdio_writeb: clear int status failed: %d", ret);
+                       return ret;
                }
        }
 
+       return 0;
+}
+
+static void btmrvl_sdio_interrupt(struct sdio_func *func)
+{
+       struct btmrvl_private *priv;
+       struct btmrvl_sdio_card *card;
+       ulong flags;
+       u8 ireg = 0;
+       int ret;
+
+       card = sdio_get_drvdata(func);
+       if (!card || !card->priv) {
+               BT_ERR("sbi_interrupt(%p) card or priv is "
+                               "NULL, card=%p\n", func, card);
+               return;
+       }
+
+       priv = card->priv;
+
+       if (card->reg->int_read_to_clear)
+               ret = btmrvl_sdio_read_to_clear(card, &ireg);
+       else
+               ret = btmrvl_sdio_write_to_clear(card, &ireg);
+
+       if (ret)
+               return;
+
        spin_lock_irqsave(&priv->driver_lock, flags);
        sdio_ireg |= ireg;
        spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -777,6 +814,30 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
 
        BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport);
 
+       if (card->reg->int_read_to_clear) {
+               reg = sdio_readb(func, card->reg->host_int_rsr, &ret);
+               if (ret < 0) {
+                       ret = -EIO;
+                       goto release_irq;
+               }
+               sdio_writeb(func, reg | 0x3f, card->reg->host_int_rsr, &ret);
+               if (ret < 0) {
+                       ret = -EIO;
+                       goto release_irq;
+               }
+
+               reg = sdio_readb(func, card->reg->card_misc_cfg, &ret);
+               if (ret < 0) {
+                       ret = -EIO;
+                       goto release_irq;
+               }
+               sdio_writeb(func, reg | 0x10, card->reg->card_misc_cfg, &ret);
+               if (ret < 0) {
+                       ret = -EIO;
+                       goto release_irq;
+               }
+       }
+
        sdio_set_drvdata(func, card);
 
        sdio_release_host(func);
index 43d35a609ca9a94795afb731d230fa88ca109bef..d4dd3b0fa53d16d68da0101494e43ca4664716c2 100644 (file)
@@ -78,6 +78,9 @@ struct btmrvl_sdio_card_reg {
        u8 io_port_0;
        u8 io_port_1;
        u8 io_port_2;
+       bool int_read_to_clear;
+       u8 host_int_rsr;
+       u8 card_misc_cfg;
 };
 
 struct btmrvl_sdio_card {
index f338b0c5a8de507a153b6761886b943c702176a4..a7dfbf9a3afb6be53e372f78d9ee8202bdb17d08 100644 (file)
@@ -152,6 +152,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
@@ -1485,10 +1486,8 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_BCM92035)
                hdev->setup = btusb_setup_bcm92035;
 
-       if (id->driver_info & BTUSB_INTEL) {
-               usb_enable_autosuspend(data->udev);
+       if (id->driver_info & BTUSB_INTEL)
                hdev->setup = btusb_setup_intel;
-       }
 
        /* Interface numbers are hardcoded in the specification */
        data->isoc = usb_ifnum_to_if(data->udev, 1);
index 7048a583fe51a695a044ad541a0f894e3bf7c510..66db9a803373efb92c8966c9f69fdb6b8ec7aa59 100644 (file)
@@ -55,13 +55,6 @@ struct h4_struct {
        struct sk_buff_head txq;
 };
 
-/* H4 receiver States */
-#define H4_W4_PACKET_TYPE      0
-#define H4_W4_EVENT_HDR                1
-#define H4_W4_ACL_HDR          2
-#define H4_W4_SCO_HDR          3
-#define H4_W4_DATA             4
-
 /* Initialize protocol */
 static int h4_open(struct hci_uart *hu)
 {
index 293e2e0a0a87c7d9877c27524fd98503ba16c1cf..00b73448b22ea7b77a55785cfbbddf022bf4bfd3 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/debugfs.h>
+#include <linux/log2.h>
 
 /*
  * DDR target is the same on all platforms.
@@ -222,12 +223,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
                 */
                if ((u64)base < wend && end > wbase)
                        return 0;
-
-               /*
-                * Check if target/attribute conflicts
-                */
-               if (target == wtarget && attr == wattr)
-                       return 0;
        }
 
        return 1;
@@ -266,6 +261,17 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
                mbus->soc->win_cfg_offset(win);
        u32 ctrl, remap_addr;
 
+       if (!is_power_of_2(size)) {
+               WARN(true, "Invalid MBus window size: 0x%zx\n", size);
+               return -EINVAL;
+       }
+
+       if ((base & (phys_addr_t)(size - 1)) != 0) {
+               WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
+                    size);
+               return -EINVAL;
+       }
+
        ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
                (attr << WIN_CTRL_ATTR_SHIFT)    |
                (target << WIN_CTRL_TGT_SHIFT)   |
@@ -413,6 +419,10 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
                           win, (unsigned long long)wbase,
                           (unsigned long long)(wbase + wsize), wtarget, wattr);
 
+               if (!is_power_of_2(wsize) ||
+                   ((wbase & (u64)(wsize - 1)) != 0))
+                       seq_puts(seq, " (Invalid base/size!!)");
+
                if (win < mbus->soc->num_remappable_wins) {
                        seq_printf(seq, " (remap %016llx)\n",
                                   (unsigned long long)wremap);
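
The added checks encode two MBus window rules: the size must be a power of two, and the base must be a multiple of that size. In bit terms these are the two tests sketched below; window_ok is a hypothetical helper, not part of mvebu-mbus.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical validity check mirroring the two WARN()s added above. */
static bool window_ok(uint64_t base, uint64_t size)
{
        /* A power of two has exactly one bit set: size & (size - 1) == 0. */
        bool pow2 = size != 0 && (size & (size - 1)) == 0;
        /*
         * For a power-of-two size, "base is a multiple of size" is the same
         * as the low log2(size) bits of base all being zero.
         */
        bool aligned = (base & (size - 1)) == 0;

        return pow2 && aligned;
}

int main(void)
{
        /* A 1 MiB window at 0xe0000000 passes; a 3 MiB window is rejected. */
        printf("%d %d\n", window_ok(0xe0000000ULL, 0x100000ULL),
               window_ok(0xe0000000ULL, 0x300000ULL));
        return 0;
}
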
index 8121b4c70edec77114e20295e195fa1d5f2cc797..b29703324e9431d24c5c0b588efbae14d8d2d5ba 100644 (file)
@@ -730,6 +730,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
 
        agp_copy_info(agp_bridge, &kerninfo);
 
+       memset(&userinfo, 0, sizeof(userinfo));
        userinfo.version.major = kerninfo.version.major;
        userinfo.version.minor = kerninfo.version.minor;
        userinfo.bridge_id = kerninfo.device->vendor |
index 6b75713d953a4e719cd33610f6034bec430c5d95..102c50d38902ca43fed85641618ba202a7251679 100644 (file)
@@ -995,8 +995,11 @@ retry:
                ibytes = min_t(size_t, ibytes, have_bytes - reserved);
        if (ibytes < min)
                ibytes = 0;
-       entropy_count = max_t(int, 0,
-                             entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
+       if (have_bytes >= ibytes + reserved)
+               entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+       else
+               entropy_count = reserved << (ENTROPY_SHIFT + 3);
+
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
                goto retry;
 
index b3ea223585bdeac64de19606f96b21b280c96210..61dcc8011ec711246727ce5c2750277f33c219f4 100644 (file)
@@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent)
        /* Cache TPM ACPI handle and version string */
        acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
                            ppi_callback, NULL, NULL, &tpm_ppi_handle);
-       if (tpm_ppi_handle == NULL)
-               return -ENODEV;
-
-       return sysfs_create_group(parent, &ppi_attr_grp);
+       return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0;
 }
 
 void tpm_remove_ppi(struct kobject *parent)
 {
-       sysfs_remove_group(parent, &ppi_attr_grp);
+       if (tpm_ppi_handle)
+               sysfs_remove_group(parent, &ppi_attr_grp);
 }
index c7607feb18dd159b7626fd48340c764f50738c10..54a06526f64f09d7a49b8e043bd91d2e3d2dfffd 100644 (file)
@@ -27,7 +27,7 @@ LIST_HEAD(ccu_list);  /* The list of set up CCUs */
 
 static bool clk_requires_trigger(struct kona_clk *bcm_clk)
 {
-       struct peri_clk_data *peri = bcm_clk->peri;
+       struct peri_clk_data *peri = bcm_clk->u.peri;
        struct bcm_clk_sel *sel;
        struct bcm_clk_div *div;
 
@@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
        u32 limit;
 
        BUG_ON(bcm_clk->type != bcm_clk_peri);
-       peri = bcm_clk->peri;
+       peri = bcm_clk->u.peri;
        name = bcm_clk->name;
        range = bcm_clk->ccu->range;
 
@@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
 
        div = &peri->div;
        if (divider_exists(div)) {
-               if (div->offset > limit) {
+               if (div->u.s.offset > limit) {
                        pr_err("%s: bad divider offset for %s (%u > %u)\n",
-                               __func__, name, div->offset, limit);
+                               __func__, name, div->u.s.offset, limit);
                        return false;
                }
        }
 
        div = &peri->pre_div;
        if (divider_exists(div)) {
-               if (div->offset > limit) {
+               if (div->u.s.offset > limit) {
                        pr_err("%s: bad pre-divider offset for %s "
                                        "(%u > %u)\n",
-                               __func__, name, div->offset, limit);
+                               __func__, name, div->u.s.offset, limit);
                        return false;
                }
        }
@@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
 {
        if (divider_is_fixed(div)) {
                /* Any fixed divider value but 0 is OK */
-               if (div->fixed == 0) {
+               if (div->u.fixed == 0) {
                        pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
                                field_name, clock_name);
                        return false;
                }
                return true;
        }
-       if (!bitfield_valid(div->shift, div->width, field_name, clock_name))
+       if (!bitfield_valid(div->u.s.shift, div->u.s.width,
+                               field_name, clock_name))
                return false;
 
        if (divider_has_fraction(div))
-               if (div->frac_width > div->width) {
+               if (div->u.s.frac_width > div->u.s.width) {
                        pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
                                __func__, field_name, clock_name,
-                               div->frac_width, div->width);
+                               div->u.s.frac_width, div->u.s.width);
                        return false;
                }
 
@@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
  */
 static bool kona_dividers_valid(struct kona_clk *bcm_clk)
 {
-       struct peri_clk_data *peri = bcm_clk->peri;
+       struct peri_clk_data *peri = bcm_clk->u.peri;
        struct bcm_clk_div *div;
        struct bcm_clk_div *pre_div;
        u32 limit;
@@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk)
 
        limit = BITS_PER_BYTE * sizeof(u32);
 
-       return div->frac_width + pre_div->frac_width <= limit;
+       return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
 }
 
 
@@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk)
        if (!peri_clk_data_offsets_valid(bcm_clk))
                return false;
 
-       peri = bcm_clk->peri;
+       peri = bcm_clk->u.peri;
        name = bcm_clk->name;
        gate = &peri->gate;
        if (gate_exists(gate) && !gate_valid(gate, "gate", name))
@@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk)
 {
        switch (bcm_clk->type) {
        case bcm_clk_peri:
-               peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data);
+               peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
                break;
        default:
                break;
        }
-       bcm_clk->data = NULL;
+       bcm_clk->u.data = NULL;
        bcm_clk->type = bcm_clk_none;
 }
 
@@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
                break;
        }
        bcm_clk->type = type;
-       bcm_clk->data = data;
+       bcm_clk->u.data = data;
 
        /* Make sure everything makes sense before we set it up */
        if (!kona_clk_valid(bcm_clk)) {
index e3d339e08309f66ac61d2286c7c28bca9c4cec76..db11a87449f236c6bc1fcf4e5f6084750a1d4f3a 100644 (file)
@@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor)
 /* Convert a divider into the scaled divisor value it represents. */
 static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
 {
-       return (u64)reg_div + ((u64)1 << div->frac_width);
+       return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
 }
 
 /*
@@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
        BUG_ON(billionths >= BILLION);
 
        combined = (u64)div_value * BILLION + billionths;
-       combined <<= div->frac_width;
+       combined <<= div->u.s.frac_width;
 
        return do_div_round_closest(combined, BILLION);
 }
@@ -87,7 +87,7 @@ static inline u64
 scaled_div_min(struct bcm_clk_div *div)
 {
        if (divider_is_fixed(div))
-               return (u64)div->fixed;
+               return (u64)div->u.fixed;
 
        return scaled_div_value(div, 0);
 }
@@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div)
        u32 reg_div;
 
        if (divider_is_fixed(div))
-               return (u64)div->fixed;
+               return (u64)div->u.fixed;
 
-       reg_div = ((u32)1 << div->width) - 1;
+       reg_div = ((u32)1 << div->u.s.width) - 1;
 
        return scaled_div_value(div, reg_div);
 }
@@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div)
        BUG_ON(scaled_div < scaled_div_min(div));
        BUG_ON(scaled_div > scaled_div_max(div));
 
-       return (u32)(scaled_div - ((u64)1 << div->frac_width));
+       return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
 }
 
 /* Return a rate scaled for use when dividing by a scaled divisor. */
@@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate)
        if (divider_is_fixed(div))
                return (u64)rate;
 
-       return (u64)rate << div->frac_width;
+       return (u64)rate << div->u.s.frac_width;
 }
 
 /* CCU access */
@@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
        u32 reg_div;
 
        if (divider_is_fixed(div))
-               return (u64)div->fixed;
+               return (u64)div->u.fixed;
 
        flags = ccu_lock(ccu);
-       reg_val = __ccu_read(ccu, div->offset);
+       reg_val = __ccu_read(ccu, div->u.s.offset);
        ccu_unlock(ccu, flags);
 
        /* Extract the full divider field from the register value */
-       reg_div = bitfield_extract(reg_val, div->shift, div->width);
+       reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);
 
        /* Return the scaled divisor value it represents */
        return scaled_div_value(div, reg_div);
@@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
         * state was defined in the device tree, we just find out
         * what its current value is rather than updating it.
         */
-       if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
-               reg_val = __ccu_read(ccu, div->offset);
-               reg_div = bitfield_extract(reg_val, div->shift, div->width);
-               div->scaled_div = scaled_div_value(div, reg_div);
+       if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
+               reg_val = __ccu_read(ccu, div->u.s.offset);
+               reg_div = bitfield_extract(reg_val, div->u.s.shift,
+                                               div->u.s.width);
+               div->u.s.scaled_div = scaled_div_value(div, reg_div);
 
                return 0;
        }
 
        /* Convert the scaled divisor to the value we need to record */
-       reg_div = divider(div, div->scaled_div);
+       reg_div = divider(div, div->u.s.scaled_div);
 
        /* Clock needs to be enabled before changing the rate */
        enabled = __is_clk_gate_enabled(ccu, gate);
@@ -452,9 +453,10 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
        }
 
        /* Replace the divider value and record the result */
-       reg_val = __ccu_read(ccu, div->offset);
-       reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
-       __ccu_write(ccu, div->offset, reg_val);
+       reg_val = __ccu_read(ccu, div->u.s.offset);
+       reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
+                                       reg_div);
+       __ccu_write(ccu, div->u.s.offset, reg_val);
 
        /* If the trigger fails we still want to disable the gate */
        if (!__clk_trigger(ccu, trig))
@@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 
        BUG_ON(divider_is_fixed(div));
 
-       previous = div->scaled_div;
+       previous = div->u.s.scaled_div;
        if (previous == scaled_div)
                return 0;       /* No change */
 
-       div->scaled_div = scaled_div;
+       div->u.s.scaled_div = scaled_div;
 
        flags = ccu_lock(ccu);
        __ccu_write_enable(ccu);
@@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
        ccu_unlock(ccu, flags);
 
        if (ret)
-               div->scaled_div = previous;             /* Revert the change */
+               div->u.s.scaled_div = previous;         /* Revert the change */
 
        return ret;
 
@@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 static int kona_peri_clk_enable(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+       struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
        return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
 }
@@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw)
 static void kona_peri_clk_disable(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+       struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
        (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
 }
@@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw)
 static int kona_peri_clk_is_enabled(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+       struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
        return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
 }
@@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
                        unsigned long parent_rate)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
 
        return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
                                parent_rate);
@@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long *parent_rate)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct bcm_clk_div *div = &bcm_clk->peri->div;
+       struct bcm_clk_div *div = &bcm_clk->u.peri->div;
 
        if (!divider_exists(div))
                return __clk_get_rate(hw->clk);
 
        /* Quietly avoid a zero rate */
-       return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div,
+       return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
                                rate ? rate : 1, *parent_rate, NULL);
 }
 
 static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_sel *sel = &data->sel;
        struct bcm_clk_trig *trig;
        int ret;
@@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
        u8 index;
 
        index = selector_read_index(bcm_clk->ccu, &data->sel);
@@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long parent_rate)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
-       struct peri_clk_data *data = bcm_clk->peri;
+       struct peri_clk_data *data = bcm_clk->u.peri;
        struct bcm_clk_div *div = &data->div;
        u64 scaled_div = 0;
        int ret;
@@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = {
 static bool __peri_clk_init(struct kona_clk *bcm_clk)
 {
        struct ccu_data *ccu = bcm_clk->ccu;
-       struct peri_clk_data *peri = bcm_clk->peri;
+       struct peri_clk_data *peri = bcm_clk->u.peri;
        const char *name = bcm_clk->name;
        struct bcm_clk_trig *trig;
 
index 5e139adc3dc5909deee925ef38d2915894d52014..dee690951bb6c21677515039d5cac39770418414 100644 (file)
@@ -57,7 +57,7 @@
 #define divider_exists(div)            FLAG_TEST(div, DIV, EXISTS)
 #define divider_is_fixed(div)          FLAG_TEST(div, DIV, FIXED)
 #define divider_has_fraction(div)      (!divider_is_fixed(div) && \
-                                               (div)->frac_width > 0)
+                                               (div)->u.s.frac_width > 0)
 
 #define selector_exists(sel)           ((sel)->width != 0)
 #define trigger_exists(trig)           FLAG_TEST(trig, TRIG, EXISTS)
@@ -244,9 +244,9 @@ struct bcm_clk_div {
                        u32 frac_width; /* field fraction width */
 
                        u64 scaled_div; /* scaled divider value */
-               };
+               } s;
                u32 fixed;      /* non-zero fixed divider value */
-       };
+       } u;
        u32 flags;              /* BCM_CLK_DIV_FLAGS_* below */
 };
 
@@ -263,28 +263,28 @@ struct bcm_clk_div {
 /* A fixed (non-zero) divider */
 #define FIXED_DIVIDER(_value)                                          \
        {                                                               \
-               .fixed = (_value),                                      \
+               .u.fixed = (_value),                                    \
                .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED),            \
        }
 
 /* A divider with an integral divisor */
 #define DIVIDER(_offset, _shift, _width)                               \
        {                                                               \
-               .offset = (_offset),                                    \
-               .shift = (_shift),                                      \
-               .width = (_width),                                      \
-               .scaled_div = BAD_SCALED_DIV_VALUE,                     \
+               .u.s.offset = (_offset),                                \
+               .u.s.shift = (_shift),                                  \
+               .u.s.width = (_width),                                  \
+               .u.s.scaled_div = BAD_SCALED_DIV_VALUE,                 \
                .flags = FLAG(DIV, EXISTS),                             \
        }
 
 /* A divider whose divisor has an integer and fractional part */
 #define FRAC_DIVIDER(_offset, _shift, _width, _frac_width)             \
        {                                                               \
-               .offset = (_offset),                                    \
-               .shift = (_shift),                                      \
-               .width = (_width),                                      \
-               .frac_width = (_frac_width),                            \
-               .scaled_div = BAD_SCALED_DIV_VALUE,                     \
+               .u.s.offset = (_offset),                                \
+               .u.s.shift = (_shift),                                  \
+               .u.s.width = (_width),                                  \
+               .u.s.frac_width = (_frac_width),                        \
+               .u.s.scaled_div = BAD_SCALED_DIV_VALUE,                 \
                .flags = FLAG(DIV, EXISTS),                             \
        }
 
@@ -380,7 +380,7 @@ struct kona_clk {
        union {
                void *data;
                struct peri_clk_data *peri;
-       };
+       } u;
 };
 #define to_kona_clk(_hw) \
        container_of(_hw, struct kona_clk, hw)
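
The divider hunks above replace the anonymous union and struct in struct bcm_clk_div with named members ("u" and "s"), so every field access and designated initializer goes through div->u.s.<field> or .u.fixed. Below is a minimal standalone C sketch of that initializer style, not part of the patch; it uses a subset of the field names from the struct above, and the register offset/shift/width values are invented for illustration.

#include <stdio.h>

struct demo_div {
	union {
		struct {
			unsigned int offset;		/* divider register offset */
			unsigned int shift;		/* field shift */
			unsigned int width;		/* field width */
			unsigned long long scaled_div;	/* scaled divider value */
		} s;
		unsigned int fixed;			/* non-zero fixed divider value */
	} u;
	unsigned int flags;
};

int main(void)
{
	/* Mirrors the reworked DIVIDER() macro; the numbers are placeholders. */
	struct demo_div div = {
		.u.s.offset = 0x14,
		.u.s.shift = 8,
		.u.s.width = 4,
		.u.s.scaled_div = ~0ULL,	/* stand-in for BAD_SCALED_DIV_VALUE */
	};

	printf("offset=0x%x shift=%u width=%u\n",
	       div.u.s.offset, div.u.s.shift, div.u.s.width);
	return 0;
}
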
index ec22112e569f7f3dc8f7c9477d0e3f99951e66c6..4637697c139f634a36d81169bad5b96c3553473d 100644 (file)
@@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
        return true;
 }
 
+static int _round_up_table(const struct clk_div_table *table, int div)
+{
+       const struct clk_div_table *clkt;
+       int up = _get_table_maxdiv(table);
+
+       for (clkt = table; clkt->div; clkt++) {
+               if (clkt->div == div)
+                       return clkt->div;
+               else if (clkt->div < div)
+                       continue;
+
+               if ((clkt->div - div) < (up - div))
+                       up = clkt->div;
+       }
+
+       return up;
+}
+
+static int _div_round_up(struct clk_divider *divider,
+               unsigned long parent_rate, unsigned long rate)
+{
+       int div = DIV_ROUND_UP(parent_rate, rate);
+
+       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+               div = __roundup_pow_of_two(div);
+       if (divider->table)
+               div = _round_up_table(divider->table, div);
+
+       return div;
+}
+
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
                unsigned long *best_parent_rate)
 {
@@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 
        if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
                parent_rate = *best_parent_rate;
-               bestdiv = DIV_ROUND_UP(parent_rate, rate);
+               bestdiv = _div_round_up(divider, parent_rate, rate);
                bestdiv = bestdiv == 0 ? 1 : bestdiv;
                bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
                return bestdiv;
@@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
        u32 val;
 
        div = DIV_ROUND_UP(parent_rate, rate);
+
+       if (!_is_valid_div(divider, div))
+               return -EINVAL;
+
        value = _get_val(divider, div);
 
        if (value > div_mask(divider))
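
The new _round_up_table()/_div_round_up() helpers pick the smallest valid divider that is greater than or equal to the requested one, falling back to the largest table entry when nothing qualifies (the kernel version additionally rounds up to a power of two when CLK_DIVIDER_POWER_OF_TWO is set). A standalone C sketch of that search, with an invented divider table and an assumed table maximum:

#include <stdio.h>

struct div_table {
	unsigned int val;	/* register value, unused in this sketch */
	unsigned int div;	/* divider; 0 terminates the table */
};

static const struct div_table table[] = {
	{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 }, { 0, 0 },
};

static int round_up_table(const struct div_table *clkt, int div)
{
	int up = 8;	/* assumed maximum divider in the table */

	for (; clkt->div; clkt++) {
		if ((int)clkt->div == div)
			return clkt->div;
		if ((int)clkt->div < div)
			continue;
		if ((int)clkt->div - div < up - div)
			up = clkt->div;
	}
	return up;
}

int main(void)
{
	printf("3 rounds up to %d\n", round_up_table(table, 3));	/* 4 */
	printf("9 rounds up to %d\n", round_up_table(table, 9));	/* 8, the table maximum */
	return 0;
}
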
index dff0373f53c1fb1df1784366a28c35323cde17a5..7cf2c093cc54f28dbebd9482e43233a49daf02e3 100644 (file)
@@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(__clk_register);
 
-static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the primary interface for populating the clock tree with new
+ * clock nodes.  It returns a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjunction with the
+ * rest of the clock API.  In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
        int i, ret;
+       struct clk *clk;
+
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+       if (!clk) {
+               pr_err("%s: could not allocate clk\n", __func__);
+               ret = -ENOMEM;
+               goto fail_out;
+       }
 
        clk->name = kstrdup(hw->init->name, GFP_KERNEL);
        if (!clk->name) {
@@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
 
        ret = __clk_init(dev, clk);
        if (!ret)
-               return 0;
+               return clk;
 
 fail_parent_names_copy:
        while (--i >= 0)
@@ -2035,36 +2054,6 @@ fail_parent_names_copy:
 fail_parent_names:
        kfree(clk->name);
 fail_name:
-       return ret;
-}
-
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes.  It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API.  In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
-{
-       int ret;
-       struct clk *clk;
-
-       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
-       if (!clk) {
-               pr_err("%s: could not allocate clk\n", __func__);
-               ret = -ENOMEM;
-               goto fail_out;
-       }
-
-       ret = _clk_register(dev, hw, clk);
-       if (!ret)
-               return clk;
-
        kfree(clk);
 fail_out:
        return ERR_PTR(ret);
@@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk)
 
        if (!hlist_empty(&clk->children)) {
                struct clk *child;
+               struct hlist_node *t;
 
                /* Reparent all children to the orphan list. */
-               hlist_for_each_entry(child, &clk->children, child_node)
+               hlist_for_each_entry_safe(child, t, &clk->children, child_node)
                        clk_set_parent(child, NULL);
        }
 
@@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister);
 
 static void devm_clk_release(struct device *dev, void *res)
 {
-       clk_unregister(res);
+       clk_unregister(*(struct clk **)res);
 }
 
 /**
@@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res)
 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
 {
        struct clk *clk;
-       int ret;
+       struct clk **clkp;
 
-       clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
-       if (!clk)
+       clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+       if (!clkp)
                return ERR_PTR(-ENOMEM);
 
-       ret = _clk_register(dev, hw, clk);
-       if (!ret) {
-               devres_add(dev, clk);
+       clk = clk_register(dev, hw);
+       if (!IS_ERR(clk)) {
+               *clkp = clk;
+               devres_add(dev, clkp);
        } else {
-               devres_free(clk);
-               clk = ERR_PTR(ret);
+               devres_free(clkp);
        }
 
        return clk;
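
With _clk_register() folded back into clk_register(), devm_clk_register() now keeps only a struct clk * inside the devres allocation, which is why devm_clk_release() dereferences its argument before unregistering. A plain C analogue of that pointer-in-resource pattern; the types and the manual calls below stand in for the kernel's devres machinery and are not real API:

#include <stdio.h>
#include <stdlib.h>

struct clk {
	const char *name;
};

/* Mirrors devm_clk_release(): the managed resource is a struct clk ** slot. */
static void demo_clk_release(void *res)
{
	struct clk *clk = *(struct clk **)res;

	printf("unregistering %s\n", clk->name);
	free(clk);
}

int main(void)
{
	struct clk *clk = malloc(sizeof(*clk));		/* plays the clk_register() result */
	struct clk **clkp = malloc(sizeof(*clkp));	/* plays the devres_alloc() result */

	if (!clk || !clkp)
		return 1;

	clk->name = "demo_clk";
	*clkp = clk;			/* devres_add() would attach clkp to the device */

	demo_clk_release(clkp);		/* the devres core calls this on device teardown */
	free(clkp);
	return 0;
}
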
index 2e5810c88d1150874ece970fb64136a450ef64a6..1f6324e29a8099b09930f26234c703d7973be686 100644 (file)
@@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name,
 static void __init cpg_mstp_clocks_init(struct device_node *np)
 {
        struct mstp_clock_group *group;
+       const char *idxname;
        struct clk **clks;
        unsigned int i;
 
@@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
        for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
                clks[i] = ERR_PTR(-ENOENT);
 
+       if (of_find_property(np, "clock-indices", &i))
+               idxname = "clock-indices";
+       else
+               idxname = "renesas,clock-indices";
+
        for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
                const char *parent_name;
                const char *name;
@@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
                        continue;
 
                parent_name = of_clk_get_parent_name(np, i);
-               ret = of_property_read_u32_index(np, "renesas,clock-indices", i,
-                                                &clkidx);
+               ret = of_property_read_u32_index(np, idxname, i, &clkidx);
                if (parent_name == NULL || ret < 0)
                        break;
 
index 88dafb5e96270fa13923c5a95792c910a1bd00d4..de6da957a09d6ebe82f416370c84a7dc50acea8e 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 
 #include "clk.h"
 
@@ -43,6 +44,8 @@
 
 #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
 
+void __iomem *clk_mgr_base_addr;
+
 static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
                                         unsigned long parent_rate)
 {
@@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
        const char *clk_name = node->name;
        const char *parent_name[SOCFPGA_MAX_PARENTS];
        struct clk_init_data init;
+       struct device_node *clkmgr_np;
        int rc;
        int i = 0;
 
@@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
        if (WARN_ON(!pll_clk))
                return NULL;
 
+       clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
+       clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
+       BUG_ON(!clk_mgr_base_addr);
        pll_clk->hw.reg = clk_mgr_base_addr + reg;
 
        of_property_read_string(node, "clock-output-names", &clk_name);
index 35a960a993f95c72b6247e52032c4184cbdd00b1..43db947e5f0e51e60c232832d379faca9ca386d3 100644 (file)
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
 #include <linux/of.h>
-#include <linux/of_address.h>
 
 #include "clk.h"
 
-void __iomem *clk_mgr_base_addr;
-
-static const struct of_device_id socfpga_child_clocks[] __initconst = {
-       { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, },
-       { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, },
-       { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, },
-       {},
-};
-
-static void __init socfpga_clkmgr_init(struct device_node *node)
-{
-       clk_mgr_base_addr = of_iomap(node, 0);
-       of_clk_init(socfpga_child_clocks);
-}
-CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init);
+CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init);
+CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init);
+CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init);
 
index 0d20241e07704df196c72ef22510775d22f5bfde..e1769addf435ef1dd7e1643c6f6afc799e55f013 100644 (file)
@@ -1718,7 +1718,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
                                        "pll_re_vco");
        } else {
                val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
-               pll_writel(val, pll_params->aux_reg, pll);
+               pll_writel(val_aux, pll_params->aux_reg, pll);
        }
 
        clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
index 166e02f16c8a25f28f4441584dc6e8babc448f3b..cc37c342c4cb9a18dd355a66e06a993ff43de4e5 100644 (file)
@@ -764,7 +764,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
        [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
        [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
-       [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
        [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
        [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
        [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
@@ -809,7 +808,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_trace] = { .dt_id = TEGRA124_CLK_TRACE, .present = true },
        [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
        [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
-       [tegra_clk_ndspeed] = { .dt_id = TEGRA124_CLK_NDSPEED, .present = true },
        [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
        [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
        [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
@@ -952,7 +950,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
        [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
        [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
-       [tegra_clk_uarte] = { .dt_id = TEGRA124_CLK_UARTE, .present = true },
 };
 
 static struct tegra_devclk devclks[] __initdata = {
index 67c8de572c50188af9fb50a1629bcfd0a73193f0..b4877e0ee91051ddbc2cebc4d4d569c6ffc3c2cb 100644 (file)
@@ -110,9 +110,25 @@ static struct ti_dt_clk am43xx_clks[] = {
 
 int __init am43xx_dt_clk_init(void)
 {
+       struct clk *clk1, *clk2;
+
        ti_dt_clocks_register(am43xx_clks);
 
        omap2_clk_disable_autoidle_all();
 
+       /*
+        * cpsw_cpts_rft_clk has a choice of 3 clock sources:
+        * dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck.
+        * By default dpll_core_m4_ck is selected, and with this as the
+        * clock source the CPTS does not work properly.  It gives
+        * clockcheck errors while running PTP:
+        * clockcheck: clock jumped backward or running slower than expected!
+        * Selecting dpll_core_m5_ck as the clock source fixes this issue.
+        * On AM335x dpll_core_m5_ck is the default clock source.
+        */
+       clk1 = clk_get_sys(NULL, "cpsw_cpts_rft_clk");
+       clk2 = clk_get_sys(NULL, "dpll_core_m5_ck");
+       clk_set_parent(clk1, clk2);
+
        return 0;
 }
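
For reference, a hedged sketch of the same reparenting written as a standalone helper with error checking; the clock names are the ones used above, while the helper name and the error handling are assumptions and not part of this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Hypothetical defensive variant of the cpsw_cpts_rft_clk reparenting above. */
static int am43xx_cpts_select_m5(void)
{
	struct clk *cpts, *parent;
	int ret;

	cpts = clk_get_sys(NULL, "cpsw_cpts_rft_clk");
	parent = clk_get_sys(NULL, "dpll_core_m5_ck");
	if (IS_ERR(cpts) || IS_ERR(parent)) {
		ret = -ENODEV;
		goto out;
	}

	ret = clk_set_parent(cpts, parent);
out:
	if (!IS_ERR(parent))
		clk_put(parent);
	if (!IS_ERR(cpts))
		clk_put(cpts);
	return ret;
}
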
index 2dc8b41a339dba3dc359e7ee34b10de98e8eb3fe..422391242b39ceac4a322ee532288ee46b6eba32 100644 (file)
@@ -100,9 +100,11 @@ void __init vexpress_osc_of_setup(struct device_node *node)
        struct clk *clk;
        u32 range[2];
 
+       vexpress_sysreg_of_early_init();
+
        osc = kzalloc(sizeof(*osc), GFP_KERNEL);
        if (!osc)
-               goto error;
+               return;
 
        osc->func = vexpress_config_func_get_by_node(node);
        if (!osc->func) {
index 57e823c44d2ad326eeaaa788fbb67d80ba620c7d..5163ec13429d1e37082b8d32c84684284217322f 100644 (file)
@@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
 static struct clock_event_device __percpu *arch_timer_evt;
 
 static bool arch_timer_use_virtual = true;
+static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
 
 /*
@@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type,
        clk->features = CLOCK_EVT_FEAT_ONESHOT;
 
        if (type == ARCH_CP15_TIMER) {
-               clk->features |= CLOCK_EVT_FEAT_C3STOP;
+               if (arch_timer_c3stop)
+                       clk->features |= CLOCK_EVT_FEAT_C3STOP;
                clk->name = "arch_sys_timer";
                clk->rating = 450;
                clk->cpumask = cpumask_of(smp_processor_id());
@@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)
                }
        }
 
+       arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
        arch_timer_register();
        arch_timer_common_init();
 }
index a6ee6d7cd63f19a4cdad01a1a82956f990a76194..acf5a329d5387653b4359d27feb772afe389a92b 100644 (file)
@@ -416,8 +416,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
        evt->set_mode = exynos4_tick_set_mode;
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        evt->rating = 450;
-       clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
-                                       0xf, 0x7fffffff);
 
        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
@@ -430,9 +428,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
                                evt->irq);
                        return -EIO;
                }
+               irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
        } else {
                enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
        }
+       clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+                                       0xf, 0x7fffffff);
 
        return 0;
 }
@@ -450,7 +451,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                                           unsigned long action, void *hcpu)
 {
        struct mct_clock_event_device *mevt;
-       unsigned int cpu;
 
        /*
         * Grab cpu pointer in each case to avoid spurious
@@ -461,12 +461,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                mevt = this_cpu_ptr(&percpu_mct_tick);
                exynos4_local_timer_setup(&mevt->evt);
                break;
-       case CPU_ONLINE:
-               cpu = (unsigned long)hcpu;
-               if (mct_int_type == MCT_INT_SPI)
-                       irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
-                                               cpumask_of(cpu));
-               break;
        case CPU_DYING:
                mevt = this_cpu_ptr(&percpu_mct_tick);
                exynos4_local_timer_stop(&mevt->evt);
index ca81809d159d5ebf49ddcb6f9df0bc38208fefce..7ce442148c3f5dfb32449498bc9352066050688e 100644 (file)
@@ -212,4 +212,9 @@ error_free:
        return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add);
+static void __init zevio_timer_init(struct device_node *node)
+{
+       BUG_ON(zevio_timer_add(node));
+}
+
+CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
index 148d707a1d439375ef36bf2ac8e6655fe2a61042..ccdd4c7e748b3b1e63b75e08398ac7972fde66b8 100644 (file)
@@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
                return;
 
        /* Can only change if privileged. */
-       if (!capable(CAP_NET_ADMIN)) {
+       if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
                err = EPERM;
                goto out;
        }
index 0e9cce82844bf519f62b7bed24bf0783d89400e9..580503513f0f10687d46e666e19f253d7fbbfb51 100644 (file)
@@ -92,11 +92,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
 
 config ARM_HIGHBANK_CPUFREQ
        tristate "Calxeda Highbank-based"
-       depends on ARCH_HIGHBANK
-       select GENERIC_CPUFREQ_CPU0
-       select PM_OPP
-       select REGULATOR
-
+       depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR
        default m
        help
          This adds the CPUFreq driver for Calxeda Highbank SoC
index 099967302bf25939019875846f7819461a4805c2..eab8ccfe6bebed652b8fec4849a5fc9fb67d82f3 100644 (file)
@@ -37,6 +37,7 @@
 #define BYT_RATIOS             0x66a
 #define BYT_VIDS               0x66b
 #define BYT_TURBO_RATIOS       0x66c
+#define BYT_TURBO_VIDS         0x66d
 
 
 #define FRAC_BITS 6
@@ -70,8 +71,9 @@ struct pstate_data {
 };
 
 struct vid_data {
-       int32_t min;
-       int32_t max;
+       int min;
+       int max;
+       int turbo;
        int32_t ratio;
 };
 
@@ -359,14 +361,14 @@ static int byt_get_min_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 8) & 0xFF;
+       return (value >> 8) & 0x3F;
 }
 
 static int byt_get_max_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 16) & 0xFF;
+       return (value >> 16) & 0x3F;
 }
 
 static int byt_get_turbo_pstate(void)
@@ -393,6 +395,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
        vid = fp_toint(vid_fp);
 
+       if (pstate > cpudata->pstate.max_pstate)
+               vid = cpudata->vid.turbo;
+
        val |= vid;
 
        wrmsrl(MSR_IA32_PERF_CTL, val);
@@ -402,13 +407,17 @@ static void byt_get_vid(struct cpudata *cpudata)
 {
        u64 value;
 
+
        rdmsrl(BYT_VIDS, value);
-       cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
-       cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
+       cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
+       cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
        cpudata->vid.ratio = div_fp(
                cpudata->vid.max - cpudata->vid.min,
                int_tofp(cpudata->pstate.max_pstate -
                        cpudata->pstate.min_pstate));
+
+       rdmsrl(BYT_TURBO_VIDS, value);
+       cpudata->vid.turbo = value & 0x7f;
 }
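
The Baytrail hunks above narrow the ratio masks from 0xFF to 0x3F, treating the min and max P-state fields of BYT_RATIOS as 6-bit fields, and add a separate turbo VID read from BYT_TURBO_VIDS. A standalone C sketch of the 6-bit field extraction, using an invented register image:

#include <stdio.h>

/* Extract a 6-bit field, as byt_get_min_pstate()/byt_get_max_pstate() now do. */
static unsigned int field6(unsigned long long value, unsigned int shift)
{
	return (value >> shift) & 0x3F;
}

int main(void)
{
	/* Hypothetical BYT_RATIOS image: min pstate 17 at bit 8, max 37 at bit 16. */
	unsigned long long byt_ratios = 0x251100ULL;

	printf("min pstate %u, max pstate %u\n",
	       field6(byt_ratios, 8), field6(byt_ratios, 16));
	return 0;
}
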
 
 
@@ -545,12 +554,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 
        if (pstate_funcs.get_vid)
                pstate_funcs.get_vid(cpu);
-
-       /*
-        * goto max pstate so we don't slow up boot if we are built-in if we are
-        * a module we will take care of it during normal operation
-        */
-       intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+       intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }
 
 static inline void intel_pstate_calc_busy(struct cpudata *cpu,
@@ -695,11 +699,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu = all_cpu_data[cpunum];
 
        intel_pstate_get_cpu_pstates(cpu);
-       if (!cpu->pstate.current_pstate) {
-               all_cpu_data[cpunum] = NULL;
-               kfree(cpu);
-               return -ENODATA;
-       }
 
        cpu->cpu = cpunum;
 
@@ -710,7 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu->timer.expires = jiffies + HZ/100;
        intel_pstate_busy_pid_reset(cpu);
        intel_pstate_sample(cpu);
-       intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
 
        add_timer_on(&cpu->timer, cpunum);
 
index d00e5d1abd258b469bf48862a5f14b08e04f97a7..5c4369b5d834d93f05095cf6052848ec2d2ddec4 100644 (file)
@@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
  * Sets a new clock ratio.
  */
 
-static void longhaul_setstate(struct cpufreq_policy *policy,
+static int longhaul_setstate(struct cpufreq_policy *policy,
                unsigned int table_index)
 {
        unsigned int mults_index;
@@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
        /* Safety precautions */
        mult = mults[mults_index & 0x1f];
        if (mult == -1)
-               return;
+               return -EINVAL;
+
        speed = calc_speed(mult);
        if ((speed > highest_speed) || (speed < lowest_speed))
-               return;
+               return -EINVAL;
+
        /* Voltage transition before frequency transition? */
        if (can_scale_voltage && longhaul_index < table_index)
                dir = 1;
@@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
        freqs.old = calc_speed(longhaul_get_cpu_mult());
        freqs.new = speed;
 
-       cpufreq_freq_transition_begin(policy, &freqs);
-
        pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
                        fsb, mult/10, mult%10, print_speed(speed/1000));
 retry_loop:
@@ -385,12 +385,14 @@ retry_loop:
                        goto retry_loop;
                }
        }
-       /* Report true CPU frequency */
-       cpufreq_freq_transition_end(policy, &freqs, 0);
 
-       if (!bm_timeout)
+       if (!bm_timeout) {
                printk(KERN_INFO PFX "Warning: Timeout while waiting for "
                                "idle PCI bus.\n");
+               return -EBUSY;
+       }
+
+       return 0;
 }
 
 /*
@@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy,
        unsigned int i;
        unsigned int dir = 0;
        u8 vid, current_vid;
+       int retval = 0;
 
        if (!can_scale_voltage)
-               longhaul_setstate(policy, table_index);
+               retval = longhaul_setstate(policy, table_index);
        else {
                /* On test system voltage transitions exceeding single
                 * step up or down were turning motherboard off. Both
@@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
                while (i != table_index) {
                        vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
                        if (vid != current_vid) {
-                               longhaul_setstate(policy, i);
+                               retval = longhaul_setstate(policy, i);
                                current_vid = vid;
                                msleep(200);
                        }
@@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy,
                        else
                                i--;
                }
-               longhaul_setstate(policy, table_index);
+               retval = longhaul_setstate(policy, table_index);
        }
+
        longhaul_index = table_index;
-       return 0;
+       return retval;
 }
 
 
@@ -968,7 +972,15 @@ static void __exit longhaul_exit(void)
 
        for (i = 0; i < numscales; i++) {
                if (mults[i] == maxmult) {
+                       struct cpufreq_freqs freqs;
+
+                       freqs.old = policy->cur;
+                       freqs.new = longhaul_table[i].frequency;
+                       freqs.flags = 0;
+
+                       cpufreq_freq_transition_begin(policy, &freqs);
                        longhaul_setstate(policy, i);
+                       cpufreq_freq_transition_end(policy, &freqs, 0);
                        break;
                }
        }
index f0bc31f5db27a41db3d7f8556e72274b42a5eda5..d4add86219444af31891ef8c76013eaed0838282 100644 (file)
@@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
        set_cpus_allowed_ptr(current, &cpus_allowed);
 
        /* setting the cpu frequency */
-       clk_set_rate(policy->clk, freq);
+       clk_set_rate(policy->clk, freq * 1000);
 
        return 0;
 }
@@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
             i++)
                loongson2_clockmod_table[i].frequency = (rate * i) / 8;
 
-       ret = clk_set_rate(cpuclk, rate);
+       ret = clk_set_rate(cpuclk, rate * 1000);
        if (ret) {
                clk_put(cpuclk);
                return ret;
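
The loongson2 hunks multiply by 1000 because cpufreq frequency-table values are kept in kHz while clk_set_rate() takes a rate in Hz. A trivial standalone illustration of the unit conversion (the table value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int freq_khz = 797500;	/* hypothetical cpufreq table entry, in kHz */
	unsigned long rate_hz = (unsigned long)freq_khz * 1000UL;

	printf("%u kHz -> %lu Hz\n", freq_khz, rate_hz);
	return 0;
}
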
index 49f120e1bc7be0ecb879f184424d1dc56ee63981..78904e6ca4a020d53a60f4139711441a069ad447 100644 (file)
@@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
 static int powernow_k6_target(struct cpufreq_policy *policy,
                unsigned int best_i)
 {
-       struct cpufreq_freqs freqs;
 
        if (clock_ratio[best_i].driver_data > max_multiplier) {
                printk(KERN_ERR PFX "invalid target frequency\n");
                return -EINVAL;
        }
 
-       freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
-       freqs.new = busfreq * clock_ratio[best_i].driver_data;
-
-       cpufreq_freq_transition_begin(policy, &freqs);
-
        powernow_k6_set_cpu_multiplier(best_i);
 
-       cpufreq_freq_transition_end(policy, &freqs, 0);
-
        return 0;
 }
 
@@ -227,9 +219,20 @@ have_busfreq:
 static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
 {
        unsigned int i;
-       for (i = 0; i < 8; i++) {
-               if (i == max_multiplier)
+
+       for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+               if (clock_ratio[i].driver_data == max_multiplier) {
+                       struct cpufreq_freqs freqs;
+
+                       freqs.old = policy->cur;
+                       freqs.new = clock_ratio[i].frequency;
+                       freqs.flags = 0;
+
+                       cpufreq_freq_transition_begin(policy, &freqs);
                        powernow_k6_target(policy, i);
+                       cpufreq_freq_transition_end(policy, &freqs, 0);
+                       break;
+               }
        }
        return 0;
 }
index f911645c3f6db59e18f32b68c06bfc890283c029..e61e224475ad457fd71b6934b2a5256a2d96fcc0 100644 (file)
@@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
 
        freqs.new = powernow_table[index].frequency;
 
-       cpufreq_freq_transition_begin(policy, &freqs);
-
        /* Now do the magic poking into the MSRs.  */
 
        if (have_a0 == 1)       /* A0 errata 5 */
@@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
        if (have_a0 == 1)
                local_irq_enable();
 
-       cpufreq_freq_transition_end(policy, &freqs, 0);
-
        return 0;
 }
 
index 9edccc63245df25d93c22adfb6d6d608b069dfb2..af4968813e76b433acbbda0d3e6f4f28d6476e43 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <asm/cputhreads.h>
 #include <asm/reg.h>
+#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
 
 #define POWERNV_MAX_PSTATES    256
 
index b7e677be1df034cdebdea6580b5726bc77c9aa39..0af618abebafa4b44b323d1811c1f885a52e0beb 100644 (file)
@@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        struct cpufreq_frequency_table *table;
        struct cpu_data *data;
        unsigned int cpu = policy->cpu;
+       u64 transition_latency_hz;
 
        np = of_get_cpu_node(cpu, NULL);
        if (!np)
@@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        for_each_cpu(i, per_cpu(cpu_mask, cpu))
                per_cpu(cpu_data, i) = data;
 
+       transition_latency_hz = 12ULL * NSEC_PER_SEC;
        policy->cpuinfo.transition_latency =
-                               (12 * NSEC_PER_SEC) / fsl_get_sys_freq();
+               do_div(transition_latency_hz, fsl_get_sys_freq());
+
        of_node_put(np);
 
        return 0;
index 8d045afa7fb406445b4996334e22a13e9f9572d8..6f9dfa80563a344249ef153aa4f92b17b35ce158 100644 (file)
@@ -60,9 +60,7 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
        policy->max = policy->cpuinfo.max_freq = 1000000;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        policy->clk = clk_get(NULL, "MAIN_CLK");
-       if (IS_ERR(policy->clk))
-               return PTR_ERR(policy->clk);
-       return 0;
+       return PTR_ERR_OR_ZERO(policy->clk);
 }
 
 static struct cpufreq_driver ucv2_driver = {
index 9f25f5296029aeb0a22e0caf81bf35ca41b00063..0eabd81e1a902711bb838eb93dfed8e3289be610 100644 (file)
        char *tmp;                                              \
                                                                \
        tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC);  \
-       sprintf(tmp, format, param);                            \
-       strcat(str, tmp);                                       \
-       kfree(tmp);                                             \
+       if (likely(tmp)) {                                      \
+               sprintf(tmp, format, param);                    \
+               strcat(str, tmp);                               \
+               kfree(tmp);                                     \
+       } else {                                                \
+               strcat(str, "kmalloc failure in SPRINTFCAT");   \
+       }                                                       \
 }
 
 static void report_jump_idx(u32 status, char *outstr)
index ba06d1d2f99e39c50e5ed82f08fffdbbe20b7720..5c5863842de92bdd653fb0f24fdea96228c9908a 100644 (file)
@@ -197,7 +197,7 @@ config AMCC_PPC440SPE_ADMA
 
 config TIMB_DMA
        tristate "Timberdale FPGA DMA support"
-       depends on MFD_TIMBERDALE || HAS_IOMEM
+       depends on MFD_TIMBERDALE
        select DMA_ENGINE
        help
          Enable support for the Timberdale FPGA DMA engine.
index cd04eb7b182e338994f03f8f1d1f84c28d49fc40..926360c2db6abcb3b448e815246e182fe1a7d3ae 100644 (file)
@@ -182,11 +182,13 @@ static void edma_execute(struct edma_chan *echan)
                                  echan->ecc->dummy_slot);
        }
 
-       edma_resume(echan->ch_num);
-
        if (edesc->processed <= MAX_NR_SG) {
                dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
                edma_start(echan->ch_num);
+       } else {
+               dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
+                       echan->ch_num, edesc->processed);
+               edma_resume(echan->ch_num);
        }
 
        /*
index 381e793184baefdbda1806544d855a02487cda6a..b396a7fb53abb5df611e4fbd551000758f876100 100644 (file)
@@ -968,7 +968,17 @@ static struct platform_driver fsl_edma_driver = {
        .remove         = fsl_edma_remove,
 };
 
-module_platform_driver(fsl_edma_driver);
+static int __init fsl_edma_init(void)
+{
+       return platform_driver_register(&fsl_edma_driver);
+}
+subsys_initcall(fsl_edma_init);
+
+static void __exit fsl_edma_exit(void)
+{
+       platform_driver_unregister(&fsl_edma_driver);
+}
+module_exit(fsl_edma_exit);
 
 MODULE_ALIAS("platform:fsl-edma");
 MODULE_DESCRIPTION("Freescale eDMA engine driver");
index a1bd8298d55f1973313b969bf560c76967212dc4..03f7820fa333b89a445dc3831386ba371b93a1e1 100644 (file)
@@ -666,7 +666,7 @@ static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
        struct sirfsoc_dma *sdma = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];
 
-       if (request > SIRFSOC_DMA_CHANNELS)
+       if (request >= SIRFSOC_DMA_CHANNELS)
                return NULL;
 
        return dma_get_slave_channel(&sdma->channels[request].chan);
index 3ee852c9925b6a1aa1887df101662194eeb3c42c..071c2c969eec06ad929ecfb871c614297a615e9e 100644 (file)
@@ -756,6 +756,7 @@ static const struct {
         */
        { ACPI_SIG_IBFT },
        { "iBFT" },
+       { "BIFT" },     /* Broadcom iSCSI Offload */
 };
 
 static void __init acpi_find_ibft_region(void)
index e73c6755a5eb6b324d06f14f4a7cf574d025c8ca..70304220a479a9862f4f5709e6a9a88a785b487d 100644 (file)
@@ -305,6 +305,8 @@ static struct ichx_desc ich6_desc = {
 
        .ngpio = 50,
        .have_blink = true,
+       .regs = ichx_regs,
+       .reglen = ichx_reglen,
 };
 
 /* Intel 3100 */
@@ -324,6 +326,8 @@ static struct ichx_desc i3100_desc = {
        .uses_gpe0 = true,
 
        .ngpio = 50,
+       .regs = ichx_regs,
+       .reglen = ichx_reglen,
 };
 
 /* ICH7 and ICH8-based */
index 99a68310e7c09eb053f5cd28395549ece9f8babe..3d53fd6880d1970b074f6b0eda2575ea55e27476 100644 (file)
@@ -894,9 +894,11 @@ static int mcp23s08_probe(struct spi_device *spi)
                        dev_err(&spi->dev, "invalid spi-present-mask\n");
                        return -ENODEV;
                }
-
-               for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++)
+               for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
+                       if ((spi_present_mask & (1 << addr)))
+                               chips++;
                        pullups[addr] = 0;
+               }
        } else {
                type = spi_get_device_id(spi)->driver_data;
                pdata = dev_get_platdata(&spi->dev);
@@ -919,12 +921,12 @@ static int mcp23s08_probe(struct spi_device *spi)
                        pullups[addr] = pdata->chip[addr].pullups;
                }
 
-               if (!chips)
-                       return -ENODEV;
-
                base = pdata->base;
        }
 
+       if (!chips)
+               return -ENODEV;
+
        data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08),
                        GFP_KERNEL);
        if (!data)
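
In the device-tree path the mcp23s08 probe now counts one chip per bit set in spi-present-mask, so the "!chips" bail-out can be shared with the platform-data path. A standalone C sketch of the counting loop, with an assumed mask value and loop bound:

#include <stdio.h>

int main(void)
{
	unsigned int spi_present_mask = 0x0b;	/* assumed spi-present-mask value */
	unsigned int addr, chips = 0;

	/* One chip per set bit, as in the probe loop above. */
	for (addr = 0; addr < 8; addr++)
		if (spi_present_mask & (1u << addr))
			chips++;

	printf("chips = %u\n", chips);	/* 0x0b has three bits set */
	return 0;
}
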
index bf0f8b476696eeade9ff5b7947dc64628181a7ca..401add28933f4f6f39c70b626e081ec39aef554b 100644 (file)
@@ -233,7 +233,7 @@ static void acpi_gpiochip_request_interrupts(struct acpi_gpio_chip *acpi_gpio)
 {
        struct gpio_chip *chip = acpi_gpio->chip;
 
-       if (!chip->dev || !chip->to_irq)
+       if (!chip->to_irq)
                return;
 
        INIT_LIST_HEAD(&acpi_gpio->events);
@@ -253,7 +253,7 @@ static void acpi_gpiochip_free_interrupts(struct acpi_gpio_chip *acpi_gpio)
        struct acpi_gpio_event *event, *ep;
        struct gpio_chip *chip = acpi_gpio->chip;
 
-       if (!chip->dev || !chip->to_irq)
+       if (!chip->to_irq)
                return;
 
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
@@ -451,7 +451,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
                if (function == ACPI_WRITE)
                        gpiod_set_raw_value(desc, !!((1 << i) & *value));
                else
-                       *value |= gpiod_get_raw_value(desc) << i;
+                       *value |= (u64)gpiod_get_raw_value(desc) << i;
        }
 
 out:
@@ -501,6 +501,9 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
        acpi_handle handle;
        acpi_status status;
 
+       if (!chip || !chip->dev)
+               return;
+
        handle = ACPI_HANDLE(chip->dev);
        if (!handle)
                return;
@@ -531,6 +534,9 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
        acpi_handle handle;
        acpi_status status;
 
+       if (!chip || !chip->dev)
+               return;
+
        handle = ACPI_HANDLE(chip->dev);
        if (!handle)
                return;
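
One of the gpiolib-acpi hunks adds a (u64) cast before shifting the GPIO value into *value: without widening, the shift is performed on a 32-bit int, which is undefined for shift counts of 32 or more and can never set bits 32..63. A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long value = 0;
	int bit = 1;	/* stand-in for gpiod_get_raw_value() */
	int i = 40;	/* connection index beyond bit 31 */

	/*
	 * Widen before shifting; "bit << i" alone would be a 32-bit shift,
	 * undefined for i >= 32 and unable to reach the upper half of value.
	 */
	value |= (unsigned long long)bit << i;

	printf("value = 0x%llx\n", value);	/* 0x10000000000 */
	return 0;
}
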
index 761013f8b82f5a3d7c534f201a45f2c5902cd188..f48817d974802c3ec771814bef84bc97ab6039b5 100644 (file)
@@ -1387,8 +1387,8 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
 {
        struct gpio_chip *chip = d->host_data;
 
-       irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
        irq_set_chip_data(irq, chip);
+       irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
 #ifdef CONFIG_ARM
        set_irq_flags(irq, IRQF_VALID);
 #else
index e930d4fe29c71c2d82d2317bb6681ac8d3f39d6a..1ef5ab9c9d519d175b202dbf01cba243a870e1f7 100644 (file)
@@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
        plane->crtc = crtc;
        plane->fb = crtc->primary->fb;
+       drm_framebuffer_reference(plane->fb);
 
        return 0;
 }
index c786cd4f457bb8893fc4f02e8e27338832b1bc58..2a3ad24276f8380415940d00b87708f9cbe0d9e2 100644 (file)
@@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        buffer->sgt = sgt;
        exynos_gem_obj->base.import_attach = attach;
 
-       DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+       DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
                                                                buffer->size);
 
        return &exynos_gem_obj->base;
index eb73e3bf2a0cbe6e56f9ae6b5d81b188038f05e3..4ac438187568ed4a6894436433e404c94b90a7a2 100644 (file)
@@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!dsi->reg_base) {
+       if (IS_ERR(dsi->reg_base)) {
                dev_err(&pdev->dev, "failed to remap io region\n");
-               return -EADDRNOTAVAIL;
+               return PTR_ERR(dsi->reg_base);
        }
 
        dsi->phy = devm_phy_get(&pdev->dev, "dsim");
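
The exynos_dsi fix matters because devm_ioremap_resource() reports failure with an error-encoded pointer rather than NULL, so the probe has to test IS_ERR() and propagate PTR_ERR(). A userspace re-implementation of that encoding, for illustration only; the constants mimic the kernel's and the failing value is invented:

#include <stdio.h>

#define MAX_ERRNO	4095
#define DEMO_ENOMEM	12

static void *ERR_PTR(long error)	{ return (void *)error; }
static long PTR_ERR(const void *ptr)	{ return (long)ptr; }

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *reg_base = ERR_PTR(-DEMO_ENOMEM);	/* a failed ioremap would look like this */

	if (IS_ERR(reg_base))
		printf("probe returns %ld\n", PTR_ERR(reg_base));	/* -12 */
	return 0;
}
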
index 7afead9c3f30258b7869e69e0f2015d9c2921fca..852f2dadaebdbbe3a385b5fe28cd65a560f0c210 100644 (file)
@@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
 
        win_data->enabled = true;
 
-       DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
+       DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
 
        if (ctx->vblank_on)
                schedule_work(&ctx->work);
index ec82f6bff1225dc06e5b591838c59778f65cecb5..108e1ec2fa4b491b7d34d3a977e0c3b427a975ab 100644 (file)
@@ -1954,6 +1954,9 @@ struct drm_i915_cmd_table {
 #define IS_ULT(dev)            (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)                (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0x00F0) == 0x0020)
+/* ULX machines are also considered ULT. */
+#define IS_HSW_ULX(dev)                ((dev)->pdev->device == 0x0A0E || \
+                                (dev)->pdev->device == 0x0A1E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
index ab5e93c30aa2bde43b1ed892f0437b7b5d0c8289..154b0f8bb88de02addd24d21e51494728897a622 100644 (file)
@@ -34,25 +34,35 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
 
 bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 {
-       if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+       if (i915.enable_ppgtt == 0)
                return false;
 
        if (i915.enable_ppgtt == 1 && full)
                return false;
 
+       return true;
+}
+
+static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+{
+       if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+               return 0;
+
+       if (enable_ppgtt == 1)
+               return 1;
+
+       if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+               return 2;
+
 #ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
                DRM_INFO("Disabling PPGTT because VT-d is on\n");
-               return false;
+               return 0;
        }
 #endif
 
-       /* Full ppgtt disabled by default for now due to issues. */
-       if (full)
-               return false; /* HAS_PPGTT(dev) */
-       else
-               return HAS_ALIASING_PPGTT(dev);
+       return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 }
 
 #define GEN6_PPGTT_PD_ENTRIES 512
@@ -2031,6 +2041,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
                 gtt->base.total >> 20);
        DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+       /*
+        * i915.enable_ppgtt is read-only, so do an early pass to validate the
+        * user's requested state against the hardware/driver capabilities.  We
+        * do this now so that we can print out any log messages once rather
+        * than every time we check intel_enable_ppgtt().
+        */
+       i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+       DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
        return 0;
 }
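
The PPGTT change validates the read-only i915.enable_ppgtt parameter once at GTT init through sanitize_enable_ppgtt(), so intel_enable_ppgtt() itself no longer consults hardware capabilities or prints messages. A standalone C sketch of that sanitize-once pattern with invented capability flags (the kernel version also covers the SNB/VT-d case):

#include <stdio.h>

/* Clamp the requested PPGTT mode to what the (pretend) hardware supports. */
static int sanitize_enable_ppgtt(int requested, int has_aliasing, int has_full)
{
	if (requested == 0 || !has_aliasing)
		return 0;
	if (requested == 1)
		return 1;
	if (requested == 2 && has_full)
		return 2;
	return has_aliasing ? 1 : 0;
}

int main(void)
{
	int enable_ppgtt = 2;	/* pretend module parameter: full PPGTT requested */

	/* Done once at init; later checks only read the cached value. */
	enable_ppgtt = sanitize_enable_ppgtt(enable_ppgtt, 1, 0);
	printf("ppgtt mode: %d\n", enable_ppgtt);	/* 1: fall back to aliasing */
	return 0;
}
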
index 7753249b3a959cce7f31b8c9cf1ba0b36c18dce8..f98ba4e6e70b940c150c504782fc943abee1544a 100644 (file)
@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
 
-               WARN_ONCE(hpd[i] & hotplug_trigger &&
-                         dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
-                         "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-                         hotplug_trigger, i, hpd[i]);
+               if (hpd[i] & hotplug_trigger &&
+                   dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+                       /*
+                        * On GMCH platforms the interrupt mask bits only
+                        * prevent irq generation, not the setting of the
+                        * hotplug bits itself. So only WARN about unexpected
+                        * interrupts on saner platforms.
+                        */
+                       WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+                                 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+                                 hotplug_trigger, i, hpd[i]);
+
+                       continue;
+               }
 
                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
index 9f5b18d9d8850e886eeb44ca68b0bd17af4c3f30..c77af69c2d8f5f8c97f5dc2045fd99a07960617b 100644 (file)
@@ -827,6 +827,7 @@ enum punit_power_well {
 # define MI_FLUSH_ENABLE                               (1 << 12)
 # define ASYNC_FLIP_PERF_DISABLE                       (1 << 14)
 # define MODE_IDLE                                     (1 << 9)
+# define STOP_RING                                     (1 << 8)
 
 #define GEN6_GT_MODE   0x20d0
 #define GEN7_GT_MODE   0x7008
index fa486c5fbb0250650b558632c6f63aa0ebf5bd63..aff4a113cda3c0cd724d3b4d13ce0fb8bbc48313 100644 (file)
@@ -560,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
        dev_priv->vbt.edp_pps = *edp_pps;
 
-       dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
-               DP_LINK_BW_1_62;
+       switch (edp_link_params->rate) {
+       case EDP_RATE_1_62:
+               dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+               break;
+       case EDP_RATE_2_7:
+               dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+               break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+                             edp_link_params->rate);
+               break;
+       }
+
        switch (edp_link_params->lanes) {
-       case 0:
+       case EDP_LANE_1:
                dev_priv->vbt.edp_lanes = 1;
                break;
-       case 1:
+       case EDP_LANE_2:
                dev_priv->vbt.edp_lanes = 2;
                break;
-       case 3:
-       default:
+       case EDP_LANE_4:
                dev_priv->vbt.edp_lanes = 4;
                break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+                             edp_link_params->lanes);
+               break;
        }
+
        switch (edp_link_params->preemphasis) {
-       case 0:
+       case EDP_PREEMPHASIS_NONE:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
                break;
-       case 1:
+       case EDP_PREEMPHASIS_3_5dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
                break;
-       case 2:
+       case EDP_PREEMPHASIS_6dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
                break;
-       case 3:
+       case EDP_PREEMPHASIS_9_5dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
                break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+                             edp_link_params->preemphasis);
+               break;
        }
+
        switch (edp_link_params->vswing) {
-       case 0:
+       case EDP_VSWING_0_4V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
                break;
-       case 1:
+       case EDP_VSWING_0_6V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
                break;
-       case 2:
+       case EDP_VSWING_0_8V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
                break;
-       case 3:
+       case EDP_VSWING_1_2V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
                break;
+       default:
+               DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+                             edp_link_params->vswing);
+               break;
        }
 }
 
index dae976f51d83357a51637fca61bd830bb9753010..48aa516a1ac0c354cd5e7ad3ab95897e4210a337 100644 (file)
@@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_I(pipe_src_w);
        PIPE_CONF_CHECK_I(pipe_src_h);
 
-       PIPE_CONF_CHECK_I(gmch_pfit.control);
-       /* pfit ratios are autocomputed by the hw on gen4+ */
-       if (INTEL_INFO(dev)->gen < 4)
-               PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-       PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+       /*
+        * FIXME: BIOS likes to set up a cloned config with lvds+external
+        * screen. Since we don't yet re-compute the pipe config when moving
+        * just the lvds port away to another pipe the sw tracking won't match.
+        *
+        * Proper atomic modesets with recomputed global state will fix this.
+        * Until then just don't check gmch state for inherited modes.
+        */
+       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+               PIPE_CONF_CHECK_I(gmch_pfit.control);
+               /* pfit ratios are autocomputed by the hw on gen4+ */
+               if (INTEL_INFO(dev)->gen < 4)
+                       PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+               PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+       }
+
        PIPE_CONF_CHECK_I(pch_pfit.enabled);
        if (current_config->pch_pfit.enabled) {
                PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -11384,15 +11395,6 @@ void intel_modeset_init(struct drm_device *dev)
        }
 }
 
-static void
-intel_connector_break_all_links(struct intel_connector *connector)
-{
-       connector->base.dpms = DRM_MODE_DPMS_OFF;
-       connector->base.encoder = NULL;
-       connector->encoder->connectors_active = false;
-       connector->encoder->base.crtc = NULL;
-}
-
 static void intel_enable_pipe_a(struct drm_device *dev)
 {
        struct intel_connector *connector;
@@ -11474,8 +11476,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                        if (connector->encoder->base.crtc != &crtc->base)
                                continue;
 
-                       intel_connector_break_all_links(connector);
+                       connector->base.dpms = DRM_MODE_DPMS_OFF;
+                       connector->base.encoder = NULL;
                }
+               /* multiple connectors may have the same encoder:
+                *  handle them and break crtc link separately */
+               list_for_each_entry(connector, &dev->mode_config.connector_list,
+                                   base.head)
+                       if (connector->encoder->base.crtc == &crtc->base) {
+                               connector->encoder->base.crtc = NULL;
+                               connector->encoder->connectors_active = false;
+                       }
 
                WARN_ON(crtc->active);
                crtc->base.enabled = false;
@@ -11557,6 +11568,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                                      drm_get_encoder_name(&encoder->base));
                        encoder->disable(encoder);
                }
+               encoder->base.crtc = NULL;
+               encoder->connectors_active = false;
 
                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
@@ -11567,8 +11580,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                                    base.head) {
                        if (connector->encoder != encoder)
                                continue;
-
-                       intel_connector_break_all_links(connector);
+                       connector->base.dpms = DRM_MODE_DPMS_OFF;
+                       connector->base.encoder = NULL;
                }
        }
        /* Enabled encoders without active connectors will be fixed in
@@ -11616,6 +11629,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                            base.head) {
                memset(&crtc->config, 0, sizeof(crtc->config));
 
+               crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
                crtc->active = dev_priv->display.get_pipe_config(crtc,
                                                                 &crtc->config);
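
The readout hunk above tags every pipe configuration taken over from the firmware with PIPE_CONFIG_QUIRK_INHERITED_MODE (defined in the intel_drv.h hunk further down), and the earlier intel_pipe_config_compare() hunk skips the gmch panel-fitter checks whenever that quirk is set, since the BIOS-programmed state cannot be recomputed yet. A standalone sketch of that flag pattern, with hypothetical names in userspace C, not the driver code:

#include <stdbool.h>
#include <stdio.h>

#define QUIRK_INHERITED_MODE  (1 << 1)  /* stand-in for the new flag */

struct pipe_cfg {
        unsigned long quirks;
        int pfit_control;
};

/* Readout path: anything inherited from firmware gets the quirk bit. */
static void readout(struct pipe_cfg *cfg, int hw_pfit_control)
{
        cfg->quirks |= QUIRK_INHERITED_MODE;
        cfg->pfit_control = hw_pfit_control;
}

/* Compare path: strict checks are skipped for inherited configurations. */
static bool configs_match(const struct pipe_cfg *cur, const struct pipe_cfg *req)
{
        if (!((cur->quirks | req->quirks) & QUIRK_INHERITED_MODE) &&
            cur->pfit_control != req->pfit_control)
                return false;
        return true;
}

int main(void)
{
        struct pipe_cfg cur = { 0, 0 }, req = { 0, 42 };

        readout(&cur, 7);                                  /* BIOS-programmed pfit */
        printf("match: %d\n", configs_match(&cur, &req));  /* 1: check skipped */
        return 0;
}
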
 
index d2a55884ad523b8a54546bfbbe0565889e73261e..2a00cb828d20c7549a7b00a6745c228998cc136b 100644 (file)
@@ -105,7 +105,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
        case DP_LINK_BW_2_7:
                break;
        case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
-               if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
+               if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
+                    INTEL_INFO(dev)->gen >= 8) &&
                    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
                        max_link_bw = DP_LINK_BW_5_4;
                else
@@ -120,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
        return max_link_bw;
 }
 
+static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       u8 source_max, sink_max;
+
+       source_max = 4;
+       if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
+           (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
+               source_max = 2;
+
+       sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+
+       return min(source_max, sink_max);
+}
+
 /*
  * The units on the numbers in the next two are... bizarre.  Examples will
  * make it clearer; this one parallels an example in the eDP spec.
@@ -170,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
        }
 
        max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
-       max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+       max_lanes = intel_dp_max_lane_count(intel_dp);
 
        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);
@@ -750,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        struct intel_crtc *intel_crtc = encoder->new_crtc;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
-       int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+       int min_lane_count = 1;
+       int max_lane_count = intel_dp_max_lane_count(intel_dp);
        /* Conveniently, the link BW constants become indices with a shift...*/
+       int min_clock = 0;
        int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
        int bpp, mode_rate;
        static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
@@ -784,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
-       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
-           dev_priv->vbt.edp_bpp < bpp) {
-               DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
-                             dev_priv->vbt.edp_bpp);
-               bpp = dev_priv->vbt.edp_bpp;
+       if (is_edp(intel_dp)) {
+               if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+                       DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+                                     dev_priv->vbt.edp_bpp);
+                       bpp = dev_priv->vbt.edp_bpp;
+               }
+
+               if (IS_BROADWELL(dev)) {
+                       /* Yes, it's an ugly hack. */
+                       min_lane_count = max_lane_count;
+                       DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
+                                     min_lane_count);
+               } else if (dev_priv->vbt.edp_lanes) {
+                       min_lane_count = min(dev_priv->vbt.edp_lanes,
+                                            max_lane_count);
+                       DRM_DEBUG_KMS("using min %u lanes per VBT\n",
+                                     min_lane_count);
+               }
+
+               if (dev_priv->vbt.edp_rate) {
+                       min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
+                       DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
+                                     bws[min_clock]);
+               }
        }
 
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);
 
-               for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-                       for (clock = 0; clock <= max_clock; clock++) {
+               for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
+                       for (clock = min_clock; clock <= max_clock; clock++) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);
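
The reworked loop walks candidate link configurations from cheapest upwards: bpp steps down from the pipe bpp, the lane count doubles from min_lane_count to max_lane_count, and the link-rate index runs from min_clock to max_clock, taking the first combination whose available bandwidth covers the mode. The new lower bounds come from the VBT (or are forced to the maximum lane count on Broadwell, per the "ugly hack" comment). A standalone sketch of that search order, with made-up mode values and a simplified bandwidth model (8b/10b overhead only), not the driver code:

#include <stdio.h>

/* DP link rates in kHz, indexed like the driver's bws[] table. */
static const int rates_khz[] = { 162000, 270000, 540000 };

/* Bandwidth the mode needs, roughly pixel_clock * bpp / 8 (kB/s). */
static int link_required(int pixel_clock_khz, int bpp)
{
        return (pixel_clock_khz * bpp + 7) / 8;
}

/* Bandwidth a link offers after 8b/10b coding overhead (kB/s). */
static int link_available(int rate_khz, int lanes)
{
        return rate_khz * lanes * 8 / 10;
}

int main(void)
{
        int pixel_clock = 148500, pipe_bpp = 30;  /* invented example mode */
        int min_lanes = 1, max_lanes = 4;
        int min_clock = 0, max_clock = 2;
        int bpp, lanes, clock;

        for (bpp = pipe_bpp; bpp >= 6 * 3; bpp -= 2 * 3) {
                int need = link_required(pixel_clock, bpp);

                for (lanes = min_lanes; lanes <= max_lanes; lanes <<= 1)
                        for (clock = min_clock; clock <= max_clock; clock++)
                                if (link_available(rates_khz[clock], lanes) >= need) {
                                        printf("bpp=%d lanes=%d rate=%d kHz\n",
                                               bpp, lanes, rates_khz[clock]);
                                        return 0;
                                }
        }
        printf("no configuration fits\n");
        return 1;
}
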
@@ -3619,7 +3657,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 {
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        bool has_dpcd;
@@ -3629,6 +3668,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!is_edp(intel_dp))
                return true;
 
+       /* The VDD bit needs a power domain reference, so if the bit is already
+        * enabled when we boot, grab this reference. */
+       if (edp_have_panel_vdd(intel_dp)) {
+               enum intel_display_power_domain power_domain;
+               power_domain = intel_display_port_power_domain(intel_encoder);
+               intel_display_power_get(dev_priv, power_domain);
+       }
+
        /* Cache DPCD and EDID for edp. */
        intel_edp_panel_vdd_on(intel_dp);
        has_dpcd = intel_dp_get_dpcd(intel_dp);
index 0542de98226018a9427519d0eb6996714f3c0582..328b1a70264b4c12a07400de7284b2968813aa94 100644 (file)
@@ -236,7 +236,8 @@ struct intel_crtc_config {
         * tracked with quirk flags so that fastboot and state checker can act
         * accordingly.
         */
-#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS      (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE       (1<<1) /* mode inherited from firmware */
        unsigned long quirks;
 
        /* User requested mode, only valid as a starting point to
index b4d44e62f0c769746a538f70afdf9916c95f6bd8..f73ba5e6b7a8d685b4530e16c6735c095c2aeb5f 100644 (file)
@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        mutex_lock(&dev->struct_mutex);
 
+       if (intel_fb &&
+           (sizes->fb_width > intel_fb->base.width ||
+            sizes->fb_height > intel_fb->base.height)) {
+               DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+                             " releasing it\n",
+                             intel_fb->base.width, intel_fb->base.height,
+                             sizes->fb_width, sizes->fb_height);
+               drm_framebuffer_unreference(&intel_fb->base);
+               intel_fb = ifbdev->fb = NULL;
+       }
        if (!intel_fb || WARN_ON(!intel_fb->obj)) {
                DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
                ret = intelfb_alloc(helper, sizes);
@@ -377,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                                          height);
                }
 
+               /* No preferred mode marked by the EDID? Are there any modes? */
+               if (!modes[i] && !list_empty(&connector->modes)) {
+                       DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+                                     drm_get_connector_name(connector));
+                       modes[i] = list_first_entry(&connector->modes,
+                                                   struct drm_display_mode,
+                                                   head);
+               }
+
                /* last resort: use current mode */
                if (!modes[i]) {
                        /*
index b0413e190625b26c0552e8a737479f52b3dd5c9d..157267aa356165b7b7fcd94c3318a79f04f2bb8c 100644 (file)
@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
        }
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
        struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-       if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+       if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
                return 165000;
        else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
                return 300000;
@@ -837,7 +837,8 @@ static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
                      struct drm_display_mode *mode)
 {
-       if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+       if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+                                              true))
                return MODE_CLOCK_HIGH;
        if (mode->clock < 20000)
                return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        struct drm_device *dev = encoder->base.dev;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
        int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
-       int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+       int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
        int desired_bpp;
 
        if (intel_hdmi->color_range_auto) {
index 0eead16aeda7404053de8c9885992b0904c1049c..cb8cfb7e09749938383c18a7eb6e2bc149823095 100644 (file)
@@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 freq;
        unsigned long flags;
+       u64 n;
 
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
@@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 
        /* scale to hardware max, but be careful to not overflow */
        freq = panel->backlight.max;
-       if (freq < max)
-               level = level * freq / max;
-       else
-               level = freq / max * level;
+       n = (u64)level * freq;
+       do_div(n, max);
+       level = n;
 
        panel->backlight.level = level;
        if (panel->backlight.device)
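
The replaced scaling picked the multiply/divide order based on whether freq or max was larger, overflowing 32 bits in one case and throwing away precision in the other; the new code multiplies into a u64 and divides once with do_div(). A standalone illustration with invented values (plain 64-bit division instead of do_div()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t level = 50000, max = 65535;    /* user-visible scale */
        uint32_t freq  = 200000;                /* hardware max, invented */

        /* Multiply first in 32 bits: 50000 * 200000 wraps around. */
        uint32_t wrapped = level * freq / max;

        /* Divide first to dodge the overflow: truncation loses precision. */
        uint32_t lossy = freq / max * level;

        /* 64-bit intermediate, the approach the patch takes. */
        uint32_t exact = (uint32_t)((uint64_t)level * freq / max);

        printf("wrapped=%u lossy=%u 64-bit=%u\n", wrapped, lossy, exact);
        return 0;
}
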
index 19e94c3edc1957d96b928fb23d696b393216fc42..d93dcf683e8c3695960ca93c92dce87a7823d3f2 100644 (file)
@@ -2095,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev,
        }
 }
 
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+                                   uint16_t wm[5], uint16_t min)
+{
+       int level, max_level = ilk_wm_max_level(dev_priv->dev);
+
+       if (wm[0] >= min)
+               return false;
+
+       wm[0] = max(wm[0], min);
+       for (level = 1; level <= max_level; level++)
+               wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+
+       return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       bool changed;
+
+       /*
+        * The BIOS provided WM memory latency values are often
+        * inadequate for high resolution displays. Adjust them.
+        */
+       changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
+               ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
+               ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+
+       if (!changed)
+               return;
+
+       DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+       intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+       intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+       intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2112,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
        intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
        intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
        intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+
+       if (IS_GEN6(dev))
+               snb_wm_latency_quirk(dev);
 }
 
 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
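
ilk_increase_wm_latency() above only acts when the level-0 latency is below the requested minimum; it then raises level 0 to that minimum and makes sure every higher level is at least DIV_ROUND_UP(min, 5). Note the bitwise | in snb_wm_latency_quirk(), which keeps all three latency tables adjusted even after the first call reports a change. A standalone check of the arithmetic with invented latency values, not the driver code:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Simplified copy of the patch's helper: fixed five levels instead of
 * ilk_wm_max_level(). */
static int increase_wm_latency(uint16_t wm[5], uint16_t min)
{
        unsigned int level;

        if (wm[0] >= min)
                return 0;

        wm[0] = min;
        for (level = 1; level < 5; level++)
                if (wm[level] < DIV_ROUND_UP(min, 5))
                        wm[level] = DIV_ROUND_UP(min, 5);

        return 1;
}

int main(void)
{
        uint16_t wm[5] = { 7, 2, 20, 0, 1 };    /* invented BIOS values */
        unsigned int i;
        int changed = increase_wm_latency(wm, 12);

        printf("changed=%d latencies:", changed);
        for (i = 0; i < 5; i++)
                printf(" %u", wm[i]);
        printf("\n");                           /* changed=1: 12 3 20 3 3 */
        return 0;
}
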
index 6bc68bdcf433cf06a68d52d95b063f9a42795194..79fb4cc2137c19d2cf4ad5dc23e6c5883eb58e4d 100644 (file)
@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
        I915_WRITE(HWS_PGA, addr);
 }
 
-static int init_ring_common(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_ring_buffer *ring)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj = ring->obj;
-       int ret = 0;
-       u32 head;
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+       if (!IS_GEN2(ring->dev)) {
+               I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+               if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+                       DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+                       return false;
+               }
+       }
 
-       /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);
-       if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
-               DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
 
-       if (I915_NEED_GFX_HWS(dev))
-               intel_ring_setup_status_page(ring);
-       else
-               ring_setup_phys_status_page(ring);
+       if (!IS_GEN2(ring->dev)) {
+               (void)I915_READ_CTL(ring);
+               I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+       }
 
-       head = I915_READ_HEAD(ring) & HEAD_ADDR;
+       return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
 
-       /* G45 ring initialization fails to reset head to zero */
-       if (head != 0) {
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj = ring->obj;
+       int ret = 0;
+
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+       if (!stop_ring(ring)) {
+               /* G45 ring initialization often fails to reset head to zero */
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));
 
-               I915_WRITE_HEAD(ring, 0);
-
-               if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+               if (!stop_ring(ring)) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
+                       ret = -EIO;
+                       goto out;
                }
        }
 
+       if (I915_NEED_GFX_HWS(dev))
+               intel_ring_setup_status_page(ring);
+       else
+               ring_setup_phys_status_page(ring);
+
        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
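
The new stop_ring() helper works because MI_MODE is a masked register: the upper 16 bits of a write select which of the lower 16 bits actually change, so _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() can toggle STOP_RING without touching neighbouring bits, and init_ring_common() can retry the stop once before giving up with -EIO. A standalone sketch of that masked-write convention (the helper macros mirror the usual i915 definitions; the register simulation and the bit position are invented):

#include <stdint.h>
#include <stdio.h>

/* i915-style masked-register helpers: the high half is a write-enable mask. */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

#define STOP_RING (1 << 8)      /* illustrative bit position */

/* Simulate how the hardware applies a masked write to the low 16 bits. */
static uint32_t masked_write(uint32_t reg, uint32_t val)
{
        uint32_t mask = val >> 16;

        return (reg & ~mask) | (val & mask & 0xffff);
}

int main(void)
{
        uint32_t mi_mode = 0x00000041;  /* unrelated bits already set */

        mi_mode = masked_write(mi_mode, MASKED_BIT_ENABLE(STOP_RING));
        printf("after enable:  %#x\n", mi_mode);        /* 0x141 */

        mi_mode = masked_write(mi_mode, MASKED_BIT_DISABLE(STOP_RING));
        printf("after disable: %#x\n", mi_mode);        /* 0x41 */
        return 0;
}
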
index 270a6a9734387b6079e9348e17773214a293ad0a..2b91c4b4d34b2efd6029bb52f46ece14eb2e515f 100644 (file)
@@ -34,6 +34,7 @@ struct  intel_hw_status_page {
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
index d27155adf5db2b039dee51b9aa0de25e60c8967e..46be00d66df3da3e74597ba9c02f0ac745741751 100644 (file)
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
        if (ret < 0)
                goto err1;
 
-       ret = sysfs_create_link(&encoder->ddc.dev.kobj,
-                               &drm_connector->kdev->kobj,
+       ret = sysfs_create_link(&drm_connector->kdev->kobj,
+                               &encoder->ddc.dev.kobj,
                                encoder->ddc.dev.kobj.name);
        if (ret < 0)
                goto err2;
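
The fix above is purely an argument-order one: sysfs_create_link(kobj, target, name) creates a symlink named name inside kobj's directory that points at target, so the old call parented the link under the DDC i2c device instead of under the connector. For comparison, a tiny userspace sketch with an ordinary symlink (invented paths; note that symlink(2) happens to take its arguments the other way round, target first, link location second):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[256];
        ssize_t n;

        /* symlink(2): what the link points at, then where it is created. */
        if (symlink("/sys/class/drm", "drm-classes") != 0) {
                perror("symlink");
                return 1;
        }

        n = readlink("drm-classes", buf, sizeof(buf) - 1);
        if (n >= 0) {
                buf[n] = '\0';
                printf("drm-classes -> %s\n", buf);
        }
        unlink("drm-classes");
        return 0;
}
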
index f729dc71d5beb031599aca72f82c90ec946e2fe0..d0c75779d3f6f91e9cc98b0f1a853344783fdb48 100644 (file)
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 {
        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
+       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+                          _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_VLV */
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
 }
index 3e6c0f3ed592a6b746610b9b6dd1986e50e1ace7..ef9957dbac943bdda6a1e6fd9bca142ebb202cef 100644 (file)
@@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc)
                                        MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
                } else {
                        /* disable cursor: */
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
-                                       MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+                                       mdp4_kms->blank_cursor_iova);
                }
 
                /* and drop the iova ref + obj rev when done scanning out: */
@@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 
        if (old_bo) {
                /* drop our previous reference: */
-               msm_gem_put_iova(old_bo, mdp4_kms->id);
-               drm_gem_object_unreference_unlocked(old_bo);
+               drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
        }
 
-       crtc_flush(crtc);
        request_pending(crtc, PENDING_CURSOR);
 
        return 0;
index c740ccd1cc67fb4d1a08ef6a65efc671daec8595..8edd531cb62166ad1291be18ffc26ba033cbc71d 100644 (file)
@@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 
        VERB("status=%08x", status);
 
+       mdp_dispatch_irqs(mdp_kms, status);
+
        for (id = 0; id < priv->num_crtcs; id++)
                if (status & mdp4_crtc_vblank(priv->crtcs[id]))
                        drm_handle_vblank(dev, id);
 
-       mdp_dispatch_irqs(mdp_kms, status);
-
        return IRQ_HANDLED;
 }
 
index 272e707c948704e6ff36cc8df263723fdbbd2a71..0bb4faa17523e0862d7f32df0939976424e82d6a 100644 (file)
@@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp4_destroy(struct msm_kms *kms)
 {
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       if (mdp4_kms->blank_cursor_iova)
+               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+       if (mdp4_kms->blank_cursor_bo)
+               drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
        kfree(mdp4_kms);
 }
 
@@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                goto fail;
        }
 
+       mutex_lock(&dev->struct_mutex);
+       mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+               ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+               dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+               mdp4_kms->blank_cursor_bo = NULL;
+               goto fail;
+       }
+
+       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+                       &mdp4_kms->blank_cursor_iova);
+       if (ret) {
+               dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+               goto fail;
+       }
+
        return kms;
 
 fail:
index 66a4d31aec80e010e5f5914705f05f7f346dc68c..715520c54cdec48f93750da0843213878aaabf86 100644 (file)
@@ -44,6 +44,10 @@ struct mdp4_kms {
        struct clk *lut_clk;
 
        struct mdp_irq error_handler;
+
+       /* empty/blank cursor bo to use when cursor is "disabled" */
+       struct drm_gem_object *blank_cursor_bo;
+       uint32_t blank_cursor_iova;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
 
index 353d494a497f22c96e0f51d2049cd765ad595f07..f2b985bc2adf41f8330dd4bfb8dcafdb30b43c53 100644 (file)
@@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
 
        VERB("status=%08x", status);
 
+       mdp_dispatch_irqs(mdp_kms, status);
+
        for (id = 0; id < priv->num_crtcs; id++)
                if (status & mdp5_crtc_vblank(priv->crtcs[id]))
                        drm_handle_vblank(dev, id);
-
-       mdp_dispatch_irqs(mdp_kms, status);
 }
 
 irqreturn_t mdp5_irq(struct msm_kms *kms)
index 6c6d7d4c9b4e77848994222f9bd5bf2b26b6a043..a752ab83b8104124a232d3e6701846c661fabfa3 100644 (file)
@@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        dma_addr_t paddr;
        int ret, size;
 
-       /* only doing ARGB32 since this is what is needed to alpha-blend
-        * with video overlays:
-        */
        sizes->surface_bpp = 32;
-       sizes->surface_depth = 32;
+       sizes->surface_depth = 24;
 
        DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
                        sizes->surface_height, sizes->surface_bpp,
index 3da8264d3039017bd358ccaca48197356f932f68..bb8026daebc9426759d2bb31f2a6360dfed606a3 100644 (file)
@@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj)
 
                if (iommu_present(&platform_bus_type))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
-               else
+               else {
                        drm_mm_remove_node(msm_obj->vram_node);
+                       drm_free_large(msm_obj->pages);
+               }
 
                msm_obj->pages = NULL;
        }
index 7762665ad8fdb5b461680ee84b7b224b77a5d4d5..876de9ac3793fd30af193ac7e8410818bdb56e7a 100644 (file)
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
        }
 
        if (outp == 8)
-               return false;
+               return conf;
 
        data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
        if (data == 0x0000)
index 1dc37b1ddbfac0a2ecddcfdcd59a80a267e65520..b0d0fb2f4d083813a799d1c02bb3d953646f2889 100644 (file)
@@ -863,7 +863,7 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 {
        mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
        mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
-       mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+       mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW);
 
        mmio_list(0x40800c, 0x00000000,  8, 1);
        mmio_list(0x408010, 0x80000000,  0, 0);
@@ -877,6 +877,8 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
        mmio_list(0x418e24, 0x00000000,  8, 0);
        mmio_list(0x418e28, 0x80000030,  0, 0);
 
+       mmio_list(0x4064c8, 0x018002c0,  0, 0);
+
        mmio_list(0x418810, 0x80000000, 12, 2);
        mmio_list(0x419848, 0x10000000, 12, 2);
        mmio_list(0x419c2c, 0x10000000, 12, 2);
index fb0b6b2d1427f436f14666c3611d4d6807bd2d36..222e8ebb669dff496534331682e7a77c7e6bd9e0 100644 (file)
@@ -168,7 +168,8 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
         */
        i = 16;
        do {
-               if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55)
+               u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
+               if (data == 0xaa55)
                        break;
        } while (i--);
 
@@ -176,14 +177,15 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
                goto out;
 
        /* read entire bios image to system memory */
-       bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512;
+       bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
+       bios->size = bios->size * 512;
        if (!bios->size)
                goto out;
 
        bios->data = kmalloc(bios->size, GFP_KERNEL);
        if (bios->data) {
-               for (i = 0; i < bios->size; i+=4)
-                       nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i));
+               for (i = 0; i < bios->size; i += 4)
+                       ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
        }
 
        /* check the PCI record header */
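
The shadowing loop above retries until the PROM window returns the 0xaa55 option-ROM signature in the low 16 bits, takes the image size from the byte at offset 2 (in 512-byte units), then copies the image into system memory one 32-bit word at a time; the le32_to_cpu() additions matter on big-endian hosts where the raw register read would otherwise come back byte-swapped. A small userspace sketch of the same header parsing over a byte buffer (the real code reads through the PROM aperture instead):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Decode the start of a shadowed VBIOS image the way the patch does:
 * low 16 bits of the first little-endian dword must be 0xaa55, and the
 * byte at offset 2 gives the image size in 512-byte units. */
static size_t vbios_size(const uint8_t *img)
{
        uint32_t dw = img[0] | img[1] << 8 |
                      (uint32_t)img[2] << 16 | (uint32_t)img[3] << 24;

        if ((dw & 0xffff) != 0xaa55)
                return 0;
        return ((dw >> 16) & 0xff) * 512;
}

int main(void)
{
        uint8_t rom[4] = { 0x55, 0xaa, 0x7f, 0x00 };    /* invented header */

        printf("image size: %zu bytes\n", vbios_size(rom));     /* 65024 */
        return 0;
}
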
index 43fec17ea540b6c53af704931d4baf29d2391e4e..bbf117be572f4617cff0fd06907fb2a13977ae93 100644 (file)
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
                case 0x00: return 2;
                case 0x19: return 1;
                case 0x1c: return 0;
+               case 0x1e: return 2;
                default:
                        break;
                }
index 83face3f608f020f70d7d11dda363c5817102238..279206997e5cd7d091d44e26f84ce5efdfb06f1d 100644 (file)
@@ -389,9 +389,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
        acpi_status status;
        acpi_handle dhandle, rom_handle;
 
-       if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
-               return false;
-
        dhandle = ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
                return false;
index 3ff030dc1ee35d34925b2fd5464f1236ad10d925..da764a4ed9588273fe6cf02ed0d6c589aa2ebda0 100644 (file)
@@ -764,9 +764,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        }
 
        ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
-       mutex_unlock(&chan->cli->mutex);
        if (ret)
                goto fail_unreserve;
+       mutex_unlock(&chan->cli->mutex);
 
        /* Update the crtc struct and cleanup */
        crtc->primary->fb = fb;
index fb187c78978f8d5a139359381870aaea8ad1cc91..c31c12b4e66681614f0d294e4fd0a0cf19350c88 100644 (file)
@@ -1177,27 +1177,43 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 
                /* Set NUM_BANKS. */
                if (rdev->family >= CHIP_TAHITI) {
-                       unsigned tileb, index, num_banks, tile_split_bytes;
+                       unsigned index, num_banks;
 
-                       /* Calculate the macrotile mode index. */
-                       tile_split_bytes = 64 << tile_split;
-                       tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
-                       tileb = min(tile_split_bytes, tileb);
+                       if (rdev->family >= CHIP_BONAIRE) {
+                               unsigned tileb, tile_split_bytes;
 
-                       for (index = 0; tileb > 64; index++) {
-                               tileb >>= 1;
-                       }
+                               /* Calculate the macrotile mode index. */
+                               tile_split_bytes = 64 << tile_split;
+                               tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+                               tileb = min(tile_split_bytes, tileb);
 
-                       if (index >= 16) {
-                               DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
-                                         target_fb->bits_per_pixel, tile_split);
-                               return -EINVAL;
-                       }
+                               for (index = 0; tileb > 64; index++)
+                                       tileb >>= 1;
+
+                               if (index >= 16) {
+                                       DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+                                                 target_fb->bits_per_pixel, tile_split);
+                                       return -EINVAL;
+                               }
 
-                       if (rdev->family >= CHIP_BONAIRE)
                                num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
-                       else
+                       } else {
+                               switch (target_fb->bits_per_pixel) {
+                               case 8:
+                                       index = 10;
+                                       break;
+                               case 16:
+                                       index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
+                                       break;
+                               default:
+                               case 32:
+                                       index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
+                                       break;
+                               }
+
                                num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
+                       }
+
                        fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
                } else {
                        /* NI and older. */
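
The restructured block computes the macrotile mode index only on CIK (CHIP_BONAIRE and later): an 8x8 tile of the framebuffer holds 64 pixels, so tileb is 64 times the bytes per pixel, clamped to the tile split size (64 << tile_split), and the index is the number of halvings needed to get back down to 64 bytes; SI parts instead pick a fixed 2D scanout tile mode per bpp. A worked sketch of the CIK arithmetic with invented inputs, not the driver code:

#include <stdio.h>

/* Mirror the CIK macrotile-index arithmetic from the hunk above. */
static int macrotile_index(unsigned int bits_per_pixel, unsigned int tile_split)
{
        unsigned int tile_split_bytes = 64u << tile_split;
        unsigned int tileb = 8 * 8 * bits_per_pixel / 8;  /* bytes per 8x8 tile */
        int index;

        if (tileb > tile_split_bytes)
                tileb = tile_split_bytes;

        for (index = 0; tileb > 64; index++)
                tileb >>= 1;

        return index < 16 ? index : -1;   /* -1: bad bpp/tile_split combination */
}

int main(void)
{
        /* 32 bpp with a 256-byte tile split: 256 -> 128 -> 64, so index 2. */
        printf("index = %d\n", macrotile_index(32, 2));
        return 0;
}
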
@@ -1720,8 +1736,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                }
                /* otherwise, pick one of the plls */
                if ((rdev->family == CHIP_KAVERI) ||
-                   (rdev->family == CHIP_KABINI)) {
-                       /* KB/KV has PPLL1 and PPLL2 */
+                   (rdev->family == CHIP_KABINI) ||
+                   (rdev->family == CHIP_MULLINS)) {
+                       /* KB/KV/ML has PPLL1 and PPLL2 */
                        pll_in_use = radeon_get_pll_use_mask(crtc);
                        if (!(pll_in_use & (1 << ATOM_PPLL2)))
                                return ATOM_PPLL2;
@@ -1885,6 +1902,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
            (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
                is_tvcv = true;
 
+       if (!radeon_crtc->adjusted_clock)
+               return -EINVAL;
+
        atombios_crtc_set_pll(crtc, adjusted_mode);
 
        if (ASIC_IS_DCE4(rdev))
index 15936524f226ca46b9631cadf43022aa2cc81d31..54e4f52549af47f19edf39340754a4e33a4da49c 100644 (file)
@@ -209,6 +209,7 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
 {
        int ret;
 
+       radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
        radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
        radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer;
        ret = drm_dp_aux_register_i2c_bus(&radeon_connector->ddc_bus->aux);
@@ -365,11 +366,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
        if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
 
-       if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3))
+       if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
 
-       if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3))
+       if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
 }
@@ -418,21 +419,23 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
 
        if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
                /* DP bridge chips */
-               drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
-                                 DP_EDP_CONFIGURATION_CAP, &tmp);
-               if (tmp & 1)
-                       panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
-               else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
-                        (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
-                       panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
-               else
-                       panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+               if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+                                     DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+                       if (tmp & 1)
+                               panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+                       else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+                                (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
+                               panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+                       else
+                               panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+               }
        } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
                /* eDP */
-               drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
-                                 DP_EDP_CONFIGURATION_CAP, &tmp);
-               if (tmp & 1)
-                       panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+               if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+                                     DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+                       if (tmp & 1)
+                               panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+               }
        }
 
        return panel_mode;
@@ -808,11 +811,15 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
        else
                dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
 
-       drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp);
-       if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
-               dp_info.tp3_supported = true;
-       else
+       if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
+           == 1) {
+               if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
+                       dp_info.tp3_supported = true;
+               else
+                       dp_info.tp3_supported = false;
+       } else {
                dp_info.tp3_supported = false;
+       }
 
        memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
        dp_info.rdev = rdev;
index 199eb194716f83b64aa7c3850d30606f4ba3f053..d2fd989680857d5a08a7d65d586bb0b4a40be6e2 100644 (file)
@@ -63,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin");
 MODULE_FIRMWARE("radeon/KABINI_mec.bin");
 MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
 MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
+MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
+MODULE_FIRMWARE("radeon/MULLINS_me.bin");
+MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
+MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
+MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
+MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
 
 extern int r600_ih_ring_alloc(struct radeon_device *rdev);
 extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -1473,6 +1479,43 @@ static const u32 hawaii_mgcg_cgcg_init[] =
        0xd80c, 0xff000ff0, 0x00000100
 };
 
+static const u32 godavari_golden_registers[] =
+{
+       0x55e4, 0xff607fff, 0xfc000100,
+       0x6ed8, 0x00010101, 0x00010000,
+       0x9830, 0xffffffff, 0x00000000,
+       0x98302, 0xf00fffff, 0x00000400,
+       0x6130, 0xffffffff, 0x00010000,
+       0x5bb0, 0x000000f0, 0x00000070,
+       0x5bc0, 0xf0311fff, 0x80300000,
+       0x98f8, 0x73773777, 0x12010001,
+       0x98fc, 0xffffffff, 0x00000010,
+       0x8030, 0x00001f0f, 0x0000100a,
+       0x2f48, 0x73773777, 0x12010001,
+       0x2408, 0x000fffff, 0x000c007f,
+       0x8a14, 0xf000003f, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ff0fff,
+       0x30a04, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x06000000,
+       0x4d8, 0x00000fff, 0x00000100,
+       0xd014, 0x00010000, 0x00810001,
+       0xd814, 0x00010000, 0x00810001,
+       0x3e78, 0x00000001, 0x00000002,
+       0xc768, 0x00000008, 0x00000008,
+       0xc770, 0x00000f00, 0x00000800,
+       0xc774, 0x00000f00, 0x00000800,
+       0xc798, 0x00ffffff, 0x00ff7fbf,
+       0xc79c, 0x00ffffff, 0x00ff7faf,
+       0x8c00, 0x000000ff, 0x00000001,
+       0x214f8, 0x01ff01ff, 0x00000002,
+       0x21498, 0x007ff800, 0x00200000,
+       0x2015c, 0xffffffff, 0x00000f40,
+       0x88c4, 0x001f3ae3, 0x00000082,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x30934, 0xffffffff, 0x00000000
+};
+
+
 static void cik_init_golden_registers(struct radeon_device *rdev)
 {
        switch (rdev->family) {
@@ -1504,6 +1547,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
                                                 kalindi_golden_spm_registers,
                                                 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
                break;
+       case CHIP_MULLINS:
+               radeon_program_register_sequence(rdev,
+                                                kalindi_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+               radeon_program_register_sequence(rdev,
+                                                godavari_golden_registers,
+                                                (const u32)ARRAY_SIZE(godavari_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                kalindi_golden_common_registers,
+                                                (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+               radeon_program_register_sequence(rdev,
+                                                kalindi_golden_spm_registers,
+                                                (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+               break;
        case CHIP_KAVERI:
                radeon_program_register_sequence(rdev,
                                                 spectre_mgcg_cgcg_init,
@@ -1834,6 +1891,15 @@ static int cik_init_microcode(struct radeon_device *rdev)
                rlc_req_size = KB_RLC_UCODE_SIZE * 4;
                sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
                break;
+       case CHIP_MULLINS:
+               chip_name = "MULLINS";
+               pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+               me_req_size = CIK_ME_UCODE_SIZE * 4;
+               ce_req_size = CIK_CE_UCODE_SIZE * 4;
+               mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+               rlc_req_size = ML_RLC_UCODE_SIZE * 4;
+               sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+               break;
        default: BUG();
        }
 
@@ -3272,6 +3338,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
                gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_KABINI:
+       case CHIP_MULLINS:
        default:
                rdev->config.cik.max_shader_engines = 1;
                rdev->config.cik.max_tile_pipes = 2;
@@ -3702,6 +3769,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
@@ -5800,6 +5868,9 @@ static int cik_rlc_resume(struct radeon_device *rdev)
        case CHIP_KABINI:
                size = KB_RLC_UCODE_SIZE;
                break;
+       case CHIP_MULLINS:
+               size = ML_RLC_UCODE_SIZE;
+               break;
        }
 
        cik_rlc_stop(rdev);
@@ -6548,6 +6619,7 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
                buffer[count++] = cpu_to_le32(0x00000000);
                break;
        case CHIP_KABINI:
+       case CHIP_MULLINS:
                buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
                buffer[count++] = cpu_to_le32(0x00000000);
                break;
@@ -6693,6 +6765,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
                WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
                WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
+       /* pflip */
+       if (rdev->num_crtc >= 2) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       }
+       if (rdev->num_crtc >= 4) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+       }
+       if (rdev->num_crtc >= 6) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+       }
 
        /* dac hotplug */
        WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
@@ -7049,6 +7134,25 @@ int cik_irq_set(struct radeon_device *rdev)
                WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
        }
 
+       if (rdev->num_crtc >= 2) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+       }
+       if (rdev->num_crtc >= 4) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+       }
+       if (rdev->num_crtc >= 6) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+       }
+
        WREG32(DC_HPD1_INT_CONTROL, hpd1);
        WREG32(DC_HPD2_INT_CONTROL, hpd2);
        WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -7085,6 +7189,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
        rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
        rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
 
+       rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
+               EVERGREEN_CRTC0_REGISTER_OFFSET);
+       rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
+               EVERGREEN_CRTC1_REGISTER_OFFSET);
+       if (rdev->num_crtc >= 4) {
+               rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
+                       EVERGREEN_CRTC2_REGISTER_OFFSET);
+               rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
+                       EVERGREEN_CRTC3_REGISTER_OFFSET);
+       }
+       if (rdev->num_crtc >= 6) {
+               rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
+                       EVERGREEN_CRTC4_REGISTER_OFFSET);
+               rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
+                       EVERGREEN_CRTC5_REGISTER_OFFSET);
+       }
+
+       if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+               WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_CLEAR);
+       if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+               WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_CLEAR);
        if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
                WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
        if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
@@ -7095,6 +7222,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
 
        if (rdev->num_crtc >= 4) {
+               if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+                              GRPH_PFLIP_INT_CLEAR);
+               if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+                              GRPH_PFLIP_INT_CLEAR);
                if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
                        WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
                if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
@@ -7106,6 +7239,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
        }
 
        if (rdev->num_crtc >= 6) {
+               if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+                              GRPH_PFLIP_INT_CLEAR);
+               if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+                              GRPH_PFLIP_INT_CLEAR);
                if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
                        WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
                if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
@@ -7457,6 +7596,15 @@ restart_ih:
                                break;
                        }
                        break;
+               case 8: /* D1 page flip */
+               case 10: /* D2 page flip */
+               case 12: /* D3 page flip */
+               case 14: /* D4 page flip */
+               case 16: /* D5 page flip */
+               case 18: /* D6 page flip */
+                       DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+                       radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+                       break;
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
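
The new interrupt cases route display source IDs 8, 10, 12, 14, 16 and 18 (page flips on D1 through D6) to radeon_crtc_handle_flip(), recovering the CRTC index as (src_id - 8) >> 1; the same mapping is added to the evergreen handler further down. A quick standalone check of that mapping:

#include <stdio.h>

int main(void)
{
        int src_id;

        /* Page-flip interrupt source IDs D1..D6 as used in the hunk above. */
        for (src_id = 8; src_id <= 18; src_id += 2)
                printf("src_id %2d -> crtc %d (D%d)\n",
                       src_id, (src_id - 8) >> 1, ((src_id - 8) >> 1) + 1);
        return 0;
}
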
index 89b4afa5041c322a15afc67bfbba7f7cf2cef8d6..72e464c79a88a777c27d2ada64667bf9e8798aa2 100644 (file)
@@ -562,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
@@ -597,7 +598,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
        tmp = 0xCAFEDEAD;
        writel(tmp, ptr);
 
-       r = radeon_ring_lock(rdev, ring, 4);
+       r = radeon_ring_lock(rdev, ring, 5);
        if (r) {
                DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
                return r;
index 213873270d5f6b705974aa57867f0dff47b7901a..dd7926394a8fdaf6821fdd10151f98639c7c082f 100644 (file)
 #       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
 #define DISP_INTERRUPT_STATUS_CONTINUE6                 0x6780
 
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL                                0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
 #define        DAC_AUTODETECT_INT_CONTROL                      0x67c8
 
 #define DC_HPD1_INT_STATUS                              0x601c
index b406546440da7cda8d7da10f7acbf6e6c22186e0..0f7a51a3694f0fff5bde09c7a4bf57dd7bd51963 100644 (file)
@@ -4371,7 +4371,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
-       u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
        u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
        u32 dma_cntl, dma_cntl1 = 0;
        u32 thermal_int = 0;
@@ -4554,15 +4553,21 @@ int evergreen_irq_set(struct radeon_device *rdev)
                WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
        }
 
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+              GRPH_PFLIP_INT_MASK);
+       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+              GRPH_PFLIP_INT_MASK);
        if (rdev->num_crtc >= 4) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
        if (rdev->num_crtc >= 6) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
 
        WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -4951,6 +4956,15 @@ restart_ih:
                                break;
                        }
                        break;
+               case 8: /* D1 page flip */
+               case 10: /* D2 page flip */
+               case 12: /* D3 page flip */
+               case 14: /* D4 page flip */
+               case 16: /* D5 page flip */
+               case 18: /* D6 page flip */
+                       DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+                       radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+                       break;
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
index 287fe966d7de135161704b0e80ec38676ae1424b..478caefe0fef918011fadd78e1419ac92183ae54 100644 (file)
@@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
index 16ec9d56a234b107742a13acd5a9684f62aabf42..3f6e817d97ee80cb0013c85818a1ec0c4c110e79 100644 (file)
@@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev,
        return 0;
 }
 
+static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
+                                  struct sumo_vid_mapping_table *vid_mapping_table,
+                                  u32 vid_2bit)
+{
+       struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+               &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+       u32 i;
+
+       if (vddc_sclk_table && vddc_sclk_table->count) {
+               if (vid_2bit < vddc_sclk_table->count)
+                       return vddc_sclk_table->entries[vid_2bit].v;
+               else
+                       return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
+       } else {
+               for (i = 0; i < vid_mapping_table->num_entries; i++) {
+                       if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
+                               return vid_mapping_table->entries[i].vid_7bit;
+               }
+               return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
+       }
+}
+
+static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
+                                  struct sumo_vid_mapping_table *vid_mapping_table,
+                                  u32 vid_7bit)
+{
+       struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+               &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+       u32 i;
+
+       if (vddc_sclk_table && vddc_sclk_table->count) {
+               for (i = 0; i < vddc_sclk_table->count; i++) {
+                       if (vddc_sclk_table->entries[i].v == vid_7bit)
+                               return i;
+               }
+               return vddc_sclk_table->count - 1;
+       } else {
+               for (i = 0; i < vid_mapping_table->num_entries; i++) {
+                       if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+                               return vid_mapping_table->entries[i].vid_2bit;
+               }
+
+               return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+       }
+}
+
 static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
                                            u16 voltage)
 {
@@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
                                            u32 vid_2bit)
 {
        struct kv_power_info *pi = kv_get_pi(rdev);
-       u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
-                                                &pi->sys_info.vid_mapping_table,
-                                                vid_2bit);
+       u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
+                                              &pi->sys_info.vid_mapping_table,
+                                              vid_2bit);
 
        return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
 }
@@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
 
 static int kv_unforce_levels(struct radeon_device *rdev)
 {
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
        else
                return kv_set_enabled_levels(rdev);
@@ -1362,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
        struct radeon_uvd_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
        int ret;
+       u32 mask;
 
        if (!gate) {
-               if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
+               if (table->count)
                        pi->uvd_boot_level = table->count - 1;
                else
                        pi->uvd_boot_level = 0;
 
+               if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
+                       mask = 1 << pi->uvd_boot_level;
+               } else {
+                       mask = 0x1f;
+               }
+
                ret = kv_copy_bytes_to_smc(rdev,
                                           pi->dpm_table_start +
                                           offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
@@ -1377,11 +1430,9 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
                if (ret)
                        return ret;
 
-               if (!pi->caps_uvd_dpm ||
-                   pi->caps_stable_p_state)
-                       kv_send_msg_to_smc_with_parameter(rdev,
-                                                         PPSMC_MSG_UVDDPM_SetEnabledMask,
-                                                         (1 << pi->uvd_boot_level));
+               kv_send_msg_to_smc_with_parameter(rdev,
+                                                 PPSMC_MSG_UVDDPM_SetEnabledMask,
+                                                 mask);
        }
 
        return kv_enable_uvd_dpm(rdev, !gate);
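With this change the UVD boot level always tracks the last entry of the clock/voltage dependency table, and the enable mask sent to the SMC is computed separately: 0x1f leaves the first five UVD DPM levels (bits 0-4) selectable, while 1 << pi->uvd_boot_level (0x08 for boot level 3, for example) pins the SMC to that single level when UVD DPM is unsupported or a stable p-state is requested.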
@@ -1617,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
        if (pi->acp_power_gated == gate)
                return;
 
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return;
 
        pi->acp_power_gated = gate;
@@ -1786,7 +1837,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                }
        }
 
-       if (rdev->family == CHIP_KABINI) {
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
                if (pi->enable_dpm) {
                        kv_set_valid_clock_range(rdev, new_ps);
                        kv_update_dfs_bypass_settings(rdev, new_ps);
@@ -1812,6 +1863,8 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                                return ret;
                        }
                        kv_update_sclk_t(rdev);
+                       if (rdev->family == CHIP_MULLINS)
+                               kv_enable_nb_dpm(rdev);
                }
        } else {
                if (pi->enable_dpm) {
@@ -1862,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev)
 {
        struct kv_power_info *pi = kv_get_pi(rdev);
 
-       if (rdev->family == CHIP_KABINI) {
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
                kv_force_lowest_valid(rdev);
                kv_init_graphics_levels(rdev);
                kv_program_bootup_state(rdev);
@@ -1901,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
 static void kv_patch_voltage_values(struct radeon_device *rdev)
 {
        int i;
-       struct radeon_uvd_clock_voltage_dependency_table *table =
+       struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
                &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+       struct radeon_vce_clock_voltage_dependency_table *vce_table =
+               &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+       struct radeon_clock_voltage_dependency_table *samu_table =
+               &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+       struct radeon_clock_voltage_dependency_table *acp_table =
+               &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
 
-       if (table->count) {
-               for (i = 0; i < table->count; i++)
-                       table->entries[i].v =
+       if (uvd_table->count) {
+               for (i = 0; i < uvd_table->count; i++)
+                       uvd_table->entries[i].v =
                                kv_convert_8bit_index_to_voltage(rdev,
-                                                                table->entries[i].v);
+                                                                uvd_table->entries[i].v);
+       }
+
+       if (vce_table->count) {
+               for (i = 0; i < vce_table->count; i++)
+                       vce_table->entries[i].v =
+                               kv_convert_8bit_index_to_voltage(rdev,
+                                                                vce_table->entries[i].v);
+       }
+
+       if (samu_table->count) {
+               for (i = 0; i < samu_table->count; i++)
+                       samu_table->entries[i].v =
+                               kv_convert_8bit_index_to_voltage(rdev,
+                                                                samu_table->entries[i].v);
+       }
+
+       if (acp_table->count) {
+               for (i = 0; i < acp_table->count; i++)
+                       acp_table->entries[i].v =
+                               kv_convert_8bit_index_to_voltage(rdev,
+                                                                acp_table->entries[i].v);
        }
 
 }
@@ -1941,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
                        break;
        }
 
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
        else
                return kv_set_enabled_level(rdev, i);
@@ -1961,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
                        break;
        }
 
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
        else
                return kv_set_enabled_level(rdev, i);
@@ -2118,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
        else
                pi->battery_state = false;
 
-       if (rdev->family == CHIP_KABINI) {
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
                ps->dpm0_pg_nb_ps_lo = 0x1;
                ps->dpm0_pg_nb_ps_hi = 0x0;
                ps->dpmx_nb_ps_lo = 0x1;
@@ -2179,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
        if (pi->lowest_valid > pi->highest_valid)
                return -EINVAL;
 
-       if (rdev->family == CHIP_KABINI) {
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
                for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
                        pi->graphics_level[i].GnbSlow = 1;
                        pi->graphics_level[i].ForceNbPs1 = 0;
@@ -2253,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev)
                                break;
 
                        kv_set_divider_value(rdev, i, table->entries[i].clk);
-                       vid_2bit = sumo_convert_vid7_to_vid2(rdev,
-                                                            &pi->sys_info.vid_mapping_table,
-                                                            table->entries[i].v);
+                       vid_2bit = kv_convert_vid7_to_vid2(rdev,
+                                                          &pi->sys_info.vid_mapping_table,
+                                                          table->entries[i].v);
                        kv_set_vid(rdev, i, vid_2bit);
                        kv_set_at(rdev, i, pi->at[i]);
                        kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
@@ -2324,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev,
        struct kv_power_info *pi = kv_get_pi(rdev);
        u32 nbdpmconfig1;
 
-       if (rdev->family == CHIP_KABINI)
+       if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return;
 
        if (pi->sys_info.nb_dpm_enable) {
@@ -2631,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev)
 
         pi->sram_end = SMC_RAM_END;
 
-       if (rdev->family == CHIP_KABINI)
-               pi->high_voltage_t = 4001;
-
        pi->enable_nb_dpm = true;
 
        pi->caps_power_containment = true;
index 6e887d004ebad7041e2080af850cc6ac2a12367d..bbc189fd3ddc47f57993689cac1dd08f2b5a9c69 100644 (file)
@@ -2839,6 +2839,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
@@ -3505,7 +3506,6 @@ int r600_irq_set(struct radeon_device *rdev)
        u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
        u32 grbm_int_cntl = 0;
        u32 hdmi0, hdmi1;
-       u32 d1grph = 0, d2grph = 0;
        u32 dma_cntl;
        u32 thermal_int = 0;
 
@@ -3614,8 +3614,8 @@ int r600_irq_set(struct radeon_device *rdev)
        WREG32(CP_INT_CNTL, cp_int_cntl);
        WREG32(DMA_CNTL, dma_cntl);
        WREG32(DxMODE_INT_MASK, mode_int);
-       WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
-       WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+       WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+       WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
        if (ASIC_IS_DCE3(rdev)) {
                WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -3918,6 +3918,14 @@ restart_ih:
                                break;
                        }
                        break;
+               case 9: /* D1 pflip */
+                       DRM_DEBUG("IH: D1 flip\n");
+                       radeon_crtc_handle_flip(rdev, 0);
+                       break;
+               case 11: /* D2 pflip */
+                       DRM_DEBUG("IH: D2 flip\n");
+                       radeon_crtc_handle_flip(rdev, 1);
+                       break;
                case 19: /* HPD/DAC hotplug */
                        switch (src_data) {
                        case 0:
index 53fcb28f5578d76919fa182488b32be8a1c6278a..4969cef44a1911b706e933fb8252397ddd893785 100644 (file)
@@ -489,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
index cbf7e3269f84882d1352a44ea402fa5dc4cbba7c..9c61b74ef4415cbf1f7bce501b268662d80da8de 100644 (file)
@@ -158,16 +158,18 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
        u32 line_time_us, vblank_lines;
        u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               radeon_crtc = to_radeon_crtc(crtc);
-               if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-                       line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
-                               radeon_crtc->hw_mode.clock;
-                       vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
-                               radeon_crtc->hw_mode.crtc_vdisplay +
-                               (radeon_crtc->v_border * 2);
-                       vblank_time_us = vblank_lines * line_time_us;
-                       break;
+       if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       radeon_crtc = to_radeon_crtc(crtc);
+                       if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+                               line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
+                                       radeon_crtc->hw_mode.clock;
+                               vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
+                                       radeon_crtc->hw_mode.crtc_vdisplay +
+                                       (radeon_crtc->v_border * 2);
+                               vblank_time_us = vblank_lines * line_time_us;
+                               break;
+                       }
                }
        }
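As a rough sanity check of the arithmetic now sitting inside the guard (assuming a common 1920x1080@60 mode with a 148500 kHz pixel clock, a 2200-pixel horizontal total and a crtc_vblank_end of 1125): line_time_us = 2200 * 1000 / 148500 = 14 with integer division, vblank_lines = 1125 - 1080 = 45, so roughly 630 us of vblank is reported. When no CRTC is enabled, or mode config is not initialized yet, the function now simply keeps the 0xffffffff "displays off" default instead of walking an empty or uninitialized CRTC list.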
 
@@ -181,14 +183,15 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
        struct radeon_crtc *radeon_crtc;
        u32 vrefresh = 0;
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               radeon_crtc = to_radeon_crtc(crtc);
-               if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-                       vrefresh = radeon_crtc->hw_mode.vrefresh;
-                       break;
+       if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       radeon_crtc = to_radeon_crtc(crtc);
+                       if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+                               vrefresh = radeon_crtc->hw_mode.vrefresh;
+                               break;
+                       }
                }
        }
-
        return vrefresh;
 }
 
index b58e1afdda7697e9a2e094c3f07bacc8a1621699..8149e7cf430330095da34f98d27f842832c2c91c 100644 (file)
@@ -730,6 +730,12 @@ struct cik_irq_stat_regs {
        u32 disp_int_cont4;
        u32 disp_int_cont5;
        u32 disp_int_cont6;
+       u32 d1grph_int;
+       u32 d2grph_int;
+       u32 d3grph_int;
+       u32 d4grph_int;
+       u32 d5grph_int;
+       u32 d6grph_int;
 };
 
 union radeon_irq_stat_regs {
@@ -1636,6 +1642,7 @@ struct radeon_vce {
        unsigned                fb_version;
        atomic_t                handles[RADEON_MAX_VCE_HANDLES];
        struct drm_file         *filp[RADEON_MAX_VCE_HANDLES];
+       unsigned                img_size[RADEON_MAX_VCE_HANDLES];
        struct delayed_work     idle_work;
 };
 
@@ -1649,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence);
 void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
 void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
 int radeon_vce_cs_parse(struct radeon_cs_parser *p);
 bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
                               struct radeon_ring *ring,
@@ -2634,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
 #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
 #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+                            (rdev->family == CHIP_MULLINS))
 
 #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
                              (rdev->ddev->pdev->device == 0x6850) || \
index b8a24a75d4fff0e48e60b1f078f33e36c5209d94..be20e62dac83c5b6d96bcfc2b44a9140691b7bb8 100644 (file)
@@ -2516,6 +2516,7 @@ int radeon_asic_init(struct radeon_device *rdev)
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
                rdev->asic = &kv_asic;
                /* set num crtcs */
                if (rdev->family == CHIP_KAVERI) {
index dedea72f48c45b95022ec9bcb60cf85e8073e187..a9fb0d016d387683a1d4bb96f77c046b7cd804b7 100644 (file)
@@ -528,6 +528,13 @@ static bool radeon_atpx_detect(void)
                has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
        }
 
+       /* some newer PX laptops mark the dGPU as a non-VGA display device */
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+               vga_count++;
+
+               has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+       }
+
        if (has_atpx && vga_count == 2) {
                acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
                printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
index b3633d9a531703a1cd4189c061a0907e89ee1085..9ab30976287d4c27e0dc94e0cadfa993e6ec77cc 100644 (file)
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
                }
        }
 
+       if (!found) {
+               while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+                       dhandle = ACPI_HANDLE(&pdev->dev);
+                       if (!dhandle)
+                               continue;
+
+                       status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+                       if (!ACPI_FAILURE(status)) {
+                               found = true;
+                               break;
+                       }
+               }
+       }
+
        if (!found)
                return false;
 
index 511fe26198e4a3e5a779af5e7f82bf62be570da6..0e770bbf7e29d723457b5d58b83b7f3278f003d5 100644 (file)
@@ -99,6 +99,7 @@ static const char radeon_family_name[][16] = {
        "KAVERI",
        "KABINI",
        "HAWAII",
+       "MULLINS",
        "LAST",
 };
 
index 2f7cbb901fb18c04c30c48e7357c23d01edc2529..f00dbbf4d806511a86b034a73c2d68adac971b99 100644 (file)
@@ -284,6 +284,10 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
        u32 update_pending;
        int vpos, hpos;
 
+       /* can happen during initialization */
+       if (radeon_crtc == NULL)
+               return;
+
        spin_lock_irqsave(&rdev->ddev->event_lock, flags);
        work = radeon_crtc->unpin_work;
        if (work == NULL ||
@@ -826,19 +830,51 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
 
        /* make sure nominator is large enough */
         if (*nom < nom_min) {
-               tmp = (nom_min + *nom - 1) / *nom;
+               tmp = DIV_ROUND_UP(nom_min, *nom);
                *nom *= tmp;
                *den *= tmp;
        }
 
        /* make sure the denominator is large enough */
        if (*den < den_min) {
-               tmp = (den_min + *den - 1) / *den;
+               tmp = DIV_ROUND_UP(den_min, *den);
                *nom *= tmp;
                *den *= tmp;
        }
 }
 
+/**
+ * avivo_get_fb_ref_div - feedback and ref divider calculation
+ *
+ * @nom: numerator
+ * @den: denominator
+ * @post_div: post divider
+ * @fb_div_max: feedback divider maximum
+ * @ref_div_max: reference divider maximum
+ * @fb_div: resulting feedback divider
+ * @ref_div: resulting reference divider
+ *
+ * Calculate feedback and reference divider for a given post divider. Makes
+ * sure we stay within the limits.
+ */
+static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
+                                unsigned fb_div_max, unsigned ref_div_max,
+                                unsigned *fb_div, unsigned *ref_div)
+{
+       /* limit reference * post divider to a maximum */
+       ref_div_max = min(128 / post_div, ref_div_max);
+
+       /* get matching reference and feedback divider */
+       *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+       *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
+
+       /* limit fb divider to its maximum */
+        if (*fb_div > fb_div_max) {
+               *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+               *fb_div = fb_div_max;
+       }
+}
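avivo_get_fb_ref_div() above is called once per candidate post divider while searching for the best match and once more for the chosen value. A self-contained restatement of its clamping rules, using a local DIV_ROUND_CLOSEST and explicit bounds checks in place of the kernel's min()/max() helpers (get_fb_ref_div() here is only an illustration, not driver code), behaves as follows:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(a, b) (((a) + (b) / 2) / (b))

    /* Pick a reference divider close to den / post_div, derive the matching
     * feedback divider, then clamp so that fb_div never exceeds fb_div_max
     * and ref_div * post_div never exceeds 128. */
    static void get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
                               unsigned fb_div_max, unsigned ref_div_max,
                               unsigned *fb_div, unsigned *ref_div)
    {
            if (ref_div_max > 128 / post_div)
                    ref_div_max = 128 / post_div;

            *ref_div = DIV_ROUND_CLOSEST(den, post_div);
            if (*ref_div < 1)
                    *ref_div = 1;
            if (*ref_div > ref_div_max)
                    *ref_div = ref_div_max;

            *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
            if (*fb_div > fb_div_max) {
                    *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
                    *fb_div = fb_div_max;
            }
    }

    int main(void)
    {
            unsigned fb, ref;

            get_fb_ref_div(983, 100, 4, 1023, 1023, &fb, &ref);
            printf("fb_div=%u ref_div=%u\n", fb, ref);  /* fb_div=983 ref_div=25 */
            return 0;
    }

For the 983/100 ratio with a post divider of 4, the reference divider lands on 25 and the feedback divider on 983, so ref_freq * 983 / (25 * 4) reproduces the requested ratio exactly.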
+
 /**
  * radeon_compute_pll_avivo - compute PLL paramaters
  *
@@ -860,6 +896,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
                              u32 *ref_div_p,
                              u32 *post_div_p)
 {
+       unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
+               freq : freq / 10;
+
        unsigned fb_div_min, fb_div_max, fb_div;
        unsigned post_div_min, post_div_max, post_div;
        unsigned ref_div_min, ref_div_max, ref_div;
@@ -880,14 +919,18 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
                ref_div_min = pll->reference_div;
        else
                ref_div_min = pll->min_ref_div;
-       ref_div_max = pll->max_ref_div;
+
+       if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
+           pll->flags & RADEON_PLL_USE_REF_DIV)
+               ref_div_max = pll->reference_div;
+       else
+               ref_div_max = pll->max_ref_div;
 
        /* determine allowed post divider range */
        if (pll->flags & RADEON_PLL_USE_POST_DIV) {
                post_div_min = pll->post_div;
                post_div_max = pll->post_div;
        } else {
-               unsigned target_clock = freq / 10;
                unsigned vco_min, vco_max;
 
                if (pll->flags & RADEON_PLL_IS_LCD) {
@@ -898,6 +941,11 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
                        vco_max = pll->pll_out_max;
                }
 
+               if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+                       vco_min *= 10;
+                       vco_max *= 10;
+               }
+
                post_div_min = vco_min / target_clock;
                if ((target_clock * post_div_min) < vco_min)
                        ++post_div_min;
@@ -912,7 +960,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        }
 
        /* represent the searched ratio as fractional number */
-       nom = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ? freq : freq / 10;
+       nom = target_clock;
        den = pll->reference_freq;
 
        /* reduce the numbers to a simpler ratio */
@@ -926,7 +974,12 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        diff_best = ~0;
 
        for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
-               unsigned diff = abs(den - den / post_div * post_div);
+               unsigned diff;
+               avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
+                                    ref_div_max, &fb_div, &ref_div);
+               diff = abs(target_clock - (pll->reference_freq * fb_div) /
+                       (ref_div * post_div));
+
                if (diff < diff_best || (diff == diff_best &&
                    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
 
@@ -936,33 +989,24 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        }
        post_div = post_div_best;
 
-       /* limit reference * post divider to a maximum */
-       ref_div_max = min(210 / post_div, ref_div_max);
-
-       /* get matching reference and feedback divider */
-       ref_div = max(DIV_ROUND_CLOSEST(den, post_div), 1u);
-       fb_div = DIV_ROUND_CLOSEST(nom * ref_div * post_div, den);
-
-       /* we're almost done, but reference and feedback
-          divider might be to large now */
-
-       nom = fb_div;
-       den = ref_div;
-
-        if (fb_div > fb_div_max) {
-               ref_div = DIV_ROUND_CLOSEST(den * fb_div_max, nom);
-               fb_div = fb_div_max;
-       }
-
-       if (ref_div > ref_div_max) {
-               ref_div = ref_div_max;
-               fb_div = DIV_ROUND_CLOSEST(nom * ref_div_max, den);
-       }
+       /* get the feedback and reference divider for the optimal value */
+       avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
+                            &fb_div, &ref_div);
 
        /* reduce the numbers to a simpler ratio once more */
        /* this also makes sure that the reference divider is large enough */
        avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
 
+       /* avoid high jitter with small fractional dividers */
+       if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
+               fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
+               if (fb_div < fb_div_min) {
+                       unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
+                       fb_div *= tmp;
+                       ref_div *= tmp;
+               }
+       }
+
        /* and finally save the result */
        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
                *fb_div_p = fb_div / 10;
@@ -979,7 +1023,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        *post_div_p = post_div;
 
        DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
-                     freq, *dot_clock_p, *fb_div_p, *frac_fb_div_p,
+                     freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
                      ref_div, post_div);
 }
 
index 9da5da4ffd176907d2b3f9b077e883cd9ab89323..4b7b87f71a6371308a64e78c7289a4e28117ef5f 100644 (file)
@@ -97,6 +97,7 @@ enum radeon_family {
        CHIP_KAVERI,
        CHIP_KABINI,
        CHIP_HAWAII,
+       CHIP_MULLINS,
        CHIP_LAST,
 };
 
index fb3d13f693ddb18fd32e8a4ef1a486305c615eed..eaaedba0467595aaced591c6d70666c75ac97211 100644 (file)
@@ -107,11 +107,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
                flags |= RADEON_IS_PCI;
        }
 
-       if (radeon_runtime_pm == 1)
-               flags |= RADEON_IS_PX;
-       else if ((radeon_runtime_pm == -1) &&
-                radeon_has_atpx() &&
-                ((flags & RADEON_IS_IGP) == 0))
+       if ((radeon_runtime_pm != 0) &&
+           radeon_has_atpx() &&
+           ((flags & RADEON_IS_IGP) == 0))
                flags |= RADEON_IS_PX;
 
        /* radeon_device_init should report only fatal error
@@ -579,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return r;
                }
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
-               }
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
 
-               /* map the ib pool buffer read only into
-                * virtual address space */
-               bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                        rdev->ring_tmp_bo.bo);
-               r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-                                         RADEON_VM_PAGE_READABLE |
-                                         RADEON_VM_PAGE_SNOOPED);
+                       /* map the ib pool buffer read only into
+                        * virtual address space */
+                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+                                                rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                                                 RADEON_VM_PAGE_READABLE |
+                                                 RADEON_VM_PAGE_SNOOPED);
 
-               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-               if (r) {
-                       radeon_vm_fini(rdev, &fpriv->vm);
-                       kfree(fpriv);
-                       return r;
+                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       if (r) {
+                               radeon_vm_fini(rdev, &fpriv->vm);
+                               kfree(fpriv);
+                               return r;
+                       }
                }
-
                file_priv->driver_priv = fpriv;
        }
 
@@ -628,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                struct radeon_bo_va *bo_va;
                int r;
 
-               r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-               if (!r) {
-                       bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                 rdev->ring_tmp_bo.bo);
-                       if (bo_va)
-                               radeon_vm_bo_rmv(rdev, bo_va);
-                       radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+               if (rdev->accel_working) {
+                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+                       if (!r) {
+                               bo_va = radeon_vm_bo_find(&fpriv->vm,
+                                                         rdev->ring_tmp_bo.bo);
+                               if (bo_va)
+                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+                       }
                }
 
                radeon_vm_fini(rdev, &fpriv->vm);
index 19bec0dbfa38bf052db75cb52401adfcb29ac986..4faa4d6f9bb4f0616e0575d916069b47fa9389ed 100644 (file)
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
-                       if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+                       if ((lobj->alt_domain & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
-       if (bo->mem.mem_type == TTM_PL_VRAM) {
-               size = bo->mem.num_pages << PAGE_SHIFT;
-               offset = bo->mem.start << PAGE_SHIFT;
-               if ((offset + size) > rdev->mc.visible_vram_size) {
-                       /* hurrah the memory is not visible ! */
-                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-                       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-                       r = ttm_bo_validate(bo, &rbo->placement, false, false);
-                       if (unlikely(r != 0))
-                               return r;
-                       offset = bo->mem.start << PAGE_SHIFT;
-                       /* this should not happen */
-                       if ((offset + size) > rdev->mc.visible_vram_size)
-                               return -EINVAL;
-               }
+       if (bo->mem.mem_type != TTM_PL_VRAM)
+               return 0;
+
+       size = bo->mem.num_pages << PAGE_SHIFT;
+       offset = bo->mem.start << PAGE_SHIFT;
+       if ((offset + size) <= rdev->mc.visible_vram_size)
+               return 0;
+
+       /* hurrah the memory is not visible ! */
+       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       r = ttm_bo_validate(bo, &rbo->placement, false, false);
+       if (unlikely(r == -ENOMEM)) {
+               radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+               return ttm_bo_validate(bo, &rbo->placement, false, false);
+       } else if (unlikely(r != 0)) {
+               return r;
        }
+
+       offset = bo->mem.start << PAGE_SHIFT;
+       /* this should never happen */
+       if ((offset + size) > rdev->mc.visible_vram_size)
+               return -EINVAL;
+
        return 0;
 }
 
index ee738a524639e41e75c7af5279c583e4b3c7ee10..53d6e1bb48dc326bb3487fde5f02749df56d4e57 100644 (file)
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set profile when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set method when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               count = -EINVAL;
+               goto fail;
+       }
+
        /* we don't support the legacy modes with dpm */
        if (rdev->pm.pm_method == PM_METHOD_DPM) {
                count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
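The same guard recurs across the power-management sysfs and hwmon handlers in this file: on PX systems with the dGPU switched off (ddev->switch_power_state != DRM_SWITCH_POWER_ON), the setters now return -EINVAL and the getters report "off" rather than poking the powered-down card.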
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct radeon_device *rdev = ddev->dev_private;
 
+       /* Can't set dpm state when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("battery", buf, strlen("battery")) == 0)
                rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return snprintf(buf, PAGE_SIZE, "off\n");
+
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                        (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
        enum radeon_dpm_forced_level level;
        int ret = 0;
 
+       /* Can't force performance level when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        mutex_lock(&rdev->pm.mutex);
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
                                      char *buf)
 {
        struct radeon_device *rdev = dev_get_drvdata(dev);
+       struct drm_device *ddev = rdev->ddev;
        int temp;
 
+       /* Can't get temperature when the card is off */
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+               return -EINVAL;
+
        if (rdev->asic->pm.get_temperature)
                temp = radeon_get_temperature(rdev);
        else
@@ -603,7 +639,6 @@ static const struct attribute_group *hwmon_groups[] = {
 static int radeon_hwmon_init(struct radeon_device *rdev)
 {
        int err = 0;
-       struct device *hwmon_dev;
 
        switch (rdev->pm.int_thermal_type) {
        case THERMAL_TYPE_RV6XX:
@@ -616,11 +651,11 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
        case THERMAL_TYPE_KV:
                if (rdev->asic->pm.get_temperature == NULL)
                        return err;
-               hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
-                                                             "radeon", rdev,
-                                                             hwmon_groups);
-               if (IS_ERR(hwmon_dev)) {
-                       err = PTR_ERR(hwmon_dev);
+               rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
+                                                                          "radeon", rdev,
+                                                                          hwmon_groups);
+               if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+                       err = PTR_ERR(rdev->pm.int_hwmon_dev);
                        dev_err(rdev->dev,
                                "Unable to register hwmon device: %d\n", err);
                }
@@ -632,6 +667,12 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
        return err;
 }
 
+static void radeon_hwmon_fini(struct radeon_device *rdev)
+{
+       if (rdev->pm.int_hwmon_dev)
+               hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+}
+
 static void radeon_dpm_thermal_work_handler(struct work_struct *work)
 {
        struct radeon_device *rdev =
@@ -1257,6 +1298,7 @@ int radeon_pm_init(struct radeon_device *rdev)
        case CHIP_RV670:
        case CHIP_RS780:
        case CHIP_RS880:
+       case CHIP_RV770:
        case CHIP_BARTS:
        case CHIP_TURKS:
        case CHIP_CAICOS:
@@ -1273,7 +1315,6 @@ int radeon_pm_init(struct radeon_device *rdev)
                else
                        rdev->pm.pm_method = PM_METHOD_PROFILE;
                break;
-       case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
        case CHIP_RV740:
@@ -1295,6 +1336,7 @@ int radeon_pm_init(struct radeon_device *rdev)
        case CHIP_KABINI:
        case CHIP_KAVERI:
        case CHIP_HAWAII:
+       case CHIP_MULLINS:
                /* DPM requires the RLC, RV770+ dGPU requires SMC */
                if (!rdev->rlc_fw)
                        rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1353,6 +1395,8 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
                device_remove_file(rdev->dev, &dev_attr_power_method);
        }
 
+       radeon_hwmon_fini(rdev);
+
        if (rdev->pm.power_state)
                kfree(rdev->pm.power_state);
 }
@@ -1372,6 +1416,8 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
        }
        radeon_dpm_fini(rdev);
 
+       radeon_hwmon_fini(rdev);
+
        if (rdev->pm.power_state)
                kfree(rdev->pm.power_state);
 }
@@ -1397,12 +1443,14 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
 
        rdev->pm.active_crtcs = 0;
        rdev->pm.active_crtc_count = 0;
-       list_for_each_entry(crtc,
-               &ddev->mode_config.crtc_list, head) {
-               radeon_crtc = to_radeon_crtc(crtc);
-               if (radeon_crtc->enabled) {
-                       rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
-                       rdev->pm.active_crtc_count++;
+       if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc,
+                                   &ddev->mode_config.crtc_list, head) {
+                       radeon_crtc = to_radeon_crtc(crtc);
+                       if (radeon_crtc->enabled) {
+                               rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+                               rdev->pm.active_crtc_count++;
+                       }
                }
        }
 
@@ -1469,12 +1517,14 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
        /* update active crtc counts */
        rdev->pm.dpm.new_active_crtcs = 0;
        rdev->pm.dpm.new_active_crtc_count = 0;
-       list_for_each_entry(crtc,
-               &ddev->mode_config.crtc_list, head) {
-               radeon_crtc = to_radeon_crtc(crtc);
-               if (crtc->enabled) {
-                       rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
-                       rdev->pm.dpm.new_active_crtc_count++;
+       if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc,
+                                   &ddev->mode_config.crtc_list, head) {
+                       radeon_crtc = to_radeon_crtc(crtc);
+                       if (crtc->enabled) {
+                               rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+                               rdev->pm.dpm.new_active_crtc_count++;
+                       }
                }
        }
 
@@ -1600,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_device *ddev = rdev->ddev;
 
-       if (rdev->pm.dpm_enabled) {
+       if  ((rdev->flags & RADEON_IS_PX) &&
+            (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+               seq_printf(m, "PX asic powered off\n");
+       } else if (rdev->pm.dpm_enabled) {
                mutex_lock(&rdev->pm.mutex);
                if (rdev->asic->dpm.debugfs_print_current_performance_level)
                        radeon_dpm_debugfs_print_current_performance_level(rdev, m);
index 58d12938c0b80bf022bc8c5490a8cb660041fa5d..4e7c3269b183644ea87c4879d764eac7fb246e80 100644 (file)
@@ -52,6 +52,7 @@
 #define BONAIRE_RLC_UCODE_SIZE       2048
 #define KB_RLC_UCODE_SIZE            2560
 #define KV_RLC_UCODE_SIZE            2560
+#define ML_RLC_UCODE_SIZE            2560
 
 /* MC */
 #define BTC_MC_UCODE_SIZE            6024
index 5748bdaeacceb2f593bab46448ba5bf49fb940ae..1b65ae2433cd0e4063a546cb9806f6d917ba83a9 100644 (file)
@@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
        case CHIP_KABINI:
        case CHIP_KAVERI:
        case CHIP_HAWAII:
+       case CHIP_MULLINS:
                fw_name = FIRMWARE_BONAIRE;
                break;
 
@@ -465,6 +466,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
        cmd = radeon_get_ib_value(p, p->idx) >> 1;
 
        if (cmd < 0x4) {
+               if (end <= start) {
+                       DRM_ERROR("invalid reloc offset %X!\n", offset);
+                       return -EINVAL;
+               }
                if ((end - start) < buf_sizes[cmd]) {
                        DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
                                  (unsigned)(end - start), buf_sizes[cmd]);
index ced53dd03e7c1f12449850c1eaa2d2568a552750..3971d968af6c0d86d0ba6e6f08f94d26714e17ec 100644 (file)
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev)
        case CHIP_BONAIRE:
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
                fw_name = FIRMWARE_BONAIRE;
                break;
 
@@ -442,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
  * @p: parser context
  * @lo: address of lower dword
  * @hi: address of higher dword
+ * @size: size of checker for relocation buffer
  *
  * Patch relocation inside command stream with real buffer address
  */
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+                       unsigned size)
 {
        struct radeon_cs_chunk *relocs_chunk;
-       uint64_t offset;
+       struct radeon_cs_reloc *reloc;
+       uint64_t start, end, offset;
        unsigned idx;
 
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -461,14 +465,59 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
                return -EINVAL;
        }
 
-       offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+       reloc = p->relocs_ptr[(idx / 4)];
+       start = reloc->gpu_offset;
+       end = start + radeon_bo_size(reloc->robj);
+       start += offset;
 
-        p->ib.ptr[lo] = offset & 0xFFFFFFFF;
-        p->ib.ptr[hi] = offset >> 32;
+       p->ib.ptr[lo] = start & 0xFFFFFFFF;
+       p->ib.ptr[hi] = start >> 32;
+
+       if (end <= start) {
+               DRM_ERROR("invalid reloc offset %llX!\n", offset);
+               return -EINVAL;
+       }
+       if ((end - start) < size) {
+               DRM_ERROR("buffer too small (%d / %d)!\n",
+                       (unsigned)(end - start), size);
+               return -EINVAL;
+       }
 
        return 0;
 }
 
+/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and returns the found session index, or -EINVAL
+ * if we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+       unsigned i;
+
+       /* validate the handle */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+                       return i;
+       }
+
+       /* handle not found, try to alloc a new one */
+       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+                       p->rdev->vce.filp[i] = p->filp;
+                       p->rdev->vce.img_size[i] = 0;
+                       return i;
+               }
+       }
+
+       DRM_ERROR("No more free VCE handles!\n");
+       return -EINVAL;
+}
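radeon_vce_validate_handle() folds the handle bookkeeping that used to run after parsing into a lookup performed as soon as the session command is seen, so later commands can index the per-session img_size[] array. A stripped-down user-space analogue of the same lookup-then-claim pattern, using C11 atomics instead of the kernel's atomic_read()/atomic_cmpxchg() (validate_handle() below is illustrative only), might look like:

    #include <stdatomic.h>
    #include <stdint.h>

    #define MAX_HANDLES 16

    static _Atomic uint32_t handles[MAX_HANDLES];

    /* Return the slot already holding @handle, otherwise claim a free slot
     * (value 0) for it, or -1 if none is left. */
    static int validate_handle(uint32_t handle)
    {
            uint32_t expected;
            int i;

            for (i = 0; i < MAX_HANDLES; i++)
                    if (atomic_load(&handles[i]) == handle)
                            return i;

            for (i = 0; i < MAX_HANDLES; i++) {
                    expected = 0;
                    if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
                            return i;
            }
            return -1;
    }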
+
 /**
  * radeon_vce_cs_parse - parse and validate the command stream
  *
@@ -477,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
  */
 int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
-       uint32_t handle = 0;
-       bool destroy = false;
+       int session_idx = -1;
+       bool destroyed = false;
+       uint32_t tmp, handle = 0;
+       uint32_t *size = &tmp;
        int i, r;
 
        while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -490,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (destroyed) {
+                       DRM_ERROR("No other command allowed after destroy!\n");
+                       return -EINVAL;
+               }
+
                switch (cmd) {
                case 0x00000001: // session
                        handle = radeon_get_ib_value(p, p->idx + 2);
+                       session_idx = radeon_vce_validate_handle(p, handle);
+                       if (session_idx < 0)
+                               return session_idx;
+                       size = &p->rdev->vce.img_size[session_idx];
                        break;
 
                case 0x00000002: // task info
+                       break;
+
                case 0x01000001: // create
+                       *size = radeon_get_ib_value(p, p->idx + 8) *
+                               radeon_get_ib_value(p, p->idx + 10) *
+                               8 * 3 / 2;
+                       break;
+
                case 0x04000001: // config extension
                case 0x04000002: // pic control
                case 0x04000005: // rate control
@@ -505,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        break;
 
                case 0x03000001: // encode
-                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+                       r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+                                               *size);
                        if (r)
                                return r;
 
-                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+                       r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+                                               *size / 3);
                        if (r)
                                return r;
                        break;
 
                case 0x02000001: // destroy
-                       destroy = true;
+                       destroyed = true;
                        break;
 
                case 0x05000001: // context buffer
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               *size * 2);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000004: // video bitstream buffer
+                       tmp = radeon_get_ib_value(p, p->idx + 4);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               tmp);
+                       if (r)
+                               return r;
+                       break;
+
                case 0x05000005: // feedback buffer
-                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+                       r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+                                               4096);
                        if (r)
                                return r;
                        break;
@@ -531,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        return -EINVAL;
                }
 
+               if (session_idx == -1) {
+                       DRM_ERROR("no session command at start of IB\n");
+                       return -EINVAL;
+               }
+
                p->idx += len / 4;
        }
 
-       if (destroy) {
+       if (destroyed) {
                /* IB contains a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
-               return 0;
-        }
-
-       /* create or encode, validate the handle */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
-                       return 0;
        }
 
-       /* handle not found try to alloc a new one */
-       for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
-                       p->rdev->vce.filp[i] = p->filp;
-                       return 0;
-               }
-       }
-
-       DRM_ERROR("No more free VCE handles!\n");
-       return -EINVAL;
+       return 0;
 }
 
 /**
index 2aae6ce49d3286888ea22347b60d71cc5220eef4..d9ab99f47612743bb41361a8cca8114999e52fca 100644 (file)
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
        ndw = 64;
 
        /* assume the worst case */
-       ndw += vm->max_pde_used * 12;
+       ndw += vm->max_pde_used * 16;
 
        /* update too big for an IB */
        if (ndw > 0xfffff)
index aca8cbe8a335dce1d13487c60e13f3193630fec7..bbf2e076ee457816924a736c57d2192cabba88ac 100644 (file)
@@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
index ac708e006180d3467cfe0478c2484eeb64a2df68..22a63c98ba14c688ab259fa666fe3e5d111fb792 100644 (file)
@@ -5780,7 +5780,6 @@ int si_irq_set(struct radeon_device *rdev)
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
        u32 grbm_int_cntl = 0;
-       u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
        u32 dma_cntl, dma_cntl1;
        u32 thermal_int = 0;
 
@@ -5919,16 +5918,22 @@ int si_irq_set(struct radeon_device *rdev)
        }
 
        if (rdev->num_crtc >= 2) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
        if (rdev->num_crtc >= 4) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
        if (rdev->num_crtc >= 6) {
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
-               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+                      GRPH_PFLIP_INT_MASK);
        }
 
        if (!ASIC_IS_NODCE(rdev)) {
@@ -6292,6 +6297,15 @@ restart_ih:
                                break;
                        }
                        break;
+               case 8: /* D1 page flip */
+               case 10: /* D2 page flip */
+               case 12: /* D3 page flip */
+               case 14: /* D4 page flip */
+               case 16: /* D5 page flip */
+               case 18: /* D6 page flip */
+                       DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+                       radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+                       break;
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
index cf0fdad8c278ef6921bbab3677c2cd596bcf1575..de0ca070122f62ee0c8b7fcc6d8db0360adfb948 100644 (file)
@@ -213,6 +213,7 @@ int si_copy_dma(struct radeon_device *rdev,
        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
+               radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }
 
index 683532f849311d1ca19de3daccb168b368d4c0b5..7321283602ce0c1d8429193efe4221ba26a33761 100644 (file)
 #define                SPLL_CHG_STATUS                         (1 << 1)
 #define        SPLL_CNTL_MODE                                  0x618
 #define                SPLL_SW_DIR_CONTROL                     (1 << 0)
-#      define SPLL_REFCLK_SEL(x)                       ((x) << 8)
-#      define SPLL_REFCLK_SEL_MASK                     0xFF00
+#      define SPLL_REFCLK_SEL(x)                       ((x) << 26)
+#      define SPLL_REFCLK_SEL_MASK                     (3 << 26)
 
 #define        CG_SPLL_SPREAD_SPECTRUM                         0x620
 #define                SSEN                                    (1 << 0)
index 0a243f0e5d6889129fff88d3f5a3656450dd14cf..be42c8125203b22bd62d9fc4be8510672ba577b3 100644 (file)
@@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev)
        int r;
 
        /* raise clocks while booting up the VCPU */
-       radeon_set_uvd_clocks(rdev, 53300, 40000);
+       if (rdev->family < CHIP_RV740)
+               radeon_set_uvd_clocks(rdev, 10000, 10000);
+       else
+               radeon_set_uvd_clocks(rdev, 53300, 40000);
 
        r = uvd_v1_0_start(rdev);
        if (r)
@@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        struct radeon_fence *fence = NULL;
        int r;
 
-       r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+       if (rdev->family < CHIP_RV740)
+               r = radeon_set_uvd_clocks(rdev, 10000, 10000);
+       else
+               r = radeon_set_uvd_clocks(rdev, 53300, 40000);
        if (r) {
                DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
                return r;
index 36c717af6cf90830324a3538ac05dfd4306f3ccb..edb871d7d395cbb4af140120953b9e854d72aae7 100644 (file)
@@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
        struct drm_device *drm = crtc->dev;
        struct drm_plane *plane;
 
-       list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+       drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
                if (plane->crtc == crtc) {
                        tegra_plane_disable(plane);
                        plane->crtc = NULL;
index 931490b9cfed04b1365a87fb2d83c0526e114fdd..87df0b3674fda203c96baef3ff3030a87424a800 100644 (file)
@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;
+       SVGA3dCmdSurfaceDMASuffix *suffix;
+       uint32_t bo_size;
 
        cmd = container_of(header, struct vmw_dma_cmd, header);
+       suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+                                              header->size - sizeof(*suffix));
+
+       /* Make sure the device and the verifier stay in sync. */
+       if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+               DRM_ERROR("Invalid DMA suffix size.\n");
+               return -EINVAL;
+       }
+
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;
 
+       /* Make sure DMA doesn't cross BO boundaries. */
+       bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+       if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+               DRM_ERROR("Invalid DMA offset.\n");
+               return -EINVAL;
+       }
+
+       bo_size -= cmd->dma.guest.ptr.offset;
+       if (unlikely(suffix->maximumOffset > bo_size))
+               suffix->maximumOffset = bo_size;
+
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter, &cmd->dma.host.sid,
                                NULL);
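
The added checks first verify that the guest-supplied SVGA3dCmdSurfaceDMASuffix has the expected size, then reject an offset that lies past the end of the backing buffer object and clamp maximumOffset to whatever remains after that offset. A minimal user-space sketch of the same range clamp (function and variable names are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Clamp a guest-supplied [offset, max_offset] range to a buffer of buf_size bytes. */
static int clamp_dma_range(uint32_t buf_size, uint32_t offset, uint32_t *max_offset)
{
        if (offset > buf_size)
                return -1;                        /* start is outside the buffer: reject */
        if (*max_offset > buf_size - offset)
                *max_offset = buf_size - offset;  /* shrink the end so it stays inside */
        return 0;
}

int main(void)
{
        uint32_t max = 10000;

        if (clamp_dma_range(4096, 1024, &max) == 0)
                printf("clamped max_offset=%u\n", max);   /* prints 3072 */
        return 0;
}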
index 10a2c08664596a3c43d1687cab911ad3fc3ab970..da52279de939652c9f1cffc1f60cf4308ce795ea 100644 (file)
@@ -1253,7 +1253,8 @@ EXPORT_SYMBOL_GPL(hid_output_report);
 
 static int hid_report_len(struct hid_report *report)
 {
-       return ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+       /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+       return ((report->size - 1) >> 3) + 1 + (report->id > 0);
 }
 
 /*
@@ -1266,7 +1267,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
         * of implement() working on 8 byte chunks
         */
 
-       int len = hid_report_len(report);
+       int len = hid_report_len(report) + 7;
 
        return kmalloc(len, flags);
 }
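
With this change hid_report_len() is just the report payload rounded up to whole bytes plus one byte when a report ID is present, while the 7 bytes of slack that implement() needs for its 8-byte accesses are added only in hid_alloc_report_buf(). A small stand-alone sketch of the length arithmetic (the helper name is illustrative):

#include <stdio.h>

/* bits rounded up to bytes, plus one byte for a non-zero report ID (assumes size_bits >= 1) */
static unsigned int report_len_bytes(unsigned int size_bits, unsigned int report_id)
{
        return (size_bits - 1) / 8 + 1 + (report_id > 0);
}

int main(void)
{
        printf("%u\n", report_len_bytes(12, 1));   /* 2 data bytes + 1 ID byte = 3 */
        printf("%u\n", report_len_bytes(64, 0));   /* exactly 8 bytes, no ID byte  */
        return 0;
}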
index c8af7202c28da3027e73cfd0a9d8b95d6036dbe1..34bb2205d2ea21bed4593d2b50a3f9e9e3e5b52f 100644 (file)
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 
+#define USB_VENDOR_ID_ELITEGROUP       0x03fc
+#define USB_DEVICE_ID_ELITEGROUP_05D8  0x05d8
+
 #define USB_VENDOR_ID_ELO              0x04E7
 #define USB_DEVICE_ID_ELO_TS2515       0x0022
 #define USB_DEVICE_ID_ELO_TS2700       0x0020
 #define USB_DEVICE_ID_SYNAPTICS_LTS2   0x1d10
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+
+#define USB_VENDOR_ID_TEXAS_INSTRUMENTS        0x2047
+#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA    0x0855
 
 #define USB_VENDOR_ID_THINGM           0x27b8
 #define USB_DEVICE_ID_BLINK1           0x01ed
index 35278e43c7a48d1999283c21f0f60cceccbc3b84..51e25b9407f259dfa219012aa9f28b274f764f27 100644 (file)
@@ -1155,6 +1155,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
                        USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
 
+       /* Elitegroup panel */
+       { .driver_data = MT_CLS_SERIAL,
+               MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
+                       USB_DEVICE_ID_ELITEGROUP_05D8) },
+
        /* Flatfrog Panels */
        { .driver_data = MT_CLS_FLATFROG,
                MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
index af8244b1c1f428dab026544802ca951089a02fd1..be14b5690e942d5a1c47d9e2f45b706f804d2162 100644 (file)
@@ -708,6 +708,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
                        USB_DEVICE_ID_STM_HID_SENSOR),
                        .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS,
+                       USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA),
+                       .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
                     HID_ANY_ID) },
        { }
index dbd83878ff99ec029a1cda07b265ddcd27418710..8e4ddb369883257384a2d0fb315c4ab231eb486a 100644 (file)
@@ -119,6 +119,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
 
        { 0, 0 }
 };
index 6d02e3b063756f6225078df7e00f981478ef5f45..d76f0b70c6e09a0dd4bf52d77df7aa85651377c9 100644 (file)
@@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
                if (cpu_has_tjmax(c))
                        dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
        } else {
-               val = (eax >> 16) & 0x7f;
+               val = (eax >> 16) & 0xff;
                /*
                 * If the TjMax is not plausible, an assumption
                 * will be used
                 */
-               if (val >= 85) {
+               if (val) {
                        dev_dbg(dev, "TjMax is %d degrees C\n", val);
                        return val * 1000;
                }
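
The widened mask reads the full 8-bit field at bits 23:16 of the returned eax, and any non-zero reading is now trusted instead of requiring at least 85 degrees. A toy extraction of that field (the sample register value is invented):

#include <stdio.h>

int main(void)
{
        unsigned int eax = 0x00690000;             /* invented sample: field holds 0x69 = 105 */
        unsigned int tjmax = (eax >> 16) & 0xff;   /* 8-bit field at bits 23:16 */

        if (tjmax)                                 /* any non-zero value is accepted */
                printf("TjMax = %u degC = %u mdegC\n", tjmax, tjmax * 1000);
        return 0;
}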
index 90ec1173b8a125c629542e079cac66872ea60ec4..01723f04fe45bfa449b60d07a05cd905cc9ccc98 100644 (file)
@@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev,
        if (retval < 0)
                goto fail;
 
-       hyst = val - retval * 1000;
+       hyst = retval * 1000 - val;
        hyst = DIV_ROUND_CLOSEST(hyst, 1000);
        if (hyst < 0 || hyst > 255) {
                retval = -ERANGE;
@@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client,
        }
 
        id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
-       if (id != 0x01)
+       if (id < 0x01 || id > 0x04)
                return -ENODEV;
 
        return 0;
@@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client,
        if (id->driver_data)
                data->groups[1] = &emc1404_group;
 
-       hwmon_dev = hwmon_device_register_with_groups(&client->dev,
-                                                     client->name, data,
-                                                     data->groups);
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name, data,
+                                                          data->groups);
        if (IS_ERR(hwmon_dev))
                return PTR_ERR(hwmon_dev);
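
In the corrected store_hyst() above, the value written to the hysteresis register is the distance below the limit: the limit (read back in degrees) is scaled to millidegrees, the requested value is subtracted, and the result is rounded to whole degrees. A worked example with assumed numbers:

#include <stdio.h>

int main(void)
{
        long limit_c   = 85;      /* assumed limit register reading, degrees C */
        long requested = 75000;   /* value written via sysfs, millidegrees C   */
        long hyst = limit_c * 1000 - requested;   /* 10000 mdegC below the limit */
        long reg  = (hyst + 500) / 1000;          /* DIV_ROUND_CLOSEST -> 10     */

        printf("hysteresis register value: %ld\n", reg);
        return 0;
}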
 
index c104cc32989df7af8ce972b6a5803dfdf5dbaf61..c9cddf5f056bbd6fe014dbd6dfc8dc25eac949d6 100644 (file)
@@ -1,4 +1,4 @@
-/*
+    /*
  * Driver for Linear Technology LTC2945 I2C Power Monitor
  *
  * Copyright (c) 2014 Guenter Roeck
@@ -314,8 +314,8 @@ static ssize_t ltc2945_reset_history(struct device *dev,
                reg = LTC2945_MAX_ADIN_H;
                break;
        default:
-               BUG();
-               break;
+               WARN_ONCE(1, "Bad register: 0x%x\n", reg);
+               return -EINVAL;
        }
        /* Reset maximum */
        ret = regmap_bulk_write(regmap, reg, buf_max, num_regs);
index d867e6bb2be1f7e1b1ebb41788c27f568bd7c579..8242b75d96c87e9b69a079f00e871e70f1867c7f 100644 (file)
 struct vexpress_hwmon_data {
        struct device *hwmon_dev;
        struct vexpress_config_func *func;
+       const char *name;
 };
 
 static ssize_t vexpress_hwmon_name_show(struct device *dev,
                struct device_attribute *dev_attr, char *buffer)
 {
-       const char *compatible = of_get_property(dev->of_node, "compatible",
-                       NULL);
+       struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
 
-       return sprintf(buffer, "%s\n", compatible);
+       return sprintf(buffer, "%s\n", data->name);
 }
 
 static ssize_t vexpress_hwmon_label_show(struct device *dev,
@@ -43,9 +43,6 @@ static ssize_t vexpress_hwmon_label_show(struct device *dev,
 {
        const char *label = of_get_property(dev->of_node, "label", NULL);
 
-       if (!label)
-               return -ENOENT;
-
        return snprintf(buffer, PAGE_SIZE, "%s\n", label);
 }
 
@@ -84,6 +81,20 @@ static ssize_t vexpress_hwmon_u64_show(struct device *dev,
                        to_sensor_dev_attr(dev_attr)->index));
 }
 
+static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj,
+               struct attribute *attr, int index)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct device_attribute *dev_attr = container_of(attr,
+                               struct device_attribute, attr);
+
+       if (dev_attr->show == vexpress_hwmon_label_show &&
+                       !of_get_property(dev->of_node, "label", NULL))
+               return 0;
+
+       return attr->mode;
+}
+
 static DEVICE_ATTR(name, S_IRUGO, vexpress_hwmon_name_show, NULL);
 
 #define VEXPRESS_HWMON_ATTRS(_name, _label_attr, _input_attr)  \
@@ -94,14 +105,27 @@ struct attribute *vexpress_hwmon_attrs_##_name[] = {               \
        NULL                                                    \
 }
 
+struct vexpress_hwmon_type {
+       const char *name;
+       const struct attribute_group **attr_groups;
+};
+
 #if !defined(CONFIG_REGULATOR_VEXPRESS)
 static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show,
                NULL, 1000);
 static VEXPRESS_HWMON_ATTRS(volt, in1_label, in1_input);
 static struct attribute_group vexpress_hwmon_group_volt = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_volt,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_volt = {
+       .name = "vexpress_volt",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_volt,
+               NULL,
+       },
+};
 #endif
 
 static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
@@ -109,52 +133,84 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show,
                NULL, 1000);
 static VEXPRESS_HWMON_ATTRS(amp, curr1_label, curr1_input);
 static struct attribute_group vexpress_hwmon_group_amp = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_amp,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_amp = {
+       .name = "vexpress_amp",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_amp,
+               NULL
+       },
+};
 
 static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show,
                NULL, 1000);
 static VEXPRESS_HWMON_ATTRS(temp, temp1_label, temp1_input);
 static struct attribute_group vexpress_hwmon_group_temp = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_temp,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_temp = {
+       .name = "vexpress_temp",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_temp,
+               NULL
+       },
+};
 
 static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show,
                NULL, 1);
 static VEXPRESS_HWMON_ATTRS(power, power1_label, power1_input);
 static struct attribute_group vexpress_hwmon_group_power = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_power,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_power = {
+       .name = "vexpress_power",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_power,
+               NULL
+       },
+};
 
 static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
 static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show,
                NULL, 1);
 static VEXPRESS_HWMON_ATTRS(energy, energy1_label, energy1_input);
 static struct attribute_group vexpress_hwmon_group_energy = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_energy,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_energy = {
+       .name = "vexpress_energy",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_energy,
+               NULL
+       },
+};
 
 static struct of_device_id vexpress_hwmon_of_match[] = {
 #if !defined(CONFIG_REGULATOR_VEXPRESS)
        {
                .compatible = "arm,vexpress-volt",
-               .data = &vexpress_hwmon_group_volt,
+               .data = &vexpress_hwmon_volt,
        },
 #endif
        {
                .compatible = "arm,vexpress-amp",
-               .data = &vexpress_hwmon_group_amp,
+               .data = &vexpress_hwmon_amp,
        }, {
                .compatible = "arm,vexpress-temp",
-               .data = &vexpress_hwmon_group_temp,
+               .data = &vexpress_hwmon_temp,
        }, {
                .compatible = "arm,vexpress-power",
-               .data = &vexpress_hwmon_group_power,
+               .data = &vexpress_hwmon_power,
        }, {
                .compatible = "arm,vexpress-energy",
-               .data = &vexpress_hwmon_group_energy,
+               .data = &vexpress_hwmon_energy,
        },
        {}
 };
@@ -165,6 +221,7 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
        int err;
        const struct of_device_id *match;
        struct vexpress_hwmon_data *data;
+       const struct vexpress_hwmon_type *type;
 
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
@@ -174,12 +231,14 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
        match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
        if (!match)
                return -ENODEV;
+       type = match->data;
+       data->name = type->name;
 
        data->func = vexpress_config_func_get_by_dev(&pdev->dev);
        if (!data->func)
                return -ENODEV;
 
-       err = sysfs_create_group(&pdev->dev.kobj, match->data);
+       err = sysfs_create_groups(&pdev->dev.kobj, type->attr_groups);
        if (err)
                goto error;
 
index 22e92c3d3d07448cea9aa37fcc4ad6c612570e13..3c20e4bd6dd1380238df20f06941828ec84aadd0 100644 (file)
@@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
         */
        dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
 
+       /* enforce disabled interrupts (due to HW issues) */
+       i2c_dw_disable_int(dev);
+
        /* Enable the adapter */
        __i2c_dw_enable(dev, true);
 
index 28cbe1b2a2ec2958547b843d2f86d672fedb5e0b..32c85e9ecdaeb141f0653c3a7e8c85784a1140ce 100644 (file)
@@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
 
        dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
                                resource_size(&adev->res));
-       if (IS_ERR(dev->virtbase)) {
+       if (!dev->virtbase) {
                ret = -ENOMEM;
                goto err_no_mem;
        }
index 1b4cf14f1106aac597b1ab0346ed848e2139ba22..2a5efb5b487cdc2e4fb8774e94167f6af53ca7d8 100644 (file)
@@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
        int ret, idx;
 
        ret = pm_runtime_get_sync(qup->dev);
-       if (ret)
+       if (ret < 0)
                goto out;
 
        writel(1, qup->base + QUP_SW_RESET);
index d4fa8eba6e9d2e40e11cb0ccba5ee64606dd8fff..06d47aafbb79c75a18a66e12964544a9a2b3496f 100644 (file)
@@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        ret = -EINVAL;
        for (i = 0; i < num; i++) {
+               /* This HW can't send STOP after address phase */
+               if (msgs[i].len == 0) {
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
                /*-------------- spin lock -----------------*/
                spin_lock_irqsave(&priv->lock, flags);
 
@@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
 static u32 rcar_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+       /* This HW can't do SMBUS_QUICK and NOSTART */
+       return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
 }
 
 static const struct i2c_algorithm rcar_i2c_algo = {
index ae4491062e411a83a5729a1e267e2430f23f069c..bb3a9964f7e00c2b2c604e32490b0c5f015c4bfe 100644 (file)
@@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
        struct platform_device *pdev = to_platform_device(dev);
        struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
 
-       i2c->suspended = 0;
        clk_prepare_enable(i2c->clk);
        s3c24xx_i2c_init(i2c);
        clk_disable_unprepare(i2c->clk);
+       i2c->suspended = 0;
 
        return 0;
 }
index a43220c2e3d943a3437e29df1352aca30593e4cf..4d140bbbe1006c172cecd2fd5313778e8eb5db17 100644 (file)
@@ -750,9 +750,10 @@ void intel_idle_state_table_update(void)
                        if (package_num + 1 > num_sockets) {
                                num_sockets = package_num + 1;
 
-                               if (num_sockets > 4)
+                               if (num_sockets > 4) {
                                        cpuidle_state_table = ivt_cstates_8s;
                                        return;
+                               }
                        }
                }
 
index d86196cfe4b47091add5d756da6d3dbf7fded9eb..24c28e3f93a3c960b7fd5c51e9f5348e003ababf 100644 (file)
@@ -106,7 +106,7 @@ config AT91_ADC
          Say yes here to build support for Atmel AT91 ADC.
 
 config EXYNOS_ADC
-       bool "Exynos ADC driver support"
+       tristate "Exynos ADC driver support"
        depends on OF
        help
          Core support for the ADC block found in the Samsung EXYNOS series
@@ -114,7 +114,7 @@ config EXYNOS_ADC
          this resource.
 
 config LP8788_ADC
-       bool "LP8788 ADC driver"
+       tristate "LP8788 ADC driver"
        depends on MFD_LP8788
        help
          Say yes here to build support for TI LP8788 ADC.
index 5b1aa027c034b09c1569047231716ea66f75f919..89777ed9abd858773b128c3a4d9fd6b34bdd9584 100644 (file)
@@ -765,14 +765,17 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st,
        if (!pdata)
                return -EINVAL;
 
+       st->caps = (struct at91_adc_caps *)
+                       platform_get_device_id(pdev)->driver_data;
+
        st->use_external = pdata->use_external_triggers;
        st->vref_mv = pdata->vref;
        st->channels_mask = pdata->channels_used;
-       st->num_channels = pdata->num_channels;
+       st->num_channels = st->caps->num_channels;
        st->startup_time = pdata->startup_time;
        st->trigger_number = pdata->trigger_number;
        st->trigger_list = pdata->trigger_list;
-       st->registers = pdata->registers;
+       st->registers = &st->caps->registers;
 
        return 0;
 }
@@ -1004,8 +1007,11 @@ static int at91_adc_probe(struct platform_device *pdev)
         * the best converted final value between two channels selection
         * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
         */
-       shtim = round_up((st->sample_hold_time * adc_clk_khz /
-                         1000) - 1, 1);
+       if (st->sample_hold_time > 0)
+               shtim = round_up((st->sample_hold_time * adc_clk_khz / 1000)
+                                - 1, 1);
+       else
+               shtim = 0;
 
        reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
        reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
@@ -1101,7 +1107,6 @@ static int at91_adc_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF
 static struct at91_adc_caps at91sam9260_caps = {
        .calc_startup_ticks = calc_startup_ticks_9260,
        .num_channels = 4,
@@ -1154,11 +1159,27 @@ static const struct of_device_id at91_adc_dt_ids[] = {
        {},
 };
 MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
-#endif
+
+static const struct platform_device_id at91_adc_ids[] = {
+       {
+               .name = "at91sam9260-adc",
+               .driver_data = (unsigned long)&at91sam9260_caps,
+       }, {
+               .name = "at91sam9g45-adc",
+               .driver_data = (unsigned long)&at91sam9g45_caps,
+       }, {
+               .name = "at91sam9x5-adc",
+               .driver_data = (unsigned long)&at91sam9x5_caps,
+       }, {
+               /* terminator */
+       }
+};
+MODULE_DEVICE_TABLE(platform, at91_adc_ids);
 
 static struct platform_driver at91_adc_driver = {
        .probe = at91_adc_probe,
        .remove = at91_adc_remove,
+       .id_table = at91_adc_ids,
        .driver = {
                   .name = DRIVER_NAME,
                   .of_match_table = of_match_ptr(at91_adc_dt_ids),
index d25b262193a7d4bccad40a42b79685cf4956e6fc..affa93f517893b1c6db2ece1c1e83afeea1828aa 100644 (file)
@@ -344,7 +344,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
 
        exynos_adc_hw_init(info);
 
-       ret = of_platform_populate(np, exynos_adc_match, NULL, &pdev->dev);
+       ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed adding child nodes\n");
                goto err_of_populate;
@@ -353,7 +353,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
        return 0;
 
 err_of_populate:
-       device_for_each_child(&pdev->dev, NULL,
+       device_for_each_child(&indio_dev->dev, NULL,
                                exynos_adc_remove_devices);
        regulator_disable(info->vdd);
        clk_disable_unprepare(info->clk);
@@ -369,7 +369,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
        struct iio_dev *indio_dev = platform_get_drvdata(pdev);
        struct exynos_adc *info = iio_priv(indio_dev);
 
-       device_for_each_child(&pdev->dev, NULL,
+       device_for_each_child(&indio_dev->dev, NULL,
                                exynos_adc_remove_devices);
        regulator_disable(info->vdd);
        clk_disable_unprepare(info->clk);
index cb9f96b446a55cd138f129db3443d44821b32781..d8ad606c7cd0c7e054d3764afe1b9edb069aaed7 100644 (file)
@@ -660,6 +660,7 @@ static int inv_mpu_probe(struct i2c_client *client,
 {
        struct inv_mpu6050_state *st;
        struct iio_dev *indio_dev;
+       struct inv_mpu6050_platform_data *pdata;
        int result;
 
        if (!i2c_check_functionality(client->adapter,
@@ -672,8 +673,10 @@ static int inv_mpu_probe(struct i2c_client *client,
 
        st = iio_priv(indio_dev);
        st->client = client;
-       st->plat_data = *(struct inv_mpu6050_platform_data
-                               *)dev_get_platdata(&client->dev);
+       pdata = (struct inv_mpu6050_platform_data
+                       *)dev_get_platdata(&client->dev);
+       if (pdata)
+               st->plat_data = *pdata;
        /* power is turned on inside check chip type*/
        result = inv_check_and_setup_chip(st, id);
        if (result)
index e108f2a9d827fca1e37932f18b5232ca4e9b65ea..e472cff6eeae38168331bf8b13a6583e9756842e 100644 (file)
@@ -165,7 +165,8 @@ static ssize_t iio_scan_el_show(struct device *dev,
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 
-       ret = test_bit(to_iio_dev_attr(attr)->address,
+       /* Ensure ret is 0 or 1. */
+       ret = !!test_bit(to_iio_dev_attr(attr)->address,
                       indio_dev->buffer->scan_mask);
 
        return sprintf(buf, "%d\n", ret);
@@ -862,7 +863,8 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
        if (!buffer->scan_mask)
                return 0;
 
-       return test_bit(bit, buffer->scan_mask);
+       /* Ensure return value is 0 or 1. */
+       return !!test_bit(bit, buffer->scan_mask);
 };
 EXPORT_SYMBOL_GPL(iio_scan_mask_query);
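
test_bit() only guarantees a zero/non-zero result, not 0 or 1, so both call sites now normalize the value with a double negation before printing or returning it. A stand-alone illustration of the idiom:

#include <stdio.h>

int main(void)
{
        unsigned long mask = 0x10;        /* bit 4 set */
        int raw = mask & (1UL << 4);      /* non-zero, but 16 rather than 1 */

        printf("raw=%d normalized=%d\n", raw, !!raw);   /* raw=16 normalized=1 */
        return 0;
}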
 
index 47a6dbac2d0ca8b23dcea31158b43d3c3df3d2f0..d976e6ce60dbb22a57ee293b7baed954f648d2d3 100644 (file)
@@ -221,6 +221,7 @@ static int cm32181_read_raw(struct iio_dev *indio_dev,
                *val = cm32181->calibscale;
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_INT_TIME:
+               *val = 0;
                ret = cm32181_read_als_it(cm32181, val2);
                return ret;
        }
index a45e07492db318a22171546291d2b590e0d0668f..39fc67e82138470a1f8523d7de2902f072603a6f 100644 (file)
@@ -652,7 +652,19 @@ static int cm36651_probe(struct i2c_client *client,
        cm36651->client = client;
        cm36651->ps_client = i2c_new_dummy(client->adapter,
                                                     CM36651_I2C_ADDR_PS);
+       if (!cm36651->ps_client) {
+               dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
+               ret = -ENODEV;
+               goto error_disable_reg;
+       }
+
        cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA);
+       if (!cm36651->ara_client) {
+               dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
+               ret = -ENODEV;
+               goto error_i2c_unregister_ps;
+       }
+
        mutex_init(&cm36651->lock);
        indio_dev->dev.parent = &client->dev;
        indio_dev->channels = cm36651_channels;
@@ -664,7 +676,7 @@ static int cm36651_probe(struct i2c_client *client,
        ret = cm36651_setup_reg(cm36651);
        if (ret) {
                dev_err(&client->dev, "%s: register setup failed\n", __func__);
-               goto error_disable_reg;
+               goto error_i2c_unregister_ara;
        }
 
        ret = request_threaded_irq(client->irq, NULL, cm36651_irq_handler,
@@ -672,7 +684,7 @@ static int cm36651_probe(struct i2c_client *client,
                                                        "cm36651", indio_dev);
        if (ret) {
                dev_err(&client->dev, "%s: request irq failed\n", __func__);
-               goto error_disable_reg;
+               goto error_i2c_unregister_ara;
        }
 
        ret = iio_device_register(indio_dev);
@@ -685,6 +697,10 @@ static int cm36651_probe(struct i2c_client *client,
 
 error_free_irq:
        free_irq(client->irq, indio_dev);
+error_i2c_unregister_ara:
+       i2c_unregister_device(cm36651->ara_client);
+error_i2c_unregister_ps:
+       i2c_unregister_device(cm36651->ps_client);
 error_disable_reg:
        regulator_disable(cm36651->vled_reg);
        return ret;
@@ -698,6 +714,8 @@ static int cm36651_remove(struct i2c_client *client)
        iio_device_unregister(indio_dev);
        regulator_disable(cm36651->vled_reg);
        free_irq(client->irq, indio_dev);
+       i2c_unregister_device(cm36651->ps_client);
+       i2c_unregister_device(cm36651->ara_client);
 
        return 0;
 }
index d4e8983fba537d71b8da25b5b0768f088678722d..23f38cf2c5cd030c2ba9e3aebd0d199c4925e930 100644 (file)
@@ -1,10 +1,10 @@
 config INFINIBAND_CXGB4
-       tristate "Chelsio T4 RDMA Driver"
+       tristate "Chelsio T4/T5 RDMA Driver"
        depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
        select GENERIC_ALLOCATOR
        ---help---
-         This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
-         10GbE adapters.
+         This is an iWARP/RDMA driver for the Chelsio T4 and T5
+         1GbE and 10GbE adapters, and for the T5 40GbE adapter.
 
          For general information about Chelsio and our products, visit
          our website at <http://www.chelsio.com>.
index 185452abf32cf336049e20802759a4352996392b..1f863a96a480fd1ab087989acea029a781c7c23e 100644 (file)
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
                opt2 |= SACK_EN(1);
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN(1);
+       if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+               opt2 |= T5_OPT_2_VALID;
+               opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+       }
        t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
        if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-       state_set(&ep->com, ABORTING);
+       __state_set(&ep->com, ABORTING);
        set_bit(ABORT_CONN, &ep->com.history);
        return send_abort(ep, skb, gfp);
 }
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
        return credits;
 }
 
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
        struct mpa_message *mpa;
        struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
        struct c4iw_qp_attributes attrs;
        enum c4iw_qp_attr_mask mask;
        int err;
+       int disconnect = 0;
 
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
@@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         * will abort the connection.
         */
        if (stop_ep_timer(ep))
-               return;
+               return 0;
 
        /*
         * If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         * if we don't even have the mpa message, then bail.
         */
        if (ep->mpa_pkt_len < sizeof(*mpa))
-               return;
+               return 0;
        mpa = (struct mpa_message *) ep->mpa_pkt;
 
        /* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         * We'll continue process when more data arrives.
         */
        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-               return;
+               return 0;
 
        if (mpa->flags & MPA_REJECT) {
                err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                attrs.layer_etype = LAYER_MPA | DDP_LLP;
                attrs.ecode = MPA_NOMATCH_RTR;
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
+               attrs.send_term = 1;
                err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                err = -ENOMEM;
+               disconnect = 1;
                goto out;
        }
 
@@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                attrs.layer_etype = LAYER_MPA | DDP_LLP;
                attrs.ecode = MPA_INSUFF_IRD;
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
+               attrs.send_term = 1;
                err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                err = -ENOMEM;
+               disconnect = 1;
                goto out;
        }
        goto out;
@@ -1366,7 +1375,7 @@ err:
        send_abort(ep, skb, GFP_KERNEL);
 out:
        connect_reply_upcall(ep, err);
-       return;
+       return disconnect;
 }
 
 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int tid = GET_TID(hdr);
        struct tid_info *t = dev->rdev.lldi.tids;
        __u8 status = hdr->status;
+       int disconnect = 0;
 
        ep = lookup_tid(t, tid);
        if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        switch (ep->com.state) {
        case MPA_REQ_SENT:
                ep->rcv_seq += dlen;
-               process_mpa_reply(ep, skb);
+               disconnect = process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
                ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
                               ep->com.state, ep->hwtid, status);
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+               disconnect = 1;
                break;
        }
        default:
                break;
        }
        mutex_unlock(&ep->com.mutex);
+       if (disconnect)
+               c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
        return 0;
 }
 
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                if (tcph->ece && tcph->cwr)
                        opt2 |= CCTRL_ECN(1);
        }
+       if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+               opt2 |= T5_OPT_2_VALID;
+               opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+       }
 
        rpl = cplhdr(skb);
        INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
                        __func__, ep, ep->hwtid, ep->com.state);
                abort = 0;
        }
-       mutex_unlock(&ep->com.mutex);
        if (abort)
                abort_connection(ep, NULL, GFP_KERNEL);
+       mutex_unlock(&ep->com.mutex);
        c4iw_put_ep(&ep->com);
 }
 
index 7b8c5806a09d84d912d274d4a5da814109e921b8..7474b490760a413f9f13d9e04ead79319a6fd55e 100644 (file)
@@ -435,6 +435,7 @@ struct c4iw_qp_attributes {
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
+       u8 send_term;
 };
 
 struct c4iw_qp {
index 7b5114cb486f64f118beb7f2415ad415d75f40ae..086f62f5dc9e2ba5978e81f02e8e392c8e201774 100644 (file)
@@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        qhp->attr.layer_etype = attrs->layer_etype;
                        qhp->attr.ecode = attrs->ecode;
                        ep = qhp->ep;
-                       disconnect = 1;
-                       c4iw_get_ep(&qhp->ep->com);
-                       if (!internal)
+                       if (!internal) {
+                               c4iw_get_ep(&qhp->ep->com);
                                terminate = 1;
-                       else {
+                               disconnect = 1;
+                       } else {
+                               terminate = qhp->attr.send_term;
                                ret = rdma_fini(rhp, qhp, ep);
                                if (ret)
                                        goto err;
@@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        /*
         * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
         * ringing the queue db when we're in DB_FULL mode.
+        * Only allow this on T4 devices.
         */
        attrs.sq_db_inc = attr->sq_psn;
        attrs.rq_db_inc = attr->rq_psn;
        mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
        mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+       if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+           (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+               return -EINVAL;
 
        return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
index dc193c292671ca49e889bc6bedb10d7c1a5fe8ce..6121ca08fe588bff67aab81fe7df06119287292b 100644 (file)
@@ -836,4 +836,18 @@ struct ulptx_idata {
 #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
 #define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
 
+enum {                     /* TCP congestion control algorithms */
+       CONG_ALG_RENO,
+       CONG_ALG_TAHOE,
+       CONG_ALG_NEWRENO,
+       CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL    14
+#define M_CONG_CNTRL    0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID       (1 << 31)
+
 #endif /* _T4FW_RI_API_H_ */
index 1b6dbe156a3708692a743cd58fc3351d4b01e533..199c7896f08188ca40fa9cd30462f0631d387338 100644 (file)
@@ -48,6 +48,7 @@
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
 }
 #endif
 
+#define MLX4_IB_INVALID_MAC    ((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+                              struct net_device *dev,
+                              int port)
+{
+       u64 new_smac = 0;
+       u64 release_mac = MLX4_IB_INVALID_MAC;
+       struct mlx4_ib_qp *qp;
+
+       read_lock(&dev_base_lock);
+       new_smac = mlx4_mac_to_u64(dev->dev_addr);
+       read_unlock(&dev_base_lock);
+
+       mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+       qp = ibdev->qp1_proxy[port - 1];
+       if (qp) {
+               int new_smac_index;
+               u64 old_smac = qp->pri.smac;
+               struct mlx4_update_qp_params update_params;
+
+               if (new_smac == old_smac)
+                       goto unlock;
+
+               new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+               if (new_smac_index < 0)
+                       goto unlock;
+
+               update_params.smac_index = new_smac_index;
+               if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+                                  &update_params)) {
+                       release_mac = new_smac;
+                       goto unlock;
+               }
+
+               qp->pri.smac = new_smac;
+               qp->pri.smac_index = new_smac_index;
+
+               release_mac = old_smac;
+       }
+
+unlock:
+       mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+       if (release_mac != MLX4_IB_INVALID_MAC)
+               mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
                                 struct mlx4_ib_dev *ibdev, u8 port)
 {
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
        return 0;
 }
 
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+                                struct net_device *dev,
+                                unsigned long event)
+
 {
        struct mlx4_ib_iboe *iboe;
+       int update_qps_port = -1;
        int port;
 
        iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
                }
                curr_master = iboe->masters[port - 1];
 
+               if (dev == iboe->netdevs[port - 1] &&
+                   (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+                    event == NETDEV_UP || event == NETDEV_CHANGE))
+                       update_qps_port = port;
+
                if (curr_netdev) {
                        port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
                                                IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
        }
 
        spin_unlock(&iboe->lock);
+
+       if (update_qps_port > 0)
+               mlx4_ib_update_qps(ibdev, dev, update_qps_port);
 }
 
 static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
-       mlx4_ib_scan_netdevs(ibdev);
+       mlx4_ib_scan_netdevs(ibdev, dev, event);
 
        return NOTIFY_DONE;
 }
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                goto err_map;
 
        for (i = 0; i < ibdev->num_ports; ++i) {
+               mutex_init(&ibdev->qp1_proxy_lock[i]);
                if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
                                                IB_LINK_LAYER_ETHERNET) {
                        err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                for (i = 1 ; i <= ibdev->num_ports ; ++i)
                        reset_gid_table(ibdev, i);
                rtnl_lock();
-               mlx4_ib_scan_netdevs(ibdev);
+               mlx4_ib_scan_netdevs(ibdev, NULL, 0);
                rtnl_unlock();
                mlx4_ib_init_gid_table(ibdev);
        }
index f589522fddfd9efa4e32fdd0a7e8f63e49a54927..66b0b7dbd9f41cac95cfb76b4e766249e1abd83b 100644 (file)
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
        int steer_qpn_count;
        int steer_qpn_base;
        int steering_support;
+       struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
+       /* lock when destroying qp1_proxy and getting netdev events */
+       struct mutex            qp1_proxy_lock[MLX4_MAX_PORTS];
 };
 
 struct ib_event_work {
index 41308af4163c3dc852adc23f983bdd86d279374e..dc57482ae7af2b2ed8935676a67b459847b3302c 100644 (file)
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
        if (is_qp0(dev, mqp))
                mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
+       if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+               mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+               dev->qp1_proxy[mqp->port - 1] = NULL;
+               mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+       }
+
        pd = get_pd(mqp);
        destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
 
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                                err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
                                if (err)
                                        return -EINVAL;
+                               if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+                                       dev->qp1_proxy[qp->port - 1] = qp;
                        }
                }
        }
index c4b3940845e60570fcbc5e5d0fa5ef807551d45e..078cadd6c797afeb0e22267fb76e5362a4b97326 100644 (file)
@@ -105,5 +105,5 @@ static const struct ethtool_ops ipoib_ethtool_ops = {
 
 void ipoib_set_ethtool_ops(struct net_device *dev)
 {
-       SET_ETHTOOL_OPS(dev, &ipoib_ethtool_ops);
+       dev->ethtool_ops = &ipoib_ethtool_ops;
 }
index c98fdb185931644bd3ed92d337f85aa0d2517149..a1710465faaf2345ae9c88755790e3e74a6e76e8 100644 (file)
@@ -28,6 +28,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
+#include <linux/semaphore.h>
 
 #include "isert_proto.h"
 #include "ib_isert.h"
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;
-       u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
+       u8 pi_support;
+
+       spin_lock_bh(&np->np_thread_lock);
+       if (!np->enabled) {
+               spin_unlock_bh(&np->np_thread_lock);
+               pr_debug("iscsi_np is not enabled, reject connect request\n");
+               return rdma_reject(cma_id, NULL, 0);
+       }
+       spin_unlock_bh(&np->np_thread_lock);
 
        pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                 cma_id, cma_id->context);
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                goto out_mr;
        }
 
+       pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
        if (pi_support && !device->pi_capable) {
                pr_err("Protection information requested but not supported\n");
                ret = -EINVAL;
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                goto out_conn_dev;
 
        mutex_lock(&isert_np->np_accept_mutex);
-       list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
+       list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);
 
-       pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
-       wake_up(&isert_np->np_accept_wq);
+       pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+       up(&isert_np->np_sem);
        return 0;
 
 out_conn_dev:
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np,
                pr_err("Unable to allocate struct isert_np\n");
                return -ENOMEM;
        }
-       init_waitqueue_head(&isert_np->np_accept_wq);
+       sema_init(&isert_np->np_sem, 0);
        mutex_init(&isert_np->np_accept_mutex);
        INIT_LIST_HEAD(&isert_np->np_accept_list);
        init_completion(&isert_np->np_login_comp);
@@ -3047,18 +3057,6 @@ out:
        return ret;
 }
 
-static int
-isert_check_accept_queue(struct isert_np *isert_np)
-{
-       int empty;
-
-       mutex_lock(&isert_np->np_accept_mutex);
-       empty = list_empty(&isert_np->np_accept_list);
-       mutex_unlock(&isert_np->np_accept_mutex);
-
-       return empty;
-}
-
 static int
 isert_rdma_accept(struct isert_conn *isert_conn)
 {
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
        int max_accept = 0, ret;
 
 accept_wait:
-       ret = wait_event_interruptible(isert_np->np_accept_wq,
-                       !isert_check_accept_queue(isert_np) ||
-                       np->np_thread_state == ISCSI_NP_THREAD_RESET);
+       ret = down_interruptible(&isert_np->np_sem);
        if (max_accept > 5)
                return -ENODEV;
 
        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
-               pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+               pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);
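
The accept path above replaces a waitqueue plus a list-empty predicate with a counting semaphore: each incoming connect request queues itself and calls up(), and the accept thread sleeps in down_interruptible(), waking once per queued request. A minimal POSIX user-space sketch of the same hand-off (this is not the kernel API):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t pending;                    /* counts queued connect requests */

static void *producer(void *arg)
{
        (void)arg;
        /* ...queue the request on a list under a mutex, then signal... */
        sem_post(&pending);              /* kernel side: up(&isert_np->np_sem) */
        return NULL;
}

int main(void)
{
        pthread_t t;

        sem_init(&pending, 0, 0);
        pthread_create(&t, NULL, producer, NULL);

        sem_wait(&pending);              /* kernel side: down_interruptible(&isert_np->np_sem) */
        printf("one queued connect request ready to accept\n");

        pthread_join(t, NULL);
        sem_destroy(&pending);
        return 0;
}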
index 4c072ae34c01a3021e57cb5378a3949b97d0876c..da6612e6800004b0984880d54ef57bcaab1b0be1 100644 (file)
@@ -182,7 +182,7 @@ struct isert_device {
 };
 
 struct isert_np {
-       wait_queue_head_t       np_accept_wq;
+       struct semaphore        np_sem;
        struct rdma_cm_id       *np_cm_id;
        struct mutex            np_accept_mutex;
        struct list_head        np_accept_list;
index 2626773ff29b956de97d5c62d383e36e13ee4868..2dd1d0dd4f7de03233752e57704ff60c17e6d992 100644 (file)
@@ -243,6 +243,12 @@ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
 static void *atkbd_platform_fixup_data;
 static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int);
 
+/*
+ * Certain keyboards do not like ATKBD_CMD_RESET_DIS and stop responding
+ * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed.
+ */
+static bool atkbd_skip_deactivate;
+
 static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
                                ssize_t (*handler)(struct atkbd *, char *));
 static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
@@ -768,7 +774,8 @@ static int atkbd_probe(struct atkbd *atkbd)
  * Make sure nothing is coming from the keyboard and disturbs our
  * internal state.
  */
-       atkbd_deactivate(atkbd);
+       if (!atkbd_skip_deactivate)
+               atkbd_deactivate(atkbd);
 
        return 0;
 }
@@ -1638,6 +1645,12 @@ static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
        return 1;
 }
 
+static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id)
+{
+       atkbd_skip_deactivate = true;
+       return 1;
+}
+
 static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
        {
                .matches = {
@@ -1775,6 +1788,20 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
                .callback = atkbd_setup_scancode_fixup,
                .driver_data = atkbd_oqo_01plus_scancode_fixup,
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
+               },
+               .callback = atkbd_deactivate_fixup,
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
+               },
+               .callback = atkbd_deactivate_fixup,
+       },
        { }
 };
 
index 55c15304ddbce997a1027e319b81e7b3fc8ba98b..4e491c1762cfe5e1ecd9bd562e8244237261aa07 100644 (file)
@@ -392,6 +392,13 @@ static const struct of_device_id tca8418_dt_ids[] = {
        { }
 };
 MODULE_DEVICE_TABLE(of, tca8418_dt_ids);
+
+/*
+ * The device tree based i2c loader looks for
+ * "i2c:" + second_component_of(property("compatible"))
+ * and therefore we need an alias to be found.
+ */
+MODULE_ALIAS("i2c:tca8418");
 #endif
 
 static struct i2c_driver tca8418_keypad_driver = {
index 52d3a9b28f0b80a253eb04584016b767c90c22cc..b36831c828d3fe7a19be14872a7eabb2b8a38ecd 100644 (file)
@@ -70,6 +70,7 @@
 #define BMA150_CFG_5_REG       0x11
 
 #define BMA150_CHIP_ID         2
+#define BMA180_CHIP_ID         3
 #define BMA150_CHIP_ID_REG     BMA150_DATA_0_REG
 
 #define BMA150_ACC_X_LSB_REG   BMA150_DATA_2_REG
@@ -539,7 +540,7 @@ static int bma150_probe(struct i2c_client *client,
        }
 
        chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
-       if (chip_id != BMA150_CHIP_ID) {
+       if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) {
                dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
                return -EINVAL;
        }
@@ -643,6 +644,7 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
 
 static const struct i2c_device_id bma150_id[] = {
        { "bma150", 0 },
+       { "bma180", 0 },
        { "smb380", 0 },
        { "bma023", 0 },
        { }
index 4b11ede34950e57587daa09f8c0af0ed63602ea4..4765799fef746b0e7900b4f8aaf327f8af717576 100644 (file)
@@ -109,7 +109,6 @@ static int da9055_onkey_probe(struct platform_device *pdev)
 
        INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);
 
-       irq = regmap_irq_get_virq(da9055->irq_data, irq);
        err = request_threaded_irq(irq, NULL, da9055_onkey_irq,
                                   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                                   "ONKEY", onkey);
index 08ead2aaede5d8ba8a324ff8157beb74d7984b2d..20c80f543d5e56fbe59f15403f2d64d9cb791cff 100644 (file)
@@ -169,6 +169,7 @@ static int soc_button_pnp_probe(struct pnp_dev *pdev,
                                soc_button_remove(pdev);
                                return error;
                        }
+                       continue;
                }
 
                priv->children[i] = pd;
index ef1cf52f8bb99212a96e9fe415ebf81bbb1e21bf..b96e978a37b76a4a43ab15bf6b80e1a295a8767a 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/input.h>
@@ -831,7 +832,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
                break;
 
        case 3:
-               etd->reg_10 = 0x0b;
+               if (etd->set_hw_resolution)
+                       etd->reg_10 = 0x0b;
+               else
+                       etd->reg_10 = 0x03;
+
                if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
                        rc = -1;
 
@@ -1330,6 +1335,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
        return 0;
 }
 
+/*
+ * Some hw_version 3 models go into error state when we try to set bit 3 of r10
+ */
+static const struct dmi_system_id no_hw_res_dmi_table[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+       {
+               /* Gigabyte U2442 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
+               },
+       },
+#endif
+       { }
+};
+
 /*
  * determine hardware version and set some properties according to it.
  */
@@ -1353,6 +1374,7 @@ static int elantech_set_properties(struct elantech_data *etd)
                case 6:
                case 7:
                case 8:
+               case 9:
                        etd->hw_version = 4;
                        break;
                default:
@@ -1389,6 +1411,9 @@ static int elantech_set_properties(struct elantech_data *etd)
         */
        etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
 
+       /* Enable real hardware resolution on hw_version 3 ? */
+       etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
+
        return 0;
 }
 
index 036a04abaef72314a8dcc7d444f6ab3feea377f9..9e0e2a1f340d52817fc18bf79d849bed6d340932 100644 (file)
@@ -130,6 +130,7 @@ struct elantech_data {
        bool jumpy_cursor;
        bool reports_pressure;
        bool crc_enabled;
+       bool set_hw_resolution;
        unsigned char hw_version;
        unsigned int fw_version;
        unsigned int single_finger_reports;
index d8d49d10f9bb60d477124bba603fb9f61be1efdb..d68d33fb5ac20f50fdedf503eb19e39e8de790a2 100644 (file)
@@ -117,6 +117,44 @@ void synaptics_reset(struct psmouse *psmouse)
 }
 
 #ifdef CONFIG_MOUSE_PS2_SYNAPTICS
+/* This list has been kindly provided by Synaptics. */
+static const char * const topbuttonpad_pnp_ids[] = {
+       "LEN0017",
+       "LEN0018",
+       "LEN0019",
+       "LEN0023",
+       "LEN002A",
+       "LEN002B",
+       "LEN002C",
+       "LEN002D",
+       "LEN002E",
+       "LEN0033", /* Helix */
+       "LEN0034", /* T431s, T540, X1 Carbon 2nd */
+       "LEN0035", /* X240 */
+       "LEN0036", /* T440 */
+       "LEN0037",
+       "LEN0038",
+       "LEN0041",
+       "LEN0042", /* Yoga */
+       "LEN0045",
+       "LEN0046",
+       "LEN0047",
+       "LEN0048",
+       "LEN0049",
+       "LEN2000",
+       "LEN2001",
+       "LEN2002",
+       "LEN2003",
+       "LEN2004", /* L440 */
+       "LEN2005",
+       "LEN2006",
+       "LEN2007",
+       "LEN2008",
+       "LEN2009",
+       "LEN200A",
+       "LEN200B",
+       NULL
+};
 
 /*****************************************************************************
  *     Synaptics communications functions
@@ -1255,8 +1293,10 @@ static void set_abs_position_params(struct input_dev *dev,
        input_abs_set_res(dev, y_code, priv->y_res);
 }
 
-static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
+static void set_input_params(struct psmouse *psmouse,
+                            struct synaptics_data *priv)
 {
+       struct input_dev *dev = psmouse->dev;
        int i;
 
        /* Things that apply to both modes */
@@ -1325,6 +1365,17 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
 
        if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
                __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+               /* See if this buttonpad has a top button area */
+               if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) {
+                       for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
+                               if (strstr(psmouse->ps2dev.serio->firmware_id,
+                                          topbuttonpad_pnp_ids[i])) {
+                                       __set_bit(INPUT_PROP_TOPBUTTONPAD,
+                                                 dev->propbit);
+                                       break;
+                               }
+                       }
+               }
                /* Clickpads report only left button */
                __clear_bit(BTN_RIGHT, dev->keybit);
                __clear_bit(BTN_MIDDLE, dev->keybit);
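
The string that strstr() scans here is the serio port's firmware_id, which the i8042 changes further down build as "PNP:" followed by the port's PnP ids. A minimal userspace sketch of the same matching logic, with an abbreviated id list and an illustrative firmware_id value (the real string depends on the machine's PnP data):

    #include <stdio.h>
    #include <string.h>

    static const char * const topbuttonpad_pnp_ids[] = {
            "LEN0036", "LEN0042", "LEN2004", NULL   /* abbreviated list */
    };

    int main(void)
    {
            /* illustrative only; a real id comes from i8042_pnp_id_to_string() */
            const char *firmware_id = "PNP: LEN0036 PNP0f13";
            int i;

            if (!strncmp(firmware_id, "PNP:", 4)) {
                    for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
                            if (strstr(firmware_id, topbuttonpad_pnp_ids[i])) {
                                    printf("top button area (%s)\n",
                                           topbuttonpad_pnp_ids[i]);
                                    break;
                            }
                    }
            }
            return 0;
    }
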
@@ -1514,6 +1565,22 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
                },
                .driver_data = (int []){1232, 5710, 1156, 4696},
        },
+       {
+               /* Lenovo ThinkPad Edge E431 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
+               },
+               .driver_data = (int []){1024, 5022, 2508, 4832},
+       },
+       {
+               /* Lenovo ThinkPad T431s */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
+               },
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
        {
                /* Lenovo ThinkPad T440s */
                .matches = {
@@ -1522,6 +1589,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
                },
                .driver_data = (int []){1024, 5112, 2024, 4832},
        },
+       {
+               /* Lenovo ThinkPad L440 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
+               },
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
        {
                /* Lenovo ThinkPad T540p */
                .matches = {
@@ -1530,6 +1605,32 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
                },
                .driver_data = (int []){1024, 5056, 2058, 4832},
        },
+       {
+               /* Lenovo ThinkPad L540 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
+               },
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
+       {
+               /* Lenovo Yoga S1 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+                                       "ThinkPad S1 Yoga"),
+               },
+               .driver_data = (int []){1232, 5710, 1156, 4696},
+       },
+       {
+               /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION,
+                                       "ThinkPad X1 Carbon 2nd"),
+               },
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
 #endif
        { }
 };
@@ -1593,7 +1694,7 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
                     priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
                     priv->board_id, priv->firmware_id);
 
-       set_input_params(psmouse->dev, priv);
+       set_input_params(psmouse, priv);
 
        /*
         * Encode touchpad model so that it can be used to set
index 0ec9abbe31fec3af5248808fce517fc863ff75b2..381b20d4c5618d8fc7f5c3e616479d4663d9ae03 100644 (file)
@@ -702,6 +702,17 @@ static int i8042_pnp_aux_irq;
 static char i8042_pnp_kbd_name[32];
 static char i8042_pnp_aux_name[32];
 
+static void i8042_pnp_id_to_string(struct pnp_id *id, char *dst, int dst_size)
+{
+       strlcpy(dst, "PNP:", dst_size);
+
+       while (id) {
+               strlcat(dst, " ", dst_size);
+               strlcat(dst, id->id, dst_size);
+               id = id->next;
+       }
+}
+
 static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *did)
 {
        if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1)
@@ -718,6 +729,8 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
                strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
                strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
        }
+       i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id,
+                              sizeof(i8042_kbd_firmware_id));
 
        /* Keyboard ports are always supposed to be wakeup-enabled */
        device_set_wakeup_enable(&dev->dev, true);
@@ -742,6 +755,8 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
                strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
                strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
        }
+       i8042_pnp_id_to_string(dev->id, i8042_aux_firmware_id,
+                              sizeof(i8042_aux_firmware_id));
 
        i8042_pnp_aux_devices++;
        return 0;
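
For reference, i8042_pnp_id_to_string() above walks the PnP device's chain of ids and produces one space-separated, "PNP:"-prefixed string; that is what later lands in serio->firmware_id and what the Synaptics matching shown earlier greps through. A standalone sketch of the same formatting (the struct below is a stand-in for the kernel's struct pnp_id, snprintf replaces strlcpy/strlcat for portability, and the ids are examples):

    #include <stdio.h>
    #include <string.h>

    struct pnp_id {                 /* stand-in for the kernel's struct pnp_id */
            char id[8];
            struct pnp_id *next;
    };

    static void pnp_id_to_string(const struct pnp_id *id, char *dst, size_t size)
    {
            snprintf(dst, size, "PNP:");
            for (; id; id = id->next) {
                    size_t len = strlen(dst);
                    snprintf(dst + len, size - len, " %s", id->id);
            }
    }

    int main(void)
    {
            struct pnp_id second = { "PNP0f13", NULL };
            struct pnp_id first  = { "LEN0036", &second };
            char buf[128];

            pnp_id_to_string(&first, buf, sizeof(buf));
            printf("%s\n", buf);            /* "PNP: LEN0036 PNP0f13" */
            return 0;
    }
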
index 020053fa5aaa38fce82b5fc2d6ced8546661c9df..3807c3e971cca79e6ff42b745409ac418487bb0c 100644 (file)
@@ -87,6 +87,8 @@ MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
 #endif
 
 static bool i8042_bypass_aux_irq_test;
+static char i8042_kbd_firmware_id[128];
+static char i8042_aux_firmware_id[128];
 
 #include "i8042.h"
 
@@ -1218,6 +1220,8 @@ static int __init i8042_create_kbd_port(void)
        serio->dev.parent       = &i8042_platform_device->dev;
        strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
        strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
+       strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
+               sizeof(serio->firmware_id));
 
        port->serio = serio;
        port->irq = I8042_KBD_IRQ;
@@ -1244,6 +1248,8 @@ static int __init i8042_create_aux_port(int idx)
        if (idx < 0) {
                strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name));
                strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
+               strlcpy(serio->firmware_id, i8042_aux_firmware_id,
+                       sizeof(serio->firmware_id));
                serio->close = i8042_port_close;
        } else {
                snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
index 8f4c4ab04bc2d8c61d5fda2a65696806e61ad6f2..b29134de983b85ff2df85ba171d8e4a7f8cdac5b 100644 (file)
@@ -451,6 +451,13 @@ static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *
        return retval;
 }
 
+static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct serio *serio = to_serio_port(dev);
+
+       return sprintf(buf, "%s\n", serio->firmware_id);
+}
+
 static DEVICE_ATTR_RO(type);
 static DEVICE_ATTR_RO(proto);
 static DEVICE_ATTR_RO(id);
@@ -473,12 +480,14 @@ static DEVICE_ATTR_RO(modalias);
 static DEVICE_ATTR_WO(drvctl);
 static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
 static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
+static DEVICE_ATTR_RO(firmware_id);
 
 static struct attribute *serio_device_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_description.attr,
        &dev_attr_drvctl.attr,
        &dev_attr_bind_mode.attr,
+       &dev_attr_firmware_id.attr,
        NULL
 };
 
@@ -921,9 +930,14 @@ static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
        SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
        SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
        SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
+
        SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
                                serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
 
+       if (serio->firmware_id[0])
+               SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
+                                    serio->firmware_id);
+
        return 0;
 }
 #undef SERIO_ADD_UEVENT_VAR
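
With the serio changes above, the firmware id becomes visible to userspace both as a sysfs attribute and as a SERIO_FIRMWARE_ID uevent variable. A minimal reader, assuming the usual /sys/bus/serio/devices layout (the port name, serio1 here, varies per machine):

    #include <stdio.h>

    int main(void)
    {
            /* path is illustrative; serio port numbering differs per system */
            FILE *f = fopen("/sys/bus/serio/devices/serio1/firmware_id", "r");
            char line[128];

            if (f && fgets(line, sizeof(line), f))
                    printf("firmware_id: %s", line);
            if (f)
                    fclose(f);
            return 0;
    }
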
index b16ebef5b9111c7a999845300e1c33070f2a31b4..611fc3905d00d9fafa7cf1088b28f6c22fccca28 100644 (file)
 #define HID_USAGE_PAGE_DIGITIZER       0x0d
 #define HID_USAGE_PAGE_DESKTOP         0x01
 #define HID_USAGE                      0x09
-#define HID_USAGE_X                    0x30
-#define HID_USAGE_Y                    0x31
-#define HID_USAGE_X_TILT               0x3d
-#define HID_USAGE_Y_TILT               0x3e
-#define HID_USAGE_FINGER               0x22
-#define HID_USAGE_STYLUS               0x20
-#define HID_USAGE_CONTACTMAX           0x55
+#define HID_USAGE_X                    ((HID_USAGE_PAGE_DESKTOP << 16) | 0x30)
+#define HID_USAGE_Y                    ((HID_USAGE_PAGE_DESKTOP << 16) | 0x31)
+#define HID_USAGE_PRESSURE             ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x30)
+#define HID_USAGE_X_TILT               ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3d)
+#define HID_USAGE_Y_TILT               ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3e)
+#define HID_USAGE_FINGER               ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x22)
+#define HID_USAGE_STYLUS               ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x20)
+#define HID_USAGE_CONTACTMAX           ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x55)
 #define HID_COLLECTION                 0xa1
 #define HID_COLLECTION_LOGICAL         0x02
 #define HID_COLLECTION_END             0xc0
 
-enum {
-       WCM_UNDEFINED = 0,
-       WCM_DESKTOP,
-       WCM_DIGITIZER,
-};
-
 struct hid_descriptor {
        struct usb_descriptor_header header;
        __le16   bcdHID;
@@ -305,7 +300,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
        char limit = 0;
        /* result has to be defined as int for some devices */
        int result = 0, touch_max = 0;
-       int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
+       int i = 0, page = 0, finger = 0, pen = 0;
        unsigned char *report;
 
        report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL);
@@ -332,134 +327,121 @@ static int wacom_parse_hid(struct usb_interface *intf,
 
                switch (report[i]) {
                case HID_USAGE_PAGE:
-                       switch (report[i + 1]) {
-                       case HID_USAGE_PAGE_DIGITIZER:
-                               usage = WCM_DIGITIZER;
-                               i++;
-                               break;
-
-                       case HID_USAGE_PAGE_DESKTOP:
-                               usage = WCM_DESKTOP;
-                               i++;
-                               break;
-                       }
+                       page = report[i + 1];
+                       i++;
                        break;
 
                case HID_USAGE:
-                       switch (report[i + 1]) {
+                       switch (page << 16 | report[i + 1]) {
                        case HID_USAGE_X:
-                               if (usage == WCM_DESKTOP) {
-                                       if (finger) {
-                                               features->device_type = BTN_TOOL_FINGER;
-                                               /* touch device at least supports one touch point */
-                                               touch_max = 1;
-                                               switch (features->type) {
-                                               case TABLETPC2FG:
-                                                       features->pktlen = WACOM_PKGLEN_TPC2FG;
-                                                       break;
-
-                                               case MTSCREEN:
-                                               case WACOM_24HDT:
-                                                       features->pktlen = WACOM_PKGLEN_MTOUCH;
-                                                       break;
-
-                                               case MTTPC:
-                                                       features->pktlen = WACOM_PKGLEN_MTTPC;
-                                                       break;
-
-                                               case BAMBOO_PT:
-                                                       features->pktlen = WACOM_PKGLEN_BBTOUCH;
-                                                       break;
-
-                                               default:
-                                                       features->pktlen = WACOM_PKGLEN_GRAPHIRE;
-                                                       break;
-                                               }
-
-                                               switch (features->type) {
-                                               case BAMBOO_PT:
-                                                       features->x_phy =
-                                                               get_unaligned_le16(&report[i + 5]);
-                                                       features->x_max =
-                                                               get_unaligned_le16(&report[i + 8]);
-                                                       i += 15;
-                                                       break;
-
-                                               case WACOM_24HDT:
-                                                       features->x_max =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->x_phy =
-                                                               get_unaligned_le16(&report[i + 8]);
-                                                       features->unit = report[i - 1];
-                                                       features->unitExpo = report[i - 3];
-                                                       i += 12;
-                                                       break;
-
-                                               default:
-                                                       features->x_max =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->x_phy =
-                                                               get_unaligned_le16(&report[i + 6]);
-                                                       features->unit = report[i + 9];
-                                                       features->unitExpo = report[i + 11];
-                                                       i += 12;
-                                                       break;
-                                               }
-                                       } else if (pen) {
-                                               /* penabled only accepts exact bytes of data */
-                                               if (features->type >= TABLETPC)
-                                                       features->pktlen = WACOM_PKGLEN_GRAPHIRE;
-                                               features->device_type = BTN_TOOL_PEN;
+                               if (finger) {
+                                       features->device_type = BTN_TOOL_FINGER;
+                                       /* touch device at least supports one touch point */
+                                       touch_max = 1;
+                                       switch (features->type) {
+                                       case TABLETPC2FG:
+                                               features->pktlen = WACOM_PKGLEN_TPC2FG;
+                                               break;
+
+                                       case MTSCREEN:
+                                       case WACOM_24HDT:
+                                               features->pktlen = WACOM_PKGLEN_MTOUCH;
+                                               break;
+
+                                       case MTTPC:
+                                               features->pktlen = WACOM_PKGLEN_MTTPC;
+                                               break;
+
+                                       case BAMBOO_PT:
+                                               features->pktlen = WACOM_PKGLEN_BBTOUCH;
+                                               break;
+
+                                       default:
+                                               features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+                                               break;
+                                       }
+
+                                       switch (features->type) {
+                                       case BAMBOO_PT:
+                                               features->x_phy =
+                                                       get_unaligned_le16(&report[i + 5]);
+                                               features->x_max =
+                                                       get_unaligned_le16(&report[i + 8]);
+                                               i += 15;
+                                               break;
+
+                                       case WACOM_24HDT:
                                                features->x_max =
                                                        get_unaligned_le16(&report[i + 3]);
-                                               i += 4;
+                                               features->x_phy =
+                                                       get_unaligned_le16(&report[i + 8]);
+                                               features->unit = report[i - 1];
+                                               features->unitExpo = report[i - 3];
+                                               i += 12;
+                                               break;
+
+                                       default:
+                                               features->x_max =
+                                                       get_unaligned_le16(&report[i + 3]);
+                                               features->x_phy =
+                                                       get_unaligned_le16(&report[i + 6]);
+                                               features->unit = report[i + 9];
+                                               features->unitExpo = report[i + 11];
+                                               i += 12;
+                                               break;
                                        }
+                               } else if (pen) {
+                                       /* penabled only accepts exact bytes of data */
+                                       if (features->type >= TABLETPC)
+                                               features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+                                       features->device_type = BTN_TOOL_PEN;
+                                       features->x_max =
+                                               get_unaligned_le16(&report[i + 3]);
+                                       i += 4;
                                }
                                break;
 
                        case HID_USAGE_Y:
-                               if (usage == WCM_DESKTOP) {
-                                       if (finger) {
-                                               switch (features->type) {
-                                               case TABLETPC2FG:
-                                               case MTSCREEN:
-                                               case MTTPC:
-                                                       features->y_max =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->y_phy =
-                                                               get_unaligned_le16(&report[i + 6]);
-                                                       i += 7;
-                                                       break;
-
-                                               case WACOM_24HDT:
-                                                       features->y_max =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->y_phy =
-                                                               get_unaligned_le16(&report[i - 2]);
-                                                       i += 7;
-                                                       break;
-
-                                               case BAMBOO_PT:
-                                                       features->y_phy =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->y_max =
-                                                               get_unaligned_le16(&report[i + 6]);
-                                                       i += 12;
-                                                       break;
-
-                                               default:
-                                                       features->y_max =
-                                                               features->x_max;
-                                                       features->y_phy =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       i += 4;
-                                                       break;
-                                               }
-                                       } else if (pen) {
+                               if (finger) {
+                                       switch (features->type) {
+                                       case TABLETPC2FG:
+                                       case MTSCREEN:
+                                       case MTTPC:
+                                               features->y_max =
+                                                       get_unaligned_le16(&report[i + 3]);
+                                               features->y_phy =
+                                                       get_unaligned_le16(&report[i + 6]);
+                                               i += 7;
+                                               break;
+
+                                       case WACOM_24HDT:
+                                               features->y_max =
+                                                       get_unaligned_le16(&report[i + 3]);
+                                               features->y_phy =
+                                                       get_unaligned_le16(&report[i - 2]);
+                                               i += 7;
+                                               break;
+
+                                       case BAMBOO_PT:
+                                               features->y_phy =
+                                                       get_unaligned_le16(&report[i + 3]);
+                                               features->y_max =
+                                                       get_unaligned_le16(&report[i + 6]);
+                                               i += 12;
+                                               break;
+
+                                       default:
                                                features->y_max =
+                                                       features->x_max;
+                                               features->y_phy =
                                                        get_unaligned_le16(&report[i + 3]);
                                                i += 4;
+                                               break;
                                        }
+                               } else if (pen) {
+                                       features->y_max =
+                                               get_unaligned_le16(&report[i + 3]);
+                                       i += 4;
                                }
                                break;
 
@@ -484,12 +466,20 @@ static int wacom_parse_hid(struct usb_interface *intf,
                                        wacom_retrieve_report_data(intf, features);
                                i++;
                                break;
+
+                       case HID_USAGE_PRESSURE:
+                               if (pen) {
+                                       features->pressure_max =
+                                               get_unaligned_le16(&report[i + 3]);
+                                       i += 4;
+                               }
+                               break;
                        }
                        break;
 
                case HID_COLLECTION_END:
                        /* reset UsagePage and Finger */
-                       finger = usage = 0;
+                       finger = page = 0;
                        break;
 
                case HID_COLLECTION:
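
The wacom_sys.c rewrite above drops the WCM_DESKTOP/WCM_DIGITIZER state and instead folds the last seen usage page into the switch key as (page << 16 | usage), so Generic Desktop X/Y and Digitizer pressure can be told apart in a single switch even though they share the low byte 0x30. A small standalone illustration of the key composition, using the page values from the #defines above:

    #include <stdio.h>

    #define PAGE_DESKTOP   0x01
    #define PAGE_DIGITIZER 0x0d

    int main(void)
    {
            int usage = 0x30;       /* same usage id on both pages */

            printf("desktop X key:      0x%05x\n", PAGE_DESKTOP   << 16 | usage);
            printf("digitizer pressure: 0x%05x\n", PAGE_DIGITIZER << 16 | usage);
            /* 0x10030 vs 0xd0030: distinct keys, one switch statement */
            return 0;
    }
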
index 05f371df6c400a882621c13509f6a13a40eed077..4822c57a3756f4e3175fac3e1def90611028eb6d 100644 (file)
@@ -178,10 +178,9 @@ static int wacom_ptu_irq(struct wacom_wac *wacom)
 
 static int wacom_dtu_irq(struct wacom_wac *wacom)
 {
-       struct wacom_features *features = &wacom->features;
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        struct input_dev *input = wacom->input;
-       int prox = data[1] & 0x20, pressure;
+       int prox = data[1] & 0x20;
 
        dev_dbg(input->dev.parent,
                "%s: received report #%d", __func__, data[0]);
@@ -198,10 +197,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
        input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
        input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
        input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
-       pressure = ((data[7] & 0x01) << 8) | data[6];
-       if (pressure < 0)
-               pressure = features->pressure_max + pressure + 1;
-       input_report_abs(input, ABS_PRESSURE, pressure);
+       input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x01) << 8) | data[6]);
        input_report_key(input, BTN_TOUCH, data[1] & 0x05);
        if (!prox) /* out-prox */
                wacom->id[0] = 0;
@@ -906,7 +902,7 @@ static int int_dist(int x1, int y1, int x2, int y2)
 static int wacom_24hdt_irq(struct wacom_wac *wacom)
 {
        struct input_dev *input = wacom->input;
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        int i;
        int current_num_contacts = data[61];
        int contacts_to_send = 0;
@@ -959,7 +955,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
 static int wacom_mt_touch(struct wacom_wac *wacom)
 {
        struct input_dev *input = wacom->input;
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        int i;
        int current_num_contacts = data[2];
        int contacts_to_send = 0;
@@ -1038,7 +1034,7 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
 
 static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
 {
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        struct input_dev *input = wacom->input;
        bool prox;
        int x = 0, y = 0;
@@ -1074,10 +1070,8 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
 
 static int wacom_tpc_pen(struct wacom_wac *wacom)
 {
-       struct wacom_features *features = &wacom->features;
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        struct input_dev *input = wacom->input;
-       int pressure;
        bool prox = data[1] & 0x20;
 
        if (!wacom->shared->stylus_in_proximity) /* first in prox */
@@ -1093,10 +1087,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
                input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
                input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
                input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
-               pressure = ((data[7] & 0x01) << 8) | data[6];
-               if (pressure < 0)
-                       pressure = features->pressure_max + pressure + 1;
-               input_report_abs(input, ABS_PRESSURE, pressure);
+               input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x03) << 8) | data[6]);
                input_report_key(input, BTN_TOUCH, data[1] & 0x05);
                input_report_key(input, wacom->tool[0], prox);
                return 1;
@@ -1107,7 +1098,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
 
 static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
 {
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
 
        dev_dbg(wacom->input->dev.parent,
                "%s: received report #%d\n", __func__, data[0]);
@@ -1838,7 +1829,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
        case DTU:
                if (features->type == DTUS) {
                        input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
-                       for (i = 0; i < 3; i++)
+                       for (i = 0; i < 4; i++)
                                __set_bit(BTN_0 + i, input_dev->keybit);
                }
                __set_bit(BTN_TOOL_PEN, input_dev->keybit);
index 45a06e495ed25702831441da6be558fa88491671..7f8aa981500d88c10d8b23cc19f489befe7bedd6 100644 (file)
@@ -425,7 +425,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
 name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
 { \
        struct ads7846 *ts = dev_get_drvdata(dev); \
-       ssize_t v = ads7846_read12_ser(dev, \
+       ssize_t v = ads7846_read12_ser(&ts->spi->dev, \
                        READ_12BIT_SER(var)); \
        if (v < 0) \
                return v; \
index c949520bd196ec47cf6e572e68cdd67c11b9f84f..57068e8035b5b2553141cb8f1a6ad27f334ea6ec 100644 (file)
@@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
        iommu_flush_dte(iommu, devid);
        if (devid != alias) {
                irq_lookup_table[alias] = table;
-               set_dte_irq_entry(devid, table);
+               set_dte_irq_entry(alias, table);
                iommu_flush_dte(iommu, alias);
        }
 
index b76c58dbe30ce5ac38c8422b66b5ed2ea4f0fb1e..0e08545d72989e114f016751f68df5a3191088c0 100644 (file)
@@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
-               set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
+               set_dev_entry_bit(devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
index 5208828792e603ad3f5effaa22b2e9c09ca0ac02..203b2e6a91cfca1184eb37df8761b339bc2a3728 100644 (file)
@@ -504,8 +504,10 @@ static void do_fault(struct work_struct *work)
 
        write = !!(fault->flags & PPR_FAULT_WRITE);
 
+       down_read(&fault->state->mm->mmap_sem);
        npages = get_user_pages(fault->state->task, fault->state->mm,
                                fault->address, 1, write, 0, &page, NULL);
+       up_read(&fault->state->mm->mmap_sem);
 
        if (npages == 1) {
                put_page(page);
index 8b89e33a89fe99057d99e322ce2c287b41c0eb2f..647c3c7fd7428f31dd2b313a1fc8b29ac33b482c 100644 (file)
@@ -1381,7 +1381,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 
        do {
                next = pmd_addr_end(addr, end);
-               ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
+               ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
                                              prot, stage);
                phys += next - addr;
        } while (pmd++, addr = next, addr < end);
@@ -1499,7 +1499,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 
        ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
        arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
-       return ret ? ret : size;
+       return ret ? 0 : size;
 }
 
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
index f445c10df8dfd2beb53b8fb4628d7094d0e17b5a..39f8b717fe8482f6e757bc7f9064187923b90cca 100644 (file)
@@ -152,7 +152,8 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
-               for (tmp = dev, level--; tmp; tmp = tmp->bus->self) {
+               for (tmp = dev; tmp; tmp = tmp->bus->self) {
+                       level--;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
index 69fa7da5e48beba40a9595f67117505efc4e069b..f256ffc02e29df18ce8c43266fafe68b1971beb0 100644 (file)
@@ -1009,11 +1009,13 @@ static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
        if (level == 1)
                return freelist;
 
-       for (pte = page_address(pg); !first_pte_in_page(pte); pte++) {
+       pte = page_address(pg);
+       do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                        freelist = dma_pte_list_pagetables(domain, level - 1,
                                                           pte, freelist);
-       }
+               pte++;
+       } while (!first_pte_in_page(pte));
 
        return freelist;
 }
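
The loop conversion above matters because first_pte_in_page() is true for the very first PTE of a page-table page, so the old for loop's condition was false on entry and the body never ran; the do/while guarantees the page is actually walked before the boundary test. A tiny standalone model of the difference (sizes shrunk for illustration):

    #include <stdio.h>

    #define PTES_PER_PAGE 8         /* stand-in for the real 512-entry tables */

    static int first_pte_in_page(int idx)
    {
            return (idx % PTES_PER_PAGE) == 0;
    }

    int main(void)
    {
            int visited, idx;

            /* broken pattern: condition already false at idx == 0 */
            visited = 0;
            for (idx = 0; !first_pte_in_page(idx); idx++)
                    visited++;
            printf("for loop visited %d ptes\n", visited);      /* 0 */

            /* fixed pattern: walk the page, stop at the next boundary */
            visited = 0;
            idx = 0;
            do {
                    visited++;
                    idx++;
            } while (!first_pte_in_page(idx));
            printf("do/while visited %d ptes\n", visited);      /* 8 */
            return 0;
    }
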
@@ -2235,7 +2237,9 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
                                bridge_devfn = dev_tmp->devfn;
                        }
                        spin_lock_irqsave(&device_domain_lock, flags);
-                       info = dmar_search_domain_by_dev_info(segment, bus, devfn);
+                       info = dmar_search_domain_by_dev_info(segment,
+                                                             bridge_bus,
+                                                             bridge_devfn);
                        if (info) {
                                iommu = info->iommu;
                                domain = info->domain;
index 41be897df8d5521250d79dee5362c08fe0f80067..3899ba7821c5e78d4496c29ad3fba2b8b4ffcad9 100644 (file)
@@ -41,6 +41,7 @@
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS      (0x30)
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS    (0x34)
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)      (0x100 + irq*4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK      0xF
 
 #define ARMADA_370_XP_CPU_INTACK_OFFS          (0x44)
 #define ARMADA_375_PPI_CAUSE                   (0x10)
@@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
                                       struct msi_desc *desc)
 {
        struct msi_msg msg;
-       irq_hw_number_t hwirq;
-       int virq;
+       int virq, hwirq;
 
        hwirq = armada_370_xp_alloc_msi();
        if (hwirq < 0)
@@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
                                           unsigned int irq)
 {
        struct irq_data *d = irq_get_irq_data(irq);
+       unsigned long hwirq = d->hwirq;
+
        irq_dispose_mapping(irq);
-       armada_370_xp_free_msi(d->hwirq);
+       armada_370_xp_free_msi(hwirq);
+}
+
+static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
+                                         int nvec, int type)
+{
+       /* We support MSI, but not MSI-X */
+       if (type == PCI_CAP_ID_MSI)
+               return 0;
+       return -EINVAL;
 }
 
 static struct irq_chip armada_370_xp_msi_irq_chip = {
@@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
 
        msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
        msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+       msi_chip->check_device = armada_370_xp_check_msi_device;
        msi_chip->of_node = node;
 
        armada_370_xp_msi_domain =
@@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 static int armada_xp_set_affinity(struct irq_data *d,
                                  const struct cpumask *mask_val, bool force)
 {
-       unsigned long reg;
-       unsigned long new_mask = 0;
-       unsigned long online_mask = 0;
-       unsigned long count = 0;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
+       unsigned long reg, mask;
        int cpu;
 
-       for_each_cpu(cpu, mask_val) {
-               new_mask |= 1 << cpu_logical_map(cpu);
-               count++;
-       }
-
-       /*
-        * Forbid mutlicore interrupt affinity
-        * This is required since the MPIC HW doesn't limit
-        * several CPUs from acknowledging the same interrupt.
-        */
-       if (count > 1)
-               return -EINVAL;
-
-       for_each_cpu(cpu, cpu_online_mask)
-               online_mask |= 1 << cpu_logical_map(cpu);
+       /* Select a single core from the affinity mask which is online */
+       cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       mask = 1UL << cpu_logical_map(cpu);
 
        raw_spin_lock(&irq_controller_lock);
-
        reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-       reg = (reg & (~online_mask)) | new_mask;
+       reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
        writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-
        raw_spin_unlock(&irq_controller_lock);
 
        return 0;
@@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 
 #ifdef CONFIG_SMP
        armada_xp_mpic_smp_cpu_init();
-
-       /*
-        * Set the default affinity from all CPUs to the boot cpu.
-        * This is required since the MPIC doesn't limit several CPUs
-        * from acknowledging the same interrupt.
-        */
-       cpumask_clear(irq_default_affinity);
-       cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-
 #endif
 
        armada_370_xp_msi_init(node, main_int_res.start);
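
The new armada_xp_set_affinity() picks a single online CPU with cpumask_any_and() and rewrites only the low four bits of the per-interrupt source-control register (ARMADA_370_XP_INT_SOURCE_CPU_MASK), instead of hand-building an online-CPU mask. A tiny model of that read-modify-write (register value and CPU number are invented, and the logical-to-physical CPU mapping is omitted):

    #include <stdio.h>

    #define INT_SOURCE_CPU_MASK 0xFUL

    int main(void)
    {
            unsigned long reg = 0x0000013a; /* pretend current register value */
            unsigned int cpu = 2;           /* CPU picked from the affinity mask */
            unsigned long mask = 1UL << cpu;

            reg = (reg & ~INT_SOURCE_CPU_MASK) | mask;
            printf("new reg = 0x%08lx\n", reg);     /* low nibble is now 0x4 */
            return 0;
    }
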
index fc817d28d1fe50341bc4c9fe0d4de188f599d947..3d15d16a7088d2d886ef769f96534d896ded1b73 100644 (file)
@@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node)
        int i, size, max, reserved = 0, entry;
        const __be32 *irqsr;
 
-       cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL);
+       cb = kzalloc(sizeof(*cb), GFP_KERNEL);
 
        if (!cb)
                return -ENOMEM;
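
The crossbar one-liner above fixes a classic allocation-size bug: sizeof(struct cb_device *) is the size of a pointer, not of the structure, so the original kzalloc() was far too small, while sizeof(*cb) always tracks the pointed-to type. A minimal demonstration with a stand-in structure (not the kernel's cb_device):

    #include <stdio.h>

    struct cb_device_like {
            int num_irqs;
            int int_max;
            unsigned int *irq_map;
            void *crossbar_base;
    };

    int main(void)
    {
            struct cb_device_like *cb = NULL;       /* sizeof() never dereferences */

            printf("sizeof(struct cb_device_like *) = %zu\n",
                   sizeof(struct cb_device_like *));        /* pointer size */
            printf("sizeof(*cb)                     = %zu\n",
                   sizeof(*cb));                            /* whole structure */
            return 0;
    }
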
index 4300b6606f5e3276c11656ff29506b665e9a2a87..57d165e026f43ac4ba3f0ba0a0f422025ad2b117 100644 (file)
@@ -246,10 +246,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-       unsigned int shift = (gic_irq(d) % 4) * 8;
-       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
        u32 val, mask, bit;
 
+       if (!force)
+               cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       else
+               cpu = cpumask_first(mask_val);
+
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
index 414dbf6da89afd5890c73785cbcb41f3ab903e54..fc9f9d03fa13b879ba382d079cb35a1108b19d0d 100644 (file)
@@ -197,25 +197,6 @@ typedef struct _hfc4s8s_hw {
 
 
 
-/***************************/
-/* inline function defines */
-/***************************/
-#ifdef HISAX_HFC4S8S_PCIMEM    /* inline functions memory mapped */
-
-/* memory write and dummy IO read to avoid PCI byte merge problems */
-#define Write_hfc8(a, b, c) {(*((volatile u_char *)(a->membase + b)) = c); inb(a->iobase + 4);}
-/* memory write without dummy IO access for fifo data access */
-#define fWrite_hfc8(a, b, c) (*((volatile u_char *)(a->membase + b)) = c)
-#define Read_hfc8(a, b) (*((volatile u_char *)(a->membase + b)))
-#define Write_hfc16(a, b, c) (*((volatile unsigned short *)(a->membase + b)) = c)
-#define Read_hfc16(a, b) (*((volatile unsigned short *)(a->membase + b)))
-#define Write_hfc32(a, b, c) (*((volatile unsigned long *)(a->membase + b)) = c)
-#define Read_hfc32(a, b) (*((volatile unsigned long *)(a->membase + b)))
-#define wait_busy(a) {while ((Read_hfc8(a, R_STATUS) & M_BUSY));}
-#define PCI_ENA_MEMIO  0x03
-
-#else
-
 /* inline functions io mapped */
 static inline void
 SetRegAddr(hfc4s8s_hw *a, u_char b)
@@ -306,8 +287,6 @@ wait_busy(hfc4s8s_hw *a)
 
 #define PCI_ENA_REGIO  0x01
 
-#endif                         /* HISAX_HFC4S8S_PCIMEM */
-
 /******************************************************/
 /* function to read critical counter registers that   */
 /* may be updated by the chip during read             */
@@ -724,26 +703,15 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
                                return;
                        } else {
                                /* read erroneous D frame */
-
-#ifndef HISAX_HFC4S8S_PCIMEM
                                SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
 
                                while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                                       Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
                                        fRead_hfc32(l1p->hw);
-#endif
                                        z1 -= 4;
                                }
 
                                while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-                                       Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
-                               fRead_hfc8(l1p->hw);
-#endif
+                                       fRead_hfc8(l1p->hw);
 
                                Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
                                wait_busy(l1p->hw);
@@ -753,27 +721,16 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
 
                cp = skb->data;
 
-#ifndef HISAX_HFC4S8S_PCIMEM
                SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
 
                while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       *((unsigned long *) cp) =
-                               Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
                        *((unsigned long *) cp) = fRead_hfc32(l1p->hw);
-#endif
                        cp += 4;
                        z1 -= 4;
                }
 
                while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       *cp++ = Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
-               *cp++ = fRead_hfc8(l1p->hw);
-#endif
+                       *cp++ = fRead_hfc8(l1p->hw);
 
                Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
                wait_busy(l1p->hw);
@@ -859,28 +816,17 @@ rx_b_frame(struct hfc4s8s_btype *bch)
                        wait_busy(l1->hw);
                        return;
                }
-#ifndef HISAX_HFC4S8S_PCIMEM
                SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
 
                while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       *((unsigned long *) bch->rx_ptr) =
-                               Read_hfc32(l1->hw, A_FIFO_DATA0);
-#else
                        *((unsigned long *) bch->rx_ptr) =
                                fRead_hfc32(l1->hw);
-#endif
                        bch->rx_ptr += 4;
                        z1 -= 4;
                }
 
                while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       *(bch->rx_ptr++) = Read_hfc8(l1->hw, A_FIFO_DATA0);
-#else
-               *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
-#endif
+                       *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
 
                if (hdlc_complete) {
                        /* increment f counter */
@@ -940,29 +886,17 @@ tx_d_frame(struct hfc4s8s_l1 *l1p)
        if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
                cp = skb->data;
                cnt = skb->len;
-#ifndef HISAX_HFC4S8S_PCIMEM
                SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
 
                while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       fWrite_hfc32(l1p->hw, A_FIFO_DATA0,
-                                    *(unsigned long *) cp);
-#else
                        SetRegAddr(l1p->hw, A_FIFO_DATA0);
                        fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
-#endif
                        cp += 4;
                        cnt -= 4;
                }
 
-#ifdef HISAX_HFC4S8S_PCIMEM
-               while (cnt--)
-                       fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++);
-#else
                while (cnt--)
                        fWrite_hfc8(l1p->hw, *cp++);
-#endif
 
                l1p->tx_cnt = skb->truesize;
                Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
@@ -1037,26 +971,15 @@ tx_b_frame(struct hfc4s8s_btype *bch)
                cp = skb->data + bch->tx_cnt;
                bch->tx_cnt += cnt;
 
-#ifndef HISAX_HFC4S8S_PCIMEM
                SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
                while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       fWrite_hfc32(l1->hw, A_FIFO_DATA0,
-                                    *(unsigned long *) cp);
-#else
                        fWrite_hfc32(l1->hw, *(unsigned long *) cp);
-#endif
                        cp += 4;
                        cnt -= 4;
                }
 
                while (cnt--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       fWrite_hfc8(l1->hw, A_FIFO_DATA0, *cp++);
-#else
-               fWrite_hfc8(l1->hw, *cp++);
-#endif
+                       fWrite_hfc8(l1->hw, *cp++);
 
                if (bch->tx_cnt >= skb->len) {
                        if (bch->mode == L1_MODE_HDLC) {
@@ -1281,10 +1204,8 @@ hfc4s8s_interrupt(int intno, void *dev_id)
        if (!hw || !(hw->mr.r_irq_ctrl & M_GLOB_IRQ_EN))
                return IRQ_NONE;
 
-#ifndef        HISAX_HFC4S8S_PCIMEM
        /* read currently selected register */
        old_ioreg = GetRegAddr(hw);
-#endif
 
        /* Layer 1 State change */
        hw->mr.r_irq_statech |=
@@ -1292,9 +1213,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
        if (!
            (b = (Read_hfc8(hw, R_STATUS) & (M_MISC_IRQSTA | M_FR_IRQSTA)))
            && !hw->mr.r_irq_statech) {
-#ifndef        HISAX_HFC4S8S_PCIMEM
                SetRegAddr(hw, old_ioreg);
-#endif
                return IRQ_NONE;
        }
 
@@ -1322,9 +1241,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
        /* queue the request to allow other cards to interrupt */
        schedule_work(&hw->tqueue);
 
-#ifndef        HISAX_HFC4S8S_PCIMEM
        SetRegAddr(hw, old_ioreg);
-#endif
        return IRQ_HANDLED;
 }                              /* hfc4s8s_interrupt */
 
@@ -1471,13 +1388,8 @@ static void
 release_pci_ports(hfc4s8s_hw *hw)
 {
        pci_write_config_word(hw->pdev, PCI_COMMAND, 0);
-#ifdef HISAX_HFC4S8S_PCIMEM
-       if (hw->membase)
-               iounmap((void *) hw->membase);
-#else
        if (hw->iobase)
                release_region(hw->iobase, 8);
-#endif
 }
 
 /*****************************************/
@@ -1486,11 +1398,7 @@ release_pci_ports(hfc4s8s_hw *hw)
 static void
 enable_pci_ports(hfc4s8s_hw *hw)
 {
-#ifdef HISAX_HFC4S8S_PCIMEM
-       pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
-#else
        pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_REGIO);
-#endif
 }
 
 /*************************************/
@@ -1561,15 +1469,9 @@ setup_instance(hfc4s8s_hw *hw)
                       hw->irq);
                goto out;
        }
-#ifdef HISAX_HFC4S8S_PCIMEM
-       printk(KERN_INFO
-              "HFC-4S/8S: found PCI card at membase 0x%p, irq %d\n",
-              hw->hw_membase, hw->irq);
-#else
        printk(KERN_INFO
               "HFC-4S/8S: found PCI card at iobase 0x%x, irq %d\n",
               hw->iobase, hw->irq);
-#endif
 
        hfc_hardware_enable(hw, 1, 0);
 
@@ -1614,17 +1516,12 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->irq = pdev->irq;
        hw->iobase = pci_resource_start(pdev, 0);
 
-#ifdef HISAX_HFC4S8S_PCIMEM
-       hw->hw_membase = (u_char *) pci_resource_start(pdev, 1);
-       hw->membase = ioremap((ulong) hw->hw_membase, 256);
-#else
        if (!request_region(hw->iobase, 8, hw->card_name)) {
                printk(KERN_INFO
                       "HFC-4S/8S: failed to request address space at 0x%04x\n",
                       hw->iobase);
                goto out;
        }
-#endif
 
        pci_set_drvdata(pdev, hw);
        err = setup_instance(hw);
index 51dae9167238a3ed1ff972508a02f338de6702a0..96d1df05044fb48ffceb988dd90540db9f125cdd 100644 (file)
@@ -425,7 +425,7 @@ afterXPR:
                                if (cs->debug & L1_DEB_MONITOR)
                                        debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]);
                        }
-               AfterMOX1:
+               AfterMOX1: ;
 #endif
                }
        }
index a5da511e3c9ae4f381ae1526d2ab2779039aba9a..61ac6323744602ff27a95b56042c19d5d4e527e1 100644 (file)
@@ -634,7 +634,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 #ifdef CONFIG_IPPP_FILTER
        case PPPIOCSPASS:
        {
-               struct sock_fprog fprog;
+               struct sock_fprog_kern fprog;
                struct sock_filter *code;
                int err, len = get_filter(argp, &code);
 
@@ -653,7 +653,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
        }
        case PPPIOCSACTIVE:
        {
-               struct sock_fprog fprog;
+               struct sock_fprog_kern fprog;
                struct sock_filter *code;
                int err, len = get_filter(argp, &code);
 
index 2c0d2c2bf94648e273b7614a65e1bc09976663e8..9f454d76cc060984317e31310b60e34080e0c6c5 100644 (file)
@@ -287,11 +287,9 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
        p = frame;
 
        /* restart timer */
-       if ((int)(hc->keep_tl.expires-jiffies) < 5 * HZ) {
-               del_timer(&hc->keep_tl);
-               hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
-               add_timer(&hc->keep_tl);
-       } else
+       if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+               mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
+       else
                hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
 
        if (debug & DEBUG_L1OIP_MSG)
@@ -621,11 +619,9 @@ multiframe:
                goto multiframe;
 
        /* restart timer */
-       if ((int)(hc->timeout_tl.expires-jiffies) < 5 * HZ || !hc->timeout_on) {
+       if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
                hc->timeout_on = 1;
-               del_timer(&hc->timeout_tl);
-               hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
-               add_timer(&hc->timeout_tl);
+               mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
        } else /* only adjust timer */
                hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
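
Both l1oip hunks replace the open-coded del_timer()/set expires/add_timer() sequence with mod_timer(), which is the race-free shorthand for exactly that, and switch the expiry check to time_before(), which stays correct when jiffies wraps because it compares a signed difference. A userspace model of that comparison trick (simplified from the jiffies macros; the kernel version adds type checking):

    #include <stdio.h>
    #include <limits.h>

    /* simplified model of the kernel's time_before() */
    #define time_before(a, b)  ((long)((a) - (b)) < 0)

    int main(void)
    {
            unsigned long jiffies  = ULONG_MAX - 5; /* counter about to wrap */
            unsigned long deadline = jiffies + 100; /* in the future, wraps to 94 */

            printf("naive jiffies < deadline       -> %d (wrong: deadline is ahead)\n",
                   jiffies < deadline);
            printf("time_before(jiffies, deadline) -> %d (right)\n",
                   time_before(jiffies, deadline));
            return 0;
    }
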
 
index 1bf4a71919ec73957a00550dec49b3a3b3a1292c..9380be7b18954b9308ed42abe5fafa2f87c0f76a 100644 (file)
@@ -2488,6 +2488,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 
                } else {
                        inc_hit_counter(cache, bio);
+                       pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
                        if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
                            !is_dirty(cache, lookup_result.cblock))
index 784695d22fde1acaaf11acd78c7263438c04648e..53b213226c015ae29cd636c033a0c088776029aa 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
        struct bvec_iter iter_out;
        sector_t cc_sector;
        atomic_t cc_pending;
+       struct ablkcipher_request *req;
 };
 
 /*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-       struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
  */
 struct crypt_config {
        struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
        sector_t iv_offset;
        unsigned int iv_size;
 
-       /*
-        * Duplicated per cpu state. Access through
-        * per_cpu_ptr() only.
-        */
-       struct crypt_cpu __percpu *cpu;
-
        /* ESSIV: struct crypto_cipher *essiv_tfm */
        void *iv_private;
        struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-       return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 static void crypt_alloc_req(struct crypt_config *cc,
                            struct convert_context *ctx)
 {
-       struct crypt_cpu *this_cc = this_crypt_config(cc);
        unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-       if (!this_cc->req)
-               this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+       if (!ctx->req)
+               ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-       ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-       ablkcipher_request_set_callback(this_cc->req,
+       ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+       ablkcipher_request_set_callback(ctx->req,
            CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-           kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+           kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 /*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
 {
-       struct crypt_cpu *this_cc = this_crypt_config(cc);
        int r;
 
        atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
 
                atomic_inc(&ctx->cc_pending);
 
-               r = crypt_convert_block(cc, ctx, this_cc->req);
+               r = crypt_convert_block(cc, ctx, ctx->req);
 
                switch (r) {
                /* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
                        reinit_completion(&ctx->restart);
                        /* fall through*/
                case -EINPROGRESS:
-                       this_cc->req = NULL;
+                       ctx->req = NULL;
                        ctx->cc_sector++;
                        continue;
 
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
        io->sector = sector;
        io->error = 0;
        io->base_io = NULL;
+       io->ctx.req = NULL;
        atomic_set(&io->io_pending, 0);
 
        return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
        if (!atomic_dec_and_test(&io->io_pending))
                return;
 
+       if (io->ctx.req)
+               mempool_free(io->ctx.req, cc->req_pool);
        mempool_free(io, cc->io_pool);
 
        if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
 static void crypt_dtr(struct dm_target *ti)
 {
        struct crypt_config *cc = ti->private;
-       struct crypt_cpu *cpu_cc;
-       int cpu;
 
        ti->private = NULL;
 
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
        if (cc->crypt_queue)
                destroy_workqueue(cc->crypt_queue);
 
-       if (cc->cpu)
-               for_each_possible_cpu(cpu) {
-                       cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-                       if (cpu_cc->req)
-                               mempool_free(cpu_cc->req, cc->req_pool);
-               }
-
        crypt_free_tfms(cc);
 
        if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
        if (cc->dev)
                dm_put_device(ti, cc->dev);
 
-       if (cc->cpu)
-               free_percpu(cc->cpu);
-
        kzfree(cc->cipher);
        kzfree(cc->cipher_string);
 
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
        if (tmp)
                DMWARN("Ignoring unexpected additional cipher options");
 
-       cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-                                __alignof__(struct crypt_cpu));
-       if (!cc->cpu) {
-               ti->error = "Cannot allocate per cpu state";
-               goto bad_mem;
-       }
-
        /*
         * For compatibility with the original dm-crypt mapping format, if
         * only the cipher name is supplied, use cbc-plain.
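
The dm-crypt change above replaces the per-CPU ablkcipher_request with one request hung off each convert_context, allocated lazily from the existing mempool and released when the last reference to the I/O drops. A minimal sketch of that ownership pattern, with simplified names and types rather than the actual dm-crypt structures:

    #include <linux/mempool.h>
    #include <linux/gfp.h>
    #include <linux/atomic.h>

    struct ctx {
            void *req;              /* lazily allocated per-context request */
            atomic_t pending;       /* outstanding operations on this context */
    };

    /* Allocate the request the first time this context needs one. */
    static void ctx_alloc_req(mempool_t *pool, struct ctx *c)
    {
            if (!c->req)
                    c->req = mempool_alloc(pool, GFP_NOIO);
    }

    /* Drop one reference; the request is freed together with the context. */
    static void ctx_put(mempool_t *pool, struct ctx *c)
    {
            if (!atomic_dec_and_test(&c->pending))
                    return;
            if (c->req)
                    mempool_free(c->req, pool);
    }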
index aa009e86587189a20a4ccf4734ad9d999cb5701f..fa0f6cbd6a41283d21b1286558eb4764e2c06b4b 100644 (file)
@@ -1566,8 +1566,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                }
                if (m->pg_init_required)
                        __pg_init_all_paths(m);
-               spin_unlock_irqrestore(&m->lock, flags);
                dm_table_run_md_queue_async(m->ti->table);
+               spin_unlock_irqrestore(&m->lock, flags);
        }
 
        return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
index 53728be84dee35ac8dfabbf48087919841049f1a..2e71de8e0048c9eb1e1815d79fbedc8aaedf772a 100644 (file)
@@ -27,6 +27,7 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
+#define NO_SPACE_TIMEOUT (HZ * 60)
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");
@@ -175,6 +176,7 @@ struct pool {
        struct workqueue_struct *wq;
        struct work_struct worker;
        struct delayed_work waker;
+       struct delayed_work no_space_timeout;
 
        unsigned long last_commit_jiffies;
        unsigned ref_count;
@@ -232,6 +234,13 @@ struct thin_c {
        struct bio_list deferred_bio_list;
        struct bio_list retry_on_resume_list;
        struct rb_root sort_bio_list; /* sorted list of deferred bios */
+
+       /*
+        * Ensures the thin is not destroyed until the worker has finished
+        * iterating the active_thins list.
+        */
+       atomic_t refcount;
+       struct completion can_destroy;
 };
 
 /*----------------------------------------------------------------*/
@@ -928,7 +937,7 @@ static int commit(struct pool *pool)
 {
        int r;
 
-       if (get_pool_mode(pool) != PM_WRITE)
+       if (get_pool_mode(pool) >= PM_READ_ONLY)
                return -EINVAL;
 
        r = dm_pool_commit_metadata(pool->pmd);
@@ -1486,6 +1495,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
        blk_finish_plug(&plug);
 }
 
+static void thin_get(struct thin_c *tc);
+static void thin_put(struct thin_c *tc);
+
+/*
+ * We can't hold rcu_read_lock() around code that can block.  So we
+ * find a thin with the rcu lock held; bump a refcount; then drop
+ * the lock.
+ */
+static struct thin_c *get_first_thin(struct pool *pool)
+{
+       struct thin_c *tc = NULL;
+
+       rcu_read_lock();
+       if (!list_empty(&pool->active_thins)) {
+               tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+               thin_get(tc);
+       }
+       rcu_read_unlock();
+
+       return tc;
+}
+
+static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
+{
+       struct thin_c *old_tc = tc;
+
+       rcu_read_lock();
+       list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
+               thin_get(tc);
+               thin_put(old_tc);
+               rcu_read_unlock();
+               return tc;
+       }
+       thin_put(old_tc);
+       rcu_read_unlock();
+
+       return NULL;
+}
+
 static void process_deferred_bios(struct pool *pool)
 {
        unsigned long flags;
@@ -1493,10 +1541,11 @@ static void process_deferred_bios(struct pool *pool)
        struct bio_list bios;
        struct thin_c *tc;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(tc, &pool->active_thins, list)
+       tc = get_first_thin(pool);
+       while (tc) {
                process_thin_deferred_bios(tc);
-       rcu_read_unlock();
+               tc = get_next_thin(pool, tc);
+       }
 
        /*
         * If there are any deferred flush bios, we must commit
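
The comment above get_first_thin()/get_next_thin() describes the general recipe: hold rcu_read_lock() only long enough to locate the next element and take a reference on it, drop the lock so the caller may block, and let destruction wait on a completion until the last reference is gone. A hedged sketch of that recipe with generic names (obj in place of the thin/pool structures):

    #include <linux/rculist.h>
    #include <linux/atomic.h>
    #include <linux/completion.h>

    struct obj {
            struct list_head list;
            atomic_t refcount;
            struct completion can_destroy;
    };

    static void obj_init(struct obj *o)
    {
            atomic_set(&o->refcount, 1);    /* creator holds the first reference */
            init_completion(&o->can_destroy);
    }

    static void obj_get(struct obj *o)
    {
            atomic_inc(&o->refcount);
    }

    static void obj_put(struct obj *o)
    {
            if (atomic_dec_and_test(&o->refcount))
                    complete(&o->can_destroy);
    }

    /* Find and pin the first element under RCU, then drop the RCU lock. */
    static struct obj *get_first_obj(struct list_head *objs)
    {
            struct obj *o = NULL;

            rcu_read_lock();
            if (!list_empty(objs)) {
                    o = list_entry_rcu(objs->next, struct obj, list);
                    obj_get(o);
            }
            rcu_read_unlock();
            return o;
    }

    /* Teardown blocks until every walker has released its reference. */
    static void obj_destroy(struct obj *o)
    {
            obj_put(o);                     /* drop the creator's reference */
            wait_for_completion(&o->can_destroy);
            /* ...list_del_rcu(), synchronize_rcu(), free the object... */
    }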
@@ -1543,6 +1592,20 @@ static void do_waker(struct work_struct *ws)
        queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
+/*
+ * We're holding onto IO to allow userland time to react.  After the
+ * timeout either the pool will have been resized (and thus back in
+ * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ */
+static void do_no_space_timeout(struct work_struct *ws)
+{
+       struct pool *pool = container_of(to_delayed_work(ws), struct pool,
+                                        no_space_timeout);
+
+       if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
+               set_pool_mode(pool, PM_READ_ONLY);
+}
+
 /*----------------------------------------------------------------*/
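
The no-space timeout introduced above is an ordinary delayed work item: it is armed when the pool drops into out-of-data-space mode, fires after NO_SPACE_TIMEOUT to downgrade the pool to read-only, and is cancelled when the pool is suspended (for instance after a resize). A minimal arm/cancel sketch with illustrative names, not the dm-thin code itself:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    #define NO_SPACE_TIMEOUT (HZ * 60)

    static void no_space_timeout_fn(struct work_struct *ws)
    {
            /* still out of space after 60s: downgrade to read-only here */
    }

    static DECLARE_DELAYED_WORK(no_space_timeout, no_space_timeout_fn);

    /* Entering out-of-data-space mode: give userspace time to resize. */
    static void arm_no_space_timeout(struct workqueue_struct *wq)
    {
            queue_delayed_work(wq, &no_space_timeout, NO_SPACE_TIMEOUT);
    }

    /* Suspend/resize path: make sure the timer cannot fire afterwards. */
    static void disarm_no_space_timeout(void)
    {
            cancel_delayed_work(&no_space_timeout);
    }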
 
 struct noflush_work {
@@ -1578,7 +1641,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
        struct noflush_work w;
 
-       INIT_WORK(&w.worker, fn);
+       INIT_WORK_ONSTACK(&w.worker, fn);
        w.tc = tc;
        atomic_set(&w.complete, 0);
        init_waitqueue_head(&w.wait);
@@ -1668,6 +1731,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                pool->process_discard = process_discard;
                pool->process_prepared_mapping = process_prepared_mapping;
                pool->process_prepared_discard = process_prepared_discard_passdown;
+
+               if (!pool->pf.error_if_no_space)
+                       queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
                break;
 
        case PM_WRITE:
@@ -2053,6 +2119,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
        INIT_WORK(&pool->worker, do_worker);
        INIT_DELAYED_WORK(&pool->waker, do_waker);
+       INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
        spin_lock_init(&pool->lock);
        bio_list_init(&pool->deferred_flush_bios);
        INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2615,6 +2682,7 @@ static void pool_postsuspend(struct dm_target *ti)
        struct pool *pool = pt->pool;
 
        cancel_delayed_work(&pool->waker);
+       cancel_delayed_work(&pool->no_space_timeout);
        flush_workqueue(pool->wq);
        (void) commit(pool);
 }
@@ -3061,11 +3129,25 @@ static struct target_type pool_target = {
 /*----------------------------------------------------------------
  * Thin target methods
  *--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+       atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+       if (atomic_dec_and_test(&tc->refcount))
+               complete(&tc->can_destroy);
+}
+
 static void thin_dtr(struct dm_target *ti)
 {
        struct thin_c *tc = ti->private;
        unsigned long flags;
 
+       thin_put(tc);
+       wait_for_completion(&tc->can_destroy);
+
        spin_lock_irqsave(&tc->pool->lock, flags);
        list_del_rcu(&tc->list);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
@@ -3101,6 +3183,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        struct thin_c *tc;
        struct dm_dev *pool_dev, *origin_dev;
        struct mapped_device *pool_md;
+       unsigned long flags;
 
        mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -3191,9 +3274,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        mutex_unlock(&dm_thin_pool_table.mutex);
 
-       spin_lock(&tc->pool->lock);
+       atomic_set(&tc->refcount, 1);
+       init_completion(&tc->can_destroy);
+
+       spin_lock_irqsave(&tc->pool->lock, flags);
        list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
-       spin_unlock(&tc->pool->lock);
+       spin_unlock_irqrestore(&tc->pool->lock, flags);
        /*
         * This synchronize_rcu() call is needed here otherwise we risk a
         * wake_worker() call finding no bios to process (because the newly
index 796007a5e0e1a4b6e83b0871c1fca1ef8c0c461f..7a7bab8947ae3485d31c132cb3398251c7d507cf 100644 (file)
@@ -330,15 +330,17 @@ test_block_hash:
                                return r;
                        }
                }
-
                todo = 1 << v->data_dev_block_bits;
-               while (io->iter.bi_size) {
+               do {
                        u8 *page;
+                       unsigned len;
                        struct bio_vec bv = bio_iter_iovec(bio, io->iter);
 
                        page = kmap_atomic(bv.bv_page);
-                       r = crypto_shash_update(desc, page + bv.bv_offset,
-                                               bv.bv_len);
+                       len = bv.bv_len;
+                       if (likely(len >= todo))
+                               len = todo;
+                       r = crypto_shash_update(desc, page + bv.bv_offset, len);
                        kunmap_atomic(page);
 
                        if (r < 0) {
@@ -346,8 +348,9 @@ test_block_hash:
                                return r;
                        }
 
-                       bio_advance_iter(bio, &io->iter, bv.bv_len);
-               }
+                       bio_advance_iter(bio, &io->iter, len);
+                       todo -= len;
+               } while (todo);
 
                if (!v->version) {
                        r = crypto_shash_update(desc, v->salt, v->salt_size);
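
The dm-verity loop above hashes exactly one data block per pass by clamping each bio_vec segment to the bytes still owed for the block ('todo') instead of feeding the full bv_len, which may run past the block boundary. The same clamp-and-advance shape, reduced to a plain segment walk (hypothetical helper, not the verity code itself):

    #include <linux/types.h>
    #include <linux/errno.h>

    struct seg {
            const u8 *data;
            unsigned int len;
    };

    /*
     * Hash exactly 'block_size' bytes spread across 'segs', clamping the
     * final chunk so we never read past the block boundary. Returns the
     * number of segments consumed, or a negative error.
     */
    static int hash_one_block(const struct seg *segs, unsigned int nsegs,
                              unsigned int block_size,
                              int (*update)(const u8 *data, unsigned int len))
    {
            unsigned int todo = block_size;
            unsigned int i, len;
            int r;

            for (i = 0; i < nsegs && todo; i++) {
                    len = segs[i].len;
                    if (len >= todo)        /* segment runs past the block end */
                            len = todo;

                    r = update(segs[i].data, len);
                    if (r < 0)
                            return r;
                    todo -= len;
            }

            return todo ? -EINVAL : i;      /* -EINVAL: ran out of data early */
    }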
index 8fda38d23e3847aa4d96ecd147e996514a5a4af7..237b7e0ddc7ae2617af41cb1fb9dc0cc195f44ab 100644 (file)
@@ -8516,7 +8516,8 @@ static int md_notify_reboot(struct notifier_block *this,
                if (mddev_trylock(mddev)) {
                        if (mddev->pers)
                                __md_stop_writes(mddev);
-                       mddev->safemode = 2;
+                       if (mddev->persistent)
+                               mddev->safemode = 2;
                        mddev_unlock(mddev);
                }
                need_delay = 1;
index 33fc408e5eacef0a1dce55fd5c0d578fc244b663..cb882aae9e20d4f7032a400884f45f50de57528f 100644 (file)
@@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        int max_sectors;
        int sectors;
 
+       /*
+        * Register the new request and wait if the reconstruction
+        * thread has put up a bar for new requests.
+        * Continue immediately if no resync is active currently.
+        */
+       wait_barrier(conf);
+
        sectors = bio_sectors(bio);
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio->bi_iter.bi_sector < conf->reshape_progress &&
@@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 
        md_write_start(mddev, bio);
 
-       /*
-        * Register the new request and wait if the reconstruction
-        * thread has put up a bar for new requests.
-        * Continue immediately if no resync is active currently.
-        */
-       wait_barrier(conf);
 
        do {
 
index e8a1ce204036f45cca1d1d495f29c8c7b72942d5..cdd7c1b7259b008e873cc711294a3f0cecba422a 100644 (file)
@@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
         * windows that fall outside that.
         */
        for (i = 0; i < n_win_sizes; i++) {
-               struct ov7670_win_size *win = &info->devtype->win_sizes[index];
+               struct ov7670_win_size *win = &info->devtype->win_sizes[i];
                if (info->min_width && win->width < info->min_width)
                        continue;
                if (info->min_height && win->height < info->min_height)
index a4459301b5f829efcae8563241ef1108933194d7..ee0f57e01b5677df58c9f74565767bcaf57d54af 100644 (file)
@@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
        if (ret < 0)
                return -EINVAL;
 
-       node_ep = v4l2_of_get_next_endpoint(node, NULL);
+       node_ep = of_graph_get_next_endpoint(node, NULL);
        if (!node_ep) {
                dev_warn(dev, "no endpoint defined for node: %s\n",
                                                node->full_name);
index d5a7a135f75d39d5bc2f6fb3366639dd6dc7ee34..703560fa5e73b456cbc0951ccb61957bdfecebac 100644 (file)
@@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
        struct media_entity *ent;
        struct media_entity_desc u_ent;
 
+       memset(&u_ent, 0, sizeof(u_ent));
        if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
                return -EFAULT;
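
The one-line memset() added above is the usual fix for leaking uninitialized kernel stack memory (struct padding and fields the handler never writes) to userspace when the descriptor is later copied out. A tiny illustration of the pattern with a made-up descriptor struct:

    #include <linux/uaccess.h>
    #include <linux/string.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct foo_desc {
            __u32 id;
            __u16 flags;    /* 2 bytes of padding typically follow */
            __u32 value;
    };

    static long foo_fill(struct foo_desc __user *udesc)
    {
            struct foo_desc d;

            memset(&d, 0, sizeof(d));       /* clear padding and unused fields */
            d.id = 1;
            d.value = 42;

            return copy_to_user(udesc, &d, sizeof(d)) ? -EFAULT : 0;
    }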
 
index c137abfa0c543dd33f15e3888ab4590900733bba..20f1655e6d7595c328b5499476f4dde7773cccd0 100644 (file)
@@ -56,7 +56,7 @@ config VIDEO_VIU
 
 config VIDEO_TIMBERDALE
        tristate "Support for timberdale Video In/LogiWIN"
-       depends on VIDEO_V4L2 && I2C && DMADEVICES
+       depends on MFD_TIMBERDALE && VIDEO_V4L2 && I2C && DMADEVICES
        select DMA_ENGINE
        select TIMB_DMA
        select VIDEO_ADV7180
index b4f12d00be059c3f381b31295d435074ee291dab..65670825296209917cc33546d1d43abb60af5ed5 100644 (file)
@@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq)
 {
        struct vpbe_fh *fh = vb2_get_drv_priv(vq);
        struct vpbe_layer *layer = fh->layer;
+       struct vpbe_display *disp = fh->disp_dev;
+       unsigned long flags;
 
        if (!vb2_is_streaming(vq))
                return 0;
 
        /* release all active buffers */
+       spin_lock_irqsave(&disp->dma_queue_lock, flags);
+       if (layer->cur_frm == layer->next_frm) {
+               vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (layer->cur_frm != NULL)
+                       vb2_buffer_done(&layer->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (layer->next_frm != NULL)
+                       vb2_buffer_done(&layer->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&layer->dma_queue)) {
                layer->next_frm = list_entry(layer->dma_queue.next,
                                                struct vpbe_disp_buffer, list);
                list_del(&layer->next_frm->list);
                vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
        }
-
+       spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
        return 0;
 }
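
The vpbe stop_streaming fix above (and the vpif hunks further down) all apply the same rule: every buffer the driver still owns (the current frame, the next frame, and anything left on the DMA queue) must be handed back to videobuf2 with VB2_BUF_STATE_ERROR, and the queue must be drained under the IRQ-safe lock. A compressed sketch with illustrative structures, not the driver's own types:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <media/videobuf2-core.h>

    struct disp_buffer {                    /* illustrative */
            struct vb2_buffer vb;
            struct list_head list;
    };

    struct layer {                          /* illustrative */
            spinlock_t dma_queue_lock;
            struct list_head dma_queue;
            struct disp_buffer *cur_frm;
            struct disp_buffer *next_frm;
    };

    static void release_active_buffers(struct layer *l)
    {
            unsigned long flags;

            spin_lock_irqsave(&l->dma_queue_lock, flags);

            if (l->cur_frm)
                    vb2_buffer_done(&l->cur_frm->vb, VB2_BUF_STATE_ERROR);
            if (l->next_frm && l->next_frm != l->cur_frm)
                    vb2_buffer_done(&l->next_frm->vb, VB2_BUF_STATE_ERROR);

            while (!list_empty(&l->dma_queue)) {
                    l->next_frm = list_entry(l->dma_queue.next,
                                             struct disp_buffer, list);
                    list_del(&l->next_frm->list);
                    vb2_buffer_done(&l->next_frm->vb, VB2_BUF_STATE_ERROR);
            }

            spin_unlock_irqrestore(&l->dma_queue_lock, flags);
    }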
 
index d762246eabf5a3b78a8c348e7a5e5f149580e143..0379cb9f9a9c25c7b0a4efb52e741d4723ddcdc4 100644 (file)
@@ -734,6 +734,8 @@ static int vpfe_release(struct file *file)
                }
                vpfe_dev->io_usrs = 0;
                vpfe_dev->numbuffers = config_params.numbuffers;
+               videobuf_stop(&vpfe_dev->buffer_queue);
+               videobuf_mmap_free(&vpfe_dev->buffer_queue);
        }
 
        /* Decrement device usrs counter */
index 756da78bac23109dbd3e7da464243da381354231..8dea0b84a3ad66788ab29b2437303ebd3bc38d6f 100644 (file)
@@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
 
        common = &ch->common[VPIF_VIDEO_INDEX];
 
+       /* Disable channel as per its device type and channel id */
+       if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+               enable_channel0(0);
+               channel0_intr_enable(0);
+       }
+       if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
+               (2 == common->started)) {
+               enable_channel1(0);
+               channel1_intr_enable(0);
+       }
+       common->started = 0;
+
        /* release all active buffers */
        spin_lock_irqsave(&common->irqlock, flags);
+       if (common->cur_frm == common->next_frm) {
+               vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (common->cur_frm != NULL)
+                       vb2_buffer_done(&common->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (common->next_frm != NULL)
+                       vb2_buffer_done(&common->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&common->dma_queue)) {
                common->next_frm = list_entry(common->dma_queue.next,
                                                struct vpif_cap_buffer, list);
@@ -933,17 +956,6 @@ static int vpif_release(struct file *filep)
        if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
                /* Reset io_usrs member of channel object */
                common->io_usrs = 0;
-               /* Disable channel as per its device type and channel id */
-               if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
-                       enable_channel0(0);
-                       channel0_intr_enable(0);
-               }
-               if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
-                   (2 == common->started)) {
-                       enable_channel1(0);
-                       channel1_intr_enable(0);
-               }
-               common->started = 0;
                /* Free buffers allocated */
                vb2_queue_release(&common->buffer_queue);
                vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
index 0ac841e35aa48dd59bdf17b32f97afbe94b561b5..aed41edd050102e89248cecbb2607a4f45e6eb12 100644 (file)
@@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
 
        common = &ch->common[VPIF_VIDEO_INDEX];
 
+       /* Disable channel */
+       if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+               enable_channel2(0);
+               channel2_intr_enable(0);
+       }
+       if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
+               (2 == common->started)) {
+               enable_channel3(0);
+               channel3_intr_enable(0);
+       }
+       common->started = 0;
+
        /* release all active buffers */
        spin_lock_irqsave(&common->irqlock, flags);
+       if (common->cur_frm == common->next_frm) {
+               vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (common->cur_frm != NULL)
+                       vb2_buffer_done(&common->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (common->next_frm != NULL)
+                       vb2_buffer_done(&common->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&common->dma_queue)) {
                common->next_frm = list_entry(common->dma_queue.next,
                                                struct vpif_disp_buffer, list);
@@ -773,18 +796,6 @@ static int vpif_release(struct file *filep)
        if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
                /* Reset io_usrs member of channel object */
                common->io_usrs = 0;
-               /* Disable channel */
-               if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
-                       enable_channel2(0);
-                       channel2_intr_enable(0);
-               }
-               if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
-                   (2 == common->started)) {
-                       enable_channel3(0);
-                       channel3_intr_enable(0);
-               }
-               common->started = 0;
-
                /* Free buffers allocated */
                vb2_queue_release(&common->buffer_queue);
                vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
index da2fc86cc52433bd8f1c6b32a898baa87c2a1064..25dbf5b05a96186527cf520f568f03721b181a80 100644 (file)
@@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = {
        }, {
                .name           = "YUV 4:2:2 planar, Y/Cb/Cr",
                .fourcc         = V4L2_PIX_FMT_YUV422P,
-               .depth          = { 12 },
+               .depth          = { 16 },
                .color          = FIMC_FMT_YCBYCR422,
                .memplanes      = 1,
                .colplanes      = 3,
index 3aecaf4650942429eba75ee89cc33180aac4ae07..f0c9c42867de2e8cad570229f8592a6b445f1b47 100644 (file)
@@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
 
        f_ref = 2UL * priv->cfg->clock / r_val;
        n_val = div_u64_rem(f_vco, f_ref, &k_val);
-       k_val_reg = 1UL * k_val * (1 << 20) / f_ref;
+       k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
 
        ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
        if (ret < 0)
@@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
        if (ret < 0)
                goto err;
 
-       ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \
-                       fc2580_if_filter_lut[i].mul / 1000000000);
+       ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
+                       fc2580_if_filter_lut[i].mul, 1000000000));
        if (ret < 0)
                goto err;
 
index be38a9e637e08d20f673cb0c8de85fea608442d8..646c994521361ba7579466d814a8e7d2422397b5 100644 (file)
@@ -22,6 +22,7 @@
 #define FC2580_PRIV_H
 
 #include "fc2580.h"
+#include <linux/math64.h>
 
 struct fc2580_reg_val {
        u8 reg;
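
Both fc2580 hunks replace 32-bit arithmetic that silently overflowed (1UL stays 32 bits on 32-bit builds) with a 64-bit intermediate divided via div_u64(), which also avoids the 64-bit division helpers the kernel does not link against. The substitution in isolation, as a hypothetical helper:

    #include <linux/math64.h>
    #include <linux/types.h>

    /*
     * Scale a 32-bit clock by a 32-bit multiplier without truncation, then
     * divide. div_u64() keeps this buildable on 32-bit kernels where a
     * plain 64-bit '/' would emit a call to a missing __udivdi3 helper.
     * The caller is assumed to guarantee the result fits in 32 bits.
     */
    static u32 scale_and_divide(u32 clock, u32 mul, u32 div)
    {
            return div_u64((u64)clock * mul, div);
    }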
index 7407b8338ccfa33ce6a4179e5b9e99632a3f6ebf..bc38f03394cda0e1a397b390f354b620fe486a73 100644 (file)
@@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core
 ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
 ccflags-y += -I$(srctree)/drivers/media/tuners
 ccflags-y += -I$(srctree)/drivers/media/common
-ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr
index 61d196e8b3abde6dc0d97e26fe3ca7cae9292957..dcbd392e6efc8f38265d8b7fd805c4b38eff9410 100644 (file)
@@ -24,7 +24,6 @@
 
 #include "rtl2830.h"
 #include "rtl2832.h"
-#include "rtl2832_sdr.h"
 
 #include "qt1010.h"
 #include "mt2060.h"
 #include "tua9001.h"
 #include "r820t.h"
 
+/*
+ * The RTL2832_SDR module lives in staging. This logic avoids any hard
+ * dependency on the drivers/staging/ directory, so the mainline driver
+ * still builds even when the whole staging directory is missing.
+ */
+#include <media/v4l2-subdev.h>
+
+#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
+struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+       struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+       struct v4l2_subdev *sd);
+#else
+static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+       struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+       struct v4l2_subdev *sd)
+{
+       return NULL;
+}
+#endif
+
+#ifdef CONFIG_MEDIA_ATTACH
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+       void *__r = NULL; \
+       typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
+       if (__a) { \
+               __r = (void *) __a(ARGS); \
+               if (__r == NULL) \
+                       symbol_put(FUNCTION); \
+       } \
+       __r; \
+})
+
+#else
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+       FUNCTION(ARGS); \
+})
+
+#endif
+
 static int rtl28xxu_disable_rc;
 module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644);
 MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller");
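
The declarations added above keep rtl28xxu buildable without drivers/staging: when CONFIG_DVB_RTL2832_SDR is off, rtl2832_sdr_attach() collapses to an inline stub, and dvb_attach_sdr() resolves the symbol at runtime via symbol_request() when CONFIG_MEDIA_ATTACH is set. The same stub shape works for any optional helper; a generic sketch in which CONFIG_OPTIONAL_FEATURE and the function name are hypothetical:

    #include <linux/kconfig.h>
    #include <linux/errno.h>

    /*
     * Declare the real symbol only when its Kconfig option is enabled;
     * otherwise provide an inline stub so callers need no #ifdefs.
     */
    #if IS_ENABLED(CONFIG_OPTIONAL_FEATURE)
    int optional_feature_attach(void *priv);
    #else
    static inline int optional_feature_attach(void *priv)
    {
            return -ENODEV; /* feature not built in; caller carries on */
    }
    #endif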
@@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                                adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
                /* attach SDR */
-               dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+               dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
                                &rtl28xxu_rtl2832_fc0012_config, NULL);
                break;
        case TUNER_RTL2832_FC0013:
@@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                                adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
                /* attach SDR */
-               dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+               dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
                                &rtl28xxu_rtl2832_fc0013_config, NULL);
                break;
        case TUNER_RTL2832_E4000: {
@@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                        i2c_set_adapdata(i2c_adap_internal, d);
 
                        /* attach SDR */
-                       dvb_attach(rtl2832_sdr_attach, adap->fe[0],
+                       dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0],
                                        i2c_adap_internal,
                                        &rtl28xxu_rtl2832_e4000_config, sd);
                }
@@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
                                adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
                /* attach SDR */
-               dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+               dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
                                &rtl28xxu_rtl2832_r820t_config, NULL);
                break;
        case TUNER_RTL2832_R828D:
index 7277dbd2afcdb8c88629aafc2c9013b38cd79390..ecbcb39feb71ad21f7b765c36ac20858234d01f6 100644 (file)
@@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
        {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
        {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
-#if !IS_ENABLED(CONFIG_USB_SN9C102)
        {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
        {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
-#endif
        {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
        {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
        {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
index 04b2daf567bec232d384337491543151d890c69a..7e2411c36419c394b3d23a81eb5bdd4b5fee88c2 100644 (file)
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
 
 static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
+       if (get_user(kp->type, &up->type))
+               return -EFAULT;
+
        switch (kp->type) {
        case V4L2_BUF_TYPE_VIDEO_CAPTURE:
        case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
 
 static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
-       if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
-                       get_user(kp->type, &up->type))
-                       return -EFAULT;
+       if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+               return -EFAULT;
        return __get_v4l2_format32(kp, up);
 }
 
 static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
 {
        if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
-           copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
-                       return -EFAULT;
+           copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+               return -EFAULT;
        return __get_v4l2_format32(&kp->format, &up->format);
 }
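
In the compat-ioctl hunks above, __get_v4l2_format32() now fetches the type field itself with get_user() before switching on it, so get_v4l2_create32() only needs to copy the fields that precede the embedded format, i.e. up to offsetof(..., format). Copying a shared struct prefix with offsetof() is the usual compat idiom; a tiny illustration with a made-up layout, assuming the 32-bit and native prefixes are identical:

    #include <linux/uaccess.h>
    #include <linux/stddef.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct req32 {                          /* hypothetical 32-bit layout */
            __u32 index;
            __u32 count;
            __u32 memory;
            struct {
                    __u32 type;
                    __u8  raw[200];
            } format;
    };

    struct req {                            /* hypothetical native layout */
            __u32 index;
            __u32 count;
            __u32 memory;
            /* the native format member is converted separately */
    };

    /* Copy only the fields shared by both layouts in a single call. */
    static int copy_prefix_from_user(struct req *kp, struct req32 __user *up)
    {
            if (copy_from_user(kp, up, offsetof(struct req32, format)))
                    return -EFAULT;
            return 0;
    }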
 
index 110c03627051cb749ef92d853e47322714b1dd51..b59a17fb7c3e3f3eff43faecac3610f58df1dbd8 100644 (file)
@@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus,
                        node->full_name);
                return err;
        }
-       /* Convert bit width to byte width */
-       r.bus_width /= 8;
+
+       /*
+        * The bus width is encoded into the register as 0 for 8 bits,
+        * and 1 for 16 bits, so we do the necessary conversion here.
+        */
+       if (r.bus_width == 8)
+               r.bus_width = 0;
+       else if (r.bus_width == 16)
+               r.bus_width = 1;
+       else {
+               dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
+               return -EINVAL;
+       }
 
        err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
                                 &r.badr_skew);
index c9de3d598ea515279ff0f79f1214984b041266db..1d15735f9ef930ed18e384b1c63b7deb1fd42981 100644 (file)
@@ -338,28 +338,58 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
                int num_sg, bool read, int timeout)
 {
        struct completion trans_done;
-       int err = 0, count;
+       u8 dir;
+       int err = 0, i, count;
        long timeleft;
        unsigned long flags;
+       struct scatterlist *sg;
+       enum dma_data_direction dma_dir;
+       u32 val;
+       dma_addr_t addr;
+       unsigned int len;
+
+       dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);
+
+       /* don't transfer data during abort processing */
+       if (pcr->remove_pci)
+               return -EINVAL;
+
+       if ((sglist == NULL) || (num_sg <= 0))
+               return -EINVAL;
 
-       count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
+       if (read) {
+               dir = DEVICE_TO_HOST;
+               dma_dir = DMA_FROM_DEVICE;
+       } else {
+               dir = HOST_TO_DEVICE;
+               dma_dir = DMA_TO_DEVICE;
+       }
+
+       count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
        if (count < 1) {
                dev_err(&(pcr->pci->dev), "scatterlist map failed\n");
                return -EINVAL;
        }
        dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
 
+       val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
+       pcr->sgi = 0;
+       for_each_sg(sglist, sg, count, i) {
+               addr = sg_dma_address(sg);
+               len = sg_dma_len(sg);
+               rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
+       }
 
        spin_lock_irqsave(&pcr->lock, flags);
 
        pcr->done = &trans_done;
        pcr->trans_result = TRANS_NOT_READY;
        init_completion(&trans_done);
+       rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
+       rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
 
        spin_unlock_irqrestore(&pcr->lock, flags);
 
-       rtsx_pci_dma_transfer(pcr, sglist, count, read);
-
        timeleft = wait_for_completion_interruptible_timeout(
                        &trans_done, msecs_to_jiffies(timeout));
        if (timeleft <= 0) {
@@ -383,7 +413,7 @@ out:
        pcr->done = NULL;
        spin_unlock_irqrestore(&pcr->lock, flags);
 
-       rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
+       dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
 
        if ((err < 0) && (err != -ENODEV))
                rtsx_pci_stop_cmd(pcr);
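
rtsx_pci_transfer_data() above now performs the whole scatter-gather transfer itself: map the sglist, program one hardware descriptor per mapped segment, start the DMA, wait on a completion signalled from the interrupt handler, and unmap. A condensed sketch of that sequence; program_descriptor() and the trigger step stand in for the controller-specific register writes:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Stand-in for the controller-specific SG-table/descriptor write. */
    static void program_descriptor(dma_addr_t addr, unsigned int len, bool last)
    {
    }

    static int sg_dma_transfer(struct device *dev, struct scatterlist *sglist,
                               int num_sg, bool read, struct completion *done,
                               unsigned int timeout_ms)
    {
            enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
            struct scatterlist *sg;
            int i, count, err = 0;

            count = dma_map_sg(dev, sglist, num_sg, dir);
            if (count < 1)
                    return -EINVAL;

            for_each_sg(sglist, sg, count, i)
                    program_descriptor(sg_dma_address(sg), sg_dma_len(sg),
                                       i == count - 1);

            init_completion(done);
            /* write the descriptor table base and trigger registers here;
             * the interrupt handler calls complete(done) when the DMA ends.
             */

            if (!wait_for_completion_timeout(done, msecs_to_jiffies(timeout_ms)))
                    err = -ETIMEDOUT;

            dma_unmap_sg(dev, sglist, num_sg, dir);
            return err;
    }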
@@ -395,73 +425,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
 
-int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read)
-{
-       enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-       if (pcr->remove_pci)
-               return -EINVAL;
-
-       if ((sglist == NULL) || num_sg < 1)
-               return -EINVAL;
-
-       return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
-
-int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read)
-{
-       enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-       if (pcr->remove_pci)
-               return -EINVAL;
-
-       if (sglist == NULL || num_sg < 1)
-               return -EINVAL;
-
-       dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
-       return num_sg;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
-
-int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int sg_count, bool read)
-{
-       struct scatterlist *sg;
-       dma_addr_t addr;
-       unsigned int len;
-       int i;
-       u32 val;
-       u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
-       unsigned long flags;
-
-       if (pcr->remove_pci)
-               return -EINVAL;
-
-       if ((sglist == NULL) || (sg_count < 1))
-               return -EINVAL;
-
-       val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
-       pcr->sgi = 0;
-       for_each_sg(sglist, sg, sg_count, i) {
-               addr = sg_dma_address(sg);
-               len = sg_dma_len(sg);
-               rtsx_pci_add_sg_tbl(pcr, addr, len, i == sg_count - 1);
-       }
-
-       spin_lock_irqsave(&pcr->lock, flags);
-
-       rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
-       rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
-
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
-
 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 {
        int err;
@@ -873,8 +836,6 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
        int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
        /* Clear interrupt flag */
        rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
-       dev_dbg(&pcr->pci->dev, "=========== BIPR 0x%8x ==========\n", int_reg);
-
        if ((int_reg & pcr->bier) == 0) {
                spin_unlock(&pcr->lock);
                return IRQ_NONE;
@@ -905,28 +866,17 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
        }
 
        if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
-               if (int_reg & (TRANS_FAIL_INT | DELINK_INT))
+               if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
                        pcr->trans_result = TRANS_RESULT_FAIL;
-               else if (int_reg & TRANS_OK_INT)
+                       if (pcr->done)
+                               complete(pcr->done);
+               } else if (int_reg & TRANS_OK_INT) {
                        pcr->trans_result = TRANS_RESULT_OK;
-
-               if (pcr->done)
-                       complete(pcr->done);
-
-               if (int_reg & SD_EXIST) {
-                       struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
-                       if (slot && slot->done_transfer)
-                               slot->done_transfer(slot->p_dev);
-               }
-
-               if (int_reg & MS_EXIST) {
-                       struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
-                       if (slot && slot->done_transfer)
-                               slot->done_transfer(slot->p_dev);
+                       if (pcr->done)
+                               complete(pcr->done);
                }
        }
 
-
        if (pcr->card_inserted || pcr->card_removed)
                schedule_delayed_work(&pcr->carddet_work,
                                msecs_to_jiffies(200));
index 0a87e56913411a9abab46c99b13053abee3d96af..cc8d4a6099cdc602071af7616e1451896e7f6959 100644 (file)
@@ -448,7 +448,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,
 {
        struct scratch          *data = host->data;
        u8                      *cp = data->status;
-       u32                     arg = cmd->arg;
        int                     status;
        struct spi_transfer     *t;
 
@@ -465,14 +464,12 @@ mmc_spi_command_send(struct mmc_spi_host *host,
         * We init the whole buffer to all-ones, which is what we need
         * to write while we're reading (later) response data.
         */
-       memset(cp++, 0xff, sizeof(data->status));
+       memset(cp, 0xff, sizeof(data->status));
 
-       *cp++ = 0x40 | cmd->opcode;
-       *cp++ = (u8)(arg >> 24);
-       *cp++ = (u8)(arg >> 16);
-       *cp++ = (u8)(arg >> 8);
-       *cp++ = (u8)arg;
-       *cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01;
+       cp[1] = 0x40 | cmd->opcode;
+       put_unaligned_be32(cmd->arg, cp+2);
+       cp[6] = crc7_be(0, cp+1, 5) | 0x01;
+       cp += 7;
 
        /* Then, read up to 13 bytes (while writing all-ones):
         *  - N(CR) (== 1..8) bytes of all-ones
@@ -711,10 +708,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
         * so we have to cope with this situation and check the response
         * bit-by-bit. Arggh!!!
         */
-       pattern  = scratch->status[0] << 24;
-       pattern |= scratch->status[1] << 16;
-       pattern |= scratch->status[2] << 8;
-       pattern |= scratch->status[3];
+       pattern = get_unaligned_be32(scratch->status);
 
 	/* First 3 bits of the pattern are undefined */
        pattern |= 0xE0000000;
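
The mmc_spi hunks above build the command frame with put_unaligned_be32(), read the status word back with get_unaligned_be32(), and use the crc7_be() helper, which already returns the CRC in the upper seven bits so only the end bit needs to be ORed in. A sketch of the resulting 6-byte SD command token as a standalone helper, not the driver's own buffer layout:

    #include <linux/crc7.h>
    #include <linux/types.h>
    #include <asm/unaligned.h>

    /*
     * 0x40 | opcode, 32-bit big-endian argument, CRC7 in the top bits of
     * the final byte with the end bit (0x01) set.
     */
    static void build_sd_cmd(u8 buf[6], u8 opcode, u32 arg)
    {
            buf[0] = 0x40 | opcode;
            put_unaligned_be32(arg, buf + 1);
            buf[5] = crc7_be(0, buf, 5) | 0x01;
    }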
index 5fb994f9a653570d75f2b3deb6aab32129d17503..0b9ded13a3ae89d72a94e7e8e075f62496483f0e 100644 (file)
 #include <linux/mfd/rtsx_pci.h>
 #include <asm/unaligned.h>
 
-struct realtek_next {
-       unsigned int    sg_count;
-       s32             cookie;
-};
-
 struct realtek_pci_sdmmc {
        struct platform_device  *pdev;
        struct rtsx_pcr         *pcr;
        struct mmc_host         *mmc;
        struct mmc_request      *mrq;
-       struct mmc_command      *cmd;
-       struct mmc_data         *data;
-
-       spinlock_t              lock;
-       struct timer_list       timer;
-       struct tasklet_struct   cmd_tasklet;
-       struct tasklet_struct   data_tasklet;
-       struct tasklet_struct   finish_tasklet;
-
-       u8                      rsp_type;
-       u8                      rsp_len;
-       int                     sg_count;
+
+       struct mutex            host_mutex;
+
        u8                      ssc_depth;
        unsigned int            clock;
        bool                    vpclk;
@@ -62,13 +48,8 @@ struct realtek_pci_sdmmc {
        int                     power_state;
 #define SDMMC_POWER_ON         1
 #define SDMMC_POWER_OFF                0
-
-       struct realtek_next     next_data;
 };
 
-static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
-               struct mmc_request *mrq);
-
 static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
 {
        return &(host->pdev->dev);
@@ -105,95 +86,6 @@ static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
 #define sd_print_debug_regs(host)
 #endif /* DEBUG */
 
-static void sd_isr_done_transfer(struct platform_device *pdev)
-{
-       struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
-
-       spin_lock(&host->lock);
-       if (host->cmd)
-               tasklet_schedule(&host->cmd_tasklet);
-       if (host->data)
-               tasklet_schedule(&host->data_tasklet);
-       spin_unlock(&host->lock);
-}
-
-static void sd_request_timeout(unsigned long host_addr)
-{
-       struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->lock, flags);
-
-       if (!host->mrq) {
-               dev_err(sdmmc_dev(host), "error: no request exist\n");
-               goto out;
-       }
-
-       if (host->cmd)
-               host->cmd->error = -ETIMEDOUT;
-       if (host->data)
-               host->data->error = -ETIMEDOUT;
-
-       dev_dbg(sdmmc_dev(host), "timeout for request\n");
-
-out:
-       tasklet_schedule(&host->finish_tasklet);
-       spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static void sd_finish_request(unsigned long host_addr)
-{
-       struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-       struct rtsx_pcr *pcr = host->pcr;
-       struct mmc_request *mrq;
-       struct mmc_command *cmd;
-       struct mmc_data *data;
-       unsigned long flags;
-       bool any_error;
-
-       spin_lock_irqsave(&host->lock, flags);
-
-       del_timer(&host->timer);
-       mrq = host->mrq;
-       if (!mrq) {
-               dev_err(sdmmc_dev(host), "error: no request need finish\n");
-               goto out;
-       }
-
-       cmd = mrq->cmd;
-       data = mrq->data;
-
-       any_error = (mrq->sbc && mrq->sbc->error) ||
-               (mrq->stop && mrq->stop->error) ||
-               (cmd && cmd->error) || (data && data->error);
-
-       if (any_error) {
-               rtsx_pci_stop_cmd(pcr);
-               sd_clear_error(host);
-       }
-
-       if (data) {
-               if (any_error)
-                       data->bytes_xfered = 0;
-               else
-                       data->bytes_xfered = data->blocks * data->blksz;
-
-               if (!data->host_cookie)
-                       rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len,
-                                       data->flags & MMC_DATA_READ);
-
-       }
-
-       host->mrq = NULL;
-       host->cmd = NULL;
-       host->data = NULL;
-
-out:
-       spin_unlock_irqrestore(&host->lock, flags);
-       mutex_unlock(&pcr->pcr_mutex);
-       mmc_request_done(host->mmc, mrq);
-}
-
 static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
                u8 *buf, int buf_len, int timeout)
 {
@@ -311,7 +203,8 @@ static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
        return 0;
 }
 
-static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
+static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+               struct mmc_command *cmd)
 {
        struct rtsx_pcr *pcr = host->pcr;
        u8 cmd_idx = (u8)cmd->opcode;
@@ -319,14 +212,11 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
        int err = 0;
        int timeout = 100;
        int i;
+       u8 *ptr;
+       int stat_idx = 0;
        u8 rsp_type;
        int rsp_len = 5;
-       unsigned long flags;
-
-       if (host->cmd)
-               dev_err(sdmmc_dev(host), "error: cmd already exist\n");
-
-       host->cmd = cmd;
+       bool clock_toggled = false;
 
        dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
                        __func__, cmd_idx, arg);
@@ -361,8 +251,6 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
                err = -EINVAL;
                goto out;
        }
-       host->rsp_type = rsp_type;
-       host->rsp_len = rsp_len;
 
        if (rsp_type == SD_RSP_TYPE_R1b)
                timeout = 3000;
@@ -372,6 +260,8 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
                                0xFF, SD_CLK_TOGGLE_EN);
                if (err < 0)
                        goto out;
+
+               clock_toggled = true;
        }
 
        rtsx_pci_init_cmd(pcr);
@@ -395,60 +285,25 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
                /* Read data from ping-pong buffer */
                for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
                        rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+               stat_idx = 16;
        } else if (rsp_type != SD_RSP_TYPE_R0) {
                /* Read data from SD_CMDx registers */
                for (i = SD_CMD0; i <= SD_CMD4; i++)
                        rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+               stat_idx = 5;
        }
 
        rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0);
 
-       mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout));
-
-       spin_lock_irqsave(&pcr->lock, flags);
-       pcr->trans_result = TRANS_NOT_READY;
-       rtsx_pci_send_cmd_no_wait(pcr);
-       spin_unlock_irqrestore(&pcr->lock, flags);
-
-       return;
-
-out:
-       cmd->error = err;
-       tasklet_schedule(&host->finish_tasklet);
-}
-
-static void sd_get_rsp(unsigned long host_addr)
-{
-       struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-       struct rtsx_pcr *pcr = host->pcr;
-       struct mmc_command *cmd;
-       int i, err = 0, stat_idx;
-       u8 *ptr, rsp_type;
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->lock, flags);
-
-       cmd = host->cmd;
-       host->cmd = NULL;
-
-       if (!cmd) {
-               dev_err(sdmmc_dev(host), "error: cmd not exist\n");
+       err = rtsx_pci_send_cmd(pcr, timeout);
+       if (err < 0) {
+               sd_print_debug_regs(host);
+               sd_clear_error(host);
+               dev_dbg(sdmmc_dev(host),
+                       "rtsx_pci_send_cmd error (err = %d)\n", err);
                goto out;
        }
 
-       spin_lock(&pcr->lock);
-       if (pcr->trans_result == TRANS_NO_DEVICE)
-               err = -ENODEV;
-       else if (pcr->trans_result != TRANS_RESULT_OK)
-               err = -EINVAL;
-       spin_unlock(&pcr->lock);
-
-       if (err < 0)
-               goto out;
-
-       rsp_type = host->rsp_type;
-       stat_idx = host->rsp_len;
-
        if (rsp_type == SD_RSP_TYPE_R0) {
                err = 0;
                goto out;
@@ -485,106 +340,26 @@ static void sd_get_rsp(unsigned long host_addr)
                                cmd->resp[0]);
        }
 
-       if (cmd == host->mrq->sbc) {
-               sd_send_cmd(host, host->mrq->cmd);
-               spin_unlock_irqrestore(&host->lock, flags);
-               return;
-       }
-
-       if (cmd == host->mrq->stop)
-               goto out;
-
-       if (cmd->data) {
-               sd_start_multi_rw(host, host->mrq);
-               spin_unlock_irqrestore(&host->lock, flags);
-               return;
-       }
-
 out:
        cmd->error = err;
 
-       tasklet_schedule(&host->finish_tasklet);
-       spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
-                       struct mmc_data *data, struct realtek_next *next)
-{
-       struct rtsx_pcr *pcr = host->pcr;
-       int read = data->flags & MMC_DATA_READ;
-       int sg_count = 0;
-
-       if (!next && data->host_cookie &&
-               data->host_cookie != host->next_data.cookie) {
-               dev_err(sdmmc_dev(host),
-                       "error: invalid cookie data[%d] host[%d]\n",
-                       data->host_cookie, host->next_data.cookie);
-               data->host_cookie = 0;
-       }
-
-       if (next || (!next && data->host_cookie != host->next_data.cookie))
-               sg_count = rtsx_pci_dma_map_sg(pcr,
-                               data->sg, data->sg_len, read);
-       else
-               sg_count = host->next_data.sg_count;
-
-       if (next) {
-               next->sg_count = sg_count;
-               if (++next->cookie < 0)
-                       next->cookie = 1;
-               data->host_cookie = next->cookie;
-       }
-
-       return sg_count;
-}
-
-static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
-               bool is_first_req)
-{
-       struct realtek_pci_sdmmc *host = mmc_priv(mmc);
-       struct mmc_data *data = mrq->data;
-
-       if (data->host_cookie) {
-               dev_err(sdmmc_dev(host),
-                       "error: descard already cookie data[%d]\n",
-                       data->host_cookie);
-               data->host_cookie = 0;
-       }
-
-       dev_dbg(sdmmc_dev(host), "dma sg prepared: %d\n",
-               sd_pre_dma_transfer(host, data, &host->next_data));
-}
-
-static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
-               int err)
-{
-       struct realtek_pci_sdmmc *host = mmc_priv(mmc);
-       struct rtsx_pcr *pcr = host->pcr;
-       struct mmc_data *data = mrq->data;
-       int read = data->flags & MMC_DATA_READ;
-
-       rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read);
-       data->host_cookie = 0;
+       if (err && clock_toggled)
+               rtsx_pci_write_register(pcr, SD_BUS_STAT,
+                               SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
 }
 
-static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
-               struct mmc_request *mrq)
+static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
 {
        struct rtsx_pcr *pcr = host->pcr;
        struct mmc_host *mmc = host->mmc;
        struct mmc_card *card = mmc->card;
        struct mmc_data *data = mrq->data;
        int uhs = mmc_card_uhs(card);
-       int read = data->flags & MMC_DATA_READ;
+       int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
        u8 cfg2, trans_mode;
        int err;
        size_t data_len = data->blksz * data->blocks;
 
-       if (host->data)
-               dev_err(sdmmc_dev(host), "error: data already exist\n");
-
-       host->data = data;
-
        if (read) {
                cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
                        SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0;
@@ -635,54 +410,15 @@ static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
        rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
                        SD_TRANSFER_END, SD_TRANSFER_END);
 
-       mod_timer(&host->timer, jiffies + 10 * HZ);
        rtsx_pci_send_cmd_no_wait(pcr);
 
-       err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read);
-       if (err < 0) {
-               data->error = err;
-               tasklet_schedule(&host->finish_tasklet);
-       }
-       return 0;
-}
-
-static void sd_finish_multi_rw(unsigned long host_addr)
-{
-       struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-       struct rtsx_pcr *pcr = host->pcr;
-       struct mmc_data *data;
-       int err = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&host->lock, flags);
-
-       if (!host->data) {
-               dev_err(sdmmc_dev(host), "error: no data exist\n");
-               goto out;
-       }
-
-       data = host->data;
-       host->data = NULL;
-
-       if (pcr->trans_result == TRANS_NO_DEVICE)
-               err = -ENODEV;
-       else if (pcr->trans_result != TRANS_RESULT_OK)
-               err = -EINVAL;
-
+       err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000);
        if (err < 0) {
-               data->error = err;
-               goto out;
-       }
-
-       if (!host->mrq->sbc && data->stop) {
-               sd_send_cmd(host, data->stop);
-               spin_unlock_irqrestore(&host->lock, flags);
-               return;
+               sd_clear_error(host);
+               return err;
        }
 
-out:
-       tasklet_schedule(&host->finish_tasklet);
-       spin_unlock_irqrestore(&host->lock, flags);
+       return 0;
 }
 
 static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
@@ -901,13 +637,6 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
        return 0;
 }
 
-static inline bool sd_use_muti_rw(struct mmc_command *cmd)
-{
-       return mmc_op_multi(cmd->opcode) ||
-               (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
-               (cmd->opcode == MMC_WRITE_BLOCK);
-}
-
 static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -916,14 +645,6 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
        struct mmc_data *data = mrq->data;
        unsigned int data_size = 0;
        int err;
-       unsigned long flags;
-
-       mutex_lock(&pcr->pcr_mutex);
-       spin_lock_irqsave(&host->lock, flags);
-
-       if (host->mrq)
-               dev_err(sdmmc_dev(host), "error: request already exist\n");
-       host->mrq = mrq;
 
        if (host->eject) {
                cmd->error = -ENOMEDIUM;
@@ -936,6 +657,8 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
                goto finish;
        }
 
+       mutex_lock(&pcr->pcr_mutex);
+
        rtsx_pci_start_run(pcr);
 
        rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth,
@@ -944,28 +667,46 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
        rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
                        CARD_SHARE_MASK, CARD_SHARE_48_SD);
 
+       mutex_lock(&host->host_mutex);
+       host->mrq = mrq;
+       mutex_unlock(&host->host_mutex);
+
        if (mrq->data)
                data_size = data->blocks * data->blksz;
 
-       if (sd_use_muti_rw(cmd))
-               host->sg_count = sd_pre_dma_transfer(host, data, NULL);
+       if (!data_size || mmc_op_multi(cmd->opcode) ||
+                       (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
+                       (cmd->opcode == MMC_WRITE_BLOCK)) {
+               sd_send_cmd_get_rsp(host, cmd);
 
-       if (!data_size || sd_use_muti_rw(cmd)) {
-               if (mrq->sbc)
-                       sd_send_cmd(host, mrq->sbc);
-               else
-                       sd_send_cmd(host, cmd);
-               spin_unlock_irqrestore(&host->lock, flags);
+               if (!cmd->error && data_size) {
+                       sd_rw_multi(host, mrq);
+
+                       if (mmc_op_multi(cmd->opcode) && mrq->stop)
+                               sd_send_cmd_get_rsp(host, mrq->stop);
+               }
        } else {
-               spin_unlock_irqrestore(&host->lock, flags);
                sd_normal_rw(host, mrq);
-               tasklet_schedule(&host->finish_tasklet);
        }
-       return;
+
+       if (mrq->data) {
+               if (cmd->error || data->error)
+                       data->bytes_xfered = 0;
+               else
+                       data->bytes_xfered = data->blocks * data->blksz;
+       }
+
+       mutex_unlock(&pcr->pcr_mutex);
 
 finish:
-       tasklet_schedule(&host->finish_tasklet);
-       spin_unlock_irqrestore(&host->lock, flags);
+       if (cmd->error)
+               dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error);
+
+       mutex_lock(&host->host_mutex);
+       host->mrq = NULL;
+       mutex_unlock(&host->host_mutex);
+
+       mmc_request_done(mmc, mrq);
 }
 
 static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
@@ -1400,8 +1141,6 @@ out:
 }
 
 static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
-       .pre_req = sdmmc_pre_req,
-       .post_req = sdmmc_post_req,
        .request = sdmmc_request,
        .set_ios = sdmmc_set_ios,
        .get_ro = sdmmc_get_ro,
@@ -1465,7 +1204,6 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
        struct realtek_pci_sdmmc *host;
        struct rtsx_pcr *pcr;
        struct pcr_handle *handle = pdev->dev.platform_data;
-       unsigned long host_addr;
 
        if (!handle)
                return -ENXIO;
@@ -1489,15 +1227,8 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
        pcr->slots[RTSX_SD_CARD].p_dev = pdev;
        pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
 
-       host_addr = (unsigned long)host;
-       host->next_data.cookie = 1;
-       setup_timer(&host->timer, sd_request_timeout, host_addr);
-       tasklet_init(&host->cmd_tasklet, sd_get_rsp, host_addr);
-       tasklet_init(&host->data_tasklet, sd_finish_multi_rw, host_addr);
-       tasklet_init(&host->finish_tasklet, sd_finish_request, host_addr);
-       spin_lock_init(&host->lock);
+       mutex_init(&host->host_mutex);
 
-       pcr->slots[RTSX_SD_CARD].done_transfer = sd_isr_done_transfer;
        realtek_init_host(host);
 
        mmc_add_host(mmc);
@@ -1510,8 +1241,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
        struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
        struct rtsx_pcr *pcr;
        struct mmc_host *mmc;
-       struct mmc_request *mrq;
-       unsigned long flags;
 
        if (!host)
                return 0;
@@ -1519,33 +1248,22 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
        pcr = host->pcr;
        pcr->slots[RTSX_SD_CARD].p_dev = NULL;
        pcr->slots[RTSX_SD_CARD].card_event = NULL;
-       pcr->slots[RTSX_SD_CARD].done_transfer = NULL;
        mmc = host->mmc;
-       mrq = host->mrq;
 
-       spin_lock_irqsave(&host->lock, flags);
+       mutex_lock(&host->host_mutex);
        if (host->mrq) {
                dev_dbg(&(pdev->dev),
                        "%s: Controller removed during transfer\n",
                        mmc_hostname(mmc));
 
-               if (mrq->sbc)
-                       mrq->sbc->error = -ENOMEDIUM;
-               if (mrq->cmd)
-                       mrq->cmd->error = -ENOMEDIUM;
-               if (mrq->stop)
-                       mrq->stop->error = -ENOMEDIUM;
-               if (mrq->data)
-                       mrq->data->error = -ENOMEDIUM;
+               rtsx_pci_complete_unfinished_transfer(pcr);
 
-               tasklet_schedule(&host->finish_tasklet);
+               host->mrq->cmd->error = -ENOMEDIUM;
+               if (host->mrq->stop)
+                       host->mrq->stop->error = -ENOMEDIUM;
+               mmc_request_done(mmc, host->mrq);
        }
-       spin_unlock_irqrestore(&host->lock, flags);
-
-       del_timer_sync(&host->timer);
-       tasklet_kill(&host->cmd_tasklet);
-       tasklet_kill(&host->data_tasklet);
-       tasklet_kill(&host->finish_tasklet);
+       mutex_unlock(&host->host_mutex);
 
        mmc_remove_host(mmc);
        host->eject = true;
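
For reference, the hunks above replace the driver's tasklet/timer completion path
(run under a spinlock) with a fully synchronous request handler serialized by
mutexes.  A minimal sketch of that pattern follows; example_host, ctrl_mutex and
the example_* helpers are illustrative stand-ins, while mmc_priv(), mutex_lock(),
mutex_unlock() and mmc_request_done() are the real kernel APIs used by the patch:

    /* Illustrative sketch only -- not the driver's actual code. */
    static void example_request(struct mmc_host *mmc, struct mmc_request *mrq)
    {
            struct example_host *host = mmc_priv(mmc);

            mutex_lock(&host->ctrl_mutex);          /* serialize access to the card reader */
            example_send_cmd(host, mrq->cmd);       /* hypothetical: issue command, wait for response */
            if (!mrq->cmd->error && mrq->data)
                    example_transfer(host, mrq);    /* hypothetical: blocking data transfer */
            mutex_unlock(&host->ctrl_mutex);

            mutex_lock(&host->host_mutex);          /* publish that no request is in flight */
            host->mrq = NULL;
            mutex_unlock(&host->host_mutex);

            mmc_request_done(mmc, mrq);             /* completion is now reported inline */
    }
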
index 4615d79fc93f795c869687117744e01653219478..b922c8efcf4012376548c15e4a3af50562d47095 100644 (file)
@@ -523,6 +523,7 @@ static struct nand_ecclayout hwecc4_2048 = {
 #if defined(CONFIG_OF)
 static const struct of_device_id davinci_nand_of_match[] = {
        {.compatible = "ti,davinci-nand", },
+       {.compatible = "ti,keystone-nand", },
        {},
 };
 MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
@@ -581,6 +582,11 @@ static struct davinci_nand_pdata
                    of_property_read_bool(pdev->dev.of_node,
                        "ti,davinci-nand-use-bbt"))
                        pdata->bbt_options = NAND_BBT_USE_FLASH;
+
+               if (of_device_is_compatible(pdev->dev.of_node,
+                                           "ti,keystone-nand")) {
+                       pdata->options |= NAND_NO_SUBPAGE_WRITE;
+               }
        }
 
        return dev_get_platdata(&pdev->dev);
index 7ff473c871a9a249bd02007c2176ec5c471d7626..8d659e6a1b4c0899e32706b8bfa7fe3270ff715a 100644 (file)
@@ -431,7 +431,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
         * Create one workqueue per volume (per registered block device).
         * Remember, workqueues are cheap, they're not threads.
         */
-       dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
+       dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
        if (!dev->wq)
                goto out_free_queue;
        INIT_WORK(&dev->work, ubiblock_do_work);
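
The one-line change above matters because alloc_workqueue() takes a printf-style
format string as its first argument (followed by flags, max_active and the format
arguments).  Passing a device-derived name such as gd->disk_name directly as the
format would misparse any '%' it happened to contain, so the name is routed through
a literal "%s" instead:

    /* Safe: the untrusted name is a format argument, not the format string. */
    dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);

    /* Unsafe if the name ever contains '%':
     *   dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
     */
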
index 02317c1c02385914c94175fa8757089c677e2b94..0f3425dac91046300f93587d4f341e080c98e322 100644 (file)
@@ -671,6 +671,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 
        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
        self_check_in_wl_tree(ubi, e, &ubi->free);
+       ubi->free_count--;
+       ubi_assert(ubi->free_count >= 0);
        rb_erase(&e->u.rb, &ubi->free);
 
        return e;
@@ -684,6 +686,9 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
        peb = __wl_get_peb(ubi);
        spin_unlock(&ubi->wl_lock);
 
+       if (peb < 0)
+               return peb;
+
        err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
                                    ubi->peb_size - ubi->vid_hdr_aloffset);
        if (err) {
@@ -1068,6 +1073,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
                        /* Give the unused PEB back */
                        wl_tree_add(e2, &ubi->free);
+                       ubi->free_count++;
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
index b667a51ed21517a3ee6cf2be6ab4c7e306a713a2..0dfeaf5da3f2c914e2fd43e40d4bf4f3d8aff079 100644 (file)
@@ -157,7 +157,7 @@ static inline struct aggregator *__get_first_agg(struct port *port)
 
        rcu_read_lock();
        first_slave = bond_first_slave_rcu(bond);
-       agg = first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+       agg = first_slave ? &(SLAVE_AD_INFO(first_slave)->aggregator) : NULL;
        rcu_read_unlock();
 
        return agg;
@@ -192,7 +192,7 @@ static inline void __enable_port(struct port *port)
 {
        struct slave *slave = port->slave;
 
-       if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
+       if ((slave->link == BOND_LINK_UP) && bond_slave_is_up(slave))
                bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
 }
 
@@ -241,7 +241,7 @@ static inline int __check_agg_selection_timer(struct port *port)
  */
 static inline void __get_state_machine_lock(struct port *port)
 {
-       spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+       spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
 }
 
 /**
@@ -250,7 +250,7 @@ static inline void __get_state_machine_lock(struct port *port)
  */
 static inline void __release_state_machine_lock(struct port *port)
 {
-       spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+       spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
 }
 
 /**
@@ -350,7 +350,7 @@ static u8 __get_duplex(struct port *port)
 static inline void __initialize_port_locks(struct slave *slave)
 {
        /* make sure it isn't called twice */
-       spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
+       spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock));
 }
 
 /* Conversions */
@@ -688,8 +688,8 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
        struct slave *slave;
 
        bond_for_each_slave_rcu(bond, slave, iter)
-               if (SLAVE_AD_INFO(slave).aggregator.is_active)
-                       return &(SLAVE_AD_INFO(slave).aggregator);
+               if (SLAVE_AD_INFO(slave)->aggregator.is_active)
+                       return &(SLAVE_AD_INFO(slave)->aggregator);
 
        return NULL;
 }
@@ -1293,7 +1293,7 @@ static void ad_port_selection_logic(struct port *port)
        }
        /* search on all aggregators for a suitable aggregator for this port */
        bond_for_each_slave(bond, slave, iter) {
-               aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+               aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
 
                /* keep a free aggregator for later use(if needed) */
                if (!aggregator->lag_ports) {
@@ -1504,7 +1504,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
        best = (active && agg_device_up(active)) ? active : NULL;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               agg = &(SLAVE_AD_INFO(slave).aggregator);
+               agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
                agg->is_active = 0;
 
@@ -1549,7 +1549,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
                         best->slave ? best->slave->dev->name : "NULL");
 
                bond_for_each_slave_rcu(bond, slave, iter) {
-                       agg = &(SLAVE_AD_INFO(slave).aggregator);
+                       agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
                        pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
                                 agg->aggregator_identifier, agg->num_of_ports,
@@ -1840,16 +1840,16 @@ void bond_3ad_bind_slave(struct slave *slave)
        struct aggregator *aggregator;
 
        /* check that the slave has not been initialized yet. */
-       if (SLAVE_AD_INFO(slave).port.slave != slave) {
+       if (SLAVE_AD_INFO(slave)->port.slave != slave) {
 
                /* port initialization */
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
 
                ad_initialize_port(port, bond->params.lacp_fast);
 
                __initialize_port_locks(slave);
                port->slave = slave;
-               port->actor_port_number = SLAVE_AD_INFO(slave).id;
+               port->actor_port_number = SLAVE_AD_INFO(slave)->id;
                /* key is determined according to the link speed, duplex and user key (which
                 * is not yet supported)
                 */
@@ -1874,7 +1874,7 @@ void bond_3ad_bind_slave(struct slave *slave)
                __disable_port(port);
 
                /* aggregator initialization */
-               aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+               aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
 
                ad_initialize_agg(aggregator);
 
@@ -1903,8 +1903,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
        struct slave *slave_iter;
        struct list_head *iter;
 
-       aggregator = &(SLAVE_AD_INFO(slave).aggregator);
-       port = &(SLAVE_AD_INFO(slave).port);
+       aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
+       port = &(SLAVE_AD_INFO(slave)->port);
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
@@ -1932,7 +1932,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                    (aggregator->lag_ports->next_port_in_aggregator)) {
                        /* find new aggregator for the related port(s) */
                        bond_for_each_slave(bond, slave_iter, iter) {
-                               new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+                               new_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
                                /* if the new aggregator is empty, or it is
                                 * connected to our port only
                                 */
@@ -2010,7 +2010,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
        /* find the aggregator that this port is connected to */
        bond_for_each_slave(bond, slave_iter, iter) {
-               temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+               temp_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
                prev_port = NULL;
                /* search the port in the aggregator's related ports */
                for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2076,7 +2076,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        if (BOND_AD_INFO(bond).agg_select_timer &&
            !(--BOND_AD_INFO(bond).agg_select_timer)) {
                slave = bond_first_slave_rcu(bond);
-               port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+               port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
 
                /* select the active aggregator for the bond */
                if (port) {
@@ -2094,7 +2094,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 
        /* for each port run the state machines */
        bond_for_each_slave_rcu(bond, slave, iter) {
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
                if (!port->slave) {
                        pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
                                            bond->dev->name);
@@ -2155,7 +2155,7 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
 
        if (length >= sizeof(struct lacpdu)) {
 
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
 
                if (!port->slave) {
                        pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
@@ -2212,7 +2212,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 {
        struct port *port;
 
-       port = &(SLAVE_AD_INFO(slave).port);
+       port = &(SLAVE_AD_INFO(slave)->port);
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
@@ -2245,7 +2245,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 {
        struct port *port;
 
-       port = &(SLAVE_AD_INFO(slave).port);
+       port = &(SLAVE_AD_INFO(slave)->port);
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
@@ -2279,7 +2279,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 {
        struct port *port;
 
-       port = &(SLAVE_AD_INFO(slave).port);
+       port = &(SLAVE_AD_INFO(slave)->port);
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
@@ -2347,7 +2347,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
                ret = 0;
                goto out;
        }
-       active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
+       active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
        if (active) {
                /* are enough slaves available to consider link up? */
                if (active->num_of_ports < bond->params.min_links) {
@@ -2384,7 +2384,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
        struct port *port;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
                if (port->aggregator && port->aggregator->is_active) {
                        aggregator = port->aggregator;
                        break;
@@ -2440,22 +2440,22 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
                goto err_free;
        }
 
-       slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+       slave_agg_no = bond_xmit_hash(bond, skb) % slaves_in_agg;
        first_ok_slave = NULL;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               agg = SLAVE_AD_INFO(slave).port.aggregator;
+               agg = SLAVE_AD_INFO(slave)->port.aggregator;
                if (!agg || agg->aggregator_identifier != agg_id)
                        continue;
 
                if (slave_agg_no >= 0) {
-                       if (!first_ok_slave && SLAVE_IS_OK(slave))
+                       if (!first_ok_slave && bond_slave_can_tx(slave))
                                first_ok_slave = slave;
                        slave_agg_no--;
                        continue;
                }
 
-               if (SLAVE_IS_OK(slave)) {
+               if (bond_slave_can_tx(slave)) {
                        bond_dev_queue_xmit(bond, skb, slave->dev);
                        goto out;
                }
@@ -2522,7 +2522,7 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
 
        lacp_fast = bond->params.lacp_fast;
        bond_for_each_slave(bond, slave, iter) {
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
                __get_state_machine_lock(port);
                if (lacp_fast)
                        port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
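
Throughout this file the SLAVE_AD_INFO() accesses switch from struct-member syntax
(".port", ".aggregator") to pointer syntax ("->port", "->aggregator").  The
assumption behind this, consistent with the bond_alloc_slave()/bond_free_slave()
helpers added in bond_main.c below, is that the per-slave 802.3ad state is now a
separately kzalloc'd object rather than a member embedded in struct slave, so the
accessor presumably resolves to something like the sketch below (not quoted from
the patch):

    /* Assumed accessor shape after the change -- illustrative only. */
    #define SLAVE_AD_INFO(slave)    ((slave)->ad_info)   /* struct ad_slave_info * */

    struct ad_slave_info *ad = SLAVE_AD_INFO(slave);

    spin_lock_bh(&ad->state_machine_lock);
    ad->port.actor_port_number = ad->id;
    spin_unlock_bh(&ad->state_machine_lock);

The bond_xmit_hash() change in bond_3ad_xmit_xor() likewise drops the count
argument and applies the modulo over slaves_in_agg at the call site.
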
index 9f69e818b0009db7881b3f8c862393836e5a604b..7bbbf1ca08873ea51cec5c92a1072d8fe543251a 100644 (file)
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -228,7 +229,7 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 
        /* Find the slave with the largest gap */
        bond_for_each_slave_rcu(bond, slave, iter) {
-               if (SLAVE_IS_OK(slave)) {
+               if (bond_slave_can_tx(slave)) {
                        long long gap = compute_gap(slave);
 
                        if (max_gap < gap) {
@@ -383,7 +384,7 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
        bool found = false;
 
        bond_for_each_slave(bond, slave, iter) {
-               if (!SLAVE_IS_OK(slave))
+               if (!bond_slave_can_tx(slave))
                        continue;
                if (!found) {
                        if (!before || before->speed < slave->speed)
@@ -416,7 +417,7 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
        bool found = false;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               if (!SLAVE_IS_OK(slave))
+               if (!bond_slave_can_tx(slave))
                        continue;
                if (!found) {
                        if (!before || before->speed < slave->speed)
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
        bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-       alb_send_learning_packets(bond->curr_active_slave, addr);
+       alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-                           u16 vid)
+                           __be16 vlan_proto, u16 vid)
 {
        struct learning_pkt pkt;
        struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        skb->dev = slave->dev;
 
        if (vid) {
-               skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+               skb = vlan_put_tag(skb, vlan_proto, vid);
                if (!skb) {
                        pr_err("%s: Error: failed to insert VLAN tag\n",
                               slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
        struct net_device *upper;
        struct list_head *iter;
 
        /* send untagged */
-       alb_send_lp_vid(slave, mac_addr, 0);
+       alb_send_lp_vid(slave, mac_addr, 0, 0);
 
        /* loop through vlans and send one packet for each */
        rcu_read_lock();
        netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-               if (upper->priv_flags & IFF_802_1Q_VLAN)
-                       alb_send_lp_vid(slave, mac_addr,
-                                       vlan_dev_vlan_id(upper));
+               if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+                       if (strict_match &&
+                           ether_addr_equal_64bits(mac_addr,
+                                                   upper->dev_addr)) {
+                               alb_send_lp_vid(slave, mac_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       } else if (!strict_match) {
+                               alb_send_lp_vid(slave, upper->dev_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       }
+               }
        }
        rcu_read_unlock();
 }
@@ -1057,7 +1068,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
        struct net_device *dev = slave->dev;
        struct sockaddr s_addr;
 
-       if (slave->bond->params.mode == BOND_MODE_TLB) {
+       if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
                memcpy(dev->dev_addr, addr, dev->addr_len);
                return 0;
        }
@@ -1100,14 +1111,14 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
 static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
                                struct slave *slave2)
 {
-       int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+       int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
        struct slave *disabled_slave = NULL;
 
        ASSERT_RTNL();
 
        /* fasten the change in the switch */
-       if (SLAVE_IS_OK(slave1)) {
-               alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+       if (bond_slave_can_tx(slave1)) {
+               alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1118,8 +1129,8 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
                disabled_slave = slave1;
        }
 
-       if (SLAVE_IS_OK(slave2)) {
-               alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+       if (bond_slave_can_tx(slave2)) {
+               alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@ -1347,6 +1358,77 @@ void bond_alb_deinitialize(struct bonding *bond)
                rlb_deinitialize(bond);
 }
 
+static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+               struct slave *tx_slave)
+{
+       struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct ethhdr *eth_data = eth_hdr(skb);
+
+       if (!tx_slave) {
+               /* unbalanced or unassigned, send through primary */
+               tx_slave = rcu_dereference(bond->curr_active_slave);
+               if (bond->params.tlb_dynamic_lb)
+                       bond_info->unbalanced_load += skb->len;
+       }
+
+       if (tx_slave && bond_slave_can_tx(tx_slave)) {
+               if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+                       ether_addr_copy(eth_data->h_source,
+                                       tx_slave->dev->dev_addr);
+               }
+
+               bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+               goto out;
+       }
+
+       if (tx_slave && bond->params.tlb_dynamic_lb) {
+               _lock_tx_hashtbl(bond);
+               __tlb_clear_slave(bond, tx_slave, 0);
+               _unlock_tx_hashtbl(bond);
+       }
+
+       /* no suitable interface, frame not sent */
+       dev_kfree_skb_any(skb);
+out:
+       return NETDEV_TX_OK;
+}
+
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+       struct bonding *bond = netdev_priv(bond_dev);
+       struct ethhdr *eth_data;
+       struct slave *tx_slave = NULL;
+       u32 hash_index;
+
+       skb_reset_mac_header(skb);
+       eth_data = eth_hdr(skb);
+
+       /* Do not TX balance any multicast or broadcast */
+       if (!is_multicast_ether_addr(eth_data->h_dest)) {
+               switch (skb->protocol) {
+               case htons(ETH_P_IP):
+               case htons(ETH_P_IPX):
+                   /* In case of IPX, it will fall back to the L2 hash */
+               case htons(ETH_P_IPV6):
+                       hash_index = bond_xmit_hash(bond, skb);
+                       if (bond->params.tlb_dynamic_lb) {
+                               tx_slave = tlb_choose_channel(bond,
+                                                             hash_index & 0xFF,
+                                                             skb->len);
+                       } else {
+                               struct list_head *iter;
+                               int idx = hash_index % bond->slave_cnt;
+
+                               bond_for_each_slave_rcu(bond, tx_slave, iter)
+                                       if (--idx < 0)
+                                               break;
+                       }
+                       break;
+               }
+       }
+       return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
@@ -1355,7 +1437,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        struct slave *tx_slave = NULL;
        static const __be32 ip_bcast = htonl(0xffffffff);
        int hash_size = 0;
-       int do_tx_balance = 1;
+       bool do_tx_balance = true;
        u32 hash_index = 0;
        const u8 *hash_start = NULL;
        struct ipv6hdr *ip6hdr;
@@ -1370,7 +1452,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
                    (iph->daddr == ip_bcast) ||
                    (iph->protocol == IPPROTO_IGMP)) {
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
                hash_start = (char *)&(iph->daddr);
@@ -1382,7 +1464,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                 * that here just in case.
                 */
                if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1390,7 +1472,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                 * broadcasts in IPv4.
                 */
                if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1400,7 +1482,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                 */
                ip6hdr = ipv6_hdr(skb);
                if (ipv6_addr_any(&ip6hdr->saddr)) {
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1410,7 +1492,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        case ETH_P_IPX:
                if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
                        /* something is wrong with this packet */
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1419,7 +1501,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                         * this family since it has an "ARP" like
                         * mechanism
                         */
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1427,12 +1509,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                hash_size = ETH_ALEN;
                break;
        case ETH_P_ARP:
-               do_tx_balance = 0;
+               do_tx_balance = false;
                if (bond_info->rlb_enabled)
                        tx_slave = rlb_arp_xmit(skb, bond);
                break;
        default:
-               do_tx_balance = 0;
+               do_tx_balance = false;
                break;
        }
 
@@ -1441,32 +1523,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
        }
 
-       if (!tx_slave) {
-               /* unbalanced or unassigned, send through primary */
-               tx_slave = rcu_dereference(bond->curr_active_slave);
-               bond_info->unbalanced_load += skb->len;
-       }
-
-       if (tx_slave && SLAVE_IS_OK(tx_slave)) {
-               if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
-                       ether_addr_copy(eth_data->h_source,
-                                       tx_slave->dev->dev_addr);
-               }
-
-               bond_dev_queue_xmit(bond, skb, tx_slave->dev);
-               goto out;
-       }
-
-       if (tx_slave) {
-               _lock_tx_hashtbl(bond);
-               __tlb_clear_slave(bond, tx_slave, 0);
-               _unlock_tx_hashtbl(bond);
-       }
-
-       /* no suitable interface, frame not sent */
-       dev_kfree_skb_any(skb);
-out:
-       return NETDEV_TX_OK;
+       return bond_do_alb_xmit(skb, bond, tx_slave);
 }
 
 void bond_alb_monitor(struct work_struct *work)
@@ -1490,6 +1547,8 @@ void bond_alb_monitor(struct work_struct *work)
 
        /* send learning packets */
        if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+               bool strict_match;
+
                /* change of curr_active_slave involves swapping of mac addresses.
                 * in order to avoid this swapping from happening while
                 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1556,15 @@ void bond_alb_monitor(struct work_struct *work)
                 */
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave_rcu(bond, slave, iter)
-                       alb_send_learning_packets(slave, slave->dev->dev_addr);
+               bond_for_each_slave_rcu(bond, slave, iter) {
+                       /* If updating current_active, use all currently
+                        * used mac addresses (!strict_match).  Otherwise, only
+                        * use the mac of the slave device.
+                        */
+                       strict_match = (slave != bond->curr_active_slave);
+                       alb_send_learning_packets(slave, slave->dev->dev_addr,
+                                                 strict_match);
+               }
 
                read_unlock(&bond->curr_slave_lock);
 
@@ -1699,7 +1765,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        /* in TLB mode, the slave might flip down/up with the old dev_addr,
         * and thus filter bond->dev_addr's packets, so force bond's mac
         */
-       if (bond->params.mode == BOND_MODE_TLB) {
+       if (BOND_MODE(bond) == BOND_MODE_TLB) {
                struct sockaddr sa;
                u8 tmp_addr[ETH_ALEN];
 
@@ -1721,7 +1787,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        } else {
                /* set the new_slave to the bond mac address */
                alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-               alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+               alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+                                         false);
        }
 
        write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1831,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
                alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
                read_lock(&bond->lock);
-               alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+               alb_send_learning_packets(bond->curr_active_slave,
+                                         bond_dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform clients mac address has changed */
                        rlb_req_update_slave_clients(bond, bond->curr_active_slave);
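
The new bond_tlb_xmit() above chooses a transmit slave either from the TLB hash
table (when tlb_dynamic_lb is set) or, in the static case, by walking the slave
list to the position given by the hash modulo the slave count, after which
bond_do_alb_xmit() falls back to curr_active_slave if nothing suitable was found.
A small self-contained model of the static selection, using hypothetical
userspace types rather than the bonding structures:

    #include <stdio.h>

    struct example_slave { const char *name; };

    /* Return the Nth slave, where N = hash % count, mirroring the
     * "--idx < 0" walk in the non-dynamic branch of bond_tlb_xmit(). */
    static const struct example_slave *pick_slave(const struct example_slave *s,
                                                  int count, unsigned int hash)
    {
            int idx = hash % count;
            int i;

            for (i = 0; i < count; i++)
                    if (--idx < 0)
                            return &s[i];
            return NULL;
    }

    int main(void)
    {
            struct example_slave slaves[] = { { "eth0" }, { "eth1" }, { "eth2" } };

            printf("hash 7 -> %s\n", pick_slave(slaves, 3, 7)->name);   /* eth1 */
            return 0;
    }
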
index e09dd4bfafffcf585b8f853f7661e2e416c58602..5fc76c01636cb6eb0e9e96d14fc0c79566741900 100644 (file)
@@ -175,6 +175,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
 void bond_alb_monitor(struct work_struct *);
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
index 2d3f7fa541ffe755fc1bf5f9e51aeaa464b4e032..658e761c4568dff39ef18db8c548f8ed349c8ee0 100644 (file)
@@ -23,7 +23,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
        struct rlb_client_info *client_info;
        u32 hash_index;
 
-       if (bond->params.mode != BOND_MODE_ALB)
+       if (BOND_MODE(bond) != BOND_MODE_ALB)
                return 0;
 
        seq_printf(m, "SourceIP        DestinationIP   "
index 69aff72c895716fe6c579d2bf7f46c79ddca2a36..59a12c61ceb4eeb758c393126b56004a57a12869 100644 (file)
@@ -343,7 +343,7 @@ static int bond_set_carrier(struct bonding *bond)
        if (!bond_has_slaves(bond))
                goto down;
 
-       if (bond->params.mode == BOND_MODE_8023AD)
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                return bond_3ad_set_carrier(bond);
 
        bond_for_each_slave(bond, slave, iter) {
@@ -497,7 +497,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
        struct list_head *iter;
        int err = 0;
 
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
                        err = dev_set_promiscuity(bond->curr_active_slave->dev,
@@ -523,7 +523,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
        struct list_head *iter;
        int err = 0;
 
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
                        err = dev_set_allmulti(bond->curr_active_slave->dev,
@@ -574,7 +574,7 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
        dev_uc_unsync(slave_dev, bond_dev);
        dev_mc_unsync(slave_dev, bond_dev);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                /* del lacpdu mc addr from mc list */
                u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
 
@@ -585,8 +585,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
 /*--------------------------- Active slave change ---------------------------*/
 
 /* Update the hardware address list and promisc/allmulti for the new and
- * old active slaves (if any).  Modes that are !USES_PRIMARY keep all
- * slaves up date at all times; only the USES_PRIMARY modes need to call
+ * old active slaves (if any).  Modes that are not using primary keep all
+ * slaves up to date at all times; only the modes that use primary need to call
  * this function to swap these settings during a failover.
  */
 static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
@@ -747,7 +747,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
        bond_for_each_slave(bond, slave, iter) {
                if (slave->link == BOND_LINK_UP)
                        return slave;
-               if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+               if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
                    slave->delay < mintime) {
                        mintime = slave->delay;
                        bestslave = slave;
@@ -801,7 +801,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                new_active->last_link_up = jiffies;
 
                if (new_active->link == BOND_LINK_BACK) {
-                       if (USES_PRIMARY(bond->params.mode)) {
+                       if (bond_uses_primary(bond)) {
                                pr_info("%s: making interface %s the new active one %d ms earlier\n",
                                        bond->dev->name, new_active->dev->name,
                                        (bond->params.updelay - new_active->delay) * bond->params.miimon);
@@ -810,20 +810,20 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                        new_active->delay = 0;
                        new_active->link = BOND_LINK_UP;
 
-                       if (bond->params.mode == BOND_MODE_8023AD)
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
 
                        if (bond_is_lb(bond))
                                bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
                } else {
-                       if (USES_PRIMARY(bond->params.mode)) {
+                       if (bond_uses_primary(bond)) {
                                pr_info("%s: making interface %s the new active one\n",
                                        bond->dev->name, new_active->dev->name);
                        }
                }
        }
 
-       if (USES_PRIMARY(bond->params.mode))
+       if (bond_uses_primary(bond))
                bond_hw_addr_swap(bond, new_active, old_active);
 
        if (bond_is_lb(bond)) {
@@ -838,7 +838,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                rcu_assign_pointer(bond->curr_active_slave, new_active);
        }
 
-       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
                if (old_active)
                        bond_set_slave_inactive_flags(old_active,
                                                      BOND_SLAVE_NOTIFY_NOW);
@@ -876,8 +876,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
         * resend only if bond is brought up with the affected
         * bonding modes and the retransmission is enabled */
        if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
-           ((USES_PRIMARY(bond->params.mode) && new_active) ||
-            bond->params.mode == BOND_MODE_ROUNDROBIN)) {
+           ((bond_uses_primary(bond) && new_active) ||
+            BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
                bond->igmp_retrans = bond->params.resend_igmp;
                queue_delayed_work(bond->wq, &bond->mcast_work, 1);
        }
@@ -958,7 +958,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
        struct slave *slave;
 
        bond_for_each_slave(bond, slave, iter)
-               if (IS_UP(slave->dev))
+               if (bond_slave_is_up(slave))
                        slave_disable_netpoll(slave);
 }
 
@@ -1038,6 +1038,7 @@ static void bond_compute_features(struct bonding *bond)
 
        if (!bond_has_slaves(bond))
                goto done;
+       vlan_features &= NETIF_F_ALL_FOR_ALL;
 
        bond_for_each_slave(bond, slave, iter) {
                vlan_features = netdev_increment_features(vlan_features,
@@ -1084,7 +1085,7 @@ static bool bond_should_deliver_exact_match(struct sk_buff *skb,
                                            struct bonding *bond)
 {
        if (bond_is_slave_inactive(slave)) {
-               if (bond->params.mode == BOND_MODE_ALB &&
+               if (BOND_MODE(bond) == BOND_MODE_ALB &&
                    skb->pkt_type != PACKET_BROADCAST &&
                    skb->pkt_type != PACKET_MULTICAST)
                        return false;
@@ -1126,7 +1127,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 
        skb->dev = bond->dev;
 
-       if (bond->params.mode == BOND_MODE_ALB &&
+       if (BOND_MODE(bond) == BOND_MODE_ALB &&
            bond->dev->priv_flags & IFF_BRIDGE_PORT &&
            skb->pkt_type == PACKET_HOST) {
 
@@ -1163,6 +1164,35 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev,
        rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
 }
 
+static struct slave *bond_alloc_slave(struct bonding *bond)
+{
+       struct slave *slave = NULL;
+
+       slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+       if (!slave)
+               return NULL;
+
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+               SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
+                                              GFP_KERNEL);
+               if (!SLAVE_AD_INFO(slave)) {
+                       kfree(slave);
+                       return NULL;
+               }
+       }
+       return slave;
+}
+
+static void bond_free_slave(struct slave *slave)
+{
+       struct bonding *bond = bond_get_bond_by_slave(slave);
+
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               kfree(SLAVE_AD_INFO(slave));
+
+       kfree(slave);
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1269,7 +1299,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                if (!bond_has_slaves(bond)) {
                        pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
                                bond_dev->name);
-                       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+                       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
                                bond->params.fail_over_mac = BOND_FOM_ACTIVE;
                                pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
                                        bond_dev->name);
@@ -1290,11 +1320,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
            bond->dev->addr_assign_type == NET_ADDR_RANDOM)
                bond_set_dev_addr(bond->dev, slave_dev);
 
-       new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+       new_slave = bond_alloc_slave(bond);
        if (!new_slave) {
                res = -ENOMEM;
                goto err_undo_flags;
        }
+
+       new_slave->bond = bond;
+       new_slave->dev = slave_dev;
        /*
         * Set the new_slave's queue_id to be zero.  Queue ID mapping
         * is set via sysfs or module option if desired.
@@ -1317,7 +1350,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
 
        if (!bond->params.fail_over_mac ||
-           bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+           BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /*
                 * Set slave to master's mac address.  The application already
                 * set the master's mac address to that of the first slave
@@ -1338,8 +1371,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                goto err_restore_mac;
        }
 
-       new_slave->bond = bond;
-       new_slave->dev = slave_dev;
        slave_dev->priv_flags |= IFF_BONDING;
 
        if (bond_is_lb(bond)) {
@@ -1351,10 +1382,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                        goto err_close;
        }
 
-       /* If the mode USES_PRIMARY, then the following is handled by
+       /* If the mode uses primary, then the following is handled by
         * bond_change_active_slave().
         */
-       if (!USES_PRIMARY(bond->params.mode)) {
+       if (!bond_uses_primary(bond)) {
                /* set promiscuity level to new slave */
                if (bond_dev->flags & IFF_PROMISC) {
                        res = dev_set_promiscuity(slave_dev, 1);
@@ -1377,7 +1408,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                netif_addr_unlock_bh(bond_dev);
        }
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                /* add lacpdu mc addr to mc list */
                u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
 
@@ -1450,7 +1481,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
                 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
 
-       if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
+       if (bond_uses_primary(bond) && bond->params.primary[0]) {
                /* if there is a primary slave, remember it */
                if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
                        bond->primary_slave = new_slave;
@@ -1458,7 +1489,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
-       switch (bond->params.mode) {
+       switch (BOND_MODE(bond)) {
        case BOND_MODE_ACTIVEBACKUP:
                bond_set_slave_inactive_flags(new_slave,
                                              BOND_SLAVE_NOTIFY_NOW);
@@ -1471,14 +1502,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
                /* if this is the first slave */
                if (!prev_slave) {
-                       SLAVE_AD_INFO(new_slave).id = 1;
+                       SLAVE_AD_INFO(new_slave)->id = 1;
                        /* Initialize AD with the number of times that the AD timer is called in 1 second
                         * can be called only after the mac address of the bond is set
                         */
                        bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
                } else {
-                       SLAVE_AD_INFO(new_slave).id =
-                               SLAVE_AD_INFO(prev_slave).id + 1;
+                       SLAVE_AD_INFO(new_slave)->id =
+                               SLAVE_AD_INFO(prev_slave)->id + 1;
                }
 
                bond_3ad_bind_slave(new_slave);
@@ -1539,7 +1570,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        bond_compute_features(bond);
        bond_set_carrier(bond);
 
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
@@ -1563,7 +1594,7 @@ err_unregister:
        netdev_rx_handler_unregister(slave_dev);
 
 err_detach:
-       if (!USES_PRIMARY(bond->params.mode))
+       if (!bond_uses_primary(bond))
                bond_hw_addr_flush(bond_dev, slave_dev);
 
        vlan_vids_del_by_dev(slave_dev, bond_dev);
@@ -1585,7 +1616,7 @@ err_close:
 
 err_restore_mac:
        if (!bond->params.fail_over_mac ||
-           bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+           BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /* XXX TODO - fom follow mode needs to change master's
                 * MAC if this slave's MAC is in use by the bond, or at
                 * least print a warning.
@@ -1599,7 +1630,7 @@ err_restore_mtu:
        dev_set_mtu(slave_dev, new_slave->original_mtu);
 
 err_free:
-       kfree(new_slave);
+       bond_free_slave(new_slave);
 
 err_undo_flags:
        /* Enslave of first slave has failed and we need to fix master's mac */
@@ -1661,7 +1692,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        write_lock_bh(&bond->lock);
 
        /* Inform AD package of unbinding of slave. */
-       if (bond->params.mode == BOND_MODE_8023AD)
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                bond_3ad_unbind_slave(slave);
 
        write_unlock_bh(&bond->lock);
@@ -1676,7 +1707,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        bond->current_arp_slave = NULL;
 
        if (!all && (!bond->params.fail_over_mac ||
-                    bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+                    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
                if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
                    bond_has_slaves(bond))
                        pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
@@ -1748,10 +1779,10 @@ static int __bond_release_one(struct net_device *bond_dev,
        /* must do this from outside any spinlocks */
        vlan_vids_del_by_dev(slave_dev, bond_dev);
 
-       /* If the mode USES_PRIMARY, then this cases was handled above by
+       /* If the mode uses primary, then this case was handled above by
         * bond_change_active_slave(..., NULL)
         */
-       if (!USES_PRIMARY(bond->params.mode)) {
+       if (!bond_uses_primary(bond)) {
                /* unset promiscuity level from slave
                 * NOTE: The NETDEV_CHANGEADDR call above may change the value
                 * of the IFF_PROMISC flag in the bond_dev, but we need the
@@ -1775,7 +1806,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        dev_close(slave_dev);
 
        if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
-           bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+           BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /* restore original ("permanent") mac address */
                ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
                addr.sa_family = slave_dev->type;
@@ -1786,7 +1817,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        slave_dev->priv_flags &= ~IFF_BONDING;
 
-       kfree(slave);
+       bond_free_slave(slave);
 
        return 0;  /* deletion OK */
 }
@@ -1821,7 +1852,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
-       info->bond_mode = bond->params.mode;
+       info->bond_mode = BOND_MODE(bond);
        info->miimon = bond->params.miimon;
 
        info->num_slaves = bond->slave_cnt;
@@ -1877,7 +1908,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        if (slave->delay) {
                                pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
                                        bond->dev->name,
-                                       (bond->params.mode ==
+                                       (BOND_MODE(bond) ==
                                         BOND_MODE_ACTIVEBACKUP) ?
                                        (bond_is_active_slave(slave) ?
                                         "active " : "backup ") : "",
@@ -1968,10 +1999,10 @@ static void bond_miimon_commit(struct bonding *bond)
                        slave->link = BOND_LINK_UP;
                        slave->last_link_up = jiffies;
 
-                       if (bond->params.mode == BOND_MODE_8023AD) {
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                                /* prevent it from being the active one */
                                bond_set_backup_slave(slave);
-                       } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+                       } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                                /* make it immediately active */
                                bond_set_active_slave(slave);
                        } else if (slave != bond->primary_slave) {
@@ -1985,7 +2016,7 @@ static void bond_miimon_commit(struct bonding *bond)
                                slave->duplex ? "full" : "half");
 
                        /* notify ad that the link status has changed */
-                       if (bond->params.mode == BOND_MODE_8023AD)
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(slave, BOND_LINK_UP);
 
                        if (bond_is_lb(bond))
@@ -2004,15 +2035,15 @@ static void bond_miimon_commit(struct bonding *bond)
 
                        slave->link = BOND_LINK_DOWN;
 
-                       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
-                           bond->params.mode == BOND_MODE_8023AD)
+                       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+                           BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
 
                        pr_info("%s: link status definitely down for interface %s, disabling it\n",
                                bond->dev->name, slave->dev->name);
 
-                       if (bond->params.mode == BOND_MODE_8023AD)
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(slave,
                                                            BOND_LINK_DOWN);
 
@@ -2126,10 +2157,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                          __be32 dest_ip, __be32 src_ip,
-                         struct bond_vlan_tag *inner,
-                         struct bond_vlan_tag *outer)
+                         struct bond_vlan_tag *tags)
 {
        struct sk_buff *skb;
+       int i;
 
        pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
                 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2172,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                net_err_ratelimited("ARP packet allocation failed\n");
                return;
        }
-       if (outer->vlan_id) {
-               if (inner->vlan_id) {
-                       pr_debug("inner tag: proto %X vid %X\n",
-                                ntohs(inner->vlan_proto), inner->vlan_id);
-                       skb = __vlan_put_tag(skb, inner->vlan_proto,
-                                            inner->vlan_id);
-                       if (!skb) {
-                               net_err_ratelimited("failed to insert inner VLAN tag\n");
-                               return;
-                       }
-               }
 
-               pr_debug("outer reg: proto %X vid %X\n",
-                        ntohs(outer->vlan_proto), outer->vlan_id);
-               skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+       /* Go through all the tags backwards and add them to the packet */
+       for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+               if (!tags[i].vlan_id)
+                       continue;
+
+               pr_debug("inner tag: proto %X vid %X\n",
+                        ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+               skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+                                    tags[i].vlan_id);
+               if (!skb) {
+                       net_err_ratelimited("failed to insert inner VLAN tag\n");
+                       return;
+               }
+       }
+       /* Set the outer tag */
+       if (tags[0].vlan_id) {
+               pr_debug("outer tag: proto %X vid %X\n",
+                        ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+               skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert outer VLAN tag\n");
                        return;
@@ -2164,22 +2200,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
        arp_xmit(skb);
 }
 
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+                                   struct net_device *end_dev,
+                                   struct bond_vlan_tag *tags)
+{
+       struct net_device *upper;
+       struct list_head  *iter;
+       int  idx;
+
+       if (start_dev == end_dev)
+               return true;
+
+       netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+               if (bond_verify_device_path(upper, end_dev, tags)) {
+                       if (is_vlan_dev(upper)) {
+                               idx = vlan_get_encap_level(upper);
+                               if (idx >= BOND_MAX_VLAN_ENCAP)
+                                       return false;
+
+                               tags[idx].vlan_proto =
+                                                   vlan_dev_vlan_proto(upper);
+                               tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+                       }
+                       return true;
+               }
+       }
+
+       return false;
+}
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-       struct net_device *upper, *vlan_upper;
-       struct list_head *iter, *vlan_iter;
        struct rtable *rt;
-       struct bond_vlan_tag inner, outer;
+       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
        __be32 *targets = bond->params.arp_targets, addr;
        int i;
+       bool ret;
 
        for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
                pr_debug("basa: target %pI4\n", &targets[i]);
-               inner.vlan_proto = 0;
-               inner.vlan_id = 0;
-               outer.vlan_proto = 0;
-               outer.vlan_id = 0;
+               memset(tags, 0, sizeof(tags));
 
                /* Find out through which dev should the packet go */
                rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2258,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                                net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
                                                     bond->dev->name,
                                                     &targets[i]);
-                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+                                     0, tags);
                        continue;
                }
 
@@ -2201,52 +2268,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                        goto found;
 
                rcu_read_lock();
-               /* first we search only for vlan devices. for every vlan
-                * found we verify its upper dev list, searching for the
-                * rt->dst.dev. If found we save the tag of the vlan and
-                * proceed to send the packet.
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-                                                 vlan_iter) {
-                       if (!is_vlan_dev(vlan_upper))
-                               continue;
-
-                       if (vlan_upper == rt->dst.dev) {
-                               outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                               outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                               rcu_read_unlock();
-                               goto found;
-                       }
-                       netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-                                                         iter) {
-                               if (upper == rt->dst.dev) {
-                                       /* If the upper dev is a vlan dev too,
-                                        *  set the vlan tag to inner tag.
-                                        */
-                                       if (is_vlan_dev(upper)) {
-                                               inner.vlan_proto = vlan_dev_vlan_proto(upper);
-                                               inner.vlan_id = vlan_dev_vlan_id(upper);
-                                       }
-                                       outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                                       outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                                       rcu_read_unlock();
-                                       goto found;
-                               }
-                       }
-               }
-
-               /* if the device we're looking for is not on top of any of
-                * our upper vlans, then just search for any dev that
-                * matches, and in case it's a vlan - save the id
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-                       if (upper == rt->dst.dev) {
-                               rcu_read_unlock();
-                               goto found;
-                       }
-               }
+               ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
                rcu_read_unlock();
 
+               if (ret)
+                       goto found;
+
                /* Not our device - skip */
                pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
                         bond->dev->name, &targets[i],
@@ -2259,7 +2286,7 @@ found:
                addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
                ip_rt_put(rt);
                bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                             addr, &inner, &outer);
+                             addr, tags);
        }
 }
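
Taken together, the hunks above replace the special-cased inner/outer pair with a flat array: bond_verify_device_path() confirms that rt->dst.dev sits somewhere above the bond and, while unwinding the recursion, records each VLAN it crossed at its encapsulation level (bounded by BOND_MAX_VLAN_ENCAP), so the tag nearest the bond lands in tags[0]; bond_arp_send() then pushes the tags from the highest index down and applies tags[0] last as the outermost header. Below is a minimal userspace sketch of that walk under the simplifying assumption of a single chain of uppers; the struct names, the encap_level field and MAX_VLAN_ENCAP are illustrative stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_VLAN_ENCAP 2	/* stand-in for BOND_MAX_VLAN_ENCAP */

struct vtag { unsigned short proto, vid; };

struct dev {
	const char *name;
	bool is_vlan;
	int encap_level;	/* stand-in for vlan_get_encap_level() */
	struct vtag tag;
	struct dev *upper;	/* one upper only; the kernel walks a list */
};

/* Same shape as the recursion above: confirm @end is reachable from @start,
 * then record each VLAN tag met on the way back at its encapsulation level,
 * so the tag nearest the bond ends up at index 0 (outermost on the wire).
 */
static bool verify_path(struct dev *start, struct dev *end, struct vtag *tags)
{
	if (start == end)
		return true;
	if (!start->upper || !verify_path(start->upper, end, tags))
		return false;
	if (start->upper->is_vlan) {
		if (start->upper->encap_level >= MAX_VLAN_ENCAP)
			return false;
		tags[start->upper->encap_level] = start->upper->tag;
	}
	return true;
}

int main(void)
{
	struct dev v200 = { "bond0.100.200", true,  1, { 0x8100, 200 }, NULL  };
	struct dev v100 = { "bond0.100",     true,  0, { 0x8100, 100 }, &v200 };
	struct dev bond = { "bond0",         false, 0, { 0,      0   }, &v100 };
	struct vtag tags[MAX_VLAN_ENCAP];
	int i;

	memset(tags, 0, sizeof(tags));
	if (verify_path(&bond, &v200, tags))
		for (i = 0; i < MAX_VLAN_ENCAP; i++)
			printf("tags[%d]: proto 0x%x vid %u\n", i,
			       (unsigned int)tags[i].proto,
			       (unsigned int)tags[i].vid);
	return 0;
}

For the bond0 -> bond0.100 -> bond0.100.200 chain this prints vid 100 at index 0 and vid 200 at index 1, which is exactly the order bond_arp_send() needs: 200 goes into the payload first, 100 is applied last as the outer tag.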
 
@@ -2291,8 +2318,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
        int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
 
        if (!slave_do_arp_validate(bond, slave)) {
-               if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
-                   !slave_do_arp_validate_only(bond, slave))
+               if ((slave_do_arp_validate_only(bond) && is_arp) ||
+                   !slave_do_arp_validate_only(bond))
                        slave->last_rx = jiffies;
                return RX_HANDLER_ANOTHER;
        } else if (!is_arp) {
@@ -2460,7 +2487,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                 * do - all replies will be rx'ed on same link causing slaves
                 * to be unstable during low/no traffic periods
                 */
-               if (IS_UP(slave->dev))
+               if (bond_slave_is_up(slave))
                        bond_arp_send_all(bond, slave);
        }
 
@@ -2682,10 +2709,10 @@ static bool bond_ab_arp_probe(struct bonding *bond)
        bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               if (!found && !before && IS_UP(slave->dev))
+               if (!found && !before && bond_slave_is_up(slave))
                        before = slave;
 
-               if (found && !new_slave && IS_UP(slave->dev))
+               if (found && !new_slave && bond_slave_is_up(slave))
                        new_slave = slave;
                /* if the link state is up at this point, we
                 * mark it down - this can happen if we have
@@ -2694,7 +2721,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
                 * one the current slave so it is still marked
                 * up when it is actually down
                 */
-               if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+               if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
                        slave->link = BOND_LINK_DOWN;
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
@@ -2857,7 +2884,7 @@ static int bond_slave_netdev_event(unsigned long event,
 
                bond_update_speed_duplex(slave);
 
-               if (bond->params.mode == BOND_MODE_8023AD) {
+               if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                        if (old_speed != slave->speed)
                                bond_3ad_adapter_speed_changed(slave);
                        if (old_duplex != slave->duplex)
@@ -2885,7 +2912,7 @@ static int bond_slave_netdev_event(unsigned long event,
                break;
        case NETDEV_CHANGENAME:
                /* we don't care if we don't have primary set */
-               if (!USES_PRIMARY(bond->params.mode) ||
+               if (!bond_uses_primary(bond) ||
                    !bond->params.primary[0])
                        break;
 
@@ -3015,20 +3042,18 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
  * bond_xmit_hash - generate a hash value based on the xmit policy
  * @bond: bonding device
  * @skb: buffer to use for headers
- * @count: modulo value
  *
  * This function will extract the necessary headers from the skb buffer and use
  * them to generate a hash based on the xmit_policy set in the bonding device
- * which will be reduced modulo count before returning.
  */
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
 {
        struct flow_keys flow;
        u32 hash;
 
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
            !bond_flow_dissect(bond, skb, &flow))
-               return bond_eth_hash(skb) % count;
+               return bond_eth_hash(skb);
 
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
            bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
@@ -3039,7 +3064,7 @@ int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
        hash ^= (hash >> 16);
        hash ^= (hash >> 8);
 
-       return hash % count;
+       return hash;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
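
With the modulo dropped, bond_xmit_hash() now returns the full folded 32-bit value and leaves the reduction to its callers — bond_xmit_xor() below applies % bond->slave_cnt itself — so different callers can map the same hash onto ranges of their own choosing. A minimal userspace sketch of that split follows; toy_flow_hash() and the sample addresses are made up, only the two xor-fold lines mirror the driver.

#include <stdint.h>
#include <stdio.h>

/* Placeholder for the value the driver derives from flow dissection. */
static uint32_t toy_flow_hash(uint32_t saddr, uint32_t daddr, uint32_t ports)
{
	return saddr ^ daddr ^ ports;
}

static uint32_t xmit_hash(uint32_t saddr, uint32_t daddr, uint32_t ports)
{
	uint32_t hash = toy_flow_hash(saddr, daddr, ports);

	hash ^= (hash >> 16);
	hash ^= (hash >> 8);
	return hash;		/* full 32-bit value, no modulo here */
}

int main(void)
{
	unsigned int slave_cnt = 3;
	uint32_t hash = xmit_hash(0x0a000001, 0x0a000002, (5000u << 16) | 80);

	/* The reduction now happens at the call site, cf. bond_xmit_xor(). */
	printf("hash %#x -> slave %u of %u\n", (unsigned int)hash,
	       (unsigned int)(hash % slave_cnt), slave_cnt);
	return 0;
}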
@@ -3050,7 +3075,7 @@ static void bond_work_init_all(struct bonding *bond)
                          bond_resend_igmp_join_requests_delayed);
        INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
        INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
-       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
                INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
        else
                INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
@@ -3077,7 +3102,7 @@ static int bond_open(struct net_device *bond_dev)
        if (bond_has_slaves(bond)) {
                read_lock(&bond->curr_slave_lock);
                bond_for_each_slave(bond, slave, iter) {
-                       if (USES_PRIMARY(bond->params.mode)
+                       if (bond_uses_primary(bond)
                                && (slave != bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
@@ -3096,9 +3121,10 @@ static int bond_open(struct net_device *bond_dev)
                /* bond_alb_initialize must be called before the timer
                 * is started.
                 */
-               if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
+               if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
                        return -ENOMEM;
-               queue_delayed_work(bond->wq, &bond->alb_work, 0);
+               if (bond->params.tlb_dynamic_lb)
+                       queue_delayed_work(bond->wq, &bond->alb_work, 0);
        }
 
        if (bond->params.miimon)  /* link check interval, in milliseconds. */
@@ -3109,7 +3135,7 @@ static int bond_open(struct net_device *bond_dev)
                bond->recv_probe = bond_arp_rcv;
        }
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                queue_delayed_work(bond->wq, &bond->ad_work, 0);
                /* register to receive LACPDUs */
                bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3314,7 +3340,7 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
 
 
        rcu_read_lock();
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                slave = rcu_dereference(bond->curr_active_slave);
                if (slave) {
                        dev_uc_sync(slave->dev, bond_dev);
@@ -3468,7 +3494,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
        struct list_head *iter;
        int res = 0;
 
-       if (bond->params.mode == BOND_MODE_ALB)
+       if (BOND_MODE(bond) == BOND_MODE_ALB)
                return bond_alb_set_mac_address(bond_dev, addr);
 
 
@@ -3479,7 +3505,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
         * Returning an error causes ifenslave to fail.
         */
        if (bond->params.fail_over_mac &&
-           bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+           BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
                return 0;
 
        if (!is_valid_ether_addr(sa->sa_data))
@@ -3559,7 +3585,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
        /* Here we start from the slave with slave_id */
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0) {
-                       if (slave_can_tx(slave)) {
+                       if (bond_slave_can_tx(slave)) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
                                return;
                        }
@@ -3571,7 +3597,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0)
                        break;
-               if (slave_can_tx(slave)) {
+               if (bond_slave_can_tx(slave)) {
                        bond_dev_queue_xmit(bond, skb, slave->dev);
                        return;
                }
@@ -3628,7 +3654,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
         */
        if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
                slave = rcu_dereference(bond->curr_active_slave);
-               if (slave && slave_can_tx(slave))
+               if (slave && bond_slave_can_tx(slave))
                        bond_dev_queue_xmit(bond, skb, slave->dev);
                else
                        bond_xmit_slave_id(bond, skb, 0);
@@ -3666,7 +3692,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
-       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
+       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
 
        return NETDEV_TX_OK;
 }
@@ -3681,7 +3707,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (bond_is_last_slave(bond, slave))
                        break;
-               if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+               if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                        if (!skb2) {
@@ -3693,7 +3719,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
                        bond_dev_queue_xmit(bond, skb2, slave->dev);
                }
        }
-       if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
+       if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
                bond_dev_queue_xmit(bond, skb, slave->dev);
        else
                dev_kfree_skb_any(skb);
@@ -3718,7 +3744,7 @@ static inline int bond_slave_override(struct bonding *bond,
        /* Find out if any slaves have the same mapping as this skb. */
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (slave->queue_id == skb->queue_mapping) {
-                       if (slave_can_tx(slave)) {
+                       if (bond_slave_can_tx(slave)) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
                                return 0;
                        }
@@ -3759,12 +3785,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
 {
        struct bonding *bond = netdev_priv(dev);
 
-       if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
-               if (!bond_slave_override(bond, skb))
-                       return NETDEV_TX_OK;
-       }
+       if (bond_should_override_tx_queue(bond) &&
+           !bond_slave_override(bond, skb))
+               return NETDEV_TX_OK;
 
-       switch (bond->params.mode) {
+       switch (BOND_MODE(bond)) {
        case BOND_MODE_ROUNDROBIN:
                return bond_xmit_roundrobin(skb, dev);
        case BOND_MODE_ACTIVEBACKUP:
@@ -3776,12 +3801,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
        case BOND_MODE_8023AD:
                return bond_3ad_xmit_xor(skb, dev);
        case BOND_MODE_ALB:
-       case BOND_MODE_TLB:
                return bond_alb_xmit(skb, dev);
+       case BOND_MODE_TLB:
+               return bond_tlb_xmit(skb, dev);
        default:
                /* Should never happen, mode already checked */
                pr_err("%s: Error: Unknown bonding mode %d\n",
-                      dev->name, bond->params.mode);
+                      dev->name, BOND_MODE(bond));
                WARN_ON_ONCE(1);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -3821,14 +3847,14 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
        ecmd->duplex = DUPLEX_UNKNOWN;
        ecmd->port = PORT_OTHER;
 
-       /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
+       /* Since bond_slave_can_tx returns false for all inactive or down slaves, we
         * do not need to check mode.  Though link speed might not represent
         * the true receive or transmit bandwidth (not all modes are symmetric)
         * this is an accurate maximum.
         */
        read_lock(&bond->lock);
        bond_for_each_slave(bond, slave, iter) {
-               if (SLAVE_IS_OK(slave)) {
+               if (bond_slave_can_tx(slave)) {
                        if (slave->speed != SPEED_UNKNOWN)
                                speed += slave->speed;
                        if (ecmd->duplex == DUPLEX_UNKNOWN &&
@@ -3998,7 +4024,8 @@ static int bond_check_params(struct bond_params *params)
 
        if (xmit_hash_policy) {
                if ((bond_mode != BOND_MODE_XOR) &&
-                   (bond_mode != BOND_MODE_8023AD)) {
+                   (bond_mode != BOND_MODE_8023AD) &&
+                   (bond_mode != BOND_MODE_TLB)) {
                        pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
                                bond_mode_name(bond_mode));
                } else {
@@ -4083,7 +4110,7 @@ static int bond_check_params(struct bond_params *params)
        }
 
        /* reset values for 802.3ad/TLB/ALB */
-       if (BOND_NO_USES_ARP(bond_mode)) {
+       if (!bond_mode_uses_arp(bond_mode)) {
                if (!miimon) {
                        pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
                        pr_warn("Forcing miimon to 100msec\n");
@@ -4165,7 +4192,7 @@ static int bond_check_params(struct bond_params *params)
                   catch mistakes */
                __be32 ip;
                if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
-                   IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
+                   !bond_is_ip_target_ok(ip)) {
                        pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
                                arp_ip_target[i]);
                        arp_interval = 0;
@@ -4238,7 +4265,7 @@ static int bond_check_params(struct bond_params *params)
                pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
        }
 
-       if (primary && !USES_PRIMARY(bond_mode)) {
+       if (primary && !bond_mode_uses_primary(bond_mode)) {
                /* currently, using a primary only makes sense
                 * in active backup, TLB or ALB modes
                 */
@@ -4304,6 +4331,7 @@ static int bond_check_params(struct bond_params *params)
        params->min_links = min_links;
        params->lp_interval = lp_interval;
        params->packets_per_slave = packets_per_slave;
+       params->tlb_dynamic_lb = 1; /* Default value */
        if (packets_per_slave > 0) {
                params->reciprocal_packets_per_slave =
                        reciprocal_value(packets_per_slave);
index f847e165d252fb2a4528fe396b2c4f0553e81ac3..5ab3c1847e6760e2f3ef7d2ec35085c2d4bf655b 100644 (file)
@@ -56,10 +56,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
        if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
                goto nla_put_failure;
 
-       if (slave->bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
                const struct aggregator *agg;
 
-               agg = SLAVE_AD_INFO(slave).port.aggregator;
+               agg = SLAVE_AD_INFO(slave)->port.aggregator;
                if (agg)
                        if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
                                        agg->aggregator_identifier))
@@ -407,7 +407,7 @@ static int bond_fill_info(struct sk_buff *skb,
        unsigned int packets_per_slave;
        int i, targets_added;
 
-       if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
+       if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
                goto nla_put_failure;
 
        if (slave_dev &&
@@ -505,7 +505,7 @@ static int bond_fill_info(struct sk_buff *skb,
                       bond->params.ad_select))
                goto nla_put_failure;
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info info;
 
                if (!bond_3ad_get_active_agg_info(bond, &info)) {
index 724e30fa20b9fa70166b5d9b25ed9029fab6db73..540e0167bf24992037165d82c5af307d85f15f02 100644 (file)
@@ -70,6 +70,8 @@ static int bond_option_mode_set(struct bonding *bond,
                                const struct bond_opt_value *newval);
 static int bond_option_slaves_set(struct bonding *bond,
                                  const struct bond_opt_value *newval);
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+                                 const struct bond_opt_value *newval);
 
 
 static const struct bond_opt_value bond_mode_tbl[] = {
@@ -125,6 +127,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
        { "off",     0,       BOND_VALFLAG_DEFAULT},
        { "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+       { NULL,      -1,      0}
 };
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {
@@ -179,6 +182,12 @@ static const struct bond_opt_value bond_lp_interval_tbl[] = {
        { NULL,      -1,      0},
 };
 
+static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
+       { "off", 0,  0},
+       { "on",  1,  BOND_VALFLAG_DEFAULT},
+       { NULL,  -1, 0}
+};
+
 static const struct bond_option bond_opts[] = {
        [BOND_OPT_MODE] = {
                .id = BOND_OPT_MODE,
@@ -199,7 +208,7 @@ static const struct bond_option bond_opts[] = {
        [BOND_OPT_XMIT_HASH] = {
                .id = BOND_OPT_XMIT_HASH,
                .name = "xmit_hash_policy",
-               .desc = "balance-xor and 802.3ad hashing method",
+               .desc = "balance-xor, 802.3ad, and tlb hashing method",
                .values = bond_xmit_hashtype_tbl,
                .set = bond_option_xmit_hash_policy_set
        },
@@ -364,9 +373,33 @@ static const struct bond_option bond_opts[] = {
                .flags = BOND_OPTFLAG_RAWVAL,
                .set = bond_option_slaves_set
        },
+       [BOND_OPT_TLB_DYNAMIC_LB] = {
+               .id = BOND_OPT_TLB_DYNAMIC_LB,
+               .name = "tlb_dynamic_lb",
+               .desc = "Enable dynamic flow shuffling",
+               .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
+               .values = bond_tlb_dynamic_lb_tbl,
+               .flags = BOND_OPTFLAG_IFDOWN,
+               .set = bond_option_tlb_dynamic_lb_set,
+       },
        { }
 };
 
+/* Searches for an option by name */
+const struct bond_option *bond_opt_get_by_name(const char *name)
+{
+       const struct bond_option *opt;
+       int option;
+
+       for (option = 0; option < BOND_OPT_LAST; option++) {
+               opt = bond_opt_get(option);
+               if (opt && !strcmp(opt->name, name))
+                       return opt;
+       }
+
+       return NULL;
+}
+
 /* Searches for a value in opt's values[] table */
 const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
 {
@@ -640,7 +673,7 @@ const struct bond_option *bond_opt_get(unsigned int option)
 
 int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
 {
-       if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
+       if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
                pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
                        bond->dev->name, newval->string);
                /* disable arp monitoring */
@@ -661,7 +694,7 @@ int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newv
 static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
                                                         struct slave *slave)
 {
-       return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+       return bond_uses_primary(bond) && slave ? slave->dev : NULL;
 }
 
 struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
@@ -726,7 +759,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
                                bond->dev->name, new_active->dev->name);
                } else {
                        if (old_active && (new_active->link == BOND_LINK_UP) &&
-                           IS_UP(new_active->dev)) {
+                           bond_slave_is_up(new_active)) {
                                pr_info("%s: Setting %s as active slave\n",
                                        bond->dev->name, new_active->dev->name);
                                bond_change_active_slave(bond, new_active);
@@ -745,6 +778,10 @@ static int bond_option_active_slave_set(struct bonding *bond,
        return ret;
 }
 
+/* There are two tricky bits here.  First, if MII monitoring is activated, then
+ * we must disable ARP monitoring.  Second, if the timer isn't running, we must
+ * start it.
+ */
 static int bond_option_miimon_set(struct bonding *bond,
                                  const struct bond_opt_value *newval)
 {
@@ -783,6 +820,10 @@ static int bond_option_miimon_set(struct bonding *bond,
        return 0;
 }
 
+/* Set up and down delays. These must be multiples of the
+ * MII monitoring value, and are stored internally as the multiplier.
+ * Thus, we must translate to MS for the real world.
+ */
 static int bond_option_updelay_set(struct bonding *bond,
                                   const struct bond_opt_value *newval)
 {
@@ -841,6 +882,10 @@ static int bond_option_use_carrier_set(struct bonding *bond,
        return 0;
 }
 
+/* There are two tricky bits here.  First, if ARP monitoring is activated, then
+ * we must disable MII monitoring.  Second, if the ARP timer isn't running,
+ * we must start it.
+ */
 static int bond_option_arp_interval_set(struct bonding *bond,
                                        const struct bond_opt_value *newval)
 {
@@ -898,7 +943,7 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
        __be32 *targets = bond->params.arp_targets;
        int ind;
 
-       if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+       if (!bond_is_ip_target_ok(target)) {
                pr_err("%s: invalid ARP target %pI4 specified for addition\n",
                       bond->dev->name, &target);
                return -EINVAL;
@@ -943,7 +988,7 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
        unsigned long *targets_rx;
        int ind, i;
 
-       if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+       if (!bond_is_ip_target_ok(target)) {
                pr_err("%s: invalid ARP target %pI4 specified for removal\n",
                       bond->dev->name, &target);
                return -EINVAL;
@@ -1337,3 +1382,13 @@ err_no_cmd:
        ret = -EPERM;
        goto out;
 }
+
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+                                         const struct bond_opt_value *newval)
+{
+       pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
+               bond->dev->name, newval->string, newval->value);
+       bond->params.tlb_dynamic_lb = newval->value;
+
+       return 0;
+}
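
The new option defaults to on (params->tlb_dynamic_lb = 1 in bond_check_params above, and BOND_VALFLAG_DEFAULT on "on" in the values table) and is flagged BOND_OPTFLAG_IFDOWN, so it can only be changed while the bond is down. A minimal sketch of flipping it off from userspace, assuming the option is exposed through the usual per-bond sysfs directory; the path below is an assumption, since this hunk only registers the option itself.

#include <stdio.h>

int main(void)
{
	/* Assumed path: options in bond_opts[] are normally mirrored under
	 * /sys/class/net/<bond>/bonding/; adjust the bond name as needed.
	 * The bond must be administratively down (BOND_OPTFLAG_IFDOWN).
	 */
	const char *path = "/sys/class/net/bond0/bonding/tlb_dynamic_lb";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("0\n", f);	/* "0" disables dynamic flow shuffling */
	return fclose(f) ? 1 : 0;
}
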
index 12be9e1bfb0c0d048229a1698c2b384847fbd794..17ded5b291761ca9e85e2fa6c82514a4613815b5 100644 (file)
@@ -62,6 +62,7 @@ enum {
        BOND_OPT_RESEND_IGMP,
        BOND_OPT_LP_INTERVAL,
        BOND_OPT_SLAVES,
+       BOND_OPT_TLB_DYNAMIC_LB,
        BOND_OPT_LAST
 };
 
@@ -104,6 +105,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
 const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
                                            struct bond_opt_value *val);
 const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_option *bond_opt_get_by_name(const char *name);
 const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
 
 /* This helper is used to initialize a bond_opt_value structure for parameter
index 013fdd0f45e94340917ee2aeecc8529d386862e2..b215b479bb3a6917ffd6d37f30f989996d176f5b 100644 (file)
@@ -72,9 +72,9 @@ static void bond_info_show_master(struct seq_file *seq)
        curr = rcu_dereference(bond->curr_active_slave);
 
        seq_printf(seq, "Bonding Mode: %s",
-                  bond_mode_name(bond->params.mode));
+                  bond_mode_name(BOND_MODE(bond)));
 
-       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
+       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
            bond->params.fail_over_mac) {
                optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
                                          bond->params.fail_over_mac);
@@ -83,15 +83,15 @@ static void bond_info_show_master(struct seq_file *seq)
 
        seq_printf(seq, "\n");
 
-       if (bond->params.mode == BOND_MODE_XOR ||
-               bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_XOR ||
+               BOND_MODE(bond) == BOND_MODE_8023AD) {
                optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
                                          bond->params.xmit_policy);
                seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
                           optval->string, bond->params.xmit_policy);
        }
 
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                seq_printf(seq, "Primary Slave: %s",
                           (bond->primary_slave) ?
                           bond->primary_slave->dev->name : "None");
@@ -134,7 +134,7 @@ static void bond_info_show_master(struct seq_file *seq)
                seq_printf(seq, "\n");
        }
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
 
                seq_puts(seq, "\n802.3ad info\n");
@@ -188,9 +188,9 @@ static void bond_info_show_slave(struct seq_file *seq,
 
        seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                const struct aggregator *agg
-                       = SLAVE_AD_INFO(slave).port.aggregator;
+                       = SLAVE_AD_INFO(slave)->port.aggregator;
 
                if (agg)
                        seq_printf(seq, "Aggregator ID: %d\n",
index 0e8b268da0a08f58c4443c6c36aa62bb9ef0f071..daed52f68ce1614ec94772ad1628f09417e04bfe 100644 (file)
@@ -45,8 +45,7 @@
 #define to_dev(obj)    container_of(obj, struct device, kobj)
 #define to_bond(cd)    ((struct bonding *)(netdev_priv(to_net_dev(cd))))
 
-/*
- * "show" function for the bond_masters attribute.
+/* "show" function for the bond_masters attribute.
  * The class parameter is ignored.
  */
 static ssize_t bonding_show_bonds(struct class *cls,
@@ -88,14 +87,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
        return NULL;
 }
 
-/*
- * "store" function for the bond_masters attribute.  This is what
+/* "store" function for the bond_masters attribute.  This is what
  * creates and deletes entire bonds.
  *
  * The class parameter is ignored.
  *
  */
-
 static ssize_t bonding_store_bonds(struct class *cls,
                                   struct class_attribute *attr,
                                   const char *buffer, size_t count)
@@ -158,9 +155,26 @@ static const struct class_attribute class_attr_bonding_masters = {
        .store = bonding_store_bonds,
 };
 
-/*
- * Show the slaves in the current bond.
- */
+/* Generic "store" method for bonding sysfs option setting */
+static ssize_t bonding_sysfs_store_option(struct device *d,
+                                         struct device_attribute *attr,
+                                         const char *buffer, size_t count)
+{
+       struct bonding *bond = to_bond(d);
+       const struct bond_option *opt;
+       int ret;
+
+       opt = bond_opt_get_by_name(attr->attr.name);
+       if (WARN_ON(!opt))
+               return -ENOENT;
+       ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
+       if (!ret)
+               ret = count;
+
+       return ret;
+}
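
This one handler replaces most of the hand-written ..._store() wrappers removed below: it keys off the sysfs attribute name via bond_opt_get_by_name() and hands the raw buffer to bond_opt_tryset_rtnl(). A stripped-down userspace sketch of the same table-driven dispatch, with illustrative option names and setters rather than the bonding ones:

#include <stdio.h>
#include <string.h>

struct opt {
	const char *name;
	int (*set)(int val);
};

static int set_miimon(int val)  { printf("miimon = %d\n", val);  return 0; }
static int set_updelay(int val) { printf("updelay = %d\n", val); return 0; }

static const struct opt opts[] = {
	{ "miimon",  set_miimon  },
	{ "updelay", set_updelay },
	{ NULL, NULL }
};

/* One generic "store": look the option up by name, then dispatch,
 * instead of one near-identical store function per attribute.
 */
static int store_option(const char *name, int val)
{
	const struct opt *o;

	for (o = opts; o->name; o++)
		if (!strcmp(o->name, name))
			return o->set(val);
	return -1;
}

int main(void)
{
	store_option("miimon", 100);
	store_option("updelay", 200);
	return 0;
}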
+
+/* Show the slaves in the current bond. */
 static ssize_t bonding_show_slaves(struct device *d,
                                   struct device_attribute *attr, char *buf)
 {
@@ -190,62 +204,24 @@ static ssize_t bonding_show_slaves(struct device *d,
 
        return res;
 }
-
-/*
- * Set the slaves in the current bond.
- * This is supposed to be only thin wrapper for bond_enslave and bond_release.
- * All hard work should be done there.
- */
-static ssize_t bonding_store_slaves(struct device *d,
-                                   struct device_attribute *attr,
-                                   const char *buffer, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
-                  bonding_store_slaves);
+                  bonding_sysfs_store_option);
 
-/*
- * Show and set the bonding mode.  The bond interface must be down to
- * change the mode.
- */
+/* Show the bonding mode. */
 static ssize_t bonding_show_mode(struct device *d,
                                 struct device_attribute *attr, char *buf)
 {
        struct bonding *bond = to_bond(d);
        const struct bond_opt_value *val;
 
-       val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
+       val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
 
-       return sprintf(buf, "%s %d\n", val->string, bond->params.mode);
-}
-
-static ssize_t bonding_store_mode(struct device *d,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
+       return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
 }
 static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
-                  bonding_show_mode, bonding_store_mode);
+                  bonding_show_mode, bonding_sysfs_store_option);
 
-/*
- * Show and set the bonding transmit hash method.
- */
+/* Show the bonding transmit hash method. */
 static ssize_t bonding_show_xmit_hash(struct device *d,
                                      struct device_attribute *attr,
                                      char *buf)
@@ -257,26 +233,10 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
 }
-
-static ssize_t bonding_store_xmit_hash(struct device *d,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
-                  bonding_show_xmit_hash, bonding_store_xmit_hash);
+                  bonding_show_xmit_hash, bonding_sysfs_store_option);
 
-/*
- * Show and set arp_validate.
- */
+/* Show arp_validate. */
 static ssize_t bonding_show_arp_validate(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -289,26 +249,10 @@ static ssize_t bonding_show_arp_validate(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
 }
-
-static ssize_t bonding_store_arp_validate(struct device *d,
-                                         struct device_attribute *attr,
-                                         const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-
 static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
-                  bonding_store_arp_validate);
-/*
- * Show and set arp_all_targets.
- */
+                  bonding_sysfs_store_option);
+
+/* Show arp_all_targets. */
 static ssize_t bonding_show_arp_all_targets(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -321,28 +265,10 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
        return sprintf(buf, "%s %d\n",
                       val->string, bond->params.arp_all_targets);
 }
-
-static ssize_t bonding_store_arp_all_targets(struct device *d,
-                                         struct device_attribute *attr,
-                                         const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-
 static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
-                  bonding_show_arp_all_targets, bonding_store_arp_all_targets);
+                  bonding_show_arp_all_targets, bonding_sysfs_store_option);
 
-/*
- * Show and store fail_over_mac.  User only allowed to change the
- * value when there are no slaves.
- */
+/* Show fail_over_mac. */
 static ssize_t bonding_show_fail_over_mac(struct device *d,
                                          struct device_attribute *attr,
                                          char *buf)
@@ -355,30 +281,10 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
 }
-
-static ssize_t bonding_store_fail_over_mac(struct device *d,
-                                          struct device_attribute *attr,
-                                          const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-
 static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
-                  bonding_show_fail_over_mac, bonding_store_fail_over_mac);
+                  bonding_show_fail_over_mac, bonding_sysfs_store_option);
 
-/*
- * Show and set the arp timer interval.  There are two tricky bits
- * here.  First, if ARP monitoring is activated, then we must disable
- * MII monitoring.  Second, if the ARP timer isn't running, we must
- * start it.
- */
+/* Show the arp timer interval. */
 static ssize_t bonding_show_arp_interval(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -387,26 +293,10 @@ static ssize_t bonding_show_arp_interval(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.arp_interval);
 }
-
-static ssize_t bonding_store_arp_interval(struct device *d,
-                                         struct device_attribute *attr,
-                                         const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
-                  bonding_show_arp_interval, bonding_store_arp_interval);
+                  bonding_show_arp_interval, bonding_sysfs_store_option);
 
-/*
- * Show and set the arp targets.
- */
+/* Show the arp targets. */
 static ssize_t bonding_show_arp_targets(struct device *d,
                                        struct device_attribute *attr,
                                        char *buf)
@@ -424,27 +314,10 @@ static ssize_t bonding_show_arp_targets(struct device *d,
 
        return res;
 }
+static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
+                  bonding_show_arp_targets, bonding_sysfs_store_option);
 
-static ssize_t bonding_store_arp_targets(struct device *d,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
-
-/*
- * Show and set the up and down delays.  These must be multiples of the
- * MII monitoring value, and are stored internally as the multiplier.
- * Thus, we must translate to MS for the real world.
- */
+/* Show the up and down delays. */
 static ssize_t bonding_show_downdelay(struct device *d,
                                      struct device_attribute *attr,
                                      char *buf)
@@ -453,22 +326,8 @@ static ssize_t bonding_show_downdelay(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
 }
-
-static ssize_t bonding_store_downdelay(struct device *d,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
-                  bonding_show_downdelay, bonding_store_downdelay);
+                  bonding_show_downdelay, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_updelay(struct device *d,
                                    struct device_attribute *attr,
@@ -479,27 +338,10 @@ static ssize_t bonding_show_updelay(struct device *d,
        return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
 
 }
-
-static ssize_t bonding_store_updelay(struct device *d,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
-                  bonding_show_updelay, bonding_store_updelay);
+                  bonding_show_updelay, bonding_sysfs_store_option);
 
-/*
- * Show and set the LACP interval.  Interface must be down, and the mode
- * must be set to 802.3ad mode.
- */
+/* Show the LACP interval. */
 static ssize_t bonding_show_lacp(struct device *d,
                                 struct device_attribute *attr,
                                 char *buf)
@@ -511,22 +353,8 @@ static ssize_t bonding_show_lacp(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
 }
-
-static ssize_t bonding_store_lacp(struct device *d,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
-                  bonding_show_lacp, bonding_store_lacp);
+                  bonding_show_lacp, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_min_links(struct device *d,
                                      struct device_attribute *attr,
@@ -534,24 +362,10 @@ static ssize_t bonding_show_min_links(struct device *d,
 {
        struct bonding *bond = to_bond(d);
 
-       return sprintf(buf, "%d\n", bond->params.min_links);
-}
-
-static ssize_t bonding_store_min_links(struct device *d,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
+       return sprintf(buf, "%u\n", bond->params.min_links);
 }
 static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
-                  bonding_show_min_links, bonding_store_min_links);
+                  bonding_show_min_links, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_ad_select(struct device *d,
                                      struct device_attribute *attr,
@@ -564,27 +378,10 @@ static ssize_t bonding_show_ad_select(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
 }
-
-
-static ssize_t bonding_store_ad_select(struct device *d,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
-                  bonding_show_ad_select, bonding_store_ad_select);
+                  bonding_show_ad_select, bonding_sysfs_store_option);
 
-/*
- * Show and set the number of peer notifications to send after a failover event.
- */
+/* Show and set the number of peer notifications to send after a failover event. */
 static ssize_t bonding_show_num_peer_notif(struct device *d,
                                           struct device_attribute *attr,
                                           char *buf)
@@ -611,12 +408,7 @@ static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
 static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
                   bonding_show_num_peer_notif, bonding_store_num_peer_notif);
 
-/*
- * Show and set the MII monitor interval.  There are two tricky bits
- * here.  First, if MII monitoring is activated, then we must disable
- * ARP monitoring.  Second, if the timer isn't running, we must
- * start it.
- */
+/* Show the MII monitor interval. */
 static ssize_t bonding_show_miimon(struct device *d,
                                   struct device_attribute *attr,
                                   char *buf)
@@ -625,30 +417,10 @@ static ssize_t bonding_show_miimon(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.miimon);
 }
-
-static ssize_t bonding_store_miimon(struct device *d,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
-                  bonding_show_miimon, bonding_store_miimon);
+                  bonding_show_miimon, bonding_sysfs_store_option);
 
-/*
- * Show and set the primary slave.  The store function is much
- * simpler than bonding_store_slaves function because it only needs to
- * handle one interface name.
- * The bond must be a mode that supports a primary for this be
- * set.
- */
+/* Show the primary slave. */
 static ssize_t bonding_show_primary(struct device *d,
                                    struct device_attribute *attr,
                                    char *buf)
@@ -661,26 +433,10 @@ static ssize_t bonding_show_primary(struct device *d,
 
        return count;
 }
-
-static ssize_t bonding_store_primary(struct device *d,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
-                  bonding_show_primary, bonding_store_primary);
+                  bonding_show_primary, bonding_sysfs_store_option);
 
-/*
- * Show and set the primary_reselect flag.
- */
+/* Show the primary_reselect flag. */
 static ssize_t bonding_show_primary_reselect(struct device *d,
                                             struct device_attribute *attr,
                                             char *buf)
@@ -694,28 +450,10 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
        return sprintf(buf, "%s %d\n",
                       val->string, bond->params.primary_reselect);
 }
-
-static ssize_t bonding_store_primary_reselect(struct device *d,
-                                             struct device_attribute *attr,
-                                             const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
-                                  (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
-                  bonding_show_primary_reselect,
-                  bonding_store_primary_reselect);
+                  bonding_show_primary_reselect, bonding_sysfs_store_option);
 
-/*
- * Show and set the use_carrier flag.
- */
+/* Show the use_carrier flag. */
 static ssize_t bonding_show_carrier(struct device *d,
                                    struct device_attribute *attr,
                                    char *buf)
@@ -724,27 +462,11 @@ static ssize_t bonding_show_carrier(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.use_carrier);
 }
-
-static ssize_t bonding_store_carrier(struct device *d,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
-                  bonding_show_carrier, bonding_store_carrier);
+                  bonding_show_carrier, bonding_sysfs_store_option);
 
 
-/*
- * Show and set currently active_slave.
- */
+/* Show currently active_slave. */
 static ssize_t bonding_show_active_slave(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -761,27 +483,10 @@ static ssize_t bonding_show_active_slave(struct device *d,
 
        return count;
 }
-
-static ssize_t bonding_store_active_slave(struct device *d,
-                                         struct device_attribute *attr,
-                                         const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
-                  bonding_show_active_slave, bonding_store_active_slave);
-
+                  bonding_show_active_slave, bonding_sysfs_store_option);
 
-/*
- * Show link status of the bond interface.
- */
+/* Show link status of the bond interface. */
 static ssize_t bonding_show_mii_status(struct device *d,
                                       struct device_attribute *attr,
                                       char *buf)
@@ -792,9 +497,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
 }
 static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
 
-/*
- * Show current 802.3ad aggregator ID.
- */
+/* Show current 802.3ad aggregator ID. */
 static ssize_t bonding_show_ad_aggregator(struct device *d,
                                          struct device_attribute *attr,
                                          char *buf)
@@ -802,7 +505,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
                                bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -814,9 +517,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
 static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
 
 
-/*
- * Show number of active 802.3ad ports.
- */
+/* Show number of active 802.3ad ports. */
 static ssize_t bonding_show_ad_num_ports(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -824,7 +525,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
                                bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -836,9 +537,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
 static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
 
 
-/*
- * Show current 802.3ad actor key.
- */
+/* Show current 802.3ad actor key. */
 static ssize_t bonding_show_ad_actor_key(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -846,7 +545,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
                                bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -858,9 +557,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
 static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
 
 
-/*
- * Show current 802.3ad partner key.
- */
+/* Show current 802.3ad partner key. */
 static ssize_t bonding_show_ad_partner_key(struct device *d,
                                           struct device_attribute *attr,
                                           char *buf)
@@ -868,7 +565,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
                                bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -880,9 +577,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
 static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
 
 
-/*
- * Show current 802.3ad partner mac.
- */
+/* Show current 802.3ad partner mac. */
 static ssize_t bonding_show_ad_partner_mac(struct device *d,
                                           struct device_attribute *attr,
                                           char *buf)
@@ -890,7 +585,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                if (!bond_3ad_get_active_agg_info(bond, &ad_info))
                        count = sprintf(buf, "%pM\n", ad_info.partner_system);
@@ -900,9 +595,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
 }
 static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
 
-/*
- * Show the queue_ids of the slaves in the current bond.
- */
+/* Show the queue_ids of the slaves in the current bond. */
 static ssize_t bonding_show_queue_id(struct device *d,
                                     struct device_attribute *attr,
                                     char *buf)
@@ -933,31 +626,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
 
        return res;
 }
-
-/*
- * Set the queue_ids of the  slaves in the current bond.  The bond
- * interface must be enslaved for this to work.
- */
-static ssize_t bonding_store_queue_id(struct device *d,
-                                     struct device_attribute *attr,
-                                     const char *buffer, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
-                  bonding_store_queue_id);
+                  bonding_sysfs_store_option);
 
 
-/*
- * Show and set the all_slaves_active flag.
- */
+/* Show the all_slaves_active flag. */
 static ssize_t bonding_show_slaves_active(struct device *d,
                                          struct device_attribute *attr,
                                          char *buf)
@@ -966,27 +639,10 @@ static ssize_t bonding_show_slaves_active(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.all_slaves_active);
 }
-
-static ssize_t bonding_store_slaves_active(struct device *d,
-                                          struct device_attribute *attr,
-                                          const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
-                                  (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
-                  bonding_show_slaves_active, bonding_store_slaves_active);
+                  bonding_show_slaves_active, bonding_sysfs_store_option);
 
-/*
- * Show and set the number of IGMP membership reports to send on link failure
- */
+/* Show the number of IGMP membership reports to send on link failure */
 static ssize_t bonding_show_resend_igmp(struct device *d,
                                        struct device_attribute *attr,
                                        char *buf)
@@ -995,23 +651,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.resend_igmp);
 }
-
-static ssize_t bonding_store_resend_igmp(struct device *d,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-
 static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
-                  bonding_show_resend_igmp, bonding_store_resend_igmp);
+                  bonding_show_resend_igmp, bonding_sysfs_store_option);
 
 
 static ssize_t bonding_show_lp_interval(struct device *d,
@@ -1019,25 +660,21 @@ static ssize_t bonding_show_lp_interval(struct device *d,
                                        char *buf)
 {
        struct bonding *bond = to_bond(d);
+
        return sprintf(buf, "%d\n", bond->params.lp_interval);
 }
+static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+                  bonding_show_lp_interval, bonding_sysfs_store_option);
 
-static ssize_t bonding_store_lp_interval(struct device *d,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count)
+static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
+                                          struct device_attribute *attr,
+                                          char *buf)
 {
        struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
+       return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
 }
-
-static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
-                  bonding_show_lp_interval, bonding_store_lp_interval);
+static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
+                  bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_packets_per_slave(struct device *d,
                                              struct device_attribute *attr,
@@ -1045,27 +682,11 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
 {
        struct bonding *bond = to_bond(d);
        unsigned int packets_per_slave = bond->params.packets_per_slave;
-       return sprintf(buf, "%u\n", packets_per_slave);
-}
-
-static ssize_t bonding_store_packets_per_slave(struct device *d,
-                                              struct device_attribute *attr,
-                                              const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
 
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE,
-                                  (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
+       return sprintf(buf, "%u\n", packets_per_slave);
 }
-
 static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
-                  bonding_show_packets_per_slave,
-                  bonding_store_packets_per_slave);
+                  bonding_show_packets_per_slave, bonding_sysfs_store_option);
 
 static struct attribute *per_bond_attrs[] = {
        &dev_attr_slaves.attr,
@@ -1099,6 +720,7 @@ static struct attribute *per_bond_attrs[] = {
        &dev_attr_min_links.attr,
        &dev_attr_lp_interval.attr,
        &dev_attr_packets_per_slave.attr,
+       &dev_attr_tlb_dynamic_lb.attr,
        NULL,
 };
 
@@ -1107,8 +729,7 @@ static struct attribute_group bonding_group = {
        .attrs = per_bond_attrs,
 };
 
-/*
- * Initialize sysfs.  This sets up the bonding_masters file in
+/* Initialize sysfs.  This sets up the bonding_masters file in
  * /sys/class/net.
  */
 int bond_create_sysfs(struct bond_net *bn)
@@ -1120,8 +741,7 @@ int bond_create_sysfs(struct bond_net *bn)
 
        ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
                                          bn->net);
-       /*
-        * Permit multiple loads of the module by ignoring failures to
+       /* Permit multiple loads of the module by ignoring failures to
         * create the bonding_masters sysfs file.  Bonding devices
         * created by second or subsequent loads of the module will
         * not be listed in, or controllable by, bonding_masters, but
@@ -1144,16 +764,13 @@ int bond_create_sysfs(struct bond_net *bn)
 
 }
 
-/*
- * Remove /sys/class/net/bonding_masters.
- */
+/* Remove /sys/class/net/bonding_masters. */
 void bond_destroy_sysfs(struct bond_net *bn)
 {
        netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
 }
 
-/*
- * Initialize sysfs for each bond.  This sets up and registers
+/* Initialize sysfs for each bond.  This sets up and registers
  * the 'bondctl' directory for each individual bond under /sys/class/net.
  */
 void bond_prepare_sysfs_group(struct bonding *bond)
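The hunks above fold a series of near-identical per-option store handlers (active_slave, queue_id, all_slaves_active, resend_igmp, lp_interval, packets_per_slave) into one generic bonding_sysfs_store_option() callback wired into each DEVICE_ATTR(). The generic handler itself is defined earlier in bond_sysfs.c and is not visible in these hunks; the sketch below only shows the shape such a handler can take, reusing the bond_opt_tryset_rtnl() call seen in the removed code. The name-based option lookup (bond_opt_get_by_name() and opt->id) is an assumption made for illustration, not something shown in this patch.

static ssize_t bonding_sysfs_store_option(struct device *d,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        struct bonding *bond = to_bond(d);
        const struct bond_option *opt;
        int ret;

        /* Assumed helper: resolve the sysfs attribute name to its entry
         * in the bonding options table.
         */
        opt = bond_opt_get_by_name(attr->attr.name);
        if (WARN_ON(!opt))
                return -ENOENT;

        /* Same rtnl-protected path the removed per-option handlers used */
        ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buf);
        if (!ret)
                ret = count;

        return ret;
}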
index 2e4eec5450c80b726a7bbf1a47ae169cdd18db4f..198677f58ce0af4134b2491e90e6774fa2ae17b4 100644 (file)
@@ -69,8 +69,8 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
 {
        const struct aggregator *agg;
 
-       if (slave->bond->params.mode == BOND_MODE_8023AD) {
-               agg = SLAVE_AD_INFO(slave).port.aggregator;
+       if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
+               agg = SLAVE_AD_INFO(slave)->port.aggregator;
                if (agg)
                        return sprintf(buf, "%d\n",
                                       agg->aggregator_identifier);
index b8bdd0acc8f334ac97bca2ddfea602f473c3272f..ea64aa2f8b9575754f357c4619a4959960ddec93 100644 (file)
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP    2
 #define BOND_MAX_ARP_TARGETS   16
 
 #define BOND_DEFAULT_MIIMON    100
 
-#define IS_UP(dev)                                        \
-             ((((dev)->flags & IFF_UP) == IFF_UP)      && \
-              netif_running(dev)                       && \
-              netif_carrier_ok(dev))
-
-/*
- * Checks whether slave is ready for transmit.
- */
-#define SLAVE_IS_OK(slave)                             \
-                   (((slave)->dev->flags & IFF_UP)  && \
-                    netif_running((slave)->dev)     && \
-                    ((slave)->link == BOND_LINK_UP) && \
-                    bond_is_active_slave(slave))
-
-
-#define USES_PRIMARY(mode)                             \
-               (((mode) == BOND_MODE_ACTIVEBACKUP) ||  \
-                ((mode) == BOND_MODE_TLB)          ||  \
-                ((mode) == BOND_MODE_ALB))
-
-#define BOND_NO_USES_ARP(mode)                         \
-               (((mode) == BOND_MODE_8023AD)   ||      \
-                ((mode) == BOND_MODE_TLB)      ||      \
-                ((mode) == BOND_MODE_ALB))
-
-#define TX_QUEUE_OVERRIDE(mode)                                \
-                       (((mode) == BOND_MODE_ACTIVEBACKUP) ||  \
-                        ((mode) == BOND_MODE_ROUNDROBIN))
-
-#define BOND_MODE_IS_LB(mode)                  \
-               (((mode) == BOND_MODE_TLB) ||   \
-                ((mode) == BOND_MODE_ALB))
-
-#define IS_IP_TARGET_UNUSABLE_ADDRESS(a)       \
-       ((htonl(INADDR_BROADCAST) == a) ||      \
-        ipv4_is_zeronet(a))
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
@@ -89,6 +54,8 @@
        set_fs(fs);                     \
        res; })
 
+#define BOND_MODE(bond) ((bond)->params.mode)
+
 /* slave list primitives */
 #define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
 
@@ -174,6 +141,7 @@ struct bond_params {
        int resend_igmp;
        int lp_interval;
        int packets_per_slave;
+       int tlb_dynamic_lb;
        struct reciprocal_value reciprocal_packets_per_slave;
 };
 
@@ -182,8 +150,6 @@ struct bond_parm_tbl {
        int mode;
 };
 
-#define BOND_MAX_MODENAME_LEN 20
-
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
@@ -204,7 +170,7 @@ struct slave {
        u32    speed;
        u16    queue_id;
        u8     perm_hwaddr[ETH_ALEN];
-       struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
+       struct ad_slave_info *ad_info;
        struct tlb_slave_info tlb_info;
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
@@ -284,14 +250,41 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
 
 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
 {
-       if (!slave || !slave->bond)
-               return NULL;
        return slave->bond;
 }
 
+static inline bool bond_should_override_tx_queue(struct bonding *bond)
+{
+       return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+              BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
+}
+
 static inline bool bond_is_lb(const struct bonding *bond)
 {
-       return BOND_MODE_IS_LB(bond->params.mode);
+       return BOND_MODE(bond) == BOND_MODE_TLB ||
+              BOND_MODE(bond) == BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_arp(int mode)
+{
+       return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
+              mode != BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_primary(int mode)
+{
+       return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
+              mode == BOND_MODE_ALB;
+}
+
+static inline bool bond_uses_primary(struct bonding *bond)
+{
+       return bond_mode_uses_primary(BOND_MODE(bond));
+}
+
+static inline bool bond_slave_is_up(struct slave *slave)
+{
+       return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
 }
 
 static inline void bond_set_active_slave(struct slave *slave)
@@ -364,6 +357,12 @@ static inline bool bond_is_active_slave(struct slave *slave)
        return !bond_slave_state(slave);
 }
 
+static inline bool bond_slave_can_tx(struct slave *slave)
+{
+       return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
+              bond_is_active_slave(slave);
+}
+
 #define BOND_PRI_RESELECT_ALWAYS       0
 #define BOND_PRI_RESELECT_BETTER       1
 #define BOND_PRI_RESELECT_FAILURE      2
@@ -395,12 +394,16 @@ static inline int slave_do_arp_validate(struct bonding *bond,
        return bond->params.arp_validate & (1 << bond_slave_state(slave));
 }
 
-static inline int slave_do_arp_validate_only(struct bonding *bond,
-                                            struct slave *slave)
+static inline int slave_do_arp_validate_only(struct bonding *bond)
 {
        return bond->params.arp_validate & BOND_ARP_FILTER;
 }
 
+static inline int bond_is_ip_target_ok(__be32 addr)
+{
+       return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
+}
+
 /* Get the oldest arp which we've received on this slave for bond's
  * arp_targets.
  */
@@ -478,16 +481,14 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
        return addr;
 }
 
-static inline bool slave_can_tx(struct slave *slave)
-{
-       if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
-           bond_is_active_slave(slave))
-               return true;
-       else
-               return false;
-}
-
-struct bond_net;
+struct bond_net {
+       struct net              *net;   /* Associated network namespace */
+       struct list_head        dev_list;
+#ifdef CONFIG_PROC_FS
+       struct proc_dir_entry   *proc_dir;
+#endif
+       struct class_attribute  class_attr_bonding_masters;
+};
 
 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
 void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
@@ -499,7 +500,7 @@ int bond_sysfs_slave_add(struct slave *slave);
 void bond_sysfs_slave_del(struct slave *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
 void bond_select_active_slave(struct bonding *bond);
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
 void bond_create_debugfs(void);
@@ -516,15 +517,6 @@ struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
 struct net_device *bond_option_active_slave_get(struct bonding *bond);
 const char *bond_slave_link_status(s8 link);
 
-struct bond_net {
-       struct net *            net;    /* Associated network namespace */
-       struct list_head        dev_list;
-#ifdef CONFIG_PROC_FS
-       struct proc_dir_entry * proc_dir;
-#endif
-       struct class_attribute  class_attr_bonding_masters;
-};
-
 #ifdef CONFIG_PROC_FS
 void bond_create_proc_entry(struct bonding *bond);
 void bond_remove_proc_entry(struct bonding *bond);
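These header hunks retire the IS_UP/SLAVE_IS_OK/USES_PRIMARY/BOND_NO_USES_ARP macro family in favour of static inline predicates built on the new BOND_MODE() accessor, which gives type checking and a single place that reads params.mode. A stand-alone, simplified illustration of the conversion (user-space C with made-up minimal types, not the kernel header):

/* Stand-alone illustration of the macro -> static inline conversion.
 * Types and values are simplified; this is not the kernel header.
 */
#include <stdbool.h>
#include <stdio.h>

enum { BOND_MODE_ROUNDROBIN, BOND_MODE_ACTIVEBACKUP, BOND_MODE_8023AD,
       BOND_MODE_TLB, BOND_MODE_ALB };

struct bond_params { int mode; };
struct bonding { struct bond_params params; };

#define BOND_MODE(bond) ((bond)->params.mode)

/* Same truth table the old USES_PRIMARY(mode) macro encoded, but with
 * an int-typed argument the compiler can check.
 */
static inline bool bond_mode_uses_primary(int mode)
{
        return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
               mode == BOND_MODE_ALB;
}

static inline bool bond_uses_primary(struct bonding *bond)
{
        return bond_mode_uses_primary(BOND_MODE(bond));
}

int main(void)
{
        struct bonding bond = { .params = { .mode = BOND_MODE_8023AD } };

        printf("8023ad uses primary: %d\n", bond_uses_primary(&bond));
        bond.params.mode = BOND_MODE_ALB;
        printf("alb uses primary: %d\n", bond_uses_primary(&bond));
        return 0;
}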
index 9e7d95dae2c7038478d6efadddba81e2778f47a9..41688229c570eb92e9e79b2bee2bd5b9d6173e39 100644 (file)
@@ -65,7 +65,7 @@ config CAN_LEDS
 
 config CAN_AT91
        tristate "Atmel AT91 onchip CAN controller"
-       depends on ARM
+       depends on ARCH_AT91 || COMPILE_TEST
        ---help---
          This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
          and AT91SAM9X5 processors.
@@ -77,12 +77,6 @@ config CAN_TI_HECC
          Driver for TI HECC (High End CAN Controller) module found on many
          TI devices. The device specifications are available from www.ti.com
 
-config CAN_MCP251X
-       tristate "Microchip MCP251x SPI CAN controllers"
-       depends on SPI && HAS_DMA
-       ---help---
-         Driver for the Microchip MCP251x SPI CAN controllers.
-
 config CAN_BFIN
        depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
        tristate "Analog Devices Blackfin on-chip CAN"
@@ -110,7 +104,7 @@ config CAN_FLEXCAN
 
 config PCH_CAN
        tristate "Intel EG20T PCH CAN controller"
-       depends on PCI
+       depends on PCI && (X86_32 || COMPILE_TEST)
        ---help---
          This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
          is an IOH for x86 embedded processor (Intel Atom E6xx series).
@@ -125,6 +119,24 @@ config CAN_GRCAN
          endian syntheses of the cores would need some modifications on
          the hardware level to work.
 
+config CAN_RCAR
+       tristate "Renesas R-Car CAN controller"
+       depends on ARM
+       ---help---
+         Say Y here if you want to use CAN controller found on Renesas R-Car
+         SoCs.
+
+         To compile this driver as a module, choose M here: the module will
+         be called rcar_can.
+
+config CAN_XILINXCAN
+       tristate "Xilinx CAN"
+       depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+       depends on COMMON_CLK && HAS_IOMEM
+       ---help---
+         Xilinx CAN driver. This driver supports both soft AXI CAN IP and
+         Zynq CANPS IP.
+
 source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
@@ -133,6 +145,8 @@ source "drivers/net/can/c_can/Kconfig"
 
 source "drivers/net/can/cc770/Kconfig"
 
+source "drivers/net/can/spi/Kconfig"
+
 source "drivers/net/can/usb/Kconfig"
 
 source "drivers/net/can/softing/Kconfig"
index c7440392adbbaaabd5ca5b8ba872ba18c23f41d2..1697f22353a943315bdb5e162d3cdce4f7d535f7 100644 (file)
@@ -10,6 +10,7 @@ can-dev-y                     := dev.o
 
 can-dev-$(CONFIG_CAN_LEDS)     += led.o
 
+obj-y                          += spi/
 obj-y                          += usb/
 obj-y                          += softing/
 
@@ -19,11 +20,12 @@ obj-$(CONFIG_CAN_C_CAN)             += c_can/
 obj-$(CONFIG_CAN_CC770)                += cc770/
 obj-$(CONFIG_CAN_AT91)         += at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)      += ti_hecc.o
-obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
 obj-$(CONFIG_CAN_BFIN)         += bfin_can.o
 obj-$(CONFIG_CAN_JANZ_ICAN3)   += janz-ican3.o
 obj-$(CONFIG_CAN_FLEXCAN)      += flexcan.o
 obj-$(CONFIG_PCH_CAN)          += pch_can.o
 obj-$(CONFIG_CAN_GRCAN)                += grcan.o
+obj-$(CONFIG_CAN_RCAR)         += rcar_can.o
+obj-$(CONFIG_CAN_XILINXCAN)    += xilinx_can.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
index a5c8dcfa83579376a36105c8c055e63f792c42ad..8e78bb48f5a4399f0fef7e991d64984c638c077a 100644 (file)
@@ -60,6 +60,8 @@
 #define CONTROL_IE             BIT(1)
 #define CONTROL_INIT           BIT(0)
 
+#define CONTROL_IRQMSK         (CONTROL_EIE | CONTROL_IE | CONTROL_SIE)
+
 /* test register */
 #define TEST_RX                        BIT(7)
 #define TEST_TX1               BIT(6)
 #define IF_COMM_CONTROL                BIT(4)
 #define IF_COMM_CLR_INT_PND    BIT(3)
 #define IF_COMM_TXRQST         BIT(2)
+#define IF_COMM_CLR_NEWDAT     IF_COMM_TXRQST
 #define IF_COMM_DATAA          BIT(1)
 #define IF_COMM_DATAB          BIT(0)
-#define IF_COMM_ALL            (IF_COMM_MASK | IF_COMM_ARB | \
-                               IF_COMM_CONTROL | IF_COMM_TXRQST | \
-                               IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* TX buffer setup */
+#define IF_COMM_TX             (IF_COMM_ARB | IF_COMM_CONTROL | \
+                                IF_COMM_TXRQST |                \
+                                IF_COMM_DATAA | IF_COMM_DATAB)
 
 /* For the low buffers we clear the interrupt bit, but keep newdat */
 #define IF_COMM_RCV_LOW                (IF_COMM_MASK | IF_COMM_ARB | \
                                 IF_COMM_DATAA | IF_COMM_DATAB)
 
 /* For the high buffers we clear the interrupt bit and newdat */
-#define IF_COMM_RCV_HIGH       (IF_COMM_RCV_LOW | IF_COMM_TXRQST)
+#define IF_COMM_RCV_HIGH       (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)
+
+
+/* Receive setup of message objects */
+#define IF_COMM_RCV_SETUP      (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)
+
+/* Invalidation of message objects */
+#define IF_COMM_INVAL          (IF_COMM_ARB | IF_COMM_CONTROL)
 
 /* IFx arbitration */
-#define IF_ARB_MSGVAL          BIT(15)
-#define IF_ARB_MSGXTD          BIT(14)
-#define IF_ARB_TRANSMIT                BIT(13)
+#define IF_ARB_MSGVAL          BIT(31)
+#define IF_ARB_MSGXTD          BIT(30)
+#define IF_ARB_TRANSMIT                BIT(29)
 
 /* IFx message control */
 #define IF_MCONT_NEWDAT                BIT(15)
 #define IF_MCONT_EOB           BIT(7)
 #define IF_MCONT_DLC_MASK      0xf
 
+#define IF_MCONT_RCV           (IF_MCONT_RXIE | IF_MCONT_UMASK)
+#define IF_MCONT_RCV_EOB       (IF_MCONT_RCV | IF_MCONT_EOB)
+
+#define IF_MCONT_TX            (IF_MCONT_TXIE | IF_MCONT_EOB)
+
 /*
  * Use IF1 for RX and IF2 for TX
  */
 #define IF_RX                  0
 #define IF_TX                  1
 
-/* status interrupt */
-#define STATUS_INTERRUPT       0x8000
-
-/* global interrupt masks */
-#define ENABLE_ALL_INTERRUPTS  1
-#define DISABLE_ALL_INTERRUPTS 0
-
 /* minimum timeout for checking BUSY status */
 #define MIN_TIMEOUT_VALUE      6
 
@@ -171,6 +181,7 @@ enum c_can_lec_type {
        LEC_BIT0_ERROR,
        LEC_CRC_ERROR,
        LEC_UNUSED,
+       LEC_MASK = LEC_UNUSED,
 };
 
 /*
@@ -226,143 +237,113 @@ static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
                priv->raminit(priv, enable);
 }
 
-static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
-{
-       return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
-                       C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static inline int get_tx_echo_msg_obj(int txecho)
-{
-       return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
-{
-       u32 val = priv->read_reg(priv, index);
-       val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
-       return val;
-}
-
-static void c_can_enable_all_interrupts(struct c_can_priv *priv,
-                                               int enable)
+static void c_can_irq_control(struct c_can_priv *priv, bool enable)
 {
-       unsigned int cntrl_save = priv->read_reg(priv,
-                                               C_CAN_CTRL_REG);
+       u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;
 
        if (enable)
-               cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
-       else
-               cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+               ctrl |= CONTROL_IRQMSK;
 
-       priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
+       priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
 }
 
-static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
 {
-       int count = MIN_TIMEOUT_VALUE;
+       struct c_can_priv *priv = netdev_priv(dev);
+       int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
 
-       while (count && priv->read_reg(priv,
-                               C_CAN_IFACE(COMREQ_REG, iface)) &
-                               IF_COMR_BUSY) {
-               count--;
+       priv->write_reg32(priv, reg, (cmd << 16) | obj);
+
+       for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
+               if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
+                       return;
                udelay(1);
        }
+       netdev_err(dev, "Updating object timed out\n");
 
-       if (!count)
-               return 1;
+}
 
-       return 0;
+static inline void c_can_object_get(struct net_device *dev, int iface,
+                                   u32 obj, u32 cmd)
+{
+       c_can_obj_update(dev, iface, cmd, obj);
 }
 
-static inline void c_can_object_get(struct net_device *dev,
-                                       int iface, int objno, int mask)
+static inline void c_can_object_put(struct net_device *dev, int iface,
+                                   u32 obj, u32 cmd)
 {
-       struct c_can_priv *priv = netdev_priv(dev);
+       c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
+}
 
-       /*
-        * As per specs, after writting the message object number in the
-        * IF command request register the transfer b/w interface
-        * register and message RAM must be complete in 6 CAN-CLK
-        * period.
-        */
-       priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
-                       IFX_WRITE_LOW_16BIT(mask));
-       priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
-                       IFX_WRITE_LOW_16BIT(objno));
+/*
+ * Note: According to documentation clearing TXIE while MSGVAL is set
+ * is not allowed, but works nicely on C/DCAN. And that lowers the I/O
+ * load significantly.
+ */
+static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
 
-       if (c_can_msg_obj_is_busy(priv, iface))
-               netdev_err(dev, "timed out in object get\n");
+       priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
+       c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
 }
 
-static inline void c_can_object_put(struct net_device *dev,
-                                       int iface, int objno, int mask)
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
 {
        struct c_can_priv *priv = netdev_priv(dev);
 
-       /*
-        * As per specs, after writting the message object number in the
-        * IF command request register the transfer b/w interface
-        * register and message RAM must be complete in 6 CAN-CLK
-        * period.
-        */
-       priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
-                       (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
-       priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
-                       IFX_WRITE_LOW_16BIT(objno));
-
-       if (c_can_msg_obj_is_busy(priv, iface))
-               netdev_err(dev, "timed out in object put\n");
+       priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
+       priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
+       c_can_inval_tx_object(dev, iface, obj);
 }
 
-static void c_can_write_msg_object(struct net_device *dev,
-                       int iface, struct can_frame *frame, int objno)
+static void c_can_setup_tx_object(struct net_device *dev, int iface,
+                                 struct can_frame *frame, int idx)
 {
-       int i;
-       u16 flags = 0;
-       unsigned int id;
        struct c_can_priv *priv = netdev_priv(dev);
-
-       if (!(frame->can_id & CAN_RTR_FLAG))
-               flags |= IF_ARB_TRANSMIT;
+       u16 ctrl = IF_MCONT_TX | frame->can_dlc;
+       bool rtr = frame->can_id & CAN_RTR_FLAG;
+       u32 arb = IF_ARB_MSGVAL;
+       int i;
 
        if (frame->can_id & CAN_EFF_FLAG) {
-               id = frame->can_id & CAN_EFF_MASK;
-               flags |= IF_ARB_MSGXTD;
-       } else
-               id = ((frame->can_id & CAN_SFF_MASK) << 18);
+               arb |= frame->can_id & CAN_EFF_MASK;
+               arb |= IF_ARB_MSGXTD;
+       } else {
+               arb |= (frame->can_id & CAN_SFF_MASK) << 18;
+       }
+
+       if (!rtr)
+               arb |= IF_ARB_TRANSMIT;
+
+       /*
+        * If we change the DIR bit, we need to invalidate the buffer
+        * first, i.e. clear the MSGVAL flag in the arbiter.
+        */
+       if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
+               u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+
+               c_can_inval_msg_object(dev, iface, obj);
+               change_bit(idx, &priv->tx_dir);
+       }
 
-       flags |= IF_ARB_MSGVAL;
+       priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
 
-       priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
-                               IFX_WRITE_LOW_16BIT(id));
-       priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
-                               IFX_WRITE_HIGH_16BIT(id));
+       priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
 
        for (i = 0; i < frame->can_dlc; i += 2) {
                priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
                                frame->data[i] | (frame->data[i + 1] << 8));
        }
-
-       /* enable interrupt for this message object */
-       priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
-                       IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
-                       frame->can_dlc);
-       c_can_object_put(dev, iface, objno, IF_COMM_ALL);
 }
 
 static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
-                                               int iface,
-                                               int ctrl_mask)
+                                                      int iface)
 {
        int i;
-       struct c_can_priv *priv = netdev_priv(dev);
 
-       for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
-               priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
-                               ctrl_mask & ~IF_MCONT_NEWDAT);
-               c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
-       }
+       for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
+               c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
 }
 
 static int c_can_handle_lost_msg_obj(struct net_device *dev,
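In the reworked driver every interface-register transaction goes through c_can_obj_update(): the command and message object number are written in a single 32-bit access, and the code then polls the command request register until the hardware clears the BUSY bit, giving up after MIN_TIMEOUT_VALUE iterations. A stand-alone model of that handshake against a mocked register (the register behaviour below is invented purely for illustration):

/* Stand-alone model of the IF command handshake used by
 * c_can_obj_update(): issue the command, then poll until the
 * "hardware" clears the BUSY bit or we give up. A plain variable
 * stands in for the COMREQ register.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IF_COMR_BUSY      (1u << 15)
#define MIN_TIMEOUT_VALUE 6

static uint32_t comreq;                 /* mock COMREQ register */

static void hw_write(uint32_t val)
{
        comreq = val | IF_COMR_BUSY;    /* hardware latches BUSY */
}

static uint32_t hw_read(void)
{
        comreq &= ~IF_COMR_BUSY;        /* mock: transfer completes on first read */
        return comreq;
}

static bool obj_update(uint32_t cmd, uint32_t obj)
{
        int cnt;

        hw_write((cmd << 16) | obj);
        for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
                if (!(hw_read() & IF_COMR_BUSY))
                        return true;
                /* the driver udelay(1)s here; omitted in the model */
        }
        fprintf(stderr, "Updating object timed out\n");
        return false;
}

int main(void)
{
        printf("update ok: %d\n", obj_update(0x23 /* cmd */, 17 /* obj */));
        return 0;
}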
@@ -377,6 +358,9 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
        c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
 
+       stats->rx_errors++;
+       stats->rx_over_errors++;
+
        /* create an error msg */
        skb = alloc_can_err_skb(dev, &frame);
        if (unlikely(!skb))
@@ -384,22 +368,18 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
 
        frame->can_id |= CAN_ERR_CRTL;
        frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-       stats->rx_errors++;
-       stats->rx_over_errors++;
 
        netif_receive_skb(skb);
        return 1;
 }
 
-static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
 {
-       u16 flags, data;
-       int i;
-       unsigned int val;
-       struct c_can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
-       struct sk_buff *skb;
+       struct c_can_priv *priv = netdev_priv(dev);
        struct can_frame *frame;
+       struct sk_buff *skb;
+       u32 arb, data;
 
        skb = alloc_can_skb(dev, &frame);
        if (!skb) {
@@ -409,115 +389,79 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
 
        frame->can_dlc = get_can_dlc(ctrl & 0x0F);
 
-       flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
-       val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
-               (flags << 16);
+       arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
 
-       if (flags & IF_ARB_MSGXTD)
-               frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+       if (arb & IF_ARB_MSGXTD)
+               frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
        else
-               frame->can_id = (val >> 18) & CAN_SFF_MASK;
+               frame->can_id = (arb >> 18) & CAN_SFF_MASK;
 
-       if (flags & IF_ARB_TRANSMIT)
+       if (arb & IF_ARB_TRANSMIT) {
                frame->can_id |= CAN_RTR_FLAG;
-       else {
-               for (i = 0; i < frame->can_dlc; i += 2) {
-                       data = priv->read_reg(priv,
-                               C_CAN_IFACE(DATA1_REG, iface) + i / 2);
+       } else {
+               int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+               for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
+                       data = priv->read_reg(priv, dreg);
                        frame->data[i] = data;
                        frame->data[i + 1] = data >> 8;
                }
        }
 
-       netif_receive_skb(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += frame->can_dlc;
+
+       netif_receive_skb(skb);
        return 0;
 }
 
 static void c_can_setup_receive_object(struct net_device *dev, int iface,
-                                       int objno, unsigned int mask,
-                                       unsigned int id, unsigned int mcont)
+                                      u32 obj, u32 mask, u32 id, u32 mcont)
 {
        struct c_can_priv *priv = netdev_priv(dev);
 
-       priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
-                       IFX_WRITE_LOW_16BIT(mask));
-
-       /* According to C_CAN documentation, the reserved bit
-        * in IFx_MASK2 register is fixed 1
-        */
-       priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-                       IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
+       mask |= BIT(29);
+       priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
 
-       priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
-                       IFX_WRITE_LOW_16BIT(id));
-       priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
-                       (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+       id |= IF_ARB_MSGVAL;
+       priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);
 
        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
-       c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
-
-       netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
-                       c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
-}
-
-static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
-{
-       struct c_can_priv *priv = netdev_priv(dev);
-
-       priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
-       priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
-       priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
-
-       c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
-
-       netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
-                       c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
-}
-
-static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
-{
-       int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
-
-       /*
-        * as transmission request register's bit n-1 corresponds to
-        * message object n, we need to handle the same properly.
-        */
-       if (val & (1 << (objno - 1)))
-               return 1;
-
-       return 0;
+       c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
 }
 
 static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
-                                       struct net_device *dev)
+                                   struct net_device *dev)
 {
-       u32 msg_obj_no;
-       struct c_can_priv *priv = netdev_priv(dev);
        struct can_frame *frame = (struct can_frame *)skb->data;
+       struct c_can_priv *priv = netdev_priv(dev);
+       u32 idx, obj;
 
        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;
-
-       spin_lock_bh(&priv->xmit_lock);
-       msg_obj_no = get_tx_next_msg_obj(priv);
-
-       /* prepare message object for transmission */
-       c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
-       priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
-       can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
-
        /*
-        * we have to stop the queue in case of a wrap around or
-        * if the next TX message object is still in use
+        * This is not a FIFO. C/D_CAN sends out the buffers
+        * prioritized. The lowest buffer number wins.
         */
-       priv->tx_next++;
-       if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
-                       (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+       idx = fls(atomic_read(&priv->tx_active));
+       obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+
+       /* If this is the last buffer, stop the xmit queue */
+       if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
                netif_stop_queue(dev);
-       spin_unlock_bh(&priv->xmit_lock);
+       /*
+        * Store the message in the interface so we can call
+        * can_put_echo_skb(). We must do this before we enable
+        * transmit as we might race against do_tx().
+        */
+       c_can_setup_tx_object(dev, IF_TX, frame, idx);
+       priv->dlc[idx] = frame->can_dlc;
+       can_put_echo_skb(skb, dev, idx);
+
+       /* Update the active bits */
+       atomic_add((1 << idx), &priv->tx_active);
+       /* Start transmission */
+       c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
 
        return NETDEV_TX_OK;
 }
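The new transmit path above replaces the tx_next/tx_echo ring and its spinlock with an atomic bitmask: fls() over tx_active yields the next free message object (the hardware always sends the lowest-numbered buffer first), the bit is set on submission, and c_can_do_tx() later clears the completed bits with atomic_sub() and only restarts the queue once the highest buffer has drained, which keeps the mask contiguous. A stand-alone sketch of that bookkeeping using C11 atomics (buffer count and helper names are illustrative):

/* Stand-alone model of the tx_active bookkeeping: buffers are claimed
 * in ascending order via fls() over an atomic bitmask and released by
 * subtracting the completed bits, mirroring c_can_start_xmit() and
 * c_can_do_tx().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <strings.h>            /* ffs() */

#define TX_NUM 16

static atomic_uint tx_active;

static int fls_u32(unsigned int x)      /* like the kernel's fls() */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

static int claim_buffer(void)
{
        unsigned int idx = fls_u32(atomic_load(&tx_active));

        if (idx >= TX_NUM)
                return -1;              /* the driver stops the queue here */
        atomic_fetch_add(&tx_active, 1u << idx);
        return idx;
}

static void complete_buffers(unsigned int pend)
{
        unsigned int clr = pend;
        int idx;

        while ((idx = ffs(pend))) {
                idx--;
                pend &= ~(1u << idx);
                printf("echoed skb for buffer %d\n", idx);
        }
        atomic_fetch_sub(&tx_active, clr);
}

int main(void)
{
        printf("claimed %d\n", claim_buffer());         /* 0 */
        printf("claimed %d\n", claim_buffer());         /* 1 */
        complete_buffers(0x3);                          /* both done */
        printf("claimed %d\n", claim_buffer());         /* 0 again */
        return 0;
}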
@@ -594,11 +538,10 @@ static void c_can_configure_msg_objects(struct net_device *dev)
 
        /* setup receive message objects */
        for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
-               c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
-                       (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+               c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);
 
        c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
-                       IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+                                  IF_MCONT_RCV_EOB);
 }
 
 /*
@@ -612,30 +555,22 @@ static int c_can_chip_config(struct net_device *dev)
        struct c_can_priv *priv = netdev_priv(dev);
 
        /* enable automatic retransmission */
-       priv->write_reg(priv, C_CAN_CTRL_REG,
-                       CONTROL_ENABLE_AR);
+       priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
 
        if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
            (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
                /* loopback + silent mode : useful for hot self-test */
-               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
-                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
-               priv->write_reg(priv, C_CAN_TEST_REG,
-                               TEST_LBACK | TEST_SILENT);
+               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
+               priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
        } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
                /* loopback mode : useful for self-test function */
-               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
-                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
                priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
        } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
                /* silent mode : bus-monitoring mode */
-               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
-                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
                priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
-       } else
-               /* normal mode*/
-               priv->write_reg(priv, C_CAN_CTRL_REG,
-                               CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+       }
 
        /* configure message objects */
        c_can_configure_msg_objects(dev);
@@ -643,6 +578,11 @@ static int c_can_chip_config(struct net_device *dev)
        /* set a `lec` value so that we can check for updates later */
        priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
 
+       /* Clear all internal status */
+       atomic_set(&priv->tx_active, 0);
+       priv->rxmasked = 0;
+       priv->tx_dir = 0;
+
        /* set bittiming params */
        return c_can_set_bittiming(dev);
 }
@@ -657,13 +597,11 @@ static int c_can_start(struct net_device *dev)
        if (err)
                return err;
 
-       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
-       /* reset tx helper pointers */
-       priv->tx_next = priv->tx_echo = 0;
+       /* Setup the command for new messages */
+       priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
+               IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
 
-       /* enable status change, error and module interrupts */
-       c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
        return 0;
 }
@@ -672,15 +610,13 @@ static void c_can_stop(struct net_device *dev)
 {
        struct c_can_priv *priv = netdev_priv(dev);
 
-       /* disable all interrupts */
-       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
-
-       /* set the state as STOPPED */
+       c_can_irq_control(priv, false);
        priv->can.state = CAN_STATE_STOPPED;
 }
 
 static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
 {
+       struct c_can_priv *priv = netdev_priv(dev);
        int err;
 
        switch (mode) {
@@ -689,6 +625,7 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
                if (err)
                        return err;
                netif_wake_queue(dev);
+               c_can_irq_control(priv, true);
                break;
        default:
                return -EOPNOTSUPP;
@@ -724,42 +661,29 @@ static int c_can_get_berr_counter(const struct net_device *dev,
        return err;
 }
 
-/*
- * priv->tx_echo holds the number of the oldest can_frame put for
- * transmission into the hardware, but not yet ACKed by the CAN tx
- * complete IRQ.
- *
- * We iterate from priv->tx_echo to priv->tx_next and check if the
- * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted packet, stop looking for more.
- */
 static void c_can_do_tx(struct net_device *dev)
 {
        struct c_can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
-       u32 val, obj, pkts = 0, bytes = 0;
-
-       spin_lock_bh(&priv->xmit_lock);
-
-       for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
-               obj = get_tx_echo_msg_obj(priv->tx_echo);
-               val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
+       u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
 
-               if (val & (1 << (obj - 1)))
-                       break;
+       clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
 
-               can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
-               bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
+       while ((idx = ffs(pend))) {
+               idx--;
+               pend &= ~(1 << idx);
+               obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+               c_can_inval_tx_object(dev, IF_RX, obj);
+               can_get_echo_skb(dev, idx);
+               bytes += priv->dlc[idx];
                pkts++;
-               c_can_inval_msg_object(dev, IF_TX, obj);
        }
 
-       /* restart queue if wrap-up or if queue stalled on last pkt */
-       if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
-                       ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
-               netif_wake_queue(dev);
+       /* Clear the bits in the tx_active mask */
+       atomic_sub(clr, &priv->tx_active);
 
-       spin_unlock_bh(&priv->xmit_lock);
+       if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
+               netif_wake_queue(dev);
 
        if (pkts) {
                stats->tx_bytes += bytes;
@@ -800,18 +724,28 @@ static u32 c_can_adjust_pending(u32 pend)
        return pend & ~((1 << lasts) - 1);
 }
 
+static inline void c_can_rx_object_get(struct net_device *dev,
+                                      struct c_can_priv *priv, u32 obj)
+{
+               c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
+}
+
+static inline void c_can_rx_finalize(struct net_device *dev,
+                                    struct c_can_priv *priv, u32 obj)
+{
+       if (priv->type != BOSCH_D_CAN)
+               c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
+}
+
 static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
                              u32 pend, int quota)
 {
-       u32 pkts = 0, ctrl, obj, mcmd;
+       u32 pkts = 0, ctrl, obj;
 
        while ((obj = ffs(pend)) && quota > 0) {
                pend &= ~BIT(obj - 1);
 
-               mcmd = obj < C_CAN_MSG_RX_LOW_LAST ?
-                       IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
-
-               c_can_object_get(dev, IF_RX, obj, mcmd);
+               c_can_rx_object_get(dev, priv, obj);
                ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
 
                if (ctrl & IF_MCONT_MSGLST) {
@@ -833,9 +767,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
                /* read the data from the message object */
                c_can_read_msg_object(dev, IF_RX, ctrl);
 
-               if (obj == C_CAN_MSG_RX_LOW_LAST)
-                       /* activate all lower message objects */
-                       c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl);
+               c_can_rx_finalize(dev, priv, obj);
 
                pkts++;
                quota--;
@@ -844,6 +776,13 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
        return pkts;
 }
 
+static inline u32 c_can_get_pending(struct c_can_priv *priv)
+{
+       u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
+
+       return pend;
+}
+
 /*
  * theory of operation:
  *
@@ -853,18 +792,9 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- *   the INTPND bit.
- * - if the current message object number is equal to
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- *   receive message objects.
- * - if the current message object number is greater than
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- *   only this message object.
+ * We clear the newdat bit right away.
+ *
+ * This can result in packet reordering when the readout is slow.
  */
 static int c_can_do_rx_poll(struct net_device *dev, int quota)
 {
@@ -880,7 +810,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
 
        while (quota > 0) {
                if (!pend) {
-                       pend = priv->read_reg(priv, C_CAN_INTPND1_REG);
+                       pend = c_can_get_pending(priv);
                        if (!pend)
                                break;
                        /*
@@ -905,12 +835,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
        return pkts;
 }
 
-static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
-{
-       return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
-               (priv->current_status & LEC_UNUSED);
-}
-
 static int c_can_handle_state_change(struct net_device *dev,
                                enum c_can_bus_error_types error_type)
 {
@@ -922,6 +846,26 @@ static int c_can_handle_state_change(struct net_device *dev,
        struct sk_buff *skb;
        struct can_berr_counter bec;
 
+       switch (error_type) {
+       case C_CAN_ERROR_WARNING:
+               /* error warning state */
+               priv->can.can_stats.error_warning++;
+               priv->can.state = CAN_STATE_ERROR_WARNING;
+               break;
+       case C_CAN_ERROR_PASSIVE:
+               /* error passive state */
+               priv->can.can_stats.error_passive++;
+               priv->can.state = CAN_STATE_ERROR_PASSIVE;
+               break;
+       case C_CAN_BUS_OFF:
+               /* bus-off state */
+               priv->can.state = CAN_STATE_BUS_OFF;
+               can_bus_off(dev);
+               break;
+       default:
+               break;
+       }
+
        /* propagate the error condition to the CAN stack */
        skb = alloc_can_err_skb(dev, &cf);
        if (unlikely(!skb))
@@ -935,8 +879,6 @@ static int c_can_handle_state_change(struct net_device *dev,
        switch (error_type) {
        case C_CAN_ERROR_WARNING:
                /* error warning state */
-               priv->can.can_stats.error_warning++;
-               priv->can.state = CAN_STATE_ERROR_WARNING;
                cf->can_id |= CAN_ERR_CRTL;
                cf->data[1] = (bec.txerr > bec.rxerr) ?
                        CAN_ERR_CRTL_TX_WARNING :
@@ -947,8 +889,6 @@ static int c_can_handle_state_change(struct net_device *dev,
                break;
        case C_CAN_ERROR_PASSIVE:
                /* error passive state */
-               priv->can.can_stats.error_passive++;
-               priv->can.state = CAN_STATE_ERROR_PASSIVE;
                cf->can_id |= CAN_ERR_CRTL;
                if (rx_err_passive)
                        cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -960,22 +900,16 @@ static int c_can_handle_state_change(struct net_device *dev,
                break;
        case C_CAN_BUS_OFF:
                /* bus-off state */
-               priv->can.state = CAN_STATE_BUS_OFF;
                cf->can_id |= CAN_ERR_BUSOFF;
-               /*
-                * disable all interrupts in bus-off mode to ensure that
-                * the CPU is not hogged down
-                */
-               c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
                can_bus_off(dev);
                break;
        default:
                break;
        }
 
-       netif_receive_skb(skb);
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        return 1;
 }
@@ -996,6 +930,13 @@ static int c_can_handle_bus_err(struct net_device *dev,
        if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
                return 0;
 
+       if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+               return 0;
+
+       /* common for all type of bus errors */
+       priv->can.can_stats.bus_error++;
+       stats->rx_errors++;
+
        /* propagate the error condition to the CAN stack */
        skb = alloc_can_err_skb(dev, &cf);
        if (unlikely(!skb))
@@ -1005,10 +946,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
         * check for 'last error code' which tells us the
         * type of the last error to occur on the CAN bus
         */
-
-       /* common for all type of bus errors */
-       priv->can.can_stats.bus_error++;
-       stats->rx_errors++;
        cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
        cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 
@@ -1043,95 +980,64 @@ static int c_can_handle_bus_err(struct net_device *dev,
                break;
        }
 
-       /* set a `lec` value so that we can check for updates later */
-       priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
-
-       netif_receive_skb(skb);
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
-
+       netif_receive_skb(skb);
        return 1;
 }
 
 static int c_can_poll(struct napi_struct *napi, int quota)
 {
-       u16 irqstatus;
-       int lec_type = 0;
-       int work_done = 0;
        struct net_device *dev = napi->dev;
        struct c_can_priv *priv = netdev_priv(dev);
+       u16 curr, last = priv->last_status;
+       int work_done = 0;
 
-       irqstatus = priv->irqstatus;
-       if (!irqstatus)
-               goto end;
+       priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
+       /* Ack status on C_CAN. D_CAN is self clearing */
+       if (priv->type != BOSCH_D_CAN)
+               priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
 
-       /* status events have the highest priority */
-       if (irqstatus == STATUS_INTERRUPT) {
-               priv->current_status = priv->read_reg(priv,
-                                       C_CAN_STS_REG);
-
-               /* handle Tx/Rx events */
-               if (priv->current_status & STATUS_TXOK)
-                       priv->write_reg(priv, C_CAN_STS_REG,
-                                       priv->current_status & ~STATUS_TXOK);
-
-               if (priv->current_status & STATUS_RXOK)
-                       priv->write_reg(priv, C_CAN_STS_REG,
-                                       priv->current_status & ~STATUS_RXOK);
-
-               /* handle state changes */
-               if ((priv->current_status & STATUS_EWARN) &&
-                               (!(priv->last_status & STATUS_EWARN))) {
-                       netdev_dbg(dev, "entered error warning state\n");
-                       work_done += c_can_handle_state_change(dev,
-                                               C_CAN_ERROR_WARNING);
-               }
-               if ((priv->current_status & STATUS_EPASS) &&
-                               (!(priv->last_status & STATUS_EPASS))) {
-                       netdev_dbg(dev, "entered error passive state\n");
-                       work_done += c_can_handle_state_change(dev,
-                                               C_CAN_ERROR_PASSIVE);
-               }
-               if ((priv->current_status & STATUS_BOFF) &&
-                               (!(priv->last_status & STATUS_BOFF))) {
-                       netdev_dbg(dev, "entered bus off state\n");
-                       work_done += c_can_handle_state_change(dev,
-                                               C_CAN_BUS_OFF);
-               }
+       /* handle state changes */
+       if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
+               netdev_dbg(dev, "entered error warning state\n");
+               work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
+       }
 
-               /* handle bus recovery events */
-               if ((!(priv->current_status & STATUS_BOFF)) &&
-                               (priv->last_status & STATUS_BOFF)) {
-                       netdev_dbg(dev, "left bus off state\n");
-                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-               }
-               if ((!(priv->current_status & STATUS_EPASS)) &&
-                               (priv->last_status & STATUS_EPASS)) {
-                       netdev_dbg(dev, "left error passive state\n");
-                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-               }
+       if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
+               netdev_dbg(dev, "entered error passive state\n");
+               work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
+       }
 
-               priv->last_status = priv->current_status;
-
-               /* handle lec errors on the bus */
-               lec_type = c_can_has_and_handle_berr(priv);
-               if (lec_type)
-                       work_done += c_can_handle_bus_err(dev, lec_type);
-       } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
-                       (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
-               /* handle events corresponding to receive message objects */
-               work_done += c_can_do_rx_poll(dev, (quota - work_done));
-       } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
-                       (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
-               /* handle events corresponding to transmit message objects */
-               c_can_do_tx(dev);
+       if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
+               netdev_dbg(dev, "entered bus off state\n");
+               work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
+               goto end;
        }
 
+       /* handle bus recovery events */
+       if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
+               netdev_dbg(dev, "left bus off state\n");
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+       }
+       if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
+               netdev_dbg(dev, "left error passive state\n");
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+       }
+
+       /* handle lec errors on the bus */
+       work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);
+
+       /* Handle Tx/Rx events. We do this unconditionally */
+       work_done += c_can_do_rx_poll(dev, (quota - work_done));
+       c_can_do_tx(dev);
+
 end:
        if (work_done < quota) {
                napi_complete(napi);
-               /* enable all IRQs */
-               c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+               /* enable all IRQs if we are not in bus off state */
+               if (priv->can.state != CAN_STATE_BUS_OFF)
+                       c_can_irq_control(priv, true);
        }
 
        return work_done;
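
The reworked poll handler above no longer dispatches on the interrupt source register; it reads the status register once per poll and derives state transitions by comparing that value against the cached last_status. A minimal user-space sketch of the same edge-detection pattern follows; the bit masks and the report_transitions() helper are illustrative, not the driver's API.

#include <stdint.h>
#include <stdio.h>

#define STATUS_BOFF   0x80      /* illustrative masks, not the C_CAN register values */
#define STATUS_EWARN  0x40
#define STATUS_EPASS  0x20

static void report_transitions(uint16_t last, uint16_t curr)
{
        uint16_t entered = curr & ~last;        /* bits that just went 0 -> 1 */
        uint16_t left    = last & ~curr;        /* bits that just went 1 -> 0 */

        if (entered & STATUS_EWARN)
                printf("entered error warning state\n");
        if (entered & STATUS_EPASS)
                printf("entered error passive state\n");
        if (entered & STATUS_BOFF)
                printf("entered bus off state\n");
        if (left & STATUS_BOFF)
                printf("left bus off state\n");
        if (left & STATUS_EPASS)
                printf("left error passive state\n");
}

int main(void)
{
        report_transitions(0, STATUS_EWARN);                    /* warning raised  */
        report_transitions(STATUS_EWARN | STATUS_BOFF, 0);      /* bus-off cleared */
        return 0;
}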
@@ -1142,12 +1048,11 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
        struct net_device *dev = (struct net_device *)dev_id;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG);
-       if (!priv->irqstatus)
+       if (!priv->read_reg(priv, C_CAN_INT_REG))
                return IRQ_NONE;
 
        /* disable all interrupts and schedule the NAPI */
-       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+       c_can_irq_control(priv, false);
        napi_schedule(&priv->napi);
 
        return IRQ_HANDLED;
@@ -1184,6 +1089,8 @@ static int c_can_open(struct net_device *dev)
        can_led_event(dev, CAN_LED_EVENT_OPEN);
 
        napi_enable(&priv->napi);
+       /* enable status change, error and module interrupts */
+       c_can_irq_control(priv, true);
        netif_start_queue(dev);
 
        return 0;
@@ -1226,7 +1133,6 @@ struct net_device *alloc_c_can_dev(void)
                return NULL;
 
        priv = netdev_priv(dev);
-       spin_lock_init(&priv->xmit_lock);
        netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
 
        priv->dev = dev;
@@ -1281,6 +1187,7 @@ int c_can_power_up(struct net_device *dev)
        u32 val;
        unsigned long time_out;
        struct c_can_priv *priv = netdev_priv(dev);
+       int ret;
 
        if (!(dev->flags & IFF_UP))
                return 0;
@@ -1307,7 +1214,11 @@ int c_can_power_up(struct net_device *dev)
        if (time_after(jiffies, time_out))
                return -ETIMEDOUT;
 
-       return c_can_start(dev);
+       ret = c_can_start(dev);
+       if (!ret)
+               c_can_irq_control(priv, true);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(c_can_power_up);
 #endif
index faa8404162b397e4bd589c0b7b665c2f6ab1e4e4..99ad1aa576b045197f82780d64936f3b7fe5651a 100644
 #ifndef C_CAN_H
 #define C_CAN_H
 
-/*
- * IFx register masks:
- * allow easy operation on 16-bit registers when the
- * argument is 32-bit instead
- */
-#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
-#define IFX_WRITE_HIGH_16BIT(x)        (((x) & 0xFFFF0000) >> 16)
-
 /* message object split */
 #define C_CAN_NO_OF_OBJECTS    32
 #define C_CAN_MSG_OBJ_RX_NUM   16
@@ -45,8 +37,6 @@
 
 #define C_CAN_MSG_OBJ_RX_SPLIT 9
 #define C_CAN_MSG_RX_LOW_LAST  (C_CAN_MSG_OBJ_RX_SPLIT - 1)
-
-#define C_CAN_NEXT_MSG_OBJ_MASK        (C_CAN_MSG_OBJ_TX_NUM - 1)
 #define RECEIVE_OBJECT_BITS    0x0000ffff
 
 enum reg {
@@ -88,6 +78,7 @@ enum reg {
        C_CAN_INTPND2_REG,
        C_CAN_MSGVAL1_REG,
        C_CAN_MSGVAL2_REG,
+       C_CAN_FUNCTION_REG,
 };
 
 static const u16 reg_map_c_can[] = {
@@ -139,6 +130,7 @@ static const u16 reg_map_d_can[] = {
        [C_CAN_BRPEXT_REG]      = 0x0E,
        [C_CAN_INT_REG]         = 0x10,
        [C_CAN_TEST_REG]        = 0x14,
+       [C_CAN_FUNCTION_REG]    = 0x18,
        [C_CAN_TXRQST1_REG]     = 0x88,
        [C_CAN_TXRQST2_REG]     = 0x8A,
        [C_CAN_NEWDAT1_REG]     = 0x9C,
@@ -183,23 +175,22 @@ struct c_can_priv {
        struct napi_struct napi;
        struct net_device *dev;
        struct device *device;
-       spinlock_t xmit_lock;
-       int tx_object;
-       int current_status;
+       atomic_t tx_active;
+       unsigned long tx_dir;
        int last_status;
-       u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
-       void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
+       u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
+       void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val);
+       u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index);
+       void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val);
        void __iomem *base;
        const u16 *regs;
-       unsigned long irq_flags; /* for request_irq() */
-       unsigned int tx_next;
-       unsigned int tx_echo;
        void *priv;             /* for board-specific data */
-       u16 irqstatus;
        enum c_can_dev_id type;
        u32 __iomem *raminit_ctrlreg;
-       unsigned int instance;
+       int instance;
        void (*raminit) (const struct c_can_priv *priv, bool enable);
+       u32 comm_rcv_high;
+       u32 rxmasked;
        u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
 };
 
index bce0be54c2f59587a2498d2f37821f2634b886d9..5d11e0e4225bf3c84442b9ec8ddea4a005b4717f 100644
 
 #include "c_can.h"
 
+#define PCI_DEVICE_ID_PCH_CAN  0x8818
+#define PCH_PCI_SOFT_RESET     0x01fc
+
 enum c_can_pci_reg_align {
        C_CAN_REG_ALIGN_16,
        C_CAN_REG_ALIGN_32,
+       C_CAN_REG_32,
 };
 
 struct c_can_pci_data {
@@ -31,6 +35,10 @@ struct c_can_pci_data {
        enum c_can_pci_reg_align reg_align;
        /* Set the frequency */
        unsigned int freq;
+       /* PCI bar number */
+       int bar;
+       /* Callback for reset */
+       void (*init)(const struct c_can_priv *priv, bool enable);
 };
 
 /*
@@ -39,30 +47,70 @@ struct c_can_pci_data {
  * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
  * Handle the same by providing a common read/write interface.
  */
-static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
                                                enum reg index)
 {
        return readw(priv->base + priv->regs[index]);
 }
 
-static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
                                                enum reg index, u16 val)
 {
        writew(val, priv->base + priv->regs[index]);
 }
 
-static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
                                                enum reg index)
 {
        return readw(priv->base + 2 * priv->regs[index]);
 }
 
-static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
                                                enum reg index, u16 val)
 {
        writew(val, priv->base + 2 * priv->regs[index]);
 }
 
+static u16 c_can_pci_read_reg_32bit(const struct c_can_priv *priv,
+                                   enum reg index)
+{
+       return (u16)ioread32(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_32bit(const struct c_can_priv *priv,
+                                     enum reg index, u16 val)
+{
+       iowrite32((u32)val, priv->base + 2 * priv->regs[index]);
+}
+
+static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+       u32 val;
+
+       val = priv->read_reg(priv, index);
+       val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+       return val;
+}
+
+static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index,
+               u32 val)
+{
+       priv->write_reg(priv, index + 1, val >> 16);
+       priv->write_reg(priv, index, val);
+}
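
The read_reg32()/write_reg32() helpers above compose a 32-bit access out of two adjacent 16-bit register slots (low half at index, high half at index + 1, with the high half written first). A stand-alone sketch of that composition, using a plain two-element array in place of the hardware registers, might look like this:

#include <assert.h>
#include <stdint.h>

static uint16_t regs[2];                        /* fake low/high register pair */

static uint32_t read_reg32(void)
{
        return (uint32_t)regs[0] | ((uint32_t)regs[1] << 16);
}

static void write_reg32(uint32_t val)
{
        regs[1] = (uint16_t)(val >> 16);        /* high half first, as above */
        regs[0] = (uint16_t)val;
}

int main(void)
{
        write_reg32(0xdeadbeef);
        assert(read_reg32() == 0xdeadbeef);
        return 0;
}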
+
+static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable)
+{
+       if (enable) {
+               u32 __iomem *addr = priv->base + PCH_PCI_SOFT_RESET;
+
+               /* write to sw reset register */
+               iowrite32(1, addr);
+               iowrite32(0, addr);
+       }
+}
+
 static int c_can_pci_probe(struct pci_dev *pdev,
                           const struct pci_device_id *ent)
 {
@@ -84,10 +132,14 @@ static int c_can_pci_probe(struct pci_dev *pdev,
                goto out_disable_device;
        }
 
-       pci_set_master(pdev);
-       pci_enable_msi(pdev);
+       ret = pci_enable_msi(pdev);
+       if (!ret) {
+               dev_info(&pdev->dev, "MSI enabled\n");
+               pci_set_master(pdev);
+       }
 
-       addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+       addr = pci_iomap(pdev, c_can_pci_data->bar,
+                        pci_resource_len(pdev, c_can_pci_data->bar));
        if (!addr) {
                dev_err(&pdev->dev,
                        "device has no PCI memory resources, "
@@ -132,6 +184,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
                goto out_free_c_can;
        }
 
+       priv->type = c_can_pci_data->type;
+
        /* Configure access to registers */
        switch (c_can_pci_data->reg_align) {
        case C_CAN_REG_ALIGN_32:
@@ -142,10 +196,18 @@ static int c_can_pci_probe(struct pci_dev *pdev,
                priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
                priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
                break;
+       case C_CAN_REG_32:
+               priv->read_reg = c_can_pci_read_reg_32bit;
+               priv->write_reg = c_can_pci_write_reg_32bit;
+               break;
        default:
                ret = -EINVAL;
                goto out_free_c_can;
        }
+       priv->read_reg32 = c_can_pci_read_reg32;
+       priv->write_reg32 = c_can_pci_write_reg32;
+
+       priv->raminit = c_can_pci_data->init;
 
        ret = register_c_can_dev(dev);
        if (ret) {
@@ -193,6 +255,15 @@ static struct c_can_pci_data c_can_sta2x11= {
        .type = BOSCH_C_CAN,
        .reg_align = C_CAN_REG_ALIGN_32,
        .freq = 52000000, /* 52 MHz */
+       .bar = 0,
+};
+
+static struct c_can_pci_data c_can_pch = {
+       .type = BOSCH_C_CAN,
+       .reg_align = C_CAN_REG_32,
+       .freq = 50000000, /* 50 MHz */
+       .init = c_can_pci_reset_pch,
+       .bar = 1,
 };
 
 #define C_CAN_ID(_vend, _dev, _driverdata) {           \
@@ -202,6 +273,8 @@ static struct c_can_pci_data c_can_sta2x11= {
 static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
        C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
                 c_can_sta2x11),
+       C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN,
+                c_can_pch),
        {},
 };
 static struct pci_driver c_can_pci_driver = {
index 806d92753427b619fe7241ac290a18aa7769240b..824108cd9fd594a91c25b0b4a1d43d3341ad9a31 100644
@@ -40,6 +40,7 @@
 #define CAN_RAMINIT_START_MASK(i)      (0x001 << (i))
 #define CAN_RAMINIT_DONE_MASK(i)       (0x100 << (i))
 #define CAN_RAMINIT_ALL_MASK(i)                (0x101 << (i))
+#define DCAN_RAM_INIT_BIT              (1 << 3)
 static DEFINE_SPINLOCK(raminit_lock);
 /*
  * 16-bit c_can registers can be arranged differently in the memory
@@ -47,31 +48,31 @@ static DEFINE_SPINLOCK(raminit_lock);
  * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
  * Handle the same by providing a common read/write interface.
  */
-static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
                                                enum reg index)
 {
        return readw(priv->base + priv->regs[index]);
 }
 
-static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
                                                enum reg index, u16 val)
 {
        writew(val, priv->base + priv->regs[index]);
 }
 
-static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
                                                enum reg index)
 {
        return readw(priv->base + 2 * priv->regs[index]);
 }
 
-static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
                                                enum reg index, u16 val)
 {
        writew(val, priv->base + 2 * priv->regs[index]);
 }
 
-static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
+static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask,
                                  u32 val)
 {
        /* We look only at the bits of our instance. */
@@ -80,7 +81,7 @@ static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
                udelay(1);
 }
 
-static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
 {
        u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
        u32 ctrl;
@@ -96,18 +97,68 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
        ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
        writel(ctrl, priv->raminit_ctrlreg);
        ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
-       c_can_hw_raminit_wait(priv, ctrl, mask);
+       c_can_hw_raminit_wait_ti(priv, ctrl, mask);
 
        if (enable) {
                /* Set start bit and wait for the done bit. */
                ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
                writel(ctrl, priv->raminit_ctrlreg);
                ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
-               c_can_hw_raminit_wait(priv, ctrl, mask);
+               c_can_hw_raminit_wait_ti(priv, ctrl, mask);
        }
        spin_unlock(&raminit_lock);
 }
 
+static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+       u32 val;
+
+       val = priv->read_reg(priv, index);
+       val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+       return val;
+}
+
+static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+               u32 val)
+{
+       priv->write_reg(priv, index + 1, val >> 16);
+       priv->write_reg(priv, index, val);
+}
+
+static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+       return readl(priv->base + priv->regs[index]);
+}
+
+static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+               u32 val)
+{
+       writel(val, priv->base + priv->regs[index]);
+}
+
+static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask)
+{
+       while (priv->read_reg32(priv, C_CAN_FUNCTION_REG) & mask)
+               udelay(1);
+}
+
+static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+{
+       u32 ctrl;
+
+       ctrl = priv->read_reg32(priv, C_CAN_FUNCTION_REG);
+       ctrl &= ~DCAN_RAM_INIT_BIT;
+       priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+       c_can_hw_raminit_wait(priv, ctrl);
+
+       if (enable) {
+               ctrl |= DCAN_RAM_INIT_BIT;
+               priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+               c_can_hw_raminit_wait(priv, ctrl);
+       }
+}
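
For D_CAN cores that lack a separate raminit control area, the path above flips DCAN_RAM_INIT_BIT in the FUNCTION register and then spins in c_can_hw_raminit_wait() until the masked bits read back as zero. A stand-alone sketch of that poll-until-clear step, with the register stubbed by a plain variable and udelay() left out, purely for illustration:

#include <assert.h>
#include <stdint.h>

#define DCAN_RAM_INIT_BIT (1 << 3)

static uint32_t fake_function_reg = DCAN_RAM_INIT_BIT;

static void poll_until_clear(uint32_t mask)
{
        while (fake_function_reg & mask)
                fake_function_reg &= ~DCAN_RAM_INIT_BIT;  /* stand-in for the hardware completing */
}

int main(void)
{
        poll_until_clear(DCAN_RAM_INIT_BIT);              /* returns once the bit is gone */
        assert((fake_function_reg & DCAN_RAM_INIT_BIT) == 0);
        return 0;
}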
+
 static struct platform_device_id c_can_id_table[] = {
        [BOSCH_C_CAN_PLATFORM] = {
                .name = KBUILD_MODNAME,
@@ -201,11 +252,15 @@ static int c_can_plat_probe(struct platform_device *pdev)
                case IORESOURCE_MEM_32BIT:
                        priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
                        priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+                       priv->read_reg32 = c_can_plat_read_reg32;
+                       priv->write_reg32 = c_can_plat_write_reg32;
                        break;
                case IORESOURCE_MEM_16BIT:
                default:
                        priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
                        priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+                       priv->read_reg32 = c_can_plat_read_reg32;
+                       priv->write_reg32 = c_can_plat_write_reg32;
                        break;
                }
                break;
@@ -214,6 +269,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
                priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
                priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
                priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+               priv->read_reg32 = d_can_plat_read_reg32;
+               priv->write_reg32 = d_can_plat_write_reg32;
 
                if (pdev->dev.of_node)
                        priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
@@ -221,11 +278,20 @@ static int c_can_plat_probe(struct platform_device *pdev)
                        priv->instance = pdev->id;
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               /* Not all D_CAN modules have a separate register for the D_CAN
+                * RAM initialization. Use default RAM init bit in D_CAN module
+                * if not specified in DT.
+                */
+               if (!res) {
+                       priv->raminit = c_can_hw_raminit;
+                       break;
+               }
+
                priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
-               if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0)
+               if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
                        dev_info(&pdev->dev, "control memory is not used for raminit\n");
                else
-                       priv->raminit = c_can_hw_raminit;
+                       priv->raminit = c_can_hw_raminit_ti;
                break;
        default:
                ret = -EINVAL;
index c7a260478749ad163ec133df88e7a0086b220a73..e318e87e2bfc00ba9e32aa08858de5f5c1629dcf 100644
@@ -256,7 +256,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
 
        /* Check if the CAN device has bit-timing parameters */
        if (!btc)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        /*
         * Depending on the given can_bittiming parameter structure the CAN
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
deleted file mode 100644
index 28c11f8..0000000
+++ /dev/null
@@ -1,1269 +0,0 @@
-/*
- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
- *
- * MCP2510 support and bug fixes by Christian Pellegrin
- * <chripell@evolware.org>
- *
- * Copyright 2009 Christian Pellegrin EVOL S.r.l.
- *
- * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
- * Written under contract by:
- *   Chris Elston, Katalix Systems, Ltd.
- *
- * Based on Microchip MCP251x CAN controller driver written by
- * David Vrabel, Copyright 2006 Arcom Control Systems Ltd.
- *
- * Based on CAN bus driver for the CCAN controller written by
- * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
- * - Simon Kallweit, intefo AG
- * Copyright 2007
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- *
- *
- * Your platform definition file should specify something like:
- *
- * static struct mcp251x_platform_data mcp251x_info = {
- *         .oscillator_frequency = 8000000,
- * };
- *
- * static struct spi_board_info spi_board_info[] = {
- *         {
- *                 .modalias = "mcp2510",
- *                     // or "mcp2515" depending on your controller
- *                 .platform_data = &mcp251x_info,
- *                 .irq = IRQ_EINT13,
- *                 .max_speed_hz = 2*1000*1000,
- *                 .chip_select = 2,
- *         },
- * };
- *
- * Please see mcp251x.h for a description of the fields in
- * struct mcp251x_platform_data.
- *
- */
-
-#include <linux/can/core.h>
-#include <linux/can/dev.h>
-#include <linux/can/led.h>
-#include <linux/can/platform/mcp251x.h>
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/freezer.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spi/spi.h>
-#include <linux/uaccess.h>
-#include <linux/regulator/consumer.h>
-
-/* SPI interface instruction set */
-#define INSTRUCTION_WRITE      0x02
-#define INSTRUCTION_READ       0x03
-#define INSTRUCTION_BIT_MODIFY 0x05
-#define INSTRUCTION_LOAD_TXB(n)        (0x40 + 2 * (n))
-#define INSTRUCTION_READ_RXB(n)        (((n) == 0) ? 0x90 : 0x94)
-#define INSTRUCTION_RESET      0xC0
-#define RTS_TXB0               0x01
-#define RTS_TXB1               0x02
-#define RTS_TXB2               0x04
-#define INSTRUCTION_RTS(n)     (0x80 | ((n) & 0x07))
-
-
-/* MPC251x registers */
-#define CANSTAT              0x0e
-#define CANCTRL              0x0f
-#  define CANCTRL_REQOP_MASK       0xe0
-#  define CANCTRL_REQOP_CONF       0x80
-#  define CANCTRL_REQOP_LISTEN_ONLY 0x60
-#  define CANCTRL_REQOP_LOOPBACK    0x40
-#  define CANCTRL_REQOP_SLEEP      0x20
-#  define CANCTRL_REQOP_NORMAL     0x00
-#  define CANCTRL_OSM              0x08
-#  define CANCTRL_ABAT             0x10
-#define TEC          0x1c
-#define REC          0x1d
-#define CNF1         0x2a
-#  define CNF1_SJW_SHIFT   6
-#define CNF2         0x29
-#  define CNF2_BTLMODE    0x80
-#  define CNF2_SAM         0x40
-#  define CNF2_PS1_SHIFT   3
-#define CNF3         0x28
-#  define CNF3_SOF        0x08
-#  define CNF3_WAKFIL     0x04
-#  define CNF3_PHSEG2_MASK 0x07
-#define CANINTE              0x2b
-#  define CANINTE_MERRE 0x80
-#  define CANINTE_WAKIE 0x40
-#  define CANINTE_ERRIE 0x20
-#  define CANINTE_TX2IE 0x10
-#  define CANINTE_TX1IE 0x08
-#  define CANINTE_TX0IE 0x04
-#  define CANINTE_RX1IE 0x02
-#  define CANINTE_RX0IE 0x01
-#define CANINTF              0x2c
-#  define CANINTF_MERRF 0x80
-#  define CANINTF_WAKIF 0x40
-#  define CANINTF_ERRIF 0x20
-#  define CANINTF_TX2IF 0x10
-#  define CANINTF_TX1IF 0x08
-#  define CANINTF_TX0IF 0x04
-#  define CANINTF_RX1IF 0x02
-#  define CANINTF_RX0IF 0x01
-#  define CANINTF_RX (CANINTF_RX0IF | CANINTF_RX1IF)
-#  define CANINTF_TX (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)
-#  define CANINTF_ERR (CANINTF_ERRIF)
-#define EFLG         0x2d
-#  define EFLG_EWARN   0x01
-#  define EFLG_RXWAR   0x02
-#  define EFLG_TXWAR   0x04
-#  define EFLG_RXEP    0x08
-#  define EFLG_TXEP    0x10
-#  define EFLG_TXBO    0x20
-#  define EFLG_RX0OVR  0x40
-#  define EFLG_RX1OVR  0x80
-#define TXBCTRL(n)  (((n) * 0x10) + 0x30 + TXBCTRL_OFF)
-#  define TXBCTRL_ABTF 0x40
-#  define TXBCTRL_MLOA 0x20
-#  define TXBCTRL_TXERR 0x10
-#  define TXBCTRL_TXREQ 0x08
-#define TXBSIDH(n)  (((n) * 0x10) + 0x30 + TXBSIDH_OFF)
-#  define SIDH_SHIFT    3
-#define TXBSIDL(n)  (((n) * 0x10) + 0x30 + TXBSIDL_OFF)
-#  define SIDL_SID_MASK    7
-#  define SIDL_SID_SHIFT   5
-#  define SIDL_EXIDE_SHIFT 3
-#  define SIDL_EID_SHIFT   16
-#  define SIDL_EID_MASK    3
-#define TXBEID8(n)  (((n) * 0x10) + 0x30 + TXBEID8_OFF)
-#define TXBEID0(n)  (((n) * 0x10) + 0x30 + TXBEID0_OFF)
-#define TXBDLC(n)   (((n) * 0x10) + 0x30 + TXBDLC_OFF)
-#  define DLC_RTR_SHIFT    6
-#define TXBCTRL_OFF 0
-#define TXBSIDH_OFF 1
-#define TXBSIDL_OFF 2
-#define TXBEID8_OFF 3
-#define TXBEID0_OFF 4
-#define TXBDLC_OFF  5
-#define TXBDAT_OFF  6
-#define RXBCTRL(n)  (((n) * 0x10) + 0x60 + RXBCTRL_OFF)
-#  define RXBCTRL_BUKT 0x04
-#  define RXBCTRL_RXM0 0x20
-#  define RXBCTRL_RXM1 0x40
-#define RXBSIDH(n)  (((n) * 0x10) + 0x60 + RXBSIDH_OFF)
-#  define RXBSIDH_SHIFT 3
-#define RXBSIDL(n)  (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
-#  define RXBSIDL_IDE   0x08
-#  define RXBSIDL_SRR   0x10
-#  define RXBSIDL_EID   3
-#  define RXBSIDL_SHIFT 5
-#define RXBEID8(n)  (((n) * 0x10) + 0x60 + RXBEID8_OFF)
-#define RXBEID0(n)  (((n) * 0x10) + 0x60 + RXBEID0_OFF)
-#define RXBDLC(n)   (((n) * 0x10) + 0x60 + RXBDLC_OFF)
-#  define RXBDLC_LEN_MASK  0x0f
-#  define RXBDLC_RTR       0x40
-#define RXBCTRL_OFF 0
-#define RXBSIDH_OFF 1
-#define RXBSIDL_OFF 2
-#define RXBEID8_OFF 3
-#define RXBEID0_OFF 4
-#define RXBDLC_OFF  5
-#define RXBDAT_OFF  6
-#define RXFSIDH(n) ((n) * 4)
-#define RXFSIDL(n) ((n) * 4 + 1)
-#define RXFEID8(n) ((n) * 4 + 2)
-#define RXFEID0(n) ((n) * 4 + 3)
-#define RXMSIDH(n) ((n) * 4 + 0x20)
-#define RXMSIDL(n) ((n) * 4 + 0x21)
-#define RXMEID8(n) ((n) * 4 + 0x22)
-#define RXMEID0(n) ((n) * 4 + 0x23)
-
-#define GET_BYTE(val, byte)                    \
-       (((val) >> ((byte) * 8)) & 0xff)
-#define SET_BYTE(val, byte)                    \
-       (((val) & 0xff) << ((byte) * 8))
-
-/*
- * Buffer size required for the largest SPI transfer (i.e., reading a
- * frame)
- */
-#define CAN_FRAME_MAX_DATA_LEN 8
-#define SPI_TRANSFER_BUF_LEN   (6 + CAN_FRAME_MAX_DATA_LEN)
-#define CAN_FRAME_MAX_BITS     128
-
-#define TX_ECHO_SKB_MAX        1
-
-#define DEVICE_NAME "mcp251x"
-
-static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
-module_param(mcp251x_enable_dma, int, S_IRUGO);
-MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
-
-static const struct can_bittiming_const mcp251x_bittiming_const = {
-       .name = DEVICE_NAME,
-       .tseg1_min = 3,
-       .tseg1_max = 16,
-       .tseg2_min = 2,
-       .tseg2_max = 8,
-       .sjw_max = 4,
-       .brp_min = 1,
-       .brp_max = 64,
-       .brp_inc = 1,
-};
-
-enum mcp251x_model {
-       CAN_MCP251X_MCP2510     = 0x2510,
-       CAN_MCP251X_MCP2515     = 0x2515,
-};
-
-struct mcp251x_priv {
-       struct can_priv    can;
-       struct net_device *net;
-       struct spi_device *spi;
-       enum mcp251x_model model;
-
-       struct mutex mcp_lock; /* SPI device lock */
-
-       u8 *spi_tx_buf;
-       u8 *spi_rx_buf;
-       dma_addr_t spi_tx_dma;
-       dma_addr_t spi_rx_dma;
-
-       struct sk_buff *tx_skb;
-       int tx_len;
-
-       struct workqueue_struct *wq;
-       struct work_struct tx_work;
-       struct work_struct restart_work;
-
-       int force_quit;
-       int after_suspend;
-#define AFTER_SUSPEND_UP 1
-#define AFTER_SUSPEND_DOWN 2
-#define AFTER_SUSPEND_POWER 4
-#define AFTER_SUSPEND_RESTART 8
-       int restart_tx;
-       struct regulator *power;
-       struct regulator *transceiver;
-       struct clk *clk;
-};
-
-#define MCP251X_IS(_model) \
-static inline int mcp251x_is_##_model(struct spi_device *spi) \
-{ \
-       struct mcp251x_priv *priv = spi_get_drvdata(spi); \
-       return priv->model == CAN_MCP251X_MCP##_model; \
-}
-
-MCP251X_IS(2510);
-MCP251X_IS(2515);
-
-static void mcp251x_clean(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-
-       if (priv->tx_skb || priv->tx_len)
-               net->stats.tx_errors++;
-       if (priv->tx_skb)
-               dev_kfree_skb(priv->tx_skb);
-       if (priv->tx_len)
-               can_free_echo_skb(priv->net, 0);
-       priv->tx_skb = NULL;
-       priv->tx_len = 0;
-}
-
-/*
- * Note about handling of error return of mcp251x_spi_trans: accessing
- * registers via SPI is not really different conceptually than using
- * normal I/O assembler instructions, although it's much more
- * complicated from a practical POV. So it's not advisable to always
- * check the return value of this function. Imagine that every
- * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0)
- * error();", it would be a great mess (well there are some situation
- * when exception handling C++ like could be useful after all). So we
- * just check that transfers are OK at the beginning of our
- * conversation with the chip and to avoid doing really nasty things
- * (like injecting bogus packets in the network stack).
- */
-static int mcp251x_spi_trans(struct spi_device *spi, int len)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       struct spi_transfer t = {
-               .tx_buf = priv->spi_tx_buf,
-               .rx_buf = priv->spi_rx_buf,
-               .len = len,
-               .cs_change = 0,
-       };
-       struct spi_message m;
-       int ret;
-
-       spi_message_init(&m);
-
-       if (mcp251x_enable_dma) {
-               t.tx_dma = priv->spi_tx_dma;
-               t.rx_dma = priv->spi_rx_dma;
-               m.is_dma_mapped = 1;
-       }
-
-       spi_message_add_tail(&t, &m);
-
-       ret = spi_sync(spi, &m);
-       if (ret)
-               dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
-       return ret;
-}
-
-static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       u8 val = 0;
-
-       priv->spi_tx_buf[0] = INSTRUCTION_READ;
-       priv->spi_tx_buf[1] = reg;
-
-       mcp251x_spi_trans(spi, 3);
-       val = priv->spi_rx_buf[2];
-
-       return val;
-}
-
-static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
-               uint8_t *v1, uint8_t *v2)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       priv->spi_tx_buf[0] = INSTRUCTION_READ;
-       priv->spi_tx_buf[1] = reg;
-
-       mcp251x_spi_trans(spi, 4);
-
-       *v1 = priv->spi_rx_buf[2];
-       *v2 = priv->spi_rx_buf[3];
-}
-
-static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
-       priv->spi_tx_buf[1] = reg;
-       priv->spi_tx_buf[2] = val;
-
-       mcp251x_spi_trans(spi, 3);
-}
-
-static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
-                              u8 mask, uint8_t val)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
-       priv->spi_tx_buf[1] = reg;
-       priv->spi_tx_buf[2] = mask;
-       priv->spi_tx_buf[3] = val;
-
-       mcp251x_spi_trans(spi, 4);
-}
-
-static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
-                               int len, int tx_buf_idx)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       if (mcp251x_is_2510(spi)) {
-               int i;
-
-               for (i = 1; i < TXBDAT_OFF + len; i++)
-                       mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
-                                         buf[i]);
-       } else {
-               memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
-               mcp251x_spi_trans(spi, TXBDAT_OFF + len);
-       }
-}
-
-static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
-                         int tx_buf_idx)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       u32 sid, eid, exide, rtr;
-       u8 buf[SPI_TRANSFER_BUF_LEN];
-
-       exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
-       if (exide)
-               sid = (frame->can_id & CAN_EFF_MASK) >> 18;
-       else
-               sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */
-       eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */
-       rtr = (frame->can_id & CAN_RTR_FLAG) ? 1 : 0; /* Remote transmission */
-
-       buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx);
-       buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT;
-       buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) |
-               (exide << SIDL_EXIDE_SHIFT) |
-               ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
-       buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
-       buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
-       buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
-       memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
-       mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
-
-       /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
-       priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
-       mcp251x_spi_trans(priv->spi, 1);
-}
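
mcp251x_hw_tx() above packs a 29-bit extended CAN ID into the TXBSIDH/TXBSIDL/TXBEID8/TXBEID0 bytes, and mcp251x_hw_rx() reverses that layout. The round trip below reproduces the same bit packing in user space on a plain 4-byte buffer; the helper names are made up for the sketch and nothing touches SPI.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CAN_EFF_MASK 0x1FFFFFFFu

static void pack_eff_id(uint32_t can_id, uint8_t buf[4])
{
        uint32_t eid = can_id & CAN_EFF_MASK;
        uint32_t sid = eid >> 18;                       /* top 11 bits */

        buf[0] = sid >> 3;                              /* SIDH                  */
        buf[1] = ((sid & 7) << 5) | (1 << 3) |          /* SIDL: SID[2:0], EXIDE */
                 ((eid >> 16) & 3);                     /*       EID[17:16]      */
        buf[2] = (eid >> 8) & 0xff;                     /* EID8                  */
        buf[3] = eid & 0xff;                            /* EID0                  */
}

static uint32_t unpack_eff_id(const uint8_t buf[4])
{
        uint32_t sid = ((uint32_t)buf[0] << 3) | (buf[1] >> 5);

        return (sid << 18) | (((uint32_t)buf[1] & 3) << 16) |
               ((uint32_t)buf[2] << 8) | buf[3];
}

int main(void)
{
        uint8_t buf[4];
        uint32_t id = 0x12345678 & CAN_EFF_MASK;

        pack_eff_id(id, buf);
        printf("SIDH=%02x SIDL=%02x EID8=%02x EID0=%02x\n",
               buf[0], buf[1], buf[2], buf[3]);
        assert(unpack_eff_id(buf) == id);
        return 0;
}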
-
-static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
-                               int buf_idx)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       if (mcp251x_is_2510(spi)) {
-               int i, len;
-
-               for (i = 1; i < RXBDAT_OFF; i++)
-                       buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
-
-               len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
-               for (; i < (RXBDAT_OFF + len); i++)
-                       buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
-       } else {
-               priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
-               mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
-               memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
-       }
-}
-
-static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       struct sk_buff *skb;
-       struct can_frame *frame;
-       u8 buf[SPI_TRANSFER_BUF_LEN];
-
-       skb = alloc_can_skb(priv->net, &frame);
-       if (!skb) {
-               dev_err(&spi->dev, "cannot allocate RX skb\n");
-               priv->net->stats.rx_dropped++;
-               return;
-       }
-
-       mcp251x_hw_rx_frame(spi, buf, buf_idx);
-       if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) {
-               /* Extended ID format */
-               frame->can_id = CAN_EFF_FLAG;
-               frame->can_id |=
-                       /* Extended ID part */
-                       SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) |
-                       SET_BYTE(buf[RXBEID8_OFF], 1) |
-                       SET_BYTE(buf[RXBEID0_OFF], 0) |
-                       /* Standard ID part */
-                       (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
-                         (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18);
-               /* Remote transmission request */
-               if (buf[RXBDLC_OFF] & RXBDLC_RTR)
-                       frame->can_id |= CAN_RTR_FLAG;
-       } else {
-               /* Standard ID format */
-               frame->can_id =
-                       (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
-                       (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
-               if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
-                       frame->can_id |= CAN_RTR_FLAG;
-       }
-       /* Data length */
-       frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
-       memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
-
-       priv->net->stats.rx_packets++;
-       priv->net->stats.rx_bytes += frame->can_dlc;
-
-       can_led_event(priv->net, CAN_LED_EVENT_RX);
-
-       netif_rx_ni(skb);
-}
-
-static void mcp251x_hw_sleep(struct spi_device *spi)
-{
-       mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
-}
-
-static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
-                                          struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct spi_device *spi = priv->spi;
-
-       if (priv->tx_skb || priv->tx_len) {
-               dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
-               return NETDEV_TX_BUSY;
-       }
-
-       if (can_dropped_invalid_skb(net, skb))
-               return NETDEV_TX_OK;
-
-       netif_stop_queue(net);
-       priv->tx_skb = skb;
-       queue_work(priv->wq, &priv->tx_work);
-
-       return NETDEV_TX_OK;
-}
-
-static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-
-       switch (mode) {
-       case CAN_MODE_START:
-               mcp251x_clean(net);
-               /* We have to delay work since SPI I/O may sleep */
-               priv->can.state = CAN_STATE_ERROR_ACTIVE;
-               priv->restart_tx = 1;
-               if (priv->can.restart_ms == 0)
-                       priv->after_suspend = AFTER_SUSPEND_RESTART;
-               queue_work(priv->wq, &priv->restart_work);
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       return 0;
-}
-
-static int mcp251x_set_normal_mode(struct spi_device *spi)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       unsigned long timeout;
-
-       /* Enable interrupts */
-       mcp251x_write_reg(spi, CANINTE,
-                         CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
-                         CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
-
-       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
-               /* Put device into loopback mode */
-               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
-       } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
-               /* Put device into listen-only mode */
-               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
-       } else {
-               /* Put device into normal mode */
-               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
-
-               /* Wait for the device to enter normal mode */
-               timeout = jiffies + HZ;
-               while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
-                       schedule();
-                       if (time_after(jiffies, timeout)) {
-                               dev_err(&spi->dev, "MCP251x didn't"
-                                       " enter in normal mode\n");
-                               return -EBUSY;
-                       }
-               }
-       }
-       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-       return 0;
-}
-
-static int mcp251x_do_set_bittiming(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct can_bittiming *bt = &priv->can.bittiming;
-       struct spi_device *spi = priv->spi;
-
-       mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) |
-                         (bt->brp - 1));
-       mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE |
-                         (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
-                          CNF2_SAM : 0) |
-                         ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) |
-                         (bt->prop_seg - 1));
-       mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
-                          (bt->phase_seg2 - 1));
-       dev_dbg(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
-               mcp251x_read_reg(spi, CNF1),
-               mcp251x_read_reg(spi, CNF2),
-               mcp251x_read_reg(spi, CNF3));
-
-       return 0;
-}
-
-static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
-                        struct spi_device *spi)
-{
-       mcp251x_do_set_bittiming(net);
-
-       mcp251x_write_reg(spi, RXBCTRL(0),
-                         RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
-       mcp251x_write_reg(spi, RXBCTRL(1),
-                         RXBCTRL_RXM0 | RXBCTRL_RXM1);
-       return 0;
-}
-
-static int mcp251x_hw_reset(struct spi_device *spi)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       int ret;
-       unsigned long timeout;
-
-       priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-       ret = spi_write(spi, priv->spi_tx_buf, 1);
-       if (ret) {
-               dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
-               return -EIO;
-       }
-
-       /* Wait for reset to finish */
-       timeout = jiffies + HZ;
-       mdelay(10);
-       while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
-              != CANCTRL_REQOP_CONF) {
-               schedule();
-               if (time_after(jiffies, timeout)) {
-                       dev_err(&spi->dev, "MCP251x didn't"
-                               " enter in conf mode after reset\n");
-                       return -EBUSY;
-               }
-       }
-       return 0;
-}
-
-static int mcp251x_hw_probe(struct spi_device *spi)
-{
-       int st1, st2;
-
-       mcp251x_hw_reset(spi);
-
-       /*
-        * Please note that these are "magic values" based on after
-        * reset defaults taken from data sheet which allows us to see
-        * if we really have a chip on the bus (we avoid common all
-        * zeroes or all ones situations)
-        */
-       st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
-       st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
-
-       dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
-
-       /* Check for power up default values */
-       return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
-}
-
-static int mcp251x_power_enable(struct regulator *reg, int enable)
-{
-       if (IS_ERR_OR_NULL(reg))
-               return 0;
-
-       if (enable)
-               return regulator_enable(reg);
-       else
-               return regulator_disable(reg);
-}
-
-static void mcp251x_open_clean(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct spi_device *spi = priv->spi;
-
-       free_irq(spi->irq, priv);
-       mcp251x_hw_sleep(spi);
-       mcp251x_power_enable(priv->transceiver, 0);
-       close_candev(net);
-}
-
-static int mcp251x_stop(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct spi_device *spi = priv->spi;
-
-       close_candev(net);
-
-       priv->force_quit = 1;
-       free_irq(spi->irq, priv);
-       destroy_workqueue(priv->wq);
-       priv->wq = NULL;
-
-       mutex_lock(&priv->mcp_lock);
-
-       /* Disable and clear pending interrupts */
-       mcp251x_write_reg(spi, CANINTE, 0x00);
-       mcp251x_write_reg(spi, CANINTF, 0x00);
-
-       mcp251x_write_reg(spi, TXBCTRL(0), 0);
-       mcp251x_clean(net);
-
-       mcp251x_hw_sleep(spi);
-
-       mcp251x_power_enable(priv->transceiver, 0);
-
-       priv->can.state = CAN_STATE_STOPPED;
-
-       mutex_unlock(&priv->mcp_lock);
-
-       can_led_event(net, CAN_LED_EVENT_STOP);
-
-       return 0;
-}
-
-static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
-{
-       struct sk_buff *skb;
-       struct can_frame *frame;
-
-       skb = alloc_can_err_skb(net, &frame);
-       if (skb) {
-               frame->can_id |= can_id;
-               frame->data[1] = data1;
-               netif_rx_ni(skb);
-       } else {
-               netdev_err(net, "cannot allocate error skb\n");
-       }
-}
-
-static void mcp251x_tx_work_handler(struct work_struct *ws)
-{
-       struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
-                                                tx_work);
-       struct spi_device *spi = priv->spi;
-       struct net_device *net = priv->net;
-       struct can_frame *frame;
-
-       mutex_lock(&priv->mcp_lock);
-       if (priv->tx_skb) {
-               if (priv->can.state == CAN_STATE_BUS_OFF) {
-                       mcp251x_clean(net);
-               } else {
-                       frame = (struct can_frame *)priv->tx_skb->data;
-
-                       if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
-                               frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
-                       mcp251x_hw_tx(spi, frame, 0);
-                       priv->tx_len = 1 + frame->can_dlc;
-                       can_put_echo_skb(priv->tx_skb, net, 0);
-                       priv->tx_skb = NULL;
-               }
-       }
-       mutex_unlock(&priv->mcp_lock);
-}
-
-static void mcp251x_restart_work_handler(struct work_struct *ws)
-{
-       struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
-                                                restart_work);
-       struct spi_device *spi = priv->spi;
-       struct net_device *net = priv->net;
-
-       mutex_lock(&priv->mcp_lock);
-       if (priv->after_suspend) {
-               mdelay(10);
-               mcp251x_hw_reset(spi);
-               mcp251x_setup(net, priv, spi);
-               if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
-                       mcp251x_set_normal_mode(spi);
-               } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
-                       netif_device_attach(net);
-                       mcp251x_clean(net);
-                       mcp251x_set_normal_mode(spi);
-                       netif_wake_queue(net);
-               } else {
-                       mcp251x_hw_sleep(spi);
-               }
-               priv->after_suspend = 0;
-               priv->force_quit = 0;
-       }
-
-       if (priv->restart_tx) {
-               priv->restart_tx = 0;
-               mcp251x_write_reg(spi, TXBCTRL(0), 0);
-               mcp251x_clean(net);
-               netif_wake_queue(net);
-               mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
-       }
-       mutex_unlock(&priv->mcp_lock);
-}
-
-static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
-{
-       struct mcp251x_priv *priv = dev_id;
-       struct spi_device *spi = priv->spi;
-       struct net_device *net = priv->net;
-
-       mutex_lock(&priv->mcp_lock);
-       while (!priv->force_quit) {
-               enum can_state new_state;
-               u8 intf, eflag;
-               u8 clear_intf = 0;
-               int can_id = 0, data1 = 0;
-
-               mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
-
-               /* mask out flags we don't care about */
-               intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
-               /* receive buffer 0 */
-               if (intf & CANINTF_RX0IF) {
-                       mcp251x_hw_rx(spi, 0);
-                       /*
-                        * Free one buffer ASAP
-                        * (The MCP2515 does this automatically.)
-                        */
-                       if (mcp251x_is_2510(spi))
-                               mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
-               }
-
-               /* receive buffer 1 */
-               if (intf & CANINTF_RX1IF) {
-                       mcp251x_hw_rx(spi, 1);
-                       /* the MCP2515 does this automatically */
-                       if (mcp251x_is_2510(spi))
-                               clear_intf |= CANINTF_RX1IF;
-               }
-
-               /* any error or tx interrupt we need to clear? */
-               if (intf & (CANINTF_ERR | CANINTF_TX))
-                       clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
-               if (clear_intf)
-                       mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
-
-               if (eflag)
-                       mcp251x_write_bits(spi, EFLG, eflag, 0x00);
-
-               /* Update can state */
-               if (eflag & EFLG_TXBO) {
-                       new_state = CAN_STATE_BUS_OFF;
-                       can_id |= CAN_ERR_BUSOFF;
-               } else if (eflag & EFLG_TXEP) {
-                       new_state = CAN_STATE_ERROR_PASSIVE;
-                       can_id |= CAN_ERR_CRTL;
-                       data1 |= CAN_ERR_CRTL_TX_PASSIVE;
-               } else if (eflag & EFLG_RXEP) {
-                       new_state = CAN_STATE_ERROR_PASSIVE;
-                       can_id |= CAN_ERR_CRTL;
-                       data1 |= CAN_ERR_CRTL_RX_PASSIVE;
-               } else if (eflag & EFLG_TXWAR) {
-                       new_state = CAN_STATE_ERROR_WARNING;
-                       can_id |= CAN_ERR_CRTL;
-                       data1 |= CAN_ERR_CRTL_TX_WARNING;
-               } else if (eflag & EFLG_RXWAR) {
-                       new_state = CAN_STATE_ERROR_WARNING;
-                       can_id |= CAN_ERR_CRTL;
-                       data1 |= CAN_ERR_CRTL_RX_WARNING;
-               } else {
-                       new_state = CAN_STATE_ERROR_ACTIVE;
-               }
-
-               /* Update can state statistics */
-               switch (priv->can.state) {
-               case CAN_STATE_ERROR_ACTIVE:
-                       if (new_state >= CAN_STATE_ERROR_WARNING &&
-                           new_state <= CAN_STATE_BUS_OFF)
-                               priv->can.can_stats.error_warning++;
-               case CAN_STATE_ERROR_WARNING:   /* fallthrough */
-                       if (new_state >= CAN_STATE_ERROR_PASSIVE &&
-                           new_state <= CAN_STATE_BUS_OFF)
-                               priv->can.can_stats.error_passive++;
-                       break;
-               default:
-                       break;
-               }
-               priv->can.state = new_state;
-
-               if (intf & CANINTF_ERRIF) {
-                       /* Handle overflow counters */
-                       if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
-                               if (eflag & EFLG_RX0OVR) {
-                                       net->stats.rx_over_errors++;
-                                       net->stats.rx_errors++;
-                               }
-                               if (eflag & EFLG_RX1OVR) {
-                                       net->stats.rx_over_errors++;
-                                       net->stats.rx_errors++;
-                               }
-                               can_id |= CAN_ERR_CRTL;
-                               data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
-                       }
-                       mcp251x_error_skb(net, can_id, data1);
-               }
-
-               if (priv->can.state == CAN_STATE_BUS_OFF) {
-                       if (priv->can.restart_ms == 0) {
-                               priv->force_quit = 1;
-                               can_bus_off(net);
-                               mcp251x_hw_sleep(spi);
-                               break;
-                       }
-               }
-
-               if (intf == 0)
-                       break;
-
-               if (intf & CANINTF_TX) {
-                       net->stats.tx_packets++;
-                       net->stats.tx_bytes += priv->tx_len - 1;
-                       can_led_event(net, CAN_LED_EVENT_TX);
-                       if (priv->tx_len) {
-                               can_get_echo_skb(net, 0);
-                               priv->tx_len = 0;
-                       }
-                       netif_wake_queue(net);
-               }
-
-       }
-       mutex_unlock(&priv->mcp_lock);
-       return IRQ_HANDLED;
-}
-
-static int mcp251x_open(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct spi_device *spi = priv->spi;
-       unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
-       int ret;
-
-       ret = open_candev(net);
-       if (ret) {
-               dev_err(&spi->dev, "unable to set initial baudrate!\n");
-               return ret;
-       }
-
-       mutex_lock(&priv->mcp_lock);
-       mcp251x_power_enable(priv->transceiver, 1);
-
-       priv->force_quit = 0;
-       priv->tx_skb = NULL;
-       priv->tx_len = 0;
-
-       ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
-                                  flags, DEVICE_NAME, priv);
-       if (ret) {
-               dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
-               mcp251x_power_enable(priv->transceiver, 0);
-               close_candev(net);
-               goto open_unlock;
-       }
-
-       priv->wq = create_freezable_workqueue("mcp251x_wq");
-       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
-       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
-
-       ret = mcp251x_hw_reset(spi);
-       if (ret) {
-               mcp251x_open_clean(net);
-               goto open_unlock;
-       }
-       ret = mcp251x_setup(net, priv, spi);
-       if (ret) {
-               mcp251x_open_clean(net);
-               goto open_unlock;
-       }
-       ret = mcp251x_set_normal_mode(spi);
-       if (ret) {
-               mcp251x_open_clean(net);
-               goto open_unlock;
-       }
-
-       can_led_event(net, CAN_LED_EVENT_OPEN);
-
-       netif_wake_queue(net);
-
-open_unlock:
-       mutex_unlock(&priv->mcp_lock);
-       return ret;
-}
-
-static const struct net_device_ops mcp251x_netdev_ops = {
-       .ndo_open = mcp251x_open,
-       .ndo_stop = mcp251x_stop,
-       .ndo_start_xmit = mcp251x_hard_start_xmit,
-       .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct of_device_id mcp251x_of_match[] = {
-       {
-               .compatible     = "microchip,mcp2510",
-               .data           = (void *)CAN_MCP251X_MCP2510,
-       },
-       {
-               .compatible     = "microchip,mcp2515",
-               .data           = (void *)CAN_MCP251X_MCP2515,
-       },
-       { }
-};
-MODULE_DEVICE_TABLE(of, mcp251x_of_match);
-
-static const struct spi_device_id mcp251x_id_table[] = {
-       {
-               .name           = "mcp2510",
-               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2510,
-       },
-       {
-               .name           = "mcp2515",
-               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2515,
-       },
-       { }
-};
-MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
-
-static int mcp251x_can_probe(struct spi_device *spi)
-{
-       const struct of_device_id *of_id = of_match_device(mcp251x_of_match,
-                                                          &spi->dev);
-       struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
-       struct net_device *net;
-       struct mcp251x_priv *priv;
-       int freq, ret = -ENODEV;
-       struct clk *clk;
-
-       clk = devm_clk_get(&spi->dev, NULL);
-       if (IS_ERR(clk)) {
-               if (pdata)
-                       freq = pdata->oscillator_frequency;
-               else
-                       return PTR_ERR(clk);
-       } else {
-               freq = clk_get_rate(clk);
-       }
-
-       /* Sanity check */
-       if (freq < 1000000 || freq > 25000000)
-               return -ERANGE;
-
-       /* Allocate can/net device */
-       net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
-       if (!net)
-               return -ENOMEM;
-
-       if (!IS_ERR(clk)) {
-               ret = clk_prepare_enable(clk);
-               if (ret)
-                       goto out_free;
-       }
-
-       net->netdev_ops = &mcp251x_netdev_ops;
-       net->flags |= IFF_ECHO;
-
-       priv = netdev_priv(net);
-       priv->can.bittiming_const = &mcp251x_bittiming_const;
-       priv->can.do_set_mode = mcp251x_do_set_mode;
-       priv->can.clock.freq = freq / 2;
-       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
-               CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
-       if (of_id)
-               priv->model = (enum mcp251x_model)of_id->data;
-       else
-               priv->model = spi_get_device_id(spi)->driver_data;
-       priv->net = net;
-       priv->clk = clk;
-
-       priv->power = devm_regulator_get(&spi->dev, "vdd");
-       priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
-       if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
-           (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
-               ret = -EPROBE_DEFER;
-               goto out_clk;
-       }
-
-       ret = mcp251x_power_enable(priv->power, 1);
-       if (ret)
-               goto out_clk;
-
-       spi_set_drvdata(spi, priv);
-
-       priv->spi = spi;
-       mutex_init(&priv->mcp_lock);
-
-       /* If requested, allocate DMA buffers */
-       if (mcp251x_enable_dma) {
-               spi->dev.coherent_dma_mask = ~0;
-
-               /*
-                * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
-                * that much and share it between Tx and Rx DMA buffers.
-                */
-               priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
-                                                     PAGE_SIZE,
-                                                     &priv->spi_tx_dma,
-                                                     GFP_DMA);
-
-               if (priv->spi_tx_buf) {
-                       priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
-                       priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
-                                                       (PAGE_SIZE / 2));
-               } else {
-                       /* Fall back to non-DMA */
-                       mcp251x_enable_dma = 0;
-               }
-       }
-
-       /* Allocate non-DMA buffers */
-       if (!mcp251x_enable_dma) {
-               priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
-                                               GFP_KERNEL);
-               if (!priv->spi_tx_buf) {
-                       ret = -ENOMEM;
-                       goto error_probe;
-               }
-               priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
-                                               GFP_KERNEL);
-               if (!priv->spi_rx_buf) {
-                       ret = -ENOMEM;
-                       goto error_probe;
-               }
-       }
-
-       SET_NETDEV_DEV(net, &spi->dev);
-
-       /* Configure the SPI bus */
-       spi->mode = spi->mode ? : SPI_MODE_0;
-       if (mcp251x_is_2510(spi))
-               spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
-       else
-               spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
-       spi->bits_per_word = 8;
-       spi_setup(spi);
-
-       /* Here is OK to not lock the MCP, no one knows about it yet */
-       if (!mcp251x_hw_probe(spi)) {
-               ret = -ENODEV;
-               goto error_probe;
-       }
-       mcp251x_hw_sleep(spi);
-
-       ret = register_candev(net);
-       if (ret)
-               goto error_probe;
-
-       devm_can_led_init(net);
-
-       return ret;
-
-error_probe:
-       if (mcp251x_enable_dma)
-               dma_free_coherent(&spi->dev, PAGE_SIZE,
-                                 priv->spi_tx_buf, priv->spi_tx_dma);
-       mcp251x_power_enable(priv->power, 0);
-
-out_clk:
-       if (!IS_ERR(clk))
-               clk_disable_unprepare(clk);
-
-out_free:
-       free_candev(net);
-
-       return ret;
-}
-
-static int mcp251x_can_remove(struct spi_device *spi)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       struct net_device *net = priv->net;
-
-       unregister_candev(net);
-
-       if (mcp251x_enable_dma) {
-               dma_free_coherent(&spi->dev, PAGE_SIZE,
-                                 priv->spi_tx_buf, priv->spi_tx_dma);
-       }
-
-       mcp251x_power_enable(priv->power, 0);
-
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
-
-       free_candev(net);
-
-       return 0;
-}
-
-static int __maybe_unused mcp251x_can_suspend(struct device *dev)
-{
-       struct spi_device *spi = to_spi_device(dev);
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       struct net_device *net = priv->net;
-
-       priv->force_quit = 1;
-       disable_irq(spi->irq);
-       /*
-        * Note: at this point neither IST nor workqueues are running.
-        * open/stop cannot be called anyway so locking is not needed
-        */
-       if (netif_running(net)) {
-               netif_device_detach(net);
-
-               mcp251x_hw_sleep(spi);
-               mcp251x_power_enable(priv->transceiver, 0);
-               priv->after_suspend = AFTER_SUSPEND_UP;
-       } else {
-               priv->after_suspend = AFTER_SUSPEND_DOWN;
-       }
-
-       if (!IS_ERR_OR_NULL(priv->power)) {
-               regulator_disable(priv->power);
-               priv->after_suspend |= AFTER_SUSPEND_POWER;
-       }
-
-       return 0;
-}
-
-static int __maybe_unused mcp251x_can_resume(struct device *dev)
-{
-       struct spi_device *spi = to_spi_device(dev);
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       if (priv->after_suspend & AFTER_SUSPEND_POWER) {
-               mcp251x_power_enable(priv->power, 1);
-               queue_work(priv->wq, &priv->restart_work);
-       } else {
-               if (priv->after_suspend & AFTER_SUSPEND_UP) {
-                       mcp251x_power_enable(priv->transceiver, 1);
-                       queue_work(priv->wq, &priv->restart_work);
-               } else {
-                       priv->after_suspend = 0;
-               }
-       }
-       priv->force_quit = 0;
-       enable_irq(spi->irq);
-       return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
-       mcp251x_can_resume);
-
-static struct spi_driver mcp251x_can_driver = {
-       .driver = {
-               .name = DEVICE_NAME,
-               .owner = THIS_MODULE,
-               .of_match_table = mcp251x_of_match,
-               .pm = &mcp251x_can_pm_ops,
-       },
-       .id_table = mcp251x_id_table,
-       .probe = mcp251x_can_probe,
-       .remove = mcp251x_can_remove,
-};
-module_spi_driver(mcp251x_can_driver);
-
-MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
-             "Christian Pellegrin <chripell@evolware.org>");
-MODULE_DESCRIPTION("Microchip 251x CAN driver");
-MODULE_LICENSE("GPL v2");
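A note on the mcp251x probe code above (the file is re-added under drivers/net/can/spi/ later in this patch): when DMA is enabled, a single PAGE_SIZE coherent allocation is split in half between the Tx and Rx SPI buffers. A minimal sketch of that layout, assuming the driver's struct mcp251x_priv and headers; the helper name is illustrative only, not part of the driver:

/* Sketch: one coherent page, lower half Tx, upper half Rx, with the
 * matching split applied to the bus addresses.  On failure the probe
 * above falls back to plain devm_kzalloc() buffers.
 */
static int mcp251x_sketch_alloc_dma(struct spi_device *spi,
				    struct mcp251x_priv *priv)
{
	priv->spi_tx_buf = dma_alloc_coherent(&spi->dev, PAGE_SIZE,
					      &priv->spi_tx_dma, GFP_DMA);
	if (!priv->spi_tx_buf)
		return -ENOMEM;

	priv->spi_rx_buf = priv->spi_tx_buf + PAGE_SIZE / 2;
	priv->spi_rx_dma = priv->spi_tx_dma + PAGE_SIZE / 2;
	return 0;
}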
index f19be5269e7be55ae92ccdb12f2274768aeb5ddf..81c711719490511718122ebc5ebf77bc6c883e6c 100644
@@ -1,5 +1,5 @@
 config CAN_MSCAN
-       depends on PPC || M68K
+       depends on PPC
        tristate "Support for Freescale MSCAN based chips"
        ---help---
          The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
new file mode 100644
index 0000000..5268d21
--- /dev/null
@@ -0,0 +1,876 @@
+/* Renesas R-Car CAN device driver
+ *
+ * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/can/led.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/can/platform/rcar_can.h>
+
+#define RCAR_CAN_DRV_NAME      "rcar_can"
+
+/* Mailbox configuration:
+ * mailbox 60 - 63 - Rx FIFO mailboxes
+ * mailbox 56 - 59 - Tx FIFO mailboxes
+ * non-FIFO mailboxes are not used
+ */
+#define RCAR_CAN_N_MBX         64 /* Number of mailboxes in non-FIFO mode */
+#define RCAR_CAN_RX_FIFO_MBX   60 /* Mailbox - window to Rx FIFO */
+#define RCAR_CAN_TX_FIFO_MBX   56 /* Mailbox - window to Tx FIFO */
+#define RCAR_CAN_FIFO_DEPTH    4
+
+/* Mailbox registers structure */
+struct rcar_can_mbox_regs {
+       u32 id;         /* IDE and RTR bits, SID and EID */
+       u8 stub;        /* Not used */
+       u8 dlc;         /* Data Length Code - bits [0..3] */
+       u8 data[8];     /* Data Bytes */
+       u8 tsh;         /* Time Stamp Higher Byte */
+       u8 tsl;         /* Time Stamp Lower Byte */
+};
+
+struct rcar_can_regs {
+       struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */
+       u32 mkr_2_9[8]; /* Mask Registers 2-9 */
+       u32 fidcr[2];   /* FIFO Received ID Compare Register */
+       u32 mkivlr1;    /* Mask Invalid Register 1 */
+       u32 mier1;      /* Mailbox Interrupt Enable Register 1 */
+       u32 mkr_0_1[2]; /* Mask Registers 0-1 */
+       u32 mkivlr0;    /* Mask Invalid Register 0 */
+       u32 mier0;      /* Mailbox Interrupt Enable Register 0 */
+       u8 pad_440[0x3c0];
+       u8 mctl[64];    /* Message Control Registers */
+       u16 ctlr;       /* Control Register */
+       u16 str;        /* Status register */
+       u8 bcr[3];      /* Bit Configuration Register */
+       u8 clkr;        /* Clock Select Register */
+       u8 rfcr;        /* Receive FIFO Control Register */
+       u8 rfpcr;       /* Receive FIFO Pointer Control Register */
+       u8 tfcr;        /* Transmit FIFO Control Register */
+       u8 tfpcr;       /* Transmit FIFO Pointer Control Register */
+       u8 eier;        /* Error Interrupt Enable Register */
+       u8 eifr;        /* Error Interrupt Factor Judge Register */
+       u8 recr;        /* Receive Error Count Register */
+       u8 tecr;        /* Transmit Error Count Register */
+       u8 ecsr;        /* Error Code Store Register */
+       u8 cssr;        /* Channel Search Support Register */
+       u8 mssr;        /* Mailbox Search Status Register */
+       u8 msmr;        /* Mailbox Search Mode Register */
+       u16 tsr;        /* Time Stamp Register */
+       u8 afsr;        /* Acceptance Filter Support Register */
+       u8 pad_857;
+       u8 tcr;         /* Test Control Register */
+       u8 pad_859[7];
+       u8 ier;         /* Interrupt Enable Register */
+       u8 isr;         /* Interrupt Status Register */
+       u8 pad_862;
+       u8 mbsmr;       /* Mailbox Search Mask Register */
+};
+
+struct rcar_can_priv {
+       struct can_priv can;    /* Must be the first member! */
+       struct net_device *ndev;
+       struct napi_struct napi;
+       struct rcar_can_regs __iomem *regs;
+       struct clk *clk;
+       u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
+       u32 tx_head;
+       u32 tx_tail;
+       u8 clock_select;
+       u8 ier;
+};
+
+static const struct can_bittiming_const rcar_can_bittiming_const = {
+       .name = RCAR_CAN_DRV_NAME,
+       .tseg1_min = 4,
+       .tseg1_max = 16,
+       .tseg2_min = 2,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
+/* Control Register bits */
+#define RCAR_CAN_CTLR_BOM      (3 << 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT  (1 << 11) /* Entry to halt mode */
+                                       /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM     (1 << 10)
+#define RCAR_CAN_CTLR_CANM     (3 << 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_HALT        (1 << 9)
+#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
+#define RCAR_CAN_CTLR_MLM      (1 << 3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM     (3 << 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM      (1 << 0) /* Mailbox Mode select */
+
+/* Status Register bits */
+#define RCAR_CAN_STR_RSTST     (1 << 8) /* Reset Status Bit */
+
+/* FIFO Received ID Compare Registers 0 and 1 bits */
+#define RCAR_CAN_FIDCR_IDE     (1 << 31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR     (1 << 30) /* Remote Transmission Request Bit */
+
+/* Receive FIFO Control Register bits */
+#define RCAR_CAN_RFCR_RFEST    (1 << 7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE      (1 << 0) /* Receive FIFO Enable */
+
+/* Transmit FIFO Control Register bits */
+#define RCAR_CAN_TFCR_TFUST    (7 << 1) /* Transmit FIFO Unsent Message */
+                                       /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFUST_SHIFT 1    /* Offset of Transmit FIFO Unsent */
+                                       /* Message Number Status Bits */
+#define RCAR_CAN_TFCR_TFE      (1 << 0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1  2       /* Number of mask registers */
+                                       /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_N_RX_MKREGS2  8
+
+/* Bit Configuration Register settings */
+#define RCAR_CAN_BCR_TSEG1(x)  (((x) & 0x0f) << 20)
+#define RCAR_CAN_BCR_BPR(x)    (((x) & 0x3ff) << 8)
+#define RCAR_CAN_BCR_SJW(x)    (((x) & 0x3) << 4)
+#define RCAR_CAN_BCR_TSEG2(x)  ((x) & 0x07)
+
+/* Mailbox and Mask Registers bits */
+#define RCAR_CAN_IDE           (1 << 31)
+#define RCAR_CAN_RTR           (1 << 30)
+#define RCAR_CAN_SID_SHIFT     18
+
+/* Mailbox Interrupt Enable Register 1 bits */
+#define RCAR_CAN_MIER1_RXFIE   (1 << 28) /* Receive  FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE   (1 << 24) /* Transmit FIFO Interrupt Enable */
+
+/* Interrupt Enable Register bits */
+#define RCAR_CAN_IER_ERSIE     (1 << 5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE     (1 << 4) /* Reception FIFO Interrupt */
+                                       /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE     (1 << 3) /* Transmission FIFO Interrupt */
+                                       /* Enable Bit */
+/* Interrupt Status Register bits */
+#define RCAR_CAN_ISR_ERSF      (1 << 5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF      (1 << 4) /* Reception FIFO Interrupt */
+                                       /* Status Bit */
+#define RCAR_CAN_ISR_TXFF      (1 << 3) /* Transmission FIFO Interrupt */
+                                       /* Status Bit */
+
+/* Error Interrupt Enable Register bits */
+#define RCAR_CAN_EIER_BLIE     (1 << 7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE     (1 << 6) /* Overload Frame Transmit */
+                                       /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE     (1 << 5) /* Receive Overrun  Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE    (1 << 4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE    (1 << 3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE     (1 << 2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE     (1 << 1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE     (1 << 0) /* Bus Error Interrupt Enable */
+
+/* Error Interrupt Factor Judge Register bits */
+#define RCAR_CAN_EIFR_BLIF     (1 << 7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF     (1 << 6) /* Overload Frame Transmission */
+                                        /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF     (1 << 5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF    (1 << 4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF    (1 << 3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF     (1 << 2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF     (1 << 1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF     (1 << 0) /* Bus Error Detect Flag */
+
+/* Error Code Store Register bits */
+#define RCAR_CAN_ECSR_EDPM     (1 << 7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF     (1 << 6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F     (1 << 5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F     (1 << 4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF      (1 << 3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF      (1 << 2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF      (1 << 1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF      (1 << 0) /* Stuff Error Flag */
+
+#define RCAR_CAN_NAPI_WEIGHT   4
+#define MAX_STR_READS          0x100
+
+static void tx_failure_cleanup(struct net_device *ndev)
+{
+       int i;
+
+       for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++)
+               can_free_echo_skb(ndev, i);
+}
+
+static void rcar_can_error(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u8 eifr, txerr = 0, rxerr = 0;
+
+       /* Propagate the error condition to the CAN stack */
+       skb = alloc_can_err_skb(ndev, &cf);
+
+       eifr = readb(&priv->regs->eifr);
+       if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
+               txerr = readb(&priv->regs->tecr);
+               rxerr = readb(&priv->regs->recr);
+               if (skb) {
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[6] = txerr;
+                       cf->data[7] = rxerr;
+               }
+       }
+       if (eifr & RCAR_CAN_EIFR_BEIF) {
+               int rx_errors = 0, tx_errors = 0;
+               u8 ecsr;
+
+               netdev_dbg(priv->ndev, "Bus error interrupt:\n");
+               if (skb) {
+                       cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+                       cf->data[2] = CAN_ERR_PROT_UNSPEC;
+               }
+               ecsr = readb(&priv->regs->ecsr);
+               if (ecsr & RCAR_CAN_ECSR_ADEF) {
+                       netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
+                       tx_errors++;
+                       writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+               }
+               if (ecsr & RCAR_CAN_ECSR_BE0F) {
+                       netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
+                       tx_errors++;
+                       writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[2] |= CAN_ERR_PROT_BIT0;
+               }
+               if (ecsr & RCAR_CAN_ECSR_BE1F) {
+                       netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
+                       tx_errors++;
+                       writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[2] |= CAN_ERR_PROT_BIT1;
+               }
+               if (ecsr & RCAR_CAN_ECSR_CEF) {
+                       netdev_dbg(priv->ndev, "CRC Error\n");
+                       rx_errors++;
+                       writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+               }
+               if (ecsr & RCAR_CAN_ECSR_AEF) {
+                       netdev_dbg(priv->ndev, "ACK Error\n");
+                       tx_errors++;
+                       writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+                       if (skb) {
+                               cf->can_id |= CAN_ERR_ACK;
+                               cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+                       }
+               }
+               if (ecsr & RCAR_CAN_ECSR_FEF) {
+                       netdev_dbg(priv->ndev, "Form Error\n");
+                       rx_errors++;
+                       writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[2] |= CAN_ERR_PROT_FORM;
+               }
+               if (ecsr & RCAR_CAN_ECSR_SEF) {
+                       netdev_dbg(priv->ndev, "Stuff Error\n");
+                       rx_errors++;
+                       writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[2] |= CAN_ERR_PROT_STUFF;
+               }
+
+               priv->can.can_stats.bus_error++;
+               ndev->stats.rx_errors += rx_errors;
+               ndev->stats.tx_errors += tx_errors;
+               writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+       }
+       if (eifr & RCAR_CAN_EIFR_EWIF) {
+               netdev_dbg(priv->ndev, "Error warning interrupt\n");
+               priv->can.state = CAN_STATE_ERROR_WARNING;
+               priv->can.can_stats.error_warning++;
+               /* Clear interrupt condition */
+               writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+               if (skb)
+                       cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
+                                             CAN_ERR_CRTL_RX_WARNING;
+       }
+       if (eifr & RCAR_CAN_EIFR_EPIF) {
+               netdev_dbg(priv->ndev, "Error passive interrupt\n");
+               priv->can.state = CAN_STATE_ERROR_PASSIVE;
+               priv->can.can_stats.error_passive++;
+               /* Clear interrupt condition */
+               writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+               if (skb)
+                       cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
+                                             CAN_ERR_CRTL_RX_PASSIVE;
+       }
+       if (eifr & RCAR_CAN_EIFR_BOEIF) {
+               netdev_dbg(priv->ndev, "Bus-off entry interrupt\n");
+               tx_failure_cleanup(ndev);
+               priv->ier = RCAR_CAN_IER_ERSIE;
+               writeb(priv->ier, &priv->regs->ier);
+               priv->can.state = CAN_STATE_BUS_OFF;
+               /* Clear interrupt condition */
+               writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+               can_bus_off(ndev);
+               if (skb)
+                       cf->can_id |= CAN_ERR_BUSOFF;
+       }
+       if (eifr & RCAR_CAN_EIFR_ORIF) {
+               netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
+               ndev->stats.rx_over_errors++;
+               ndev->stats.rx_errors++;
+               writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+               if (skb) {
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+               }
+       }
+       if (eifr & RCAR_CAN_EIFR_OLIF) {
+               netdev_dbg(priv->ndev,
+                          "Overload Frame Transmission error interrupt\n");
+               ndev->stats.rx_over_errors++;
+               ndev->stats.rx_errors++;
+               writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+               if (skb) {
+                       cf->can_id |= CAN_ERR_PROT;
+                       cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+               }
+       }
+
+       if (skb) {
+               stats->rx_packets++;
+               stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
+       }
+}
+
+static void rcar_can_tx_done(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       u8 isr;
+
+       while (1) {
+               u8 unsent = readb(&priv->regs->tfcr);
+
+               unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
+                         RCAR_CAN_TFCR_TFUST_SHIFT;
+               if (priv->tx_head - priv->tx_tail <= unsent)
+                       break;
+               stats->tx_packets++;
+               stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
+                                               RCAR_CAN_FIFO_DEPTH];
+               priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
+               can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+               priv->tx_tail++;
+               netif_wake_queue(ndev);
+       }
+       /* Clear interrupt */
+       isr = readb(&priv->regs->isr);
+       writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
+       can_led_event(ndev, CAN_LED_EVENT_TX);
+}
+
+static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = dev_id;
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u8 isr;
+
+       isr = readb(&priv->regs->isr);
+       if (!(isr & priv->ier))
+               return IRQ_NONE;
+
+       if (isr & RCAR_CAN_ISR_ERSF)
+               rcar_can_error(ndev);
+
+       if (isr & RCAR_CAN_ISR_TXFF)
+               rcar_can_tx_done(ndev);
+
+       if (isr & RCAR_CAN_ISR_RXFF) {
+               if (napi_schedule_prep(&priv->napi)) {
+                       /* Disable Rx FIFO interrupts */
+                       priv->ier &= ~RCAR_CAN_IER_RXFIE;
+                       writeb(priv->ier, &priv->regs->ier);
+                       __napi_schedule(&priv->napi);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void rcar_can_set_bittiming(struct net_device *dev)
+{
+       struct rcar_can_priv *priv = netdev_priv(dev);
+       struct can_bittiming *bt = &priv->can.bittiming;
+       u32 bcr;
+
+       bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
+             RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
+             RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+       /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
+        * All the registers are big-endian but they get byte-swapped on 32-bit
+        * read/write (but not on 8-bit, contrary to the manuals)...
+        */
+       writel((bcr << 8) | priv->clock_select, &priv->regs->bcr);
+}
+
+static void rcar_can_start(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u16 ctlr;
+       int i;
+
+       /* Set controller to known mode:
+        * - FIFO mailbox mode
+        * - accept all messages
+        * - overrun mode
+        * CAN is in sleep mode after MCU hardware or software reset.
+        */
+       ctlr = readw(&priv->regs->ctlr);
+       ctlr &= ~RCAR_CAN_CTLR_SLPM;
+       writew(ctlr, &priv->regs->ctlr);
+       /* Go to reset mode */
+       ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+       writew(ctlr, &priv->regs->ctlr);
+       for (i = 0; i < MAX_STR_READS; i++) {
+               if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+                       break;
+       }
+       rcar_can_set_bittiming(ndev);
+       ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
+       ctlr |= RCAR_CAN_CTLR_BOM_ENT;  /* Entry to halt mode automatically */
+                                       /* at bus-off */
+       ctlr |= RCAR_CAN_CTLR_MBM;      /* Select FIFO mailbox mode */
+       ctlr |= RCAR_CAN_CTLR_MLM;      /* Overrun mode */
+       writew(ctlr, &priv->regs->ctlr);
+
+       /* Accept all SID and EID */
+       writel(0, &priv->regs->mkr_2_9[6]);
+       writel(0, &priv->regs->mkr_2_9[7]);
+       /* In FIFO mailbox mode, write "0" to bits 24 to 31 */
+       writel(0, &priv->regs->mkivlr1);
+       /* Accept all frames */
+       writel(0, &priv->regs->fidcr[0]);
+       writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]);
+       /* Enable and configure FIFO mailbox interrupts */
+       writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1);
+
+       priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE |
+                   RCAR_CAN_IER_TXFIE;
+       writeb(priv->ier, &priv->regs->ier);
+
+       /* Accumulate error codes */
+       writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr);
+       /* Enable error interrupts */
+       writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE |
+              (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ?
+              RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE |
+              RCAR_CAN_EIER_OLIE, &priv->regs->eier);
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       /* Go to operation mode */
+       writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+       for (i = 0; i < MAX_STR_READS; i++) {
+               if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
+                       break;
+       }
+       /* Enable Rx and Tx FIFO */
+       writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr);
+       writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr);
+}
+
+static int rcar_can_open(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       int err;
+
+       err = clk_prepare_enable(priv->clk);
+       if (err) {
+               netdev_err(ndev, "clk_prepare_enable() failed, error %d\n",
+                          err);
+               goto out;
+       }
+       err = open_candev(ndev);
+       if (err) {
+               netdev_err(ndev, "open_candev() failed, error %d\n", err);
+               goto out_clock;
+       }
+       napi_enable(&priv->napi);
+       err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
+       if (err) {
+               netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
+               goto out_close;
+       }
+       can_led_event(ndev, CAN_LED_EVENT_OPEN);
+       rcar_can_start(ndev);
+       netif_start_queue(ndev);
+       return 0;
+out_close:
+       napi_disable(&priv->napi);
+       close_candev(ndev);
+out_clock:
+       clk_disable_unprepare(priv->clk);
+out:
+       return err;
+}
+
+static void rcar_can_stop(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u16 ctlr;
+       int i;
+
+       /* Go to (force) reset mode */
+       ctlr = readw(&priv->regs->ctlr);
+       ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+       writew(ctlr, &priv->regs->ctlr);
+       for (i = 0; i < MAX_STR_READS; i++) {
+               if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+                       break;
+       }
+       writel(0, &priv->regs->mier0);
+       writel(0, &priv->regs->mier1);
+       writeb(0, &priv->regs->ier);
+       writeb(0, &priv->regs->eier);
+       /* Go to sleep mode */
+       ctlr |= RCAR_CAN_CTLR_SLPM;
+       writew(ctlr, &priv->regs->ctlr);
+       priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int rcar_can_close(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+       rcar_can_stop(ndev);
+       free_irq(ndev->irq, ndev);
+       napi_disable(&priv->napi);
+       clk_disable_unprepare(priv->clk);
+       close_candev(ndev);
+       can_led_event(ndev, CAN_LED_EVENT_STOP);
+       return 0;
+}
+
+static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
+                                      struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       struct can_frame *cf = (struct can_frame *)skb->data;
+       u32 data, i;
+
+       if (can_dropped_invalid_skb(ndev, skb))
+               return NETDEV_TX_OK;
+
+       if (cf->can_id & CAN_EFF_FLAG)  /* Extended frame format */
+               data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+       else                            /* Standard frame format */
+               data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+
+       if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
+               data |= RCAR_CAN_RTR;
+       } else {
+               for (i = 0; i < cf->can_dlc; i++)
+                       writeb(cf->data[i],
+                              &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]);
+       }
+
+       writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id);
+
+       writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
+
+       priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc;
+       can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+       priv->tx_head++;
+       /* Start Tx: write 0xff to the TFPCR register to increment
+        * the CPU-side pointer for the transmit FIFO to the next
+        * mailbox location
+        */
+       writeb(0xff, &priv->regs->tfpcr);
+       /* Stop the queue if we've filled all FIFO entries */
+       if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH)
+               netif_stop_queue(ndev);
+
+       return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops rcar_can_netdev_ops = {
+       .ndo_open = rcar_can_open,
+       .ndo_stop = rcar_can_close,
+       .ndo_start_xmit = rcar_can_start_xmit,
+};
+
+static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
+{
+       struct net_device_stats *stats = &priv->ndev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u32 data;
+       u8 dlc;
+
+       skb = alloc_can_skb(priv->ndev, &cf);
+       if (!skb) {
+               stats->rx_dropped++;
+               return;
+       }
+
+       data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
+       if (data & RCAR_CAN_IDE)
+               cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+       else
+               cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+
+       dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
+       cf->can_dlc = get_can_dlc(dlc);
+       if (data & RCAR_CAN_RTR) {
+               cf->can_id |= CAN_RTR_FLAG;
+       } else {
+               for (dlc = 0; dlc < cf->can_dlc; dlc++)
+                       cf->data[dlc] =
+                       readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
+       }
+
+       can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+
+       stats->rx_bytes += cf->can_dlc;
+       stats->rx_packets++;
+       netif_receive_skb(skb);
+}
+
+static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
+{
+       struct rcar_can_priv *priv = container_of(napi,
+                                                 struct rcar_can_priv, napi);
+       int num_pkts;
+
+       for (num_pkts = 0; num_pkts < quota; num_pkts++) {
+               u8 rfcr, isr;
+
+               isr = readb(&priv->regs->isr);
+               /* Clear interrupt bit */
+               if (isr & RCAR_CAN_ISR_RXFF)
+                       writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr);
+               rfcr = readb(&priv->regs->rfcr);
+               if (rfcr & RCAR_CAN_RFCR_RFEST)
+                       break;
+               rcar_can_rx_pkt(priv);
+               /* Write 0xff to the RFPCR register to increment
+                * the CPU-side pointer for the receive FIFO
+                * to the next mailbox location
+                */
+               writeb(0xff, &priv->regs->rfpcr);
+       }
+       /* All packets processed */
+       if (num_pkts < quota) {
+               napi_complete(napi);
+               priv->ier |= RCAR_CAN_IER_RXFIE;
+               writeb(priv->ier, &priv->regs->ier);
+       }
+       return num_pkts;
+}
+
+static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+       switch (mode) {
+       case CAN_MODE_START:
+               rcar_can_start(ndev);
+               netif_wake_queue(ndev);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int rcar_can_get_berr_counter(const struct net_device *dev,
+                                    struct can_berr_counter *bec)
+{
+       struct rcar_can_priv *priv = netdev_priv(dev);
+       int err;
+
+       err = clk_prepare_enable(priv->clk);
+       if (err)
+               return err;
+       bec->txerr = readb(&priv->regs->tecr);
+       bec->rxerr = readb(&priv->regs->recr);
+       clk_disable_unprepare(priv->clk);
+       return 0;
+}
+
+static int rcar_can_probe(struct platform_device *pdev)
+{
+       struct rcar_can_platform_data *pdata;
+       struct rcar_can_priv *priv;
+       struct net_device *ndev;
+       struct resource *mem;
+       void __iomem *addr;
+       int err = -ENODEV;
+       int irq;
+
+       pdata = dev_get_platdata(&pdev->dev);
+       if (!pdata) {
+               dev_err(&pdev->dev, "No platform data provided!\n");
+               goto fail;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (!irq) {
+               dev_err(&pdev->dev, "No IRQ resource\n");
+               goto fail;
+       }
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       addr = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(addr)) {
+               err = PTR_ERR(addr);
+               goto fail;
+       }
+
+       ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
+       if (!ndev) {
+               dev_err(&pdev->dev, "alloc_candev() failed\n");
+               err = -ENOMEM;
+               goto fail;
+       }
+
+       priv = netdev_priv(ndev);
+
+       priv->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(priv->clk)) {
+               err = PTR_ERR(priv->clk);
+               dev_err(&pdev->dev, "cannot get clock: %d\n", err);
+               goto fail_clk;
+       }
+
+       ndev->netdev_ops = &rcar_can_netdev_ops;
+       ndev->irq = irq;
+       ndev->flags |= IFF_ECHO;
+       priv->ndev = ndev;
+       priv->regs = addr;
+       priv->clock_select = pdata->clock_select;
+       priv->can.clock.freq = clk_get_rate(priv->clk);
+       priv->can.bittiming_const = &rcar_can_bittiming_const;
+       priv->can.do_set_mode = rcar_can_do_set_mode;
+       priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+       platform_set_drvdata(pdev, ndev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
+                      RCAR_CAN_NAPI_WEIGHT);
+       err = register_candev(ndev);
+       if (err) {
+               dev_err(&pdev->dev, "register_candev() failed, error %d\n",
+                       err);
+               goto fail_candev;
+       }
+
+       devm_can_led_init(ndev);
+
+       dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+                priv->regs, ndev->irq);
+
+       return 0;
+fail_candev:
+       netif_napi_del(&priv->napi);
+fail_clk:
+       free_candev(ndev);
+fail:
+       return err;
+}
+
+static int rcar_can_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+
+       unregister_candev(ndev);
+       netif_napi_del(&priv->napi);
+       free_candev(ndev);
+       return 0;
+}
+
+static int __maybe_unused rcar_can_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u16 ctlr;
+
+       if (netif_running(ndev)) {
+               netif_stop_queue(ndev);
+               netif_device_detach(ndev);
+       }
+       ctlr = readw(&priv->regs->ctlr);
+       ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+       writew(ctlr, &priv->regs->ctlr);
+       ctlr |= RCAR_CAN_CTLR_SLPM;
+       writew(ctlr, &priv->regs->ctlr);
+       priv->can.state = CAN_STATE_SLEEPING;
+
+       clk_disable(priv->clk);
+       return 0;
+}
+
+static int __maybe_unused rcar_can_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u16 ctlr;
+       int err;
+
+       err = clk_enable(priv->clk);
+       if (err) {
+               netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+               return err;
+       }
+
+       ctlr = readw(&priv->regs->ctlr);
+       ctlr &= ~RCAR_CAN_CTLR_SLPM;
+       writew(ctlr, &priv->regs->ctlr);
+       ctlr &= ~RCAR_CAN_CTLR_CANM;
+       writew(ctlr, &priv->regs->ctlr);
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       if (netif_running(ndev)) {
+               netif_device_attach(ndev);
+               netif_start_queue(ndev);
+       }
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+
+static struct platform_driver rcar_can_driver = {
+       .driver = {
+               .name = RCAR_CAN_DRV_NAME,
+               .owner = THIS_MODULE,
+               .pm = &rcar_can_pm_ops,
+       },
+       .probe = rcar_can_probe,
+       .remove = rcar_can_remove,
+};
+
+module_platform_driver(rcar_can_driver);
+
+MODULE_AUTHOR("Cogent Embedded, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC");
+MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME);
index c540e3d12e3d826260dbb590e8045c92fa4efb2b..564933ae218c78848dfba1e166f219e9de994e79 100644
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct sja1000_priv *priv;
        struct peak_pci_chan *chan;
-       struct net_device *dev;
+       struct net_device *dev, *prev_dev;
        void __iomem *cfg_base, *reg_base;
        u16 sub_sys_id, icr;
        int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
        writew(0x0, cfg_base + PITA_ICR + 2);
 
        chan = NULL;
-       for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-               unregister_sja1000dev(dev);
-               free_sja1000dev(dev);
+       for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
                priv = netdev_priv(dev);
                chan = priv->priv;
+               prev_dev = chan->prev_dev;
+
+               unregister_sja1000dev(dev);
+               free_sja1000dev(dev);
        }
 
        /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
 
        /* Loop over all registered devices */
        while (1) {
+               struct net_device *prev_dev = chan->prev_dev;
+
                dev_info(&pdev->dev, "removing device %s\n", dev->name);
                unregister_sja1000dev(dev);
                free_sja1000dev(dev);
-               dev = chan->prev_dev;
+               dev = prev_dev;
 
                if (!dev) {
                        /* do that only for first channel */
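Both peak_pci hunks above fix the same ordering problem: chan lives inside the netdev's private area, so chan->prev_dev has to be read before unregister/free releases that memory. A condensed sketch of the corrected walk; peak_pci_chan_of() and sketch_remove_channels() are hypothetical names standing in for the driver's open-coded loop:

/* Hypothetical accessor, shown only to keep the sketch short; in the
 * driver this is netdev_priv(dev)->priv.
 */
static struct peak_pci_chan *peak_pci_chan_of(struct net_device *dev)
{
	struct sja1000_priv *priv = netdev_priv(dev);

	return priv->priv;
}

/* Sketch: capture the back pointer first, only then unregister and
 * free the device whose private data holds it.
 */
static void sketch_remove_channels(struct net_device *dev)
{
	while (dev) {
		struct net_device *prev = peak_pci_chan_of(dev)->prev_dev;

		unregister_sja1000dev(dev);
		free_sja1000dev(dev);	/* chan memory is gone from here */
		dev = prev;
	}
}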
index df136a2516c401a5d96aba3d7c0da2e9f511d1a8..014695d7e6a342c49e9c86ed66f59ba5cddc0335 100644
@@ -46,6 +46,7 @@ static int clk[MAXDEV];
 static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
 static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
 static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static spinlock_t indirect_lock[MAXDEV];  /* lock for indirect access mode */
 
 module_param_array(port, ulong, NULL, S_IRUGO);
 MODULE_PARM_DESC(port, "I/O port number");
@@ -101,19 +102,26 @@ static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv,
 static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv,
                                             int reg)
 {
-       unsigned long base = (unsigned long)priv->reg_base;
+       unsigned long flags, base = (unsigned long)priv->reg_base;
+       u8 readval;
 
+       spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
        outb(reg, base);
-       return inb(base + 1);
+       readval = inb(base + 1);
+       spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
+
+       return readval;
 }
 
 static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
                                                int reg, u8 val)
 {
-       unsigned long base = (unsigned long)priv->reg_base;
+       unsigned long flags, base = (unsigned long)priv->reg_base;
 
+       spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
        outb(reg, base);
        outb(val, base + 1);
+       spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
 }
 
 static int sja1000_isa_probe(struct platform_device *pdev)
@@ -169,6 +177,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
                if (iosize == SJA1000_IOSIZE_INDIRECT) {
                        priv->read_reg = sja1000_isa_port_read_reg_indirect;
                        priv->write_reg = sja1000_isa_port_write_reg_indirect;
+                       spin_lock_init(&indirect_lock[idx]);
                } else {
                        priv->read_reg = sja1000_isa_port_read_reg;
                        priv->write_reg = sja1000_isa_port_write_reg;
@@ -198,6 +207,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
+       dev->dev_id = idx;
 
        err = register_sja1000dev(dev);
        if (err) {
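The sja1000_isa hunks above serialize indirect register access: in indirect mode the controller sits behind an index/data port pair, so the outb() that selects a register and the data access that follows must not be interleaved with the same sequence from interrupt context. A condensed sketch of the locked accessor pattern, assuming one spinlock per card as the patch sets up with spin_lock_init(); the function name is illustrative:

/* Sketch: indexed I/O is a two-step transaction, so both steps run
 * under the same IRQ-safe lock that the interrupt path takes.
 */
static u8 sketch_indexed_read(unsigned long base, spinlock_t *lock, int reg)
{
	unsigned long flags;
	u8 val;

	spin_lock_irqsave(lock, flags);
	outb(reg, base);	/* select register */
	val = inb(base + 1);	/* read its value  */
	spin_unlock_irqrestore(lock, flags);

	return val;
}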
index f5b16e0e3a125f4e38a93408e92b8218808339af..dcf9196f63164b0db099e17a8d1208d9ac158ac4 100644
@@ -322,13 +322,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
        if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
                return;
 
-       spin_lock(&sl->lock);
+       spin_lock_bh(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-               spin_unlock(&sl->lock);
+               spin_unlock_bh(&sl->lock);
                netif_wake_queue(sl->dev);
                return;
        }
@@ -336,7 +336,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
        actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
-       spin_unlock(&sl->lock);
+       spin_unlock_bh(&sl->lock);
 }
 
 /* Send a can_frame to a TTY queue. */
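The slcan change above switches to the BH-safe lock variants: sl->lock is also taken from the netdev transmit path, which can run in softirq context, and a plain spin_lock() held here when that softirq fires on the same CPU would spin forever. A minimal sketch of the rule being applied, assuming the driver's struct slcan; the function name is illustrative:

/* Sketch: a lock shared with softirq (NET_TX) context must disable
 * bottom halves on the non-softirq side, otherwise a transmit softirq
 * raised on this CPU can contend for the lock we already hold.
 */
static void slcan_sketch_locked_update(struct slcan *sl)
{
	spin_lock_bh(&sl->lock);
	/* ... touch xleft/xhead and counters shared with the xmit path ... */
	spin_unlock_bh(&sl->lock);
}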
index 7d8c8f3672dd993119a28f478e5cc43514aa131c..bacd236ce3064d357271ff67e3ccacbfedddd953 100644
@@ -556,15 +556,6 @@ failed:
 /*
  * netdev sysfs
  */
-static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
-               char *buf)
-{
-       struct net_device *ndev = to_net_dev(dev);
-       struct softing_priv *priv = netdev2softing(ndev);
-
-       return sprintf(buf, "%i\n", priv->index);
-}
-
 static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
@@ -609,12 +600,10 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
 static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
 static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
 
 static const struct attribute *const netdev_sysfs_attrs[] = {
-       &dev_attr_channel.attr,
        &dev_attr_chip.attr,
        &dev_attr_output.attr,
        NULL,
@@ -679,17 +668,20 @@ static int softing_netdev_register(struct net_device *netdev)
 {
        int ret;
 
-       netdev->sysfs_groups[0] = &netdev_sysfs_group;
        ret = register_candev(netdev);
        if (ret) {
                dev_alert(&netdev->dev, "register failed\n");
                return ret;
        }
+       if (sysfs_create_group(&netdev->dev.kobj, &netdev_sysfs_group) < 0)
+               netdev_alert(netdev, "sysfs group failed\n");
+
        return 0;
 }
 
 static void softing_netdev_cleanup(struct net_device *netdev)
 {
+       sysfs_remove_group(&netdev->dev.kobj, &netdev_sysfs_group);
        unregister_candev(netdev);
        free_candev(netdev);
 }
@@ -721,8 +713,6 @@ DEV_ATTR_RO(firmware_version, id.fw_version);
 DEV_ATTR_RO_STR(hardware, pdat->name);
 DEV_ATTR_RO(hardware_version, id.hw_version);
 DEV_ATTR_RO(license, id.license);
-DEV_ATTR_RO(frequency, id.freq);
-DEV_ATTR_RO(txpending, tx.pending);
 
 static struct attribute *softing_pdev_attrs[] = {
        &dev_attr_serial.attr,
@@ -731,8 +721,6 @@ static struct attribute *softing_pdev_attrs[] = {
        &dev_attr_hardware.attr,
        &dev_attr_hardware_version.attr,
        &dev_attr_license.attr,
-       &dev_attr_frequency.attr,
-       &dev_attr_txpending.attr,
        NULL,
 };
 
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
new file mode 100644
index 0000000..148cae5
--- /dev/null
@@ -0,0 +1,10 @@
+menu "CAN SPI interfaces"
+       depends on SPI
+
+config CAN_MCP251X
+       tristate "Microchip MCP251x SPI CAN controllers"
+       depends on HAS_DMA
+       ---help---
+         Driver for the Microchip MCP251x SPI CAN controllers.
+
+endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
new file mode 100644
index 0000000..90bcacf
--- /dev/null
@@ -0,0 +1,8 @@
+#
+#  Makefile for the Linux Controller Area Network SPI drivers.
+#
+
+
+obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
new file mode 100644
index 0000000..5df239e
--- /dev/null
@@ -0,0 +1,1266 @@
+/*
+ * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
+ *
+ * MCP2510 support and bug fixes by Christian Pellegrin
+ * <chripell@evolware.org>
+ *
+ * Copyright 2009 Christian Pellegrin EVOL S.r.l.
+ *
+ * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
+ * Written under contract by:
+ *   Chris Elston, Katalix Systems, Ltd.
+ *
+ * Based on Microchip MCP251x CAN controller driver written by
+ * David Vrabel, Copyright 2006 Arcom Control Systems Ltd.
+ *
+ * Based on CAN bus driver for the CCAN controller written by
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
+ * - Simon Kallweit, intefo AG
+ * Copyright 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ *
+ * Your platform definition file should specify something like:
+ *
+ * static struct mcp251x_platform_data mcp251x_info = {
+ *         .oscillator_frequency = 8000000,
+ * };
+ *
+ * static struct spi_board_info spi_board_info[] = {
+ *         {
+ *                 .modalias = "mcp2510",
+ *                     // or "mcp2515" depending on your controller
+ *                 .platform_data = &mcp251x_info,
+ *                 .irq = IRQ_EINT13,
+ *                 .max_speed_hz = 2*1000*1000,
+ *                 .chip_select = 2,
+ *         },
+ * };
+ *
+ * Please see mcp251x.h for a description of the fields in
+ * struct mcp251x_platform_data.
+ *
+ */
+
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/led.h>
+#include <linux/can/platform/mcp251x.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+
+/* SPI interface instruction set */
+#define INSTRUCTION_WRITE      0x02
+#define INSTRUCTION_READ       0x03
+#define INSTRUCTION_BIT_MODIFY 0x05
+#define INSTRUCTION_LOAD_TXB(n)        (0x40 + 2 * (n))
+#define INSTRUCTION_READ_RXB(n)        (((n) == 0) ? 0x90 : 0x94)
+#define INSTRUCTION_RESET      0xC0
+#define RTS_TXB0               0x01
+#define RTS_TXB1               0x02
+#define RTS_TXB2               0x04
+#define INSTRUCTION_RTS(n)     (0x80 | ((n) & 0x07))
+
+
+/* MCP251x registers */
+#define CANSTAT              0x0e
+#define CANCTRL              0x0f
+#  define CANCTRL_REQOP_MASK       0xe0
+#  define CANCTRL_REQOP_CONF       0x80
+#  define CANCTRL_REQOP_LISTEN_ONLY 0x60
+#  define CANCTRL_REQOP_LOOPBACK    0x40
+#  define CANCTRL_REQOP_SLEEP      0x20
+#  define CANCTRL_REQOP_NORMAL     0x00
+#  define CANCTRL_OSM              0x08
+#  define CANCTRL_ABAT             0x10
+#define TEC          0x1c
+#define REC          0x1d
+#define CNF1         0x2a
+#  define CNF1_SJW_SHIFT   6
+#define CNF2         0x29
+#  define CNF2_BTLMODE    0x80
+#  define CNF2_SAM         0x40
+#  define CNF2_PS1_SHIFT   3
+#define CNF3         0x28
+#  define CNF3_SOF        0x08
+#  define CNF3_WAKFIL     0x04
+#  define CNF3_PHSEG2_MASK 0x07
+#define CANINTE              0x2b
+#  define CANINTE_MERRE 0x80
+#  define CANINTE_WAKIE 0x40
+#  define CANINTE_ERRIE 0x20
+#  define CANINTE_TX2IE 0x10
+#  define CANINTE_TX1IE 0x08
+#  define CANINTE_TX0IE 0x04
+#  define CANINTE_RX1IE 0x02
+#  define CANINTE_RX0IE 0x01
+#define CANINTF              0x2c
+#  define CANINTF_MERRF 0x80
+#  define CANINTF_WAKIF 0x40
+#  define CANINTF_ERRIF 0x20
+#  define CANINTF_TX2IF 0x10
+#  define CANINTF_TX1IF 0x08
+#  define CANINTF_TX0IF 0x04
+#  define CANINTF_RX1IF 0x02
+#  define CANINTF_RX0IF 0x01
+#  define CANINTF_RX (CANINTF_RX0IF | CANINTF_RX1IF)
+#  define CANINTF_TX (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)
+#  define CANINTF_ERR (CANINTF_ERRIF)
+#define EFLG         0x2d
+#  define EFLG_EWARN   0x01
+#  define EFLG_RXWAR   0x02
+#  define EFLG_TXWAR   0x04
+#  define EFLG_RXEP    0x08
+#  define EFLG_TXEP    0x10
+#  define EFLG_TXBO    0x20
+#  define EFLG_RX0OVR  0x40
+#  define EFLG_RX1OVR  0x80
+#define TXBCTRL(n)  (((n) * 0x10) + 0x30 + TXBCTRL_OFF)
+#  define TXBCTRL_ABTF 0x40
+#  define TXBCTRL_MLOA 0x20
+#  define TXBCTRL_TXERR 0x10
+#  define TXBCTRL_TXREQ 0x08
+#define TXBSIDH(n)  (((n) * 0x10) + 0x30 + TXBSIDH_OFF)
+#  define SIDH_SHIFT    3
+#define TXBSIDL(n)  (((n) * 0x10) + 0x30 + TXBSIDL_OFF)
+#  define SIDL_SID_MASK    7
+#  define SIDL_SID_SHIFT   5
+#  define SIDL_EXIDE_SHIFT 3
+#  define SIDL_EID_SHIFT   16
+#  define SIDL_EID_MASK    3
+#define TXBEID8(n)  (((n) * 0x10) + 0x30 + TXBEID8_OFF)
+#define TXBEID0(n)  (((n) * 0x10) + 0x30 + TXBEID0_OFF)
+#define TXBDLC(n)   (((n) * 0x10) + 0x30 + TXBDLC_OFF)
+#  define DLC_RTR_SHIFT    6
+#define TXBCTRL_OFF 0
+#define TXBSIDH_OFF 1
+#define TXBSIDL_OFF 2
+#define TXBEID8_OFF 3
+#define TXBEID0_OFF 4
+#define TXBDLC_OFF  5
+#define TXBDAT_OFF  6
+#define RXBCTRL(n)  (((n) * 0x10) + 0x60 + RXBCTRL_OFF)
+#  define RXBCTRL_BUKT 0x04
+#  define RXBCTRL_RXM0 0x20
+#  define RXBCTRL_RXM1 0x40
+#define RXBSIDH(n)  (((n) * 0x10) + 0x60 + RXBSIDH_OFF)
+#  define RXBSIDH_SHIFT 3
+#define RXBSIDL(n)  (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
+#  define RXBSIDL_IDE   0x08
+#  define RXBSIDL_SRR   0x10
+#  define RXBSIDL_EID   3
+#  define RXBSIDL_SHIFT 5
+#define RXBEID8(n)  (((n) * 0x10) + 0x60 + RXBEID8_OFF)
+#define RXBEID0(n)  (((n) * 0x10) + 0x60 + RXBEID0_OFF)
+#define RXBDLC(n)   (((n) * 0x10) + 0x60 + RXBDLC_OFF)
+#  define RXBDLC_LEN_MASK  0x0f
+#  define RXBDLC_RTR       0x40
+#define RXBCTRL_OFF 0
+#define RXBSIDH_OFF 1
+#define RXBSIDL_OFF 2
+#define RXBEID8_OFF 3
+#define RXBEID0_OFF 4
+#define RXBDLC_OFF  5
+#define RXBDAT_OFF  6
+#define RXFSIDH(n) ((n) * 4)
+#define RXFSIDL(n) ((n) * 4 + 1)
+#define RXFEID8(n) ((n) * 4 + 2)
+#define RXFEID0(n) ((n) * 4 + 3)
+#define RXMSIDH(n) ((n) * 4 + 0x20)
+#define RXMSIDL(n) ((n) * 4 + 0x21)
+#define RXMEID8(n) ((n) * 4 + 0x22)
+#define RXMEID0(n) ((n) * 4 + 0x23)
+
+#define GET_BYTE(val, byte)                    \
+       (((val) >> ((byte) * 8)) & 0xff)
+#define SET_BYTE(val, byte)                    \
+       (((val) & 0xff) << ((byte) * 8))
+
+/*
+ * Buffer size required for the largest SPI transfer (i.e., reading a
+ * frame)
+ */
+#define CAN_FRAME_MAX_DATA_LEN 8
+#define SPI_TRANSFER_BUF_LEN   (6 + CAN_FRAME_MAX_DATA_LEN)
+#define CAN_FRAME_MAX_BITS     128
+
+#define TX_ECHO_SKB_MAX        1
+
+#define MCP251X_OST_DELAY_MS   (5)
+
+#define DEVICE_NAME "mcp251x"
+
+static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
+module_param(mcp251x_enable_dma, int, S_IRUGO);
+MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
+
+static const struct can_bittiming_const mcp251x_bittiming_const = {
+       .name = DEVICE_NAME,
+       .tseg1_min = 3,
+       .tseg1_max = 16,
+       .tseg2_min = 2,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 64,
+       .brp_inc = 1,
+};
+
+enum mcp251x_model {
+       CAN_MCP251X_MCP2510     = 0x2510,
+       CAN_MCP251X_MCP2515     = 0x2515,
+};
+
+struct mcp251x_priv {
+       struct can_priv    can;
+       struct net_device *net;
+       struct spi_device *spi;
+       enum mcp251x_model model;
+
+       struct mutex mcp_lock; /* SPI device lock */
+
+       u8 *spi_tx_buf;
+       u8 *spi_rx_buf;
+       dma_addr_t spi_tx_dma;
+       dma_addr_t spi_rx_dma;
+
+       struct sk_buff *tx_skb;
+       int tx_len;
+
+       struct workqueue_struct *wq;
+       struct work_struct tx_work;
+       struct work_struct restart_work;
+
+       int force_quit;
+       int after_suspend;
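+       /* after_suspend is a bitmask of the AFTER_SUSPEND_* flags below */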
+#define AFTER_SUSPEND_UP 1
+#define AFTER_SUSPEND_DOWN 2
+#define AFTER_SUSPEND_POWER 4
+#define AFTER_SUSPEND_RESTART 8
+       int restart_tx;
+       struct regulator *power;
+       struct regulator *transceiver;
+       struct clk *clk;
+};
+
+#define MCP251X_IS(_model) \
+static inline int mcp251x_is_##_model(struct spi_device *spi) \
+{ \
+       struct mcp251x_priv *priv = spi_get_drvdata(spi); \
+       return priv->model == CAN_MCP251X_MCP##_model; \
+}
+
+MCP251X_IS(2510);
+MCP251X_IS(2515);
+
+static void mcp251x_clean(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+
+       if (priv->tx_skb || priv->tx_len)
+               net->stats.tx_errors++;
+       if (priv->tx_skb)
+               dev_kfree_skb(priv->tx_skb);
+       if (priv->tx_len)
+               can_free_echo_skb(priv->net, 0);
+       priv->tx_skb = NULL;
+       priv->tx_len = 0;
+}
+
+/*
+ * Note on handling the error return of mcp251x_spi_trans: accessing
+ * registers via SPI is conceptually no different from using normal
+ * I/O assembler instructions, although it is much more complicated
+ * from a practical point of view. Checking the return value of every
+ * single transfer would bracket each read{b,l}/write{b,l} in
+ * "if ( < 0) error();" and turn the driver into a mess (this is one
+ * of the rare situations where C++-like exception handling could be
+ * useful). So we only check that transfers are OK at the beginning of
+ * our conversation with the chip, and otherwise just avoid doing
+ * really nasty things (like injecting bogus packets into the network
+ * stack).
+ */
+static int mcp251x_spi_trans(struct spi_device *spi, int len)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       struct spi_transfer t = {
+               .tx_buf = priv->spi_tx_buf,
+               .rx_buf = priv->spi_rx_buf,
+               .len = len,
+               .cs_change = 0,
+       };
+       struct spi_message m;
+       int ret;
+
+       spi_message_init(&m);
+
+       if (mcp251x_enable_dma) {
+               t.tx_dma = priv->spi_tx_dma;
+               t.rx_dma = priv->spi_rx_dma;
+               m.is_dma_mapped = 1;
+       }
+
+       spi_message_add_tail(&t, &m);
+
+       ret = spi_sync(spi, &m);
+       if (ret)
+               dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
+       return ret;
+}
+
+static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       u8 val = 0;
+
+       priv->spi_tx_buf[0] = INSTRUCTION_READ;
+       priv->spi_tx_buf[1] = reg;
+
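+       /*
+        * 3-byte full-duplex transfer: READ instruction, register address,
+        * then the register value is clocked back in the third byte.
+        */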
+       mcp251x_spi_trans(spi, 3);
+       val = priv->spi_rx_buf[2];
+
+       return val;
+}
+
+static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
+               uint8_t *v1, uint8_t *v2)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       priv->spi_tx_buf[0] = INSTRUCTION_READ;
+       priv->spi_tx_buf[1] = reg;
+
+       mcp251x_spi_trans(spi, 4);
+
+       *v1 = priv->spi_rx_buf[2];
+       *v2 = priv->spi_rx_buf[3];
+}
+
+static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
+       priv->spi_tx_buf[1] = reg;
+       priv->spi_tx_buf[2] = val;
+
+       mcp251x_spi_trans(spi, 3);
+}
+
+static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
+                              u8 mask, uint8_t val)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
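+       /*
+        * BIT MODIFY command: only the register bits set in 'mask' are
+        * updated with the corresponding bits of 'val'.
+        */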
+       priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
+       priv->spi_tx_buf[1] = reg;
+       priv->spi_tx_buf[2] = mask;
+       priv->spi_tx_buf[3] = val;
+
+       mcp251x_spi_trans(spi, 4);
+}
+
+static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
+                               int len, int tx_buf_idx)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       if (mcp251x_is_2510(spi)) {
+               int i;
+
+               for (i = 1; i < TXBDAT_OFF + len; i++)
+                       mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
+                                         buf[i]);
+       } else {
+               memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
+               mcp251x_spi_trans(spi, TXBDAT_OFF + len);
+       }
+}
+
+static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
+                         int tx_buf_idx)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       u32 sid, eid, exide, rtr;
+       u8 buf[SPI_TRANSFER_BUF_LEN];
+
+       exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
+       if (exide)
+               sid = (frame->can_id & CAN_EFF_MASK) >> 18;
+       else
+               sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */
+       eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */
+       rtr = (frame->can_id & CAN_RTR_FLAG) ? 1 : 0; /* Remote transmission */
+
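+       /*
+        * For extended frames the 29-bit id is split: the upper 11 bits
+        * go into SIDH/SIDL as the standard id, the lower 18 bits into
+        * the EID fields (two bits in SIDL, the rest in EID8/EID0).
+        */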
+       buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx);
+       buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT;
+       buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) |
+               (exide << SIDL_EXIDE_SHIFT) |
+               ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
+       buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
+       buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
+       buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
+       memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
+       mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
+
+       /* use INSTRUCTION_RTS to avoid the "repeated frame problem" */
+       priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
+       mcp251x_spi_trans(priv->spi, 1);
+}
+
+static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
+                               int buf_idx)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       if (mcp251x_is_2510(spi)) {
+               int i, len;
+
+               for (i = 1; i < RXBDAT_OFF; i++)
+                       buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
+
+               len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
+               for (; i < (RXBDAT_OFF + len); i++)
+                       buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
+       } else {
+               priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
+               mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
+               memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
+       }
+}
+
+static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       struct sk_buff *skb;
+       struct can_frame *frame;
+       u8 buf[SPI_TRANSFER_BUF_LEN];
+
+       skb = alloc_can_skb(priv->net, &frame);
+       if (!skb) {
+               dev_err(&spi->dev, "cannot allocate RX skb\n");
+               priv->net->stats.rx_dropped++;
+               return;
+       }
+
+       mcp251x_hw_rx_frame(spi, buf, buf_idx);
+       if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) {
+               /* Extended ID format */
+               frame->can_id = CAN_EFF_FLAG;
+               frame->can_id |=
+                       /* Extended ID part */
+                       SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) |
+                       SET_BYTE(buf[RXBEID8_OFF], 1) |
+                       SET_BYTE(buf[RXBEID0_OFF], 0) |
+                       /* Standard ID part */
+                       (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
+                         (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18);
+               /* Remote transmission request */
+               if (buf[RXBDLC_OFF] & RXBDLC_RTR)
+                       frame->can_id |= CAN_RTR_FLAG;
+       } else {
+               /* Standard ID format */
+               frame->can_id =
+                       (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
+                       (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+               if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
+                       frame->can_id |= CAN_RTR_FLAG;
+       }
+       /* Data length */
+       frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
+       memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
+
+       priv->net->stats.rx_packets++;
+       priv->net->stats.rx_bytes += frame->can_dlc;
+
+       can_led_event(priv->net, CAN_LED_EVENT_RX);
+
+       netif_rx_ni(skb);
+}
+
+static void mcp251x_hw_sleep(struct spi_device *spi)
+{
+       mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
+}
+
+static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
+                                          struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+
+       if (priv->tx_skb || priv->tx_len) {
+               dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
+               return NETDEV_TX_BUSY;
+       }
+
+       if (can_dropped_invalid_skb(net, skb))
+               return NETDEV_TX_OK;
+
+       netif_stop_queue(net);
+       priv->tx_skb = skb;
+       queue_work(priv->wq, &priv->tx_work);
+
+       return NETDEV_TX_OK;
+}
+
+static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+
+       switch (mode) {
+       case CAN_MODE_START:
+               mcp251x_clean(net);
+               /* We have to delay work since SPI I/O may sleep */
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               priv->restart_tx = 1;
+               if (priv->can.restart_ms == 0)
+                       priv->after_suspend = AFTER_SUSPEND_RESTART;
+               queue_work(priv->wq, &priv->restart_work);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int mcp251x_set_normal_mode(struct spi_device *spi)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       unsigned long timeout;
+
+       /* Enable interrupts */
+       mcp251x_write_reg(spi, CANINTE,
+                         CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
+                         CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+               /* Put device into loopback mode */
+               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
+       } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+               /* Put device into listen-only mode */
+               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
+       } else {
+               /* Put device into normal mode */
+               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
+
+               /* Wait for the device to enter normal mode */
+               timeout = jiffies + HZ;
+               while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
+                       schedule();
+                       if (time_after(jiffies, timeout)) {
+                               dev_err(&spi->dev,
+                                       "MCP251x didn't enter normal mode\n");
+                               return -EBUSY;
+                       }
+               }
+       }
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+       return 0;
+}
+
+static int mcp251x_do_set_bittiming(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct can_bittiming *bt = &priv->can.bittiming;
+       struct spi_device *spi = priv->spi;
+
+       mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) |
+                         (bt->brp - 1));
+       mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE |
+                         (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
+                          CNF2_SAM : 0) |
+                         ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) |
+                         (bt->prop_seg - 1));
+       mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
+                          (bt->phase_seg2 - 1));
+       dev_dbg(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
+               mcp251x_read_reg(spi, CNF1),
+               mcp251x_read_reg(spi, CNF2),
+               mcp251x_read_reg(spi, CNF3));
+
+       return 0;
+}
+
+static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
+                        struct spi_device *spi)
+{
+       mcp251x_do_set_bittiming(net);
+
+       mcp251x_write_reg(spi, RXBCTRL(0),
+                         RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
+       mcp251x_write_reg(spi, RXBCTRL(1),
+                         RXBCTRL_RXM0 | RXBCTRL_RXM1);
+       return 0;
+}
+
+static int mcp251x_hw_reset(struct spi_device *spi)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       u8 reg;
+       int ret;
+
+       /* Wait for oscillator startup timer after power up */
+       mdelay(MCP251X_OST_DELAY_MS);
+
+       priv->spi_tx_buf[0] = INSTRUCTION_RESET;
+       ret = mcp251x_spi_trans(spi, 1);
+       if (ret)
+               return ret;
+
+       /* Wait for oscillator startup timer after reset */
+       mdelay(MCP251X_OST_DELAY_MS);
+
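+       /* After reset the chip should come up in configuration mode. */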
+       reg = mcp251x_read_reg(spi, CANSTAT);
+       if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
+               return -ENODEV;
+
+       return 0;
+}
+
+static int mcp251x_hw_probe(struct spi_device *spi)
+{
+       u8 ctrl;
+       int ret;
+
+       ret = mcp251x_hw_reset(spi);
+       if (ret)
+               return ret;
+
+       ctrl = mcp251x_read_reg(spi, CANCTRL);
+
+       dev_dbg(&spi->dev, "CANCTRL 0x%02x\n", ctrl);
+
+       /* Check for power up default value */
+       if ((ctrl & 0x17) != 0x07)
+               return -ENODEV;
+
+       return 0;
+}
+
+static int mcp251x_power_enable(struct regulator *reg, int enable)
+{
+       if (IS_ERR_OR_NULL(reg))
+               return 0;
+
+       if (enable)
+               return regulator_enable(reg);
+       else
+               return regulator_disable(reg);
+}
+
+static void mcp251x_open_clean(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+
+       free_irq(spi->irq, priv);
+       mcp251x_hw_sleep(spi);
+       mcp251x_power_enable(priv->transceiver, 0);
+       close_candev(net);
+}
+
+static int mcp251x_stop(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+
+       close_candev(net);
+
+       priv->force_quit = 1;
+       free_irq(spi->irq, priv);
+       destroy_workqueue(priv->wq);
+       priv->wq = NULL;
+
+       mutex_lock(&priv->mcp_lock);
+
+       /* Disable and clear pending interrupts */
+       mcp251x_write_reg(spi, CANINTE, 0x00);
+       mcp251x_write_reg(spi, CANINTF, 0x00);
+
+       mcp251x_write_reg(spi, TXBCTRL(0), 0);
+       mcp251x_clean(net);
+
+       mcp251x_hw_sleep(spi);
+
+       mcp251x_power_enable(priv->transceiver, 0);
+
+       priv->can.state = CAN_STATE_STOPPED;
+
+       mutex_unlock(&priv->mcp_lock);
+
+       can_led_event(net, CAN_LED_EVENT_STOP);
+
+       return 0;
+}
+
+static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
+{
+       struct sk_buff *skb;
+       struct can_frame *frame;
+
+       skb = alloc_can_err_skb(net, &frame);
+       if (skb) {
+               frame->can_id |= can_id;
+               frame->data[1] = data1;
+               netif_rx_ni(skb);
+       } else {
+               netdev_err(net, "cannot allocate error skb\n");
+       }
+}
+
+static void mcp251x_tx_work_handler(struct work_struct *ws)
+{
+       struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
+                                                tx_work);
+       struct spi_device *spi = priv->spi;
+       struct net_device *net = priv->net;
+       struct can_frame *frame;
+
+       mutex_lock(&priv->mcp_lock);
+       if (priv->tx_skb) {
+               if (priv->can.state == CAN_STATE_BUS_OFF) {
+                       mcp251x_clean(net);
+               } else {
+                       frame = (struct can_frame *)priv->tx_skb->data;
+
+                       if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
+                               frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
+                       mcp251x_hw_tx(spi, frame, 0);
+                       priv->tx_len = 1 + frame->can_dlc;
+                       can_put_echo_skb(priv->tx_skb, net, 0);
+                       priv->tx_skb = NULL;
+               }
+       }
+       mutex_unlock(&priv->mcp_lock);
+}
+
+static void mcp251x_restart_work_handler(struct work_struct *ws)
+{
+       struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
+                                                restart_work);
+       struct spi_device *spi = priv->spi;
+       struct net_device *net = priv->net;
+
+       mutex_lock(&priv->mcp_lock);
+       if (priv->after_suspend) {
+               mcp251x_hw_reset(spi);
+               mcp251x_setup(net, priv, spi);
+               if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
+                       mcp251x_set_normal_mode(spi);
+               } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
+                       netif_device_attach(net);
+                       mcp251x_clean(net);
+                       mcp251x_set_normal_mode(spi);
+                       netif_wake_queue(net);
+               } else {
+                       mcp251x_hw_sleep(spi);
+               }
+               priv->after_suspend = 0;
+               priv->force_quit = 0;
+       }
+
+       if (priv->restart_tx) {
+               priv->restart_tx = 0;
+               mcp251x_write_reg(spi, TXBCTRL(0), 0);
+               mcp251x_clean(net);
+               netif_wake_queue(net);
+               mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
+       }
+       mutex_unlock(&priv->mcp_lock);
+}
+
+static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+{
+       struct mcp251x_priv *priv = dev_id;
+       struct spi_device *spi = priv->spi;
+       struct net_device *net = priv->net;
+
+       mutex_lock(&priv->mcp_lock);
+       while (!priv->force_quit) {
+               enum can_state new_state;
+               u8 intf, eflag;
+               u8 clear_intf = 0;
+               int can_id = 0, data1 = 0;
+
+               mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
+
+               /* mask out flags we don't care about */
+               intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
+               /* receive buffer 0 */
+               if (intf & CANINTF_RX0IF) {
+                       mcp251x_hw_rx(spi, 0);
+                       /*
+                        * Free one buffer ASAP
+                        * (The MCP2515 does this automatically.)
+                        */
+                       if (mcp251x_is_2510(spi))
+                               mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
+               }
+
+               /* receive buffer 1 */
+               if (intf & CANINTF_RX1IF) {
+                       mcp251x_hw_rx(spi, 1);
+                       /* the MCP2515 does this automatically */
+                       if (mcp251x_is_2510(spi))
+                               clear_intf |= CANINTF_RX1IF;
+               }
+
+               /* any error or tx interrupt we need to clear? */
+               if (intf & (CANINTF_ERR | CANINTF_TX))
+                       clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
+               if (clear_intf)
+                       mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
+
+               if (eflag)
+                       mcp251x_write_bits(spi, EFLG, eflag, 0x00);
+
+               /* Update can state */
+               if (eflag & EFLG_TXBO) {
+                       new_state = CAN_STATE_BUS_OFF;
+                       can_id |= CAN_ERR_BUSOFF;
+               } else if (eflag & EFLG_TXEP) {
+                       new_state = CAN_STATE_ERROR_PASSIVE;
+                       can_id |= CAN_ERR_CRTL;
+                       data1 |= CAN_ERR_CRTL_TX_PASSIVE;
+               } else if (eflag & EFLG_RXEP) {
+                       new_state = CAN_STATE_ERROR_PASSIVE;
+                       can_id |= CAN_ERR_CRTL;
+                       data1 |= CAN_ERR_CRTL_RX_PASSIVE;
+               } else if (eflag & EFLG_TXWAR) {
+                       new_state = CAN_STATE_ERROR_WARNING;
+                       can_id |= CAN_ERR_CRTL;
+                       data1 |= CAN_ERR_CRTL_TX_WARNING;
+               } else if (eflag & EFLG_RXWAR) {
+                       new_state = CAN_STATE_ERROR_WARNING;
+                       can_id |= CAN_ERR_CRTL;
+                       data1 |= CAN_ERR_CRTL_RX_WARNING;
+               } else {
+                       new_state = CAN_STATE_ERROR_ACTIVE;
+               }
+
+               /* Update can state statistics */
+               switch (priv->can.state) {
+               case CAN_STATE_ERROR_ACTIVE:
+                       if (new_state >= CAN_STATE_ERROR_WARNING &&
+                           new_state <= CAN_STATE_BUS_OFF)
+                               priv->can.can_stats.error_warning++;
+               case CAN_STATE_ERROR_WARNING:   /* fallthrough */
+                       if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+                           new_state <= CAN_STATE_BUS_OFF)
+                               priv->can.can_stats.error_passive++;
+                       break;
+               default:
+                       break;
+               }
+               priv->can.state = new_state;
+
+               if (intf & CANINTF_ERRIF) {
+                       /* Handle overflow counters */
+                       if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
+                               if (eflag & EFLG_RX0OVR) {
+                                       net->stats.rx_over_errors++;
+                                       net->stats.rx_errors++;
+                               }
+                               if (eflag & EFLG_RX1OVR) {
+                                       net->stats.rx_over_errors++;
+                                       net->stats.rx_errors++;
+                               }
+                               can_id |= CAN_ERR_CRTL;
+                               data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
+                       }
+                       mcp251x_error_skb(net, can_id, data1);
+               }
+
+               if (priv->can.state == CAN_STATE_BUS_OFF) {
+                       if (priv->can.restart_ms == 0) {
+                               priv->force_quit = 1;
+                               can_bus_off(net);
+                               mcp251x_hw_sleep(spi);
+                               break;
+                       }
+               }
+
+               if (intf == 0)
+                       break;
+
+               if (intf & CANINTF_TX) {
+                       net->stats.tx_packets++;
+                       net->stats.tx_bytes += priv->tx_len - 1;
+                       can_led_event(net, CAN_LED_EVENT_TX);
+                       if (priv->tx_len) {
+                               can_get_echo_skb(net, 0);
+                               priv->tx_len = 0;
+                       }
+                       netif_wake_queue(net);
+               }
+
+       }
+       mutex_unlock(&priv->mcp_lock);
+       return IRQ_HANDLED;
+}
+
+static int mcp251x_open(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+       unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
+       int ret;
+
+       ret = open_candev(net);
+       if (ret) {
+               dev_err(&spi->dev, "unable to set initial baudrate!\n");
+               return ret;
+       }
+
+       mutex_lock(&priv->mcp_lock);
+       mcp251x_power_enable(priv->transceiver, 1);
+
+       priv->force_quit = 0;
+       priv->tx_skb = NULL;
+       priv->tx_len = 0;
+
+       ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
+                                  flags, DEVICE_NAME, priv);
+       if (ret) {
+               dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+               mcp251x_power_enable(priv->transceiver, 0);
+               close_candev(net);
+               goto open_unlock;
+       }
+
+       priv->wq = create_freezable_workqueue("mcp251x_wq");
+       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
+
+       ret = mcp251x_hw_reset(spi);
+       if (ret) {
+               mcp251x_open_clean(net);
+               goto open_unlock;
+       }
+       ret = mcp251x_setup(net, priv, spi);
+       if (ret) {
+               mcp251x_open_clean(net);
+               goto open_unlock;
+       }
+       ret = mcp251x_set_normal_mode(spi);
+       if (ret) {
+               mcp251x_open_clean(net);
+               goto open_unlock;
+       }
+
+       can_led_event(net, CAN_LED_EVENT_OPEN);
+
+       netif_wake_queue(net);
+
+open_unlock:
+       mutex_unlock(&priv->mcp_lock);
+       return ret;
+}
+
+static const struct net_device_ops mcp251x_netdev_ops = {
+       .ndo_open = mcp251x_open,
+       .ndo_stop = mcp251x_stop,
+       .ndo_start_xmit = mcp251x_hard_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct of_device_id mcp251x_of_match[] = {
+       {
+               .compatible     = "microchip,mcp2510",
+               .data           = (void *)CAN_MCP251X_MCP2510,
+       },
+       {
+               .compatible     = "microchip,mcp2515",
+               .data           = (void *)CAN_MCP251X_MCP2515,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mcp251x_of_match);
+
+static const struct spi_device_id mcp251x_id_table[] = {
+       {
+               .name           = "mcp2510",
+               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2510,
+       },
+       {
+               .name           = "mcp2515",
+               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2515,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
+static int mcp251x_can_probe(struct spi_device *spi)
+{
+       const struct of_device_id *of_id = of_match_device(mcp251x_of_match,
+                                                          &spi->dev);
+       struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
+       struct net_device *net;
+       struct mcp251x_priv *priv;
+       struct clk *clk;
+       int freq, ret;
+
+       clk = devm_clk_get(&spi->dev, NULL);
+       if (IS_ERR(clk)) {
+               if (pdata)
+                       freq = pdata->oscillator_frequency;
+               else
+                       return PTR_ERR(clk);
+       } else {
+               freq = clk_get_rate(clk);
+       }
+
+       /* Sanity check */
+       if (freq < 1000000 || freq > 25000000)
+               return -ERANGE;
+
+       /* Allocate can/net device */
+       net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
+       if (!net)
+               return -ENOMEM;
+
+       if (!IS_ERR(clk)) {
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       goto out_free;
+       }
+
+       net->netdev_ops = &mcp251x_netdev_ops;
+       net->flags |= IFF_ECHO;
+
+       priv = netdev_priv(net);
+       priv->can.bittiming_const = &mcp251x_bittiming_const;
+       priv->can.do_set_mode = mcp251x_do_set_mode;
+       priv->can.clock.freq = freq / 2;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+               CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
+       if (of_id)
+               priv->model = (enum mcp251x_model)of_id->data;
+       else
+               priv->model = spi_get_device_id(spi)->driver_data;
+       priv->net = net;
+       priv->clk = clk;
+
+       spi_set_drvdata(spi, priv);
+
+       /* Configure the SPI bus */
+       spi->bits_per_word = 8;
+       if (mcp251x_is_2510(spi))
+               spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+       else
+               spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
+       ret = spi_setup(spi);
+       if (ret)
+               goto out_clk;
+
+       priv->power = devm_regulator_get(&spi->dev, "vdd");
+       priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+       if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
+           (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
+               ret = -EPROBE_DEFER;
+               goto out_clk;
+       }
+
+       ret = mcp251x_power_enable(priv->power, 1);
+       if (ret)
+               goto out_clk;
+
+       priv->spi = spi;
+       mutex_init(&priv->mcp_lock);
+
+       /* If requested, allocate DMA buffers */
+       if (mcp251x_enable_dma) {
+               spi->dev.coherent_dma_mask = ~0;
+
+               /*
+                * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
+                * that much and share it between Tx and Rx DMA buffers.
+                */
+               priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
+                                                     PAGE_SIZE,
+                                                     &priv->spi_tx_dma,
+                                                     GFP_DMA);
+
+               if (priv->spi_tx_buf) {
+                       priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
+                       priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
+                                                       (PAGE_SIZE / 2));
+               } else {
+                       /* Fall back to non-DMA */
+                       mcp251x_enable_dma = 0;
+               }
+       }
+
+       /* Allocate non-DMA buffers */
+       if (!mcp251x_enable_dma) {
+               priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+                                               GFP_KERNEL);
+               if (!priv->spi_tx_buf) {
+                       ret = -ENOMEM;
+                       goto error_probe;
+               }
+               priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+                                               GFP_KERNEL);
+               if (!priv->spi_rx_buf) {
+                       ret = -ENOMEM;
+                       goto error_probe;
+               }
+       }
+
+       SET_NETDEV_DEV(net, &spi->dev);
+
+       /* It is OK not to lock the MCP here, no one knows about it yet */
+       ret = mcp251x_hw_probe(spi);
+       if (ret)
+               goto error_probe;
+
+       mcp251x_hw_sleep(spi);
+
+       ret = register_candev(net);
+       if (ret)
+               goto error_probe;
+
+       devm_can_led_init(net);
+
+       return 0;
+
+error_probe:
+       if (mcp251x_enable_dma)
+               dma_free_coherent(&spi->dev, PAGE_SIZE,
+                                 priv->spi_tx_buf, priv->spi_tx_dma);
+       mcp251x_power_enable(priv->power, 0);
+
+out_clk:
+       if (!IS_ERR(clk))
+               clk_disable_unprepare(clk);
+
+out_free:
+       free_candev(net);
+
+       return ret;
+}
+
+static int mcp251x_can_remove(struct spi_device *spi)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       struct net_device *net = priv->net;
+
+       unregister_candev(net);
+
+       if (mcp251x_enable_dma) {
+               dma_free_coherent(&spi->dev, PAGE_SIZE,
+                                 priv->spi_tx_buf, priv->spi_tx_dma);
+       }
+
+       mcp251x_power_enable(priv->power, 0);
+
+       if (!IS_ERR(priv->clk))
+               clk_disable_unprepare(priv->clk);
+
+       free_candev(net);
+
+       return 0;
+}
+
+static int __maybe_unused mcp251x_can_suspend(struct device *dev)
+{
+       struct spi_device *spi = to_spi_device(dev);
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       struct net_device *net = priv->net;
+
+       priv->force_quit = 1;
+       disable_irq(spi->irq);
+       /*
+        * Note: at this point neither IST nor workqueues are running.
+        * open/stop cannot be called anyway so locking is not needed
+        */
+       if (netif_running(net)) {
+               netif_device_detach(net);
+
+               mcp251x_hw_sleep(spi);
+               mcp251x_power_enable(priv->transceiver, 0);
+               priv->after_suspend = AFTER_SUSPEND_UP;
+       } else {
+               priv->after_suspend = AFTER_SUSPEND_DOWN;
+       }
+
+       if (!IS_ERR_OR_NULL(priv->power)) {
+               regulator_disable(priv->power);
+               priv->after_suspend |= AFTER_SUSPEND_POWER;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused mcp251x_can_resume(struct device *dev)
+{
+       struct spi_device *spi = to_spi_device(dev);
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+               mcp251x_power_enable(priv->power, 1);
+               queue_work(priv->wq, &priv->restart_work);
+       } else {
+               if (priv->after_suspend & AFTER_SUSPEND_UP) {
+                       mcp251x_power_enable(priv->transceiver, 1);
+                       queue_work(priv->wq, &priv->restart_work);
+               } else {
+                       priv->after_suspend = 0;
+               }
+       }
+       priv->force_quit = 0;
+       enable_irq(spi->irq);
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
+       mcp251x_can_resume);
+
+static struct spi_driver mcp251x_can_driver = {
+       .driver = {
+               .name = DEVICE_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = mcp251x_of_match,
+               .pm = &mcp251x_can_pm_ops,
+       },
+       .id_table = mcp251x_id_table,
+       .probe = mcp251x_can_probe,
+       .remove = mcp251x_can_remove,
+};
+module_spi_driver(mcp251x_can_driver);
+
+MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
+             "Christian Pellegrin <chripell@evolware.org>");
+MODULE_DESCRIPTION("Microchip 251x CAN driver");
+MODULE_LICENSE("GPL v2");
index fc96a3d83ebecde338cdc5fd1d7b12f5ee9fbc25..a77db919363c08baa2c76dbb35c60701ea2c68ff 100644 (file)
@@ -13,13 +13,21 @@ config CAN_ESD_USB2
           This driver supports the CAN-USB/2 interface
           from esd electronic system design gmbh (http://www.esd.eu).
 
+config CAN_GS_USB
+       tristate "Geschwister Schneider UG interfaces"
+       ---help---
+         This driver supports the Geschwister Schneider USB/CAN devices.
+         This driver supports the Geschwister Schneider USB/CAN devices.
+         If unsure, choose N. Choose Y for built-in support, or M to
+         compile it as a module (the module will be named gs_usb).
+
 config CAN_KVASER_USB
        tristate "Kvaser CAN/USB interface"
        ---help---
          This driver adds support for Kvaser CAN/USB devices like Kvaser
          Leaf Light.
 
-         The driver gives support for the following devices:
+         The driver provides support for the following devices:
            - Kvaser Leaf Light
            - Kvaser Leaf Professional HS
            - Kvaser Leaf SemiPro HS
@@ -36,6 +44,8 @@ config CAN_KVASER_USB
            - Kvaser Leaf Light "China"
            - Kvaser BlackBird SemiPro
            - Kvaser USBcan R
+           - Kvaser Leaf Light v2
+           - Kvaser Mini PCI Express HS
 
          If unsure, say N.
 
index becef460a91aeb28851e91752b67ddec84ef931c..7b9a393b1ac82a1caf4684a704d7121ad16dc3c9 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
 obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
 obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
 obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
 obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
new file mode 100644 (file)
index 0000000..04b0f84
--- /dev/null
@@ -0,0 +1,971 @@
+/* CAN driver for Geschwister Schneider USB/CAN devices.
+ *
+ * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ *
+ * Many thanks to all socketcan devs!
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+/* Device specific constants */
+#define USB_GSUSB_1_VENDOR_ID      0x1d50
+#define USB_GSUSB_1_PRODUCT_ID     0x606f
+
+#define GSUSB_ENDPOINT_IN          1
+#define GSUSB_ENDPOINT_OUT         2
+
+/* Device specific constants */
+enum gs_usb_breq {
+       GS_USB_BREQ_HOST_FORMAT = 0,
+       GS_USB_BREQ_BITTIMING,
+       GS_USB_BREQ_MODE,
+       GS_USB_BREQ_BERR,
+       GS_USB_BREQ_BT_CONST,
+       GS_USB_BREQ_DEVICE_CONFIG
+};
+
+enum gs_can_mode {
+       /* reset a channel. turns it off */
+       GS_CAN_MODE_RESET = 0,
+       /* starts a channel */
+       GS_CAN_MODE_START
+};
+
+enum gs_can_state {
+       GS_CAN_STATE_ERROR_ACTIVE = 0,
+       GS_CAN_STATE_ERROR_WARNING,
+       GS_CAN_STATE_ERROR_PASSIVE,
+       GS_CAN_STATE_BUS_OFF,
+       GS_CAN_STATE_STOPPED,
+       GS_CAN_STATE_SLEEPING
+};
+
+/* data types passed between host and device */
+struct gs_host_config {
+       u32 byte_order;
+} __packed;
+/* All data exchanged between host and device is exchanged in host byte order,
+ * thanks to the struct gs_host_config byte_order member, which is sent first
+ * to indicate the desired byte order.
+ */
+
+struct gs_device_config {
+       u8 reserved1;
+       u8 reserved2;
+       u8 reserved3;
+       u8 icount;
+       u32 sw_version;
+       u32 hw_version;
+} __packed;
+
+#define GS_CAN_MODE_NORMAL               0
+#define GS_CAN_MODE_LISTEN_ONLY          (1<<0)
+#define GS_CAN_MODE_LOOP_BACK            (1<<1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE        (1<<2)
+#define GS_CAN_MODE_ONE_SHOT             (1<<3)
+
+struct gs_device_mode {
+       u32 mode;
+       u32 flags;
+} __packed;
+
+struct gs_device_state {
+       u32 state;
+       u32 rxerr;
+       u32 txerr;
+} __packed;
+
+struct gs_device_bittiming {
+       u32 prop_seg;
+       u32 phase_seg1;
+       u32 phase_seg2;
+       u32 sjw;
+       u32 brp;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY      (1<<0)
+#define GS_CAN_FEATURE_LOOP_BACK        (1<<1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE    (1<<2)
+#define GS_CAN_FEATURE_ONE_SHOT         (1<<3)
+
+struct gs_device_bt_const {
+       u32 feature;
+       u32 fclk_can;
+       u32 tseg1_min;
+       u32 tseg1_max;
+       u32 tseg2_min;
+       u32 tseg2_max;
+       u32 sjw_max;
+       u32 brp_min;
+       u32 brp_max;
+       u32 brp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW 1
+
+struct gs_host_frame {
+       u32 echo_id;
+       u32 can_id;
+
+       u8 can_dlc;
+       u8 channel;
+       u8 flags;
+       u8 reserved;
+
+       u8 data[8];
+} __packed;
+/* The GS USB devices make use of the same flags and masks as in
+ * linux/can.h and linux/can/error.h, and no additional mapping is necessary.
+ */
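+/*
+ * With the __packed attribute each host frame is 20 bytes on the wire:
+ * two u32 fields, four u8 fields and 8 data bytes.
+ */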
+
+/* Only send a max of GS_MAX_TX_URBS frames per channel at a time. */
+#define GS_MAX_TX_URBS 10
+/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
+#define GS_MAX_RX_URBS 30
+/* Maximum number of interfaces the driver supports per device.
+ * Current hardware supports only 2 interfaces; future hardware may differ.
+ */
+#define GS_MAX_INTF 2
+
+struct gs_tx_context {
+       struct gs_can *dev;
+       unsigned int echo_id;
+};
+
+struct gs_can {
+       struct can_priv can; /* must be the first member */
+
+       struct gs_usb *parent;
+
+       struct net_device *netdev;
+       struct usb_device *udev;
+       struct usb_interface *iface;
+
+       struct can_bittiming_const bt_const;
+       unsigned int channel;   /* channel number */
+
+       /* This lock prevents a race condition between xmit and receive. */
+       spinlock_t tx_ctx_lock;
+       struct gs_tx_context tx_context[GS_MAX_TX_URBS];
+
+       struct usb_anchor tx_submitted;
+       atomic_t active_tx_urbs;
+};
+
+/* usb interface struct */
+struct gs_usb {
+       struct gs_can *canch[GS_MAX_INTF];
+       struct usb_anchor rx_submitted;
+       atomic_t active_channels;
+       struct usb_device *udev;
+};
+
+/* 'allocate' a tx context.
+ * returns a valid tx context or NULL if there is no space.
+ */
+static struct gs_tx_context *gs_alloc_tx_context(struct gs_can *dev)
+{
+       int i = 0;
+       unsigned long flags;
+
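+       /* A slot whose echo_id equals GS_MAX_TX_URBS is free; claim it by
+        * storing its own index as the echo_id.
+        */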
+       spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+
+       for (; i < GS_MAX_TX_URBS; i++) {
+               if (dev->tx_context[i].echo_id == GS_MAX_TX_URBS) {
+                       dev->tx_context[i].echo_id = i;
+                       spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+                       return &dev->tx_context[i];
+               }
+       }
+
+       spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+       return NULL;
+}
+
+/* releases a tx context
+ */
+static void gs_free_tx_context(struct gs_tx_context *txc)
+{
+       txc->echo_id = GS_MAX_TX_URBS;
+}
+
+/* Get a tx context by id.
+ */
+static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
+{
+       unsigned long flags;
+
+       if (id < GS_MAX_TX_URBS) {
+               spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+               if (dev->tx_context[id].echo_id == id) {
+                       spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+                       return &dev->tx_context[id];
+               }
+               spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+       }
+       return NULL;
+}
+
+static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
+{
+       struct gs_device_mode *dm;
+       struct usb_interface *intf = gsdev->iface;
+       int rc;
+
+       dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+       if (!dm)
+               return -ENOMEM;
+
+       dm->mode = GS_CAN_MODE_RESET;
+
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_MODE,
+                            USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            gsdev->channel,
+                            0,
+                            dm,
+                            sizeof(*dm),
+                            1000);
+
+       return rc;
+}
+
+static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
+{
+       struct can_device_stats *can_stats = &dev->can.can_stats;
+
+       if (cf->can_id & CAN_ERR_RESTARTED) {
+               dev->can.state = CAN_STATE_ERROR_ACTIVE;
+               can_stats->restarts++;
+       } else if (cf->can_id & CAN_ERR_BUSOFF) {
+               dev->can.state = CAN_STATE_BUS_OFF;
+               can_stats->bus_off++;
+       } else if (cf->can_id & CAN_ERR_CRTL) {
+               if ((cf->data[1] & CAN_ERR_CRTL_TX_WARNING) ||
+                   (cf->data[1] & CAN_ERR_CRTL_RX_WARNING)) {
+                       dev->can.state = CAN_STATE_ERROR_WARNING;
+                       can_stats->error_warning++;
+               } else if ((cf->data[1] & CAN_ERR_CRTL_TX_PASSIVE) ||
+                          (cf->data[1] & CAN_ERR_CRTL_RX_PASSIVE)) {
+                       dev->can.state = CAN_STATE_ERROR_PASSIVE;
+                       can_stats->error_passive++;
+               } else {
+                       dev->can.state = CAN_STATE_ERROR_ACTIVE;
+               }
+       }
+}
+
+static void gs_usb_recieve_bulk_callback(struct urb *urb)
+{
+       struct gs_usb *usbcan = urb->context;
+       struct gs_can *dev;
+       struct net_device *netdev;
+       int rc;
+       struct net_device_stats *stats;
+       struct gs_host_frame *hf = urb->transfer_buffer;
+       struct gs_tx_context *txc;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+
+       BUG_ON(!usbcan);
+
+       switch (urb->status) {
+       case 0: /* success */
+               break;
+       case -ENOENT:
+       case -ESHUTDOWN:
+               return;
+       default:
+               /* do not resubmit aborted urbs, e.g. when the device goes down */
+               return;
+       }
+
+       /* device reports out of range channel id */
+       if (hf->channel >= GS_MAX_INTF)
+               goto resubmit_urb;
+
+       dev = usbcan->canch[hf->channel];
+
+       netdev = dev->netdev;
+       stats = &netdev->stats;
+
+       if (!netif_device_present(netdev))
+               return;
+
+       if (hf->echo_id == -1) { /* normal rx */
+               skb = alloc_can_skb(dev->netdev, &cf);
+               if (!skb)
+                       return;
+
+               cf->can_id = hf->can_id;
+
+               cf->can_dlc = get_can_dlc(hf->can_dlc);
+               memcpy(cf->data, hf->data, 8);
+
+               /* ERROR frames carry state information about the controller */
+               if (hf->can_id & CAN_ERR_FLAG)
+                       gs_update_state(dev, cf);
+
+               netdev->stats.rx_packets++;
+               netdev->stats.rx_bytes += hf->can_dlc;
+
+               netif_rx(skb);
+       } else { /* tx echo for a frame we sent earlier */
+               if (hf->echo_id >= GS_MAX_TX_URBS) {
+                       netdev_err(netdev,
+                                  "Unexpected out of range echo id %d\n",
+                                  hf->echo_id);
+                       goto resubmit_urb;
+               }
+
+               netdev->stats.tx_packets++;
+               netdev->stats.tx_bytes += hf->can_dlc;
+
+               txc = gs_get_tx_context(dev, hf->echo_id);
+
+               /* bad devices send bad echo_ids. */
+               if (!txc) {
+                       netdev_err(netdev,
+                                  "Unexpected unused echo id %d\n",
+                                  hf->echo_id);
+                       goto resubmit_urb;
+               }
+
+               can_get_echo_skb(netdev, hf->echo_id);
+
+               gs_free_tx_context(txc);
+
+               netif_wake_queue(netdev);
+       }
+
+       if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
+               skb = alloc_can_err_skb(netdev, &cf);
+               if (!skb)
+                       goto resubmit_urb;
+
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->can_dlc = CAN_ERR_DLC;
+               cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+               stats->rx_over_errors++;
+               stats->rx_errors++;
+               netif_rx(skb);
+       }
+
+ resubmit_urb:
+       usb_fill_bulk_urb(urb,
+                         usbcan->udev,
+                         usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
+                         hf,
+                         sizeof(struct gs_host_frame),
+                         gs_usb_recieve_bulk_callback,
+                         usbcan
+                         );
+
+       rc = usb_submit_urb(urb, GFP_ATOMIC);
+
+       /* On USB failure, take down all interfaces */
+       if (rc == -ENODEV) {
+               for (rc = 0; rc < GS_MAX_INTF; rc++) {
+                       if (usbcan->canch[rc])
+                               netif_device_detach(usbcan->canch[rc]->netdev);
+               }
+       }
+}
+
+static int gs_usb_set_bittiming(struct net_device *netdev)
+{
+       struct gs_can *dev = netdev_priv(netdev);
+       struct can_bittiming *bt = &dev->can.bittiming;
+       struct usb_interface *intf = dev->iface;
+       int rc;
+       struct gs_device_bittiming *dbt;
+
+       dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
+       if (!dbt)
+               return -ENOMEM;
+
+       dbt->prop_seg = bt->prop_seg;
+       dbt->phase_seg1 = bt->phase_seg1;
+       dbt->phase_seg2 = bt->phase_seg2;
+       dbt->sjw = bt->sjw;
+       dbt->brp = bt->brp;
+
+       /* send the bit timing parameters to the device */
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_BITTIMING,
+                            USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            dev->channel,
+                            0,
+                            dbt,
+                            sizeof(*dbt),
+                            1000);
+
+       kfree(dbt);
+
+       if (rc < 0)
+               dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
+                       rc);
+
+       return rc;
+}
+
+static void gs_usb_xmit_callback(struct urb *urb)
+{
+       struct gs_tx_context *txc = urb->context;
+       struct gs_can *dev = txc->dev;
+       struct net_device *netdev = dev->netdev;
+
+       if (urb->status)
+               netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
+
+       usb_free_coherent(urb->dev,
+                         urb->transfer_buffer_length,
+                         urb->transfer_buffer,
+                         urb->transfer_dma);
+
+       atomic_dec(&dev->active_tx_urbs);
+
+       if (!netif_device_present(netdev))
+               return;
+
+       if (netif_queue_stopped(netdev))
+               netif_wake_queue(netdev);
+}
+
+static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct gs_can *dev = netdev_priv(netdev);
+       struct net_device_stats *stats = &dev->netdev->stats;
+       struct urb *urb;
+       struct gs_host_frame *hf;
+       struct can_frame *cf;
+       int rc;
+       unsigned int idx;
+       struct gs_tx_context *txc;
+
+       if (can_dropped_invalid_skb(netdev, skb))
+               return NETDEV_TX_OK;
+
+       /* find an empty context to keep track of transmission */
+       txc = gs_alloc_tx_context(dev);
+       if (!txc)
+               return NETDEV_TX_BUSY;
+
+       /* create a URB, and a buffer for it */
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb) {
+               netdev_err(netdev, "No memory left for URB\n");
+               goto nomem_urb;
+       }
+
+       hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
+                               &urb->transfer_dma);
+       if (!hf) {
+               netdev_err(netdev, "No memory left for USB buffer\n");
+               goto nomem_hf;
+       }
+
+       idx = txc->echo_id;
+
+       if (idx >= GS_MAX_TX_URBS) {
+               netdev_err(netdev, "Invalid tx context %d\n", idx);
+               goto badidx;
+       }
+
+       hf->echo_id = idx;
+       hf->channel = dev->channel;
+
+       cf = (struct can_frame *)skb->data;
+
+       hf->can_id = cf->can_id;
+       hf->can_dlc = cf->can_dlc;
+       memcpy(hf->data, cf->data, cf->can_dlc);
+
+       usb_fill_bulk_urb(urb, dev->udev,
+                         usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
+                         hf,
+                         sizeof(*hf),
+                         gs_usb_xmit_callback,
+                         txc);
+
+       urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+       usb_anchor_urb(urb, &dev->tx_submitted);
+
+       can_put_echo_skb(skb, netdev, idx);
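+       /* the echo skb is given back via can_get_echo_skb() once the
+        * device confirms transmission in the bulk receive callback
+        */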
+
+       atomic_inc(&dev->active_tx_urbs);
+
+       rc = usb_submit_urb(urb, GFP_ATOMIC);
+       if (unlikely(rc)) {                     /* usb send failed */
+               atomic_dec(&dev->active_tx_urbs);
+
+               can_free_echo_skb(netdev, idx);
+               gs_free_tx_context(txc);
+
+               usb_unanchor_urb(urb);
+               usb_free_coherent(dev->udev,
+                                 sizeof(*hf),
+                                 hf,
+                                 urb->transfer_dma);
+
+
+               if (rc == -ENODEV) {
+                       netif_device_detach(netdev);
+               } else {
+                       netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
+                       stats->tx_dropped++;
+               }
+       } else {
+               /* Slow down tx path */
+               if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
+                       netif_stop_queue(netdev);
+       }
+
+       /* let usb core take care of this urb */
+       usb_free_urb(urb);
+
+       return NETDEV_TX_OK;
+
+ badidx:
+       usb_free_coherent(dev->udev,
+                         sizeof(*hf),
+                         hf,
+                         urb->transfer_dma);
+ nomem_hf:
+       usb_free_urb(urb);
+
+ nomem_urb:
+       gs_free_tx_context(txc);
+       dev_kfree_skb(skb);
+       stats->tx_dropped++;
+       return NETDEV_TX_OK;
+}
+
+static int gs_can_open(struct net_device *netdev)
+{
+       struct gs_can *dev = netdev_priv(netdev);
+       struct gs_usb *parent = dev->parent;
+       int rc, i;
+       struct gs_device_mode *dm;
+       u32 ctrlmode;
+
+       rc = open_candev(netdev);
+       if (rc)
+               return rc;
+
+       if (atomic_add_return(1, &parent->active_channels) == 1) {
+               for (i = 0; i < GS_MAX_RX_URBS; i++) {
+                       struct urb *urb;
+                       u8 *buf;
+
+                       /* alloc rx urb */
+                       urb = usb_alloc_urb(0, GFP_KERNEL);
+                       if (!urb) {
+                               netdev_err(netdev,
+                                          "No memory left for URB\n");
+                               return -ENOMEM;
+                       }
+
+                       /* alloc rx buffer */
+                       buf = usb_alloc_coherent(dev->udev,
+                                                sizeof(struct gs_host_frame),
+                                                GFP_KERNEL,
+                                                &urb->transfer_dma);
+                       if (!buf) {
+                               netdev_err(netdev,
+                                          "No memory left for USB buffer\n");
+                               usb_free_urb(urb);
+                               return -ENOMEM;
+                       }
+
+                       /* fill, anchor, and submit rx urb */
+                       usb_fill_bulk_urb(urb,
+                                         dev->udev,
+                                         usb_rcvbulkpipe(dev->udev,
+                                                         GSUSB_ENDPOINT_IN),
+                                         buf,
+                                         sizeof(struct gs_host_frame),
+                                         gs_usb_recieve_bulk_callback,
+                                         parent);
+                       urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+                       usb_anchor_urb(urb, &parent->rx_submitted);
+
+                       rc = usb_submit_urb(urb, GFP_KERNEL);
+                       if (rc) {
+                               if (rc == -ENODEV)
+                                       netif_device_detach(dev->netdev);
+
+                               netdev_err(netdev,
+                                          "usb_submit failed (err=%d)\n",
+                                          rc);
+
+                               usb_unanchor_urb(urb);
+                               break;
+                       }
+
+                       /* Drop reference,
+                        * USB core will take care of freeing it
+                        */
+                       usb_free_urb(urb);
+               }
+       }
+
+       dm = kmalloc(sizeof(*dm), GFP_KERNEL);
+       if (!dm)
+               return -ENOMEM;
+
+       /* flags */
+       ctrlmode = dev->can.ctrlmode;
+       dm->flags = 0;
+
+       if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
+               dm->flags |= GS_CAN_MODE_LOOP_BACK;
+       else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+               dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
+
+       /* Controller is not allowed to retry TX;
+        * this mode is unavailable on Atmel's UC3C hardware
+        */
+       if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+               dm->flags |= GS_CAN_MODE_ONE_SHOT;
+
+       if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+               dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
+       /* finally start device */
+       dm->mode = GS_CAN_MODE_START;
+       rc = usb_control_msg(interface_to_usbdev(dev->iface),
+                            usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
+                            GS_USB_BREQ_MODE,
+                            USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            dev->channel,
+                            0,
+                            dm,
+                            sizeof(*dm),
+                            1000);
+
+       if (rc < 0) {
+               netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
+               kfree(dm);
+               return rc;
+       }
+
+       kfree(dm);
+
+       dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+               netif_start_queue(netdev);
+
+       return 0;
+}
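/* Editorial note, not part of the patch: the RX URBs allocated above are
 * shared by every channel of the adapter.  Only the first gs_can_open()
 * call on a given gs_usb parent (active_channels going 0 -> 1) allocates
 * and submits them; the matching last gs_can_close() kills them again.
 */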
+
+static int gs_can_close(struct net_device *netdev)
+{
+       int rc;
+       struct gs_can *dev = netdev_priv(netdev);
+       struct gs_usb *parent = dev->parent;
+
+       netif_stop_queue(netdev);
+
+       /* Stop polling */
+       if (atomic_dec_and_test(&parent->active_channels))
+               usb_kill_anchored_urbs(&parent->rx_submitted);
+
+       /* Stop sending URBs */
+       usb_kill_anchored_urbs(&dev->tx_submitted);
+       atomic_set(&dev->active_tx_urbs, 0);
+
+       /* reset the device */
+       rc = gs_cmd_reset(parent, dev);
+       if (rc < 0)
+               netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc);
+
+       /* reset tx contexts */
+       for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+               dev->tx_context[rc].dev = dev;
+               dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+       }
+
+       /* close the netdev */
+       close_candev(netdev);
+
+       return 0;
+}
+
+static const struct net_device_ops gs_usb_netdev_ops = {
+       .ndo_open = gs_can_open,
+       .ndo_stop = gs_can_close,
+       .ndo_start_xmit = gs_can_start_xmit,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+{
+       struct gs_can *dev;
+       struct net_device *netdev;
+       int rc;
+       struct gs_device_bt_const *bt_const;
+
+       bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
+       if (!bt_const)
+               return ERR_PTR(-ENOMEM);
+
+       /* fetch bit timing constants */
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_BT_CONST,
+                            USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            channel,
+                            0,
+                            bt_const,
+                            sizeof(*bt_const),
+                            1000);
+
+       if (rc < 0) {
+               dev_err(&intf->dev,
+                       "Couldn't get bit timing const for channel (err=%d)\n",
+                       rc);
+               kfree(bt_const);
+               return ERR_PTR(rc);
+       }
+
+       /* create netdev */
+       netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
+       if (!netdev) {
+               dev_err(&intf->dev, "Couldn't allocate candev\n");
+               kfree(bt_const);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       dev = netdev_priv(netdev);
+
+       netdev->netdev_ops = &gs_usb_netdev_ops;
+
+       netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+
+       /* dev setup */
+       strcpy(dev->bt_const.name, "gs_usb");
+       dev->bt_const.tseg1_min = bt_const->tseg1_min;
+       dev->bt_const.tseg1_max = bt_const->tseg1_max;
+       dev->bt_const.tseg2_min = bt_const->tseg2_min;
+       dev->bt_const.tseg2_max = bt_const->tseg2_max;
+       dev->bt_const.sjw_max = bt_const->sjw_max;
+       dev->bt_const.brp_min = bt_const->brp_min;
+       dev->bt_const.brp_max = bt_const->brp_max;
+       dev->bt_const.brp_inc = bt_const->brp_inc;
+
+       dev->udev = interface_to_usbdev(intf);
+       dev->iface = intf;
+       dev->netdev = netdev;
+       dev->channel = channel;
+
+       init_usb_anchor(&dev->tx_submitted);
+       atomic_set(&dev->active_tx_urbs, 0);
+       spin_lock_init(&dev->tx_ctx_lock);
+       for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+               dev->tx_context[rc].dev = dev;
+               dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+       }
+
+       /* can setup */
+       dev->can.state = CAN_STATE_STOPPED;
+       dev->can.clock.freq = bt_const->fclk_can;
+       dev->can.bittiming_const = &dev->bt_const;
+       dev->can.do_set_bittiming = gs_usb_set_bittiming;
+
+       dev->can.ctrlmode_supported = 0;
+
+       if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
+               dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+       if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
+               dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+
+       if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+               dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+       if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
+               dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+
+       kfree(bt_const);
+
+       SET_NETDEV_DEV(netdev, &intf->dev);
+
+       rc = register_candev(dev->netdev);
+       if (rc) {
+               free_candev(dev->netdev);
+               dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
+               return ERR_PTR(rc);
+       }
+
+       return dev;
+}
+
+static void gs_destroy_candev(struct gs_can *dev)
+{
+       unregister_candev(dev->netdev);
+       free_candev(dev->netdev);
+       usb_kill_anchored_urbs(&dev->tx_submitted);
+       kfree(dev);
+}
+
+static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+       struct gs_usb *dev;
+       int rc = -ENOMEM;
+       unsigned int icount, i;
+       struct gs_host_config *hconf;
+       struct gs_device_config *dconf;
+
+       hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
+       if (!hconf)
+               return -ENOMEM;
+
+       hconf->byte_order = 0x0000beef;
+
+       /* send host config */
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_HOST_FORMAT,
+                            USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            1,
+                            intf->altsetting[0].desc.bInterfaceNumber,
+                            hconf,
+                            sizeof(*hconf),
+                            1000);
+
+       kfree(hconf);
+
+       if (rc < 0) {
+               dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
+                       rc);
+               return rc;
+       }
+
+       dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
+       if (!dconf)
+               return -ENOMEM;
+
+       /* read device config */
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_DEVICE_CONFIG,
+                            USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            1,
+                            intf->altsetting[0].desc.bInterfaceNumber,
+                            dconf,
+                            sizeof(*dconf),
+                            1000);
+       if (rc < 0) {
+               dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
+                       rc);
+
+               kfree(dconf);
+
+               return rc;
+       }
+
+       icount = dconf->icount + 1;
+
+       kfree(dconf);
+
+       dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
+
+       if (icount > GS_MAX_INTF) {
+               dev_err(&intf->dev,
+                       "Driver cannot handle more than %d CAN interfaces\n",
+                       GS_MAX_INTF);
+               return -EINVAL;
+       }
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       init_usb_anchor(&dev->rx_submitted);
+
+       atomic_set(&dev->active_channels, 0);
+
+       usb_set_intfdata(intf, dev);
+       dev->udev = interface_to_usbdev(intf);
+
+       for (i = 0; i < icount; i++) {
+               dev->canch[i] = gs_make_candev(i, intf);
+               if (IS_ERR_OR_NULL(dev->canch[i])) {
+                       /* on failure destroy previously created candevs */
+                       icount = i;
+                       for (i = 0; i < icount; i++) {
+                               gs_destroy_candev(dev->canch[i]);
+                               dev->canch[i] = NULL;
+                       }
+                       kfree(dev);
+                       return rc;
+               }
+               dev->canch[i]->parent = dev;
+       }
+
+       return 0;
+}
+
+static void gs_usb_disconnect(struct usb_interface *intf)
+{
+       unsigned i;
+       struct gs_usb *dev = usb_get_intfdata(intf);
+       usb_set_intfdata(intf, NULL);
+
+       if (!dev) {
+               dev_err(&intf->dev, "Disconnect (nodata)\n");
+               return;
+       }
+
+       for (i = 0; i < GS_MAX_INTF; i++) {
+               struct gs_can *can = dev->canch[i];
+
+               if (!can)
+                       continue;
+
+               gs_destroy_candev(can);
+       }
+
+       usb_kill_anchored_urbs(&dev->rx_submitted);
+}
+
+static const struct usb_device_id gs_usb_table[] = {
+       {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
+       {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gs_usb_table);
+
+static struct usb_driver gs_usb_driver = {
+       .name       = "gs_usb",
+       .probe      = gs_usb_probe,
+       .disconnect = gs_usb_disconnect,
+       .id_table   = gs_usb_table,
+};
+
+module_usb_driver(gs_usb_driver);
+
+MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
+MODULE_DESCRIPTION(
+"Socket CAN device driver for Geschwister Schneider Technologie-, "
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+MODULE_LICENSE("GPL v2");
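As a purely illustrative aside, not part of the patch: once a driver like the
one above registers its candev, userspace reaches it through the standard
SocketCAN raw socket API.  A minimal sketch, assuming the interface came up as
"can0" and was already configured and brought up with ip link:

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
				   .data = { 0xde, 0xad } };
	struct ifreq ifr;
	int s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return 1;

	/* resolve the interface index; the name "can0" is an assumption */
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;

	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* send one classic CAN frame; a driver that sets IFF_ECHO will
	 * echo successfully transmitted frames back for local delivery
	 */
	if (write(s, &frame, sizeof(frame)) != sizeof(frame))
		return 1;

	close(s);
	return 0;
}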
index 4ca46edc061d761169a85fc58937dee449a4776f..541fb7a05625aaf889089704ec155956629e77c8 100644 (file)
@@ -53,6 +53,8 @@
 #define USB_OEM_MERCURY_PRODUCT_ID     34
 #define USB_OEM_LEAF_PRODUCT_ID                35
 #define USB_CAN_R_PRODUCT_ID           39
+#define USB_LEAF_LITE_V2_PRODUCT_ID    288
+#define USB_MINI_PCIE_HS_PRODUCT_ID    289
 
 /* USB devices features */
 #define KVASER_HAS_SILENT_MODE         BIT(0)
@@ -356,6 +358,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
                .driver_info = KVASER_HAS_TXRX_ERRORS },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
                .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -379,38 +383,43 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
        void *buf;
        int actual_len;
        int err;
-       int pos = 0;
+       int pos;
+       unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
 
        buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
-       err = usb_bulk_msg(dev->udev,
-                          usb_rcvbulkpipe(dev->udev,
-                                          dev->bulk_in->bEndpointAddress),
-                          buf, RX_BUFFER_SIZE, &actual_len,
-                          USB_RECV_TIMEOUT);
-       if (err < 0)
-               goto end;
+       do {
+               err = usb_bulk_msg(dev->udev,
+                                  usb_rcvbulkpipe(dev->udev,
+                                       dev->bulk_in->bEndpointAddress),
+                                  buf, RX_BUFFER_SIZE, &actual_len,
+                                  USB_RECV_TIMEOUT);
+               if (err < 0)
+                       goto end;
 
-       while (pos <= actual_len - MSG_HEADER_LEN) {
-               tmp = buf + pos;
+               pos = 0;
+               while (pos <= actual_len - MSG_HEADER_LEN) {
+                       tmp = buf + pos;
 
-               if (!tmp->len)
-                       break;
+                       if (!tmp->len)
+                               break;
 
-               if (pos + tmp->len > actual_len) {
-                       dev_err(dev->udev->dev.parent, "Format error\n");
-                       break;
-               }
+                       if (pos + tmp->len > actual_len) {
+                               dev_err(dev->udev->dev.parent,
+                                       "Format error\n");
+                               break;
+                       }
 
-               if (tmp->id == id) {
-                       memcpy(msg, tmp, tmp->len);
-                       goto end;
-               }
+                       if (tmp->id == id) {
+                               memcpy(msg, tmp, tmp->len);
+                               goto end;
+                       }
 
-               pos += tmp->len;
-       }
+                       pos += tmp->len;
+               }
+       } while (time_before(jiffies, to));
 
        err = -EINVAL;
 
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
new file mode 100644 (file)
index 0000000..5e8b560
--- /dev/null
@@ -0,0 +1,1208 @@
+/* Xilinx CAN device driver
+ *
+ * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ * Copyright (C) 2009 PetaLogix. All rights reserved.
+ *
+ * Description:
+ * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+
+#define DRIVER_NAME    "xilinx_can"
+
+/* CAN registers set */
+enum xcan_reg {
+       XCAN_SRR_OFFSET         = 0x00, /* Software reset */
+       XCAN_MSR_OFFSET         = 0x04, /* Mode select */
+       XCAN_BRPR_OFFSET        = 0x08, /* Baud rate prescaler */
+       XCAN_BTR_OFFSET         = 0x0C, /* Bit timing */
+       XCAN_ECR_OFFSET         = 0x10, /* Error counter */
+       XCAN_ESR_OFFSET         = 0x14, /* Error status */
+       XCAN_SR_OFFSET          = 0x18, /* Status */
+       XCAN_ISR_OFFSET         = 0x1C, /* Interrupt status */
+       XCAN_IER_OFFSET         = 0x20, /* Interrupt enable */
+       XCAN_ICR_OFFSET         = 0x24, /* Interrupt clear */
+       XCAN_TXFIFO_ID_OFFSET   = 0x30, /* TX FIFO ID */
+       XCAN_TXFIFO_DLC_OFFSET  = 0x34, /* TX FIFO DLC */
+       XCAN_TXFIFO_DW1_OFFSET  = 0x38, /* TX FIFO Data Word 1 */
+       XCAN_TXFIFO_DW2_OFFSET  = 0x3C, /* TX FIFO Data Word 2 */
+       XCAN_RXFIFO_ID_OFFSET   = 0x50, /* RX FIFO ID */
+       XCAN_RXFIFO_DLC_OFFSET  = 0x54, /* RX FIFO DLC */
+       XCAN_RXFIFO_DW1_OFFSET  = 0x58, /* RX FIFO Data Word 1 */
+       XCAN_RXFIFO_DW2_OFFSET  = 0x5C, /* RX FIFO Data Word 2 */
+};
+
+/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
+#define XCAN_SRR_CEN_MASK              0x00000002 /* CAN enable */
+#define XCAN_SRR_RESET_MASK            0x00000001 /* Soft Reset the CAN core */
+#define XCAN_MSR_LBACK_MASK            0x00000002 /* Loop back mode select */
+#define XCAN_MSR_SLEEP_MASK            0x00000001 /* Sleep mode select */
+#define XCAN_BRPR_BRP_MASK             0x000000FF /* Baud rate prescaler */
+#define XCAN_BTR_SJW_MASK              0x00000180 /* Synchronous jump width */
+#define XCAN_BTR_TS2_MASK              0x00000070 /* Time segment 2 */
+#define XCAN_BTR_TS1_MASK              0x0000000F /* Time segment 1 */
+#define XCAN_ECR_REC_MASK              0x0000FF00 /* Receive error counter */
+#define XCAN_ECR_TEC_MASK              0x000000FF /* Transmit error counter */
+#define XCAN_ESR_ACKER_MASK            0x00000010 /* ACK error */
+#define XCAN_ESR_BERR_MASK             0x00000008 /* Bit error */
+#define XCAN_ESR_STER_MASK             0x00000004 /* Stuff error */
+#define XCAN_ESR_FMER_MASK             0x00000002 /* Form error */
+#define XCAN_ESR_CRCER_MASK            0x00000001 /* CRC error */
+#define XCAN_SR_TXFLL_MASK             0x00000400 /* TX FIFO is full */
+#define XCAN_SR_ESTAT_MASK             0x00000180 /* Error status */
+#define XCAN_SR_ERRWRN_MASK            0x00000040 /* Error warning */
+#define XCAN_SR_NORMAL_MASK            0x00000008 /* Normal mode */
+#define XCAN_SR_LBACK_MASK             0x00000002 /* Loop back mode */
+#define XCAN_SR_CONFIG_MASK            0x00000001 /* Configuration mode */
+#define XCAN_IXR_TXFEMP_MASK           0x00004000 /* TX FIFO Empty */
+#define XCAN_IXR_WKUP_MASK             0x00000800 /* Wake up interrupt */
+#define XCAN_IXR_SLP_MASK              0x00000400 /* Sleep interrupt */
+#define XCAN_IXR_BSOFF_MASK            0x00000200 /* Bus off interrupt */
+#define XCAN_IXR_ERROR_MASK            0x00000100 /* Error interrupt */
+#define XCAN_IXR_RXNEMP_MASK           0x00000080 /* RX FIFO NotEmpty intr */
+#define XCAN_IXR_RXOFLW_MASK           0x00000040 /* RX FIFO Overflow intr */
+#define XCAN_IXR_RXOK_MASK             0x00000010 /* Message received intr */
+#define XCAN_IXR_TXFLL_MASK            0x00000004 /* Tx FIFO Full intr */
+#define XCAN_IXR_TXOK_MASK             0x00000002 /* TX successful intr */
+#define XCAN_IXR_ARBLST_MASK           0x00000001 /* Arbitration lost intr */
+#define XCAN_IDR_ID1_MASK              0xFFE00000 /* Standard msg identifier */
+#define XCAN_IDR_SRR_MASK              0x00100000 /* Substitute remote TXreq */
+#define XCAN_IDR_IDE_MASK              0x00080000 /* Identifier extension */
+#define XCAN_IDR_ID2_MASK              0x0007FFFE /* Extended message ident */
+#define XCAN_IDR_RTR_MASK              0x00000001 /* Remote TX request */
+#define XCAN_DLCR_DLC_MASK             0xF0000000 /* Data length code */
+
+#define XCAN_INTR_ALL          (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
+                                XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
+                                XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
+                                XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+
+/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
+#define XCAN_BTR_SJW_SHIFT             7  /* Synchronous jump width */
+#define XCAN_BTR_TS2_SHIFT             4  /* Time segment 2 */
+#define XCAN_IDR_ID1_SHIFT             21 /* Standard Messg Identifier */
+#define XCAN_IDR_ID2_SHIFT             1  /* Extended Message Identifier */
+#define XCAN_DLCR_DLC_SHIFT            28 /* Data length code */
+#define XCAN_ESR_REC_SHIFT             8  /* Rx Error Count */
+
+/* CAN frame length constants */
+#define XCAN_FRAME_MAX_DATA_LEN                8
+#define XCAN_TIMEOUT                   (1 * HZ)
+
+/**
+ * struct xcan_priv - This definition defines the CAN driver instance
+ * @can:                       CAN private data structure.
+ * @tx_head:                   Tx CAN packets ready to send on the queue
+ * @tx_tail:                   Tx CAN packets successfully sent on the queue
+ * @tx_max:                    Maximum number of packets the driver can send
+ * @napi:                      NAPI structure
+ * @read_reg:                  For reading data from CAN registers
+ * @write_reg:                 For writing data to CAN registers
+ * @dev:                       Network device data structure
+ * @reg_base:                  Ioremapped address to registers
+ * @irq_flags:                 For request_irq()
+ * @bus_clk:                   Pointer to struct clk
+ * @can_clk:                   Pointer to struct clk
+ */
+struct xcan_priv {
+       struct can_priv can;
+       unsigned int tx_head;
+       unsigned int tx_tail;
+       unsigned int tx_max;
+       struct napi_struct napi;
+       u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
+       void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
+                       u32 val);
+       struct net_device *dev;
+       void __iomem *reg_base;
+       unsigned long irq_flags;
+       struct clk *bus_clk;
+       struct clk *can_clk;
+};
+
+/* CAN Bittiming constants as per Xilinx CAN specs */
+static const struct can_bittiming_const xcan_bittiming_const = {
+       .name = DRIVER_NAME,
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 256,
+       .brp_inc = 1,
+};
+
+/**
+ * xcan_write_reg_le - Write a value to the device register little endian
+ * @priv:      Driver private data structure
+ * @reg:       Register offset
+ * @val:       Value to write at the Register offset
+ *
+ * Write data to the particular CAN register
+ */
+static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
+                       u32 val)
+{
+       iowrite32(val, priv->reg_base + reg);
+}
+
+/**
+ * xcan_read_reg_le - Read a value from the device register little endian
+ * @priv:      Driver private data structure
+ * @reg:       Register offset
+ *
+ * Read data from the particular CAN register
+ * Return: value read from the CAN register
+ */
+static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
+{
+       return ioread32(priv->reg_base + reg);
+}
+
+/**
+ * xcan_write_reg_be - Write a value to the device register big endian
+ * @priv:      Driver private data structure
+ * @reg:       Register offset
+ * @val:       Value to write at the Register offset
+ *
+ * Write data to the particular CAN register
+ */
+static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
+                       u32 val)
+{
+       iowrite32be(val, priv->reg_base + reg);
+}
+
+/**
+ * xcan_read_reg_be - Read a value from the device register big endian
+ * @priv:      Driver private data structure
+ * @reg:       Register offset
+ *
+ * Read data from the particular CAN register
+ * Return: value read from the CAN register
+ */
+static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
+{
+       return ioread32be(priv->reg_base + reg);
+}
+
+/**
+ * set_reset_mode - Resets the CAN device mode
+ * @ndev:      Pointer to net_device structure
+ *
+ * This is the driver reset mode routine. The driver
+ * enters into configuration mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int set_reset_mode(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       unsigned long timeout;
+
+       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+
+       timeout = jiffies + XCAN_TIMEOUT;
+       while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
+               if (time_after(jiffies, timeout)) {
+                       netdev_warn(ndev, "timed out for config mode\n");
+                       return -ETIMEDOUT;
+               }
+               usleep_range(500, 10000);
+       }
+
+       return 0;
+}
+
+/**
+ * xcan_set_bittiming - CAN set bit timing routine
+ * @ndev:      Pointer to net_device structure
+ *
+ * This is the driver set bittiming routine.
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_set_bittiming(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       struct can_bittiming *bt = &priv->can.bittiming;
+       u32 btr0, btr1;
+       u32 is_config_mode;
+
+       /* Check whether Xilinx CAN is in configuration mode.
+        * It cannot set bit timing if Xilinx CAN is not in configuration mode.
+        */
+       is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
+                               XCAN_SR_CONFIG_MASK;
+       if (!is_config_mode) {
+               netdev_alert(ndev,
+                    "BUG! Cannot set bittiming - CAN is not in config mode\n");
+               return -EPERM;
+       }
+
+       /* Setting Baud Rate prescaler value in BRPR Register */
+       btr0 = (bt->brp - 1);
+
+       /* Setting Time Segment 1 in BTR Register */
+       btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
+
+       /* Setting Time Segment 2 in BTR Register */
+       btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;
+
+       /* Setting Synchronous jump width in BTR Register */
+       btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;
+
+       priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
+       priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
+
+       netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
+                       priv->read_reg(priv, XCAN_BRPR_OFFSET),
+                       priv->read_reg(priv, XCAN_BTR_OFFSET));
+
+       return 0;
+}
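/* Editorial worked example, not part of the patch -- the timing values
 * below are assumptions.  With brp = 6, prop_seg + phase_seg1 = 10,
 * phase_seg2 = 5 and sjw = 2, xcan_set_bittiming() would program:
 *
 *   BRPR = 6 - 1                                     = 0x05
 *   BTR  = (10 - 1)                  (TS1, bits 3:0) = 0x09
 *        | (5 - 1) << XCAN_BTR_TS2_SHIFT  (bits 6:4) | 0x40
 *        | (2 - 1) << XCAN_BTR_SJW_SHIFT  (bits 8:7) | 0x80
 *                                                    = 0x0C9
 */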
+
+/**
+ * xcan_chip_start - This is the driver's start routine
+ * @ndev:      Pointer to net_device structure
+ *
+ * This is the driver's start routine.
+ * Based on the state of the CAN device it puts
+ * the CAN device into the proper mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_chip_start(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 err, reg_msr, reg_sr_mask;
+       unsigned long timeout;
+
+       /* Check if it is in reset mode */
+       err = set_reset_mode(ndev);
+       if (err < 0)
+               return err;
+
+       err = xcan_set_bittiming(ndev);
+       if (err < 0)
+               return err;
+
+       /* Enable interrupts */
+       priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);
+
+       /* Check whether it is loopback mode or normal mode  */
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+               reg_msr = XCAN_MSR_LBACK_MASK;
+               reg_sr_mask = XCAN_SR_LBACK_MASK;
+       } else {
+               reg_msr = 0x0;
+               reg_sr_mask = XCAN_SR_NORMAL_MASK;
+       }
+
+       priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
+       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
+
+       timeout = jiffies + XCAN_TIMEOUT;
+       while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
+               if (time_after(jiffies, timeout)) {
+                       netdev_warn(ndev,
+                               "timed out for correct mode\n");
+                       return -ETIMEDOUT;
+               }
+       }
+       netdev_dbg(ndev, "status: 0x%08x\n",
+                       priv->read_reg(priv, XCAN_SR_OFFSET));
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+       return 0;
+}
+
+/**
+ * xcan_do_set_mode - This sets the mode of the driver
+ * @ndev:      Pointer to net_device structure
+ * @mode:      Tells the mode of the driver
+ *
+ * This checks the driver's state and calls the
+ * corresponding mode-setting routine.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+       int ret;
+
+       switch (mode) {
+       case CAN_MODE_START:
+               ret = xcan_chip_start(ndev);
+               if (ret < 0) {
+                       netdev_err(ndev, "xcan_chip_start failed!\n");
+                       return ret;
+               }
+               netif_wake_queue(ndev);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * xcan_start_xmit - Starts the transmission
+ * @skb:       sk_buff pointer that contains data to be Txed
+ * @ndev:      Pointer to net_device structure
+ *
+ * This function is invoked from upper layers to initiate transmission. This
+ * function uses the next available free txbuff and populates its fields to
+ * start the transmission.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       struct can_frame *cf = (struct can_frame *)skb->data;
+       u32 id, dlc, data[2] = {0, 0};
+
+       if (can_dropped_invalid_skb(ndev, skb))
+               return NETDEV_TX_OK;
+
+       /* Check if the TX buffer is full */
+       if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
+                       XCAN_SR_TXFLL_MASK)) {
+               netif_stop_queue(ndev);
+               netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
+               return NETDEV_TX_BUSY;
+       }
+
+       /* Watch carefully on the bit sequence */
+       if (cf->can_id & CAN_EFF_FLAG) {
+               /* Extended CAN ID format */
+               id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
+                       XCAN_IDR_ID2_MASK;
+               id |= (((cf->can_id & CAN_EFF_MASK) >>
+                       (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
+                       XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
+
+               /* The substitute remote TX request bit should be "1"
+                * for extended frames as in the Xilinx CAN datasheet
+                */
+               id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
+
+               if (cf->can_id & CAN_RTR_FLAG)
+                       /* Extended frames remote TX request */
+                       id |= XCAN_IDR_RTR_MASK;
+       } else {
+               /* Standard CAN ID format */
+               id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
+                       XCAN_IDR_ID1_MASK;
+
+               if (cf->can_id & CAN_RTR_FLAG)
+                       /* Standard frames remote TX request */
+                       id |= XCAN_IDR_SRR_MASK;
+       }
+
+       dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
+
+       if (cf->can_dlc > 0)
+               data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
+       if (cf->can_dlc > 4)
+               data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+
+       can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+       priv->tx_head++;
+
+       /* Write the Frame to Xilinx CAN TX FIFO */
+       priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
+       /* If the CAN frame is an RTR frame, this write triggers transmission */
+       priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
+       if (!(cf->can_id & CAN_RTR_FLAG)) {
+               priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
+               /* If the CAN frame is a Standard/Extended frame this
+                * write triggers transmission
+                */
+               priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
+               stats->tx_bytes += cf->can_dlc;
+       }
+
+       /* Check if the TX buffer is full */
+       if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
+               netif_stop_queue(ndev);
+
+       return NETDEV_TX_OK;
+}
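/* Editorial worked example, not part of the patch -- the frame values are
 * assumptions.  A standard frame with can_id = 0x123 and can_dlc = 8 would
 * be written to the TX FIFO as:
 *
 *   id  = (0x123 << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK = 0x24600000
 *   dlc = 8 << XCAN_DLCR_DLC_SHIFT                          = 0x80000000
 *
 * i.e. the 11-bit identifier lands in IDR bits 31:21 and the data length
 * code in DLCR bits 31:28.
 */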
+
+/**
+ * xcan_rx - Is called from the CAN ISR to complete the received
+ *            frame processing
+ * @ndev:      Pointer to net_device structure
+ *
+ * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
+ * does minimal processing and invokes "netif_receive_skb" to complete further
+ * processing.
+ * Return: 1 on success and 0 on failure.
+ */
+static int xcan_rx(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u32 id_xcan, dlc, data[2] = {0, 0};
+
+       skb = alloc_can_skb(ndev, &cf);
+       if (unlikely(!skb)) {
+               stats->rx_dropped++;
+               return 0;
+       }
+
+       /* Read a frame from Xilinx zynq CANPS */
+       id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
+       dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
+                               XCAN_DLCR_DLC_SHIFT;
+
+       /* Change Xilinx CAN data length format to socketCAN data format */
+       cf->can_dlc = get_can_dlc(dlc);
+
+       /* Change Xilinx CAN ID format to socketCAN ID format */
+       if (id_xcan & XCAN_IDR_IDE_MASK) {
+               /* The received frame is an Extended format frame */
+               cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
+               cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
+                               XCAN_IDR_ID2_SHIFT;
+               cf->can_id |= CAN_EFF_FLAG;
+               if (id_xcan & XCAN_IDR_RTR_MASK)
+                       cf->can_id |= CAN_RTR_FLAG;
+       } else {
+               /* The received frame is a standard format frame */
+               cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
+                               XCAN_IDR_ID1_SHIFT;
+               if (id_xcan & XCAN_IDR_SRR_MASK)
+                       cf->can_id |= CAN_RTR_FLAG;
+       }
+
+       if (!(id_xcan & XCAN_IDR_SRR_MASK)) {
+               data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
+               data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
+
+               /* Change Xilinx CAN data format to socketCAN data format */
+               if (cf->can_dlc > 0)
+                       *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
+               if (cf->can_dlc > 4)
+                       *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
+       }
+
+       stats->rx_bytes += cf->can_dlc;
+       stats->rx_packets++;
+       netif_receive_skb(skb);
+
+       return 1;
+}
+
+/**
+ * xcan_err_interrupt - error frame Isr
+ * @ndev:      net_device pointer
+ * @isr:       interrupt status register value
+ *
+ * This is the CAN error interrupt and it will
+ * check the type of error and forward the error
+ * frame to upper layers.
+ */
+static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u32 err_status, status, txerr = 0, rxerr = 0;
+
+       skb = alloc_can_err_skb(ndev, &cf);
+
+       err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
+       priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
+       txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
+       rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
+                       XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
+       status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+       if (isr & XCAN_IXR_BSOFF_MASK) {
+               priv->can.state = CAN_STATE_BUS_OFF;
+               priv->can.can_stats.bus_off++;
+               /* Leave device in Config Mode in bus-off state */
+               priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+               can_bus_off(ndev);
+               if (skb)
+                       cf->can_id |= CAN_ERR_BUSOFF;
+       } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
+               priv->can.state = CAN_STATE_ERROR_PASSIVE;
+               priv->can.can_stats.error_passive++;
+               if (skb) {
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] = (rxerr > 127) ?
+                                       CAN_ERR_CRTL_RX_PASSIVE :
+                                       CAN_ERR_CRTL_TX_PASSIVE;
+                       cf->data[6] = txerr;
+                       cf->data[7] = rxerr;
+               }
+       } else if (status & XCAN_SR_ERRWRN_MASK) {
+               priv->can.state = CAN_STATE_ERROR_WARNING;
+               priv->can.can_stats.error_warning++;
+               if (skb) {
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] |= (txerr > rxerr) ?
+                                       CAN_ERR_CRTL_TX_WARNING :
+                                       CAN_ERR_CRTL_RX_WARNING;
+                       cf->data[6] = txerr;
+                       cf->data[7] = rxerr;
+               }
+       }
+
+       /* Check for Arbitration lost interrupt */
+       if (isr & XCAN_IXR_ARBLST_MASK) {
+               priv->can.can_stats.arbitration_lost++;
+               if (skb) {
+                       cf->can_id |= CAN_ERR_LOSTARB;
+                       cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
+               }
+       }
+
+       /* Check for RX FIFO Overflow interrupt */
+       if (isr & XCAN_IXR_RXOFLW_MASK) {
+               stats->rx_over_errors++;
+               stats->rx_errors++;
+               priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+               if (skb) {
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+               }
+       }
+
+       /* Check for error interrupt */
+       if (isr & XCAN_IXR_ERROR_MASK) {
+               if (skb) {
+                       cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+                       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+               }
+
+               /* Check for Ack error interrupt */
+               if (err_status & XCAN_ESR_ACKER_MASK) {
+                       stats->tx_errors++;
+                       if (skb) {
+                               cf->can_id |= CAN_ERR_ACK;
+                               cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+                       }
+               }
+
+               /* Check for Bit error interrupt */
+               if (err_status & XCAN_ESR_BERR_MASK) {
+                       stats->tx_errors++;
+                       if (skb) {
+                               cf->can_id |= CAN_ERR_PROT;
+                               cf->data[2] = CAN_ERR_PROT_BIT;
+                       }
+               }
+
+               /* Check for Stuff error interrupt */
+               if (err_status & XCAN_ESR_STER_MASK) {
+                       stats->rx_errors++;
+                       if (skb) {
+                               cf->can_id |= CAN_ERR_PROT;
+                               cf->data[2] = CAN_ERR_PROT_STUFF;
+                       }
+               }
+
+               /* Check for Form error interrupt */
+               if (err_status & XCAN_ESR_FMER_MASK) {
+                       stats->rx_errors++;
+                       if (skb) {
+                               cf->can_id |= CAN_ERR_PROT;
+                               cf->data[2] = CAN_ERR_PROT_FORM;
+                       }
+               }
+
+               /* Check for CRC error interrupt */
+               if (err_status & XCAN_ESR_CRCER_MASK) {
+                       stats->rx_errors++;
+                       if (skb) {
+                               cf->can_id |= CAN_ERR_PROT;
+                               cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ |
+                                               CAN_ERR_PROT_LOC_CRC_DEL;
+                       }
+               }
+               priv->can.can_stats.bus_error++;
+       }
+
+       if (skb) {
+               stats->rx_packets++;
+               stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
+       }
+
+       netdev_dbg(ndev, "%s: error status register:0x%x\n",
+                       __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
+}
+
+/**
+ * xcan_state_interrupt - It will check the state of the CAN device
+ * @ndev:      net_device pointer
+ * @isr:       interrupt status register value
+ *
+ * This checks the state of the CAN device
+ * and puts the device into the appropriate state.
+ */
+static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+
+       /* Check for Sleep interrupt if set put CAN device in sleep state */
+       if (isr & XCAN_IXR_SLP_MASK)
+               priv->can.state = CAN_STATE_SLEEPING;
+
+       /* Check for Wake up interrupt if set put CAN device in Active state */
+       if (isr & XCAN_IXR_WKUP_MASK)
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_rx_poll - Poll routine for rx packets (NAPI)
+ * @napi:      napi structure pointer
+ * @quota:     Max number of rx packets to be processed.
+ *
+ * This is the poll routine for the rx part.
+ * It will process packets up to the quota value.
+ *
+ * Return: number of packets received
+ */
+static int xcan_rx_poll(struct napi_struct *napi, int quota)
+{
+       struct net_device *ndev = napi->dev;
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 isr, ier;
+       int work_done = 0;
+
+       isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+       while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
+               if (isr & XCAN_IXR_RXOK_MASK) {
+                       priv->write_reg(priv, XCAN_ICR_OFFSET,
+                               XCAN_IXR_RXOK_MASK);
+                       work_done += xcan_rx(ndev);
+               } else {
+                       priv->write_reg(priv, XCAN_ICR_OFFSET,
+                               XCAN_IXR_RXNEMP_MASK);
+                       break;
+               }
+               priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
+               isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+       }
+
+       if (work_done)
+               can_led_event(ndev, CAN_LED_EVENT_RX);
+
+       if (work_done < quota) {
+               napi_complete(napi);
+               ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+               ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+               priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+       }
+       return work_done;
+}
+
+/**
+ * xcan_tx_interrupt - Tx Done Isr
+ * @ndev:      net_device pointer
+ * @isr:       Interrupt status register value
+ */
+static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+
+       while ((priv->tx_head - priv->tx_tail > 0) &&
+                       (isr & XCAN_IXR_TXOK_MASK)) {
+               priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+               can_get_echo_skb(ndev, priv->tx_tail %
+                                       priv->tx_max);
+               priv->tx_tail++;
+               stats->tx_packets++;
+               isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+       }
+       can_led_event(ndev, CAN_LED_EVENT_TX);
+       netif_wake_queue(ndev);
+}
+
+/**
+ * xcan_interrupt - CAN Isr
+ * @irq:       irq number
+ * @dev_id:    device id pointer
+ *
+ * This is the Xilinx CAN ISR. It checks for the type of interrupt
+ * and invokes the corresponding ISR.
+ *
+ * Return:
+ * IRQ_NONE - If no interrupt is pending, IRQ_HANDLED otherwise
+ */
+static irqreturn_t xcan_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = (struct net_device *)dev_id;
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 isr, ier;
+
+       /* Get the interrupt status from Xilinx CAN */
+       isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+       if (!isr)
+               return IRQ_NONE;
+
+       /* Check for the type of interrupt and process it */
+       if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
+               priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
+                               XCAN_IXR_WKUP_MASK));
+               xcan_state_interrupt(ndev, isr);
+       }
+
+       /* Check for Tx interrupt and process it */
+       if (isr & XCAN_IXR_TXOK_MASK)
+               xcan_tx_interrupt(ndev, isr);
+
+       /* Check for the type of error interrupt and process it */
+       if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+                       XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
+               priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
+                               XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
+                               XCAN_IXR_ARBLST_MASK));
+               xcan_err_interrupt(ndev, isr);
+       }
+
+       /* Check for the type of receive interrupt and process it */
+       if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+               ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+               ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+               priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+               napi_schedule(&priv->napi);
+       }
+       return IRQ_HANDLED;
+}
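/* Editorial note, not part of the patch: the receive path is split in two.
 * xcan_interrupt() masks RXOK/RXNEMP in the IER and schedules NAPI;
 * xcan_rx_poll() then drains the RX FIFO with those interrupts off and
 * re-enables them once it finishes below the quota.
 */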
+
+/**
+ * xcan_chip_stop - Driver stop routine
+ * @ndev:      Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It will disable the
+ * interrupts and put the device into configuration mode.
+ */
+static void xcan_chip_stop(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 ier;
+
+       /* Disable interrupts and leave the can in configuration mode */
+       ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+       ier &= ~XCAN_INTR_ALL;
+       priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+       priv->can.state = CAN_STATE_STOPPED;
+}
+
+/**
+ * xcan_open - Driver open routine
+ * @ndev:      Pointer to net_device structure
+ *
+ * This is the driver open routine.
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_open(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
+                       ndev->name, ndev);
+       if (ret < 0) {
+               netdev_err(ndev, "irq allocation for CAN failed\n");
+               goto err;
+       }
+
+       ret = clk_prepare_enable(priv->can_clk);
+       if (ret) {
+               netdev_err(ndev, "unable to enable device clock\n");
+               goto err_irq;
+       }
+
+       ret = clk_prepare_enable(priv->bus_clk);
+       if (ret) {
+               netdev_err(ndev, "unable to enable bus clock\n");
+               goto err_can_clk;
+       }
+
+       /* Set chip into reset mode */
+       ret = set_reset_mode(ndev);
+       if (ret < 0) {
+               netdev_err(ndev, "mode resetting failed!\n");
+               goto err_bus_clk;
+       }
+
+       /* Common open */
+       ret = open_candev(ndev);
+       if (ret)
+               goto err_bus_clk;
+
+       ret = xcan_chip_start(ndev);
+       if (ret < 0) {
+               netdev_err(ndev, "xcan_chip_start failed!\n");
+               goto err_candev;
+       }
+
+       can_led_event(ndev, CAN_LED_EVENT_OPEN);
+       napi_enable(&priv->napi);
+       netif_start_queue(ndev);
+
+       return 0;
+
+err_candev:
+       close_candev(ndev);
+err_bus_clk:
+       clk_disable_unprepare(priv->bus_clk);
+err_can_clk:
+       clk_disable_unprepare(priv->can_clk);
+err_irq:
+       free_irq(ndev->irq, ndev);
+err:
+       return ret;
+}
+
+/**
+ * xcan_close - Driver close routine
+ * @ndev:      Pointer to net_device structure
+ *
+ * Return: 0 always
+ */
+static int xcan_close(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+       napi_disable(&priv->napi);
+       xcan_chip_stop(ndev);
+       clk_disable_unprepare(priv->bus_clk);
+       clk_disable_unprepare(priv->can_clk);
+       free_irq(ndev->irq, ndev);
+       close_candev(ndev);
+
+       can_led_event(ndev, CAN_LED_EVENT_STOP);
+
+       return 0;
+}
+
+/**
+ * xcan_get_berr_counter - error counter routine
+ * @ndev:      Pointer to net_device structure
+ * @bec:       Pointer to can_berr_counter structure
+ *
+ * This is the driver error counter routine.
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_get_berr_counter(const struct net_device *ndev,
+                                       struct can_berr_counter *bec)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       ret = clk_prepare_enable(priv->can_clk);
+       if (ret)
+               goto err;
+
+       ret = clk_prepare_enable(priv->bus_clk);
+       if (ret)
+               goto err_clk;
+
+       bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
+       bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
+                       XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
+
+       clk_disable_unprepare(priv->bus_clk);
+       clk_disable_unprepare(priv->can_clk);
+
+       return 0;
+
+err_clk:
+       clk_disable_unprepare(priv->can_clk);
+err:
+       return ret;
+}
+
+
+static const struct net_device_ops xcan_netdev_ops = {
+       .ndo_open       = xcan_open,
+       .ndo_stop       = xcan_close,
+       .ndo_start_xmit = xcan_start_xmit,
+};
+
+/**
+ * xcan_suspend - Suspend method for the driver
+ * @dev:       Address of the platform_device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused xcan_suspend(struct device *dev)
+{
+       struct platform_device *pdev = dev_get_drvdata(dev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct xcan_priv *priv = netdev_priv(ndev);
+
+       if (netif_running(ndev)) {
+               netif_stop_queue(ndev);
+               netif_device_detach(ndev);
+       }
+
+       priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
+       priv->can.state = CAN_STATE_SLEEPING;
+
+       clk_disable(priv->bus_clk);
+       clk_disable(priv->can_clk);
+
+       return 0;
+}
+
+/**
+ * xcan_resume - Resume from suspend
+ * @dev:       Address of the platform device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused xcan_resume(struct device *dev)
+{
+       struct platform_device *pdev = dev_get_drvdata(dev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct xcan_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       ret = clk_enable(priv->bus_clk);
+       if (ret) {
+               dev_err(dev, "Cannot enable clock.\n");
+               return ret;
+       }
+       ret = clk_enable(priv->can_clk);
+       if (ret) {
+               dev_err(dev, "Cannot enable clock.\n");
+               clk_disable_unprepare(priv->bus_clk);
+               return ret;
+       }
+
+       priv->write_reg(priv, XCAN_MSR_OFFSET, 0);
+       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       if (netif_running(ndev)) {
+               netif_device_attach(ndev);
+               netif_start_queue(ndev);
+       }
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume);
+
+/**
+ * xcan_probe - Platform registration call
+ * @pdev:      Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_probe(struct platform_device *pdev)
+{
+       struct resource *res; /* IO mem resources */
+       struct net_device *ndev;
+       struct xcan_priv *priv;
+       void __iomem *addr;
+       int ret, rx_max, tx_max;
+
+       /* Get the virtual base address for the device */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       addr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(addr)) {
+               ret = PTR_ERR(addr);
+               goto err;
+       }
+
+       ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+       if (ret < 0)
+               goto err;
+
+       ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
+       if (ret < 0)
+               goto err;
+
+       /* Create a CAN device instance */
+       ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
+       if (!ndev)
+               return -ENOMEM;
+
+       priv = netdev_priv(ndev);
+       priv->dev = ndev;
+       priv->can.bittiming_const = &xcan_bittiming_const;
+       priv->can.do_set_mode = xcan_do_set_mode;
+       priv->can.do_get_berr_counter = xcan_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+                                       CAN_CTRLMODE_BERR_REPORTING;
+       priv->reg_base = addr;
+       priv->tx_max = tx_max;
+
+       /* Get IRQ for the device */
+       ndev->irq = platform_get_irq(pdev, 0);
+       ndev->flags |= IFF_ECHO;        /* We support local echo */
+
+       platform_set_drvdata(pdev, ndev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       ndev->netdev_ops = &xcan_netdev_ops;
+
+       /* Getting the CAN can_clk info */
+       priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+       if (IS_ERR(priv->can_clk)) {
+               dev_err(&pdev->dev, "Device clock not found.\n");
+               ret = PTR_ERR(priv->can_clk);
+               goto err_free;
+       }
+       /* Check for type of CAN device */
+       if (of_device_is_compatible(pdev->dev.of_node,
+                                   "xlnx,zynq-can-1.0")) {
+               priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
+               if (IS_ERR(priv->bus_clk)) {
+                       dev_err(&pdev->dev, "bus clock not found\n");
+                       ret = PTR_ERR(priv->bus_clk);
+                       goto err_free;
+               }
+       } else {
+               priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+               if (IS_ERR(priv->bus_clk)) {
+                       dev_err(&pdev->dev, "bus clock not found\n");
+                       ret = PTR_ERR(priv->bus_clk);
+                       goto err_free;
+               }
+       }
+
+       ret = clk_prepare_enable(priv->can_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to enable device clock\n");
+               goto err_free;
+       }
+
+       ret = clk_prepare_enable(priv->bus_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to enable bus clock\n");
+               goto err_unprepare_disable_dev;
+       }
+
+       priv->write_reg = xcan_write_reg_le;
+       priv->read_reg = xcan_read_reg_le;
+
+       if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
+               priv->write_reg = xcan_write_reg_be;
+               priv->read_reg = xcan_read_reg_be;
+       }
+
+       priv->can.clock.freq = clk_get_rate(priv->can_clk);
+
+       netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
+
+       ret = register_candev(ndev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
+               goto err_unprepare_disable_busclk;
+       }
+
+       devm_can_led_init(ndev);
+       clk_disable_unprepare(priv->bus_clk);
+       clk_disable_unprepare(priv->can_clk);
+       netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+                       priv->reg_base, ndev->irq, priv->can.clock.freq,
+                       priv->tx_max);
+
+       return 0;
+
+err_unprepare_disable_busclk:
+       clk_disable_unprepare(priv->bus_clk);
+err_unprepare_disable_dev:
+       clk_disable_unprepare(priv->can_clk);
+err_free:
+       free_candev(ndev);
+err:
+       return ret;
+}
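/* Editorial note, not part of the patch: the probe above detects the bus
 * attachment order empirically -- after reset the status register should
 * read XCAN_SR_CONFIG_MASK (0x1); if the little-endian accessors do not
 * see that value, the core is assumed to be wired byte-swapped and the
 * big-endian accessors are used instead.
 */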
+
+/**
+ * xcan_remove - Unregister the device after releasing the resources
+ * @pdev:      Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ * Return: 0 always
+ */
+static int xcan_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct xcan_priv *priv = netdev_priv(ndev);
+
+       if (set_reset_mode(ndev) < 0)
+               netdev_err(ndev, "mode resetting failed!\n");
+
+       unregister_candev(ndev);
+       netif_napi_del(&priv->napi);
+       free_candev(ndev);
+
+       return 0;
+}
+
+/* Match table for OF platform binding */
+static struct of_device_id xcan_of_match[] = {
+       { .compatible = "xlnx,zynq-can-1.0", },
+       { .compatible = "xlnx,axi-can-1.00.a", },
+       { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
+static struct platform_driver xcan_driver = {
+       .probe = xcan_probe,
+       .remove = xcan_remove,
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = DRIVER_NAME,
+               .pm = &xcan_dev_pm_ops,
+               .of_match_table = xcan_of_match,
+       },
+};
+
+module_platform_driver(xcan_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx CAN interface");
index 41ee5b6ae91751239d81e4613f336aff02fbff84..69c42513dd724bb06ef40eaf4b5e4bbea800936d 100644 (file)
@@ -289,7 +289,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
 
 static int mv88e6123_61_65_setup(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int i;
        int ret;
 
index dadfafba64e9ac0aad3de55d84e5bdea3ceb00a0..953bc6a49e594471342270e7281a8e263f0086de 100644 (file)
@@ -155,7 +155,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
 
 static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int addr = REG_PORT(p);
        u16 val;
 
@@ -274,7 +274,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 
 static int mv88e6131_setup(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int i;
        int ret;
 
index 17314ed9456d32c961aea31db10a650a2ffb1392..9ce2146346b6cda7175229d0f493f004730ab1b1 100644 (file)
@@ -74,7 +74,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
 
 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->smi_mutex);
@@ -118,7 +118,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
 
 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->smi_mutex);
@@ -256,7 +256,7 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
 
 static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->ppu_mutex);
@@ -283,7 +283,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
 
 static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
        /* Schedule a timer to re-enable the PHY polling unit. */
        mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
@@ -292,7 +292,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
 
 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
        mutex_init(&ps->ppu_mutex);
        INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
@@ -463,7 +463,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
                                 int nr_stats, struct mv88e6xxx_hw_stat *stats,
                                 int port, uint64_t *data)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
        int i;
 
index 35df0b9e6848b0f1bd964f7b93ed2f18988c6b08..a968654b631d28a860dcd83b8c9b728189c196b2 100644 (file)
@@ -534,7 +534,7 @@ static int el3_common_init(struct net_device *dev)
        /* The EL3-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
 
        err = register_netdev(dev);
        if (err) {
index 063557e037f21b8b43fa3d1dfe64a19c870d4789..f18647c2355990ba0e55b6a2690c0cdbb819da43 100644 (file)
@@ -218,7 +218,7 @@ static int tc589_probe(struct pcmcia_device *link)
        dev->netdev_ops = &el3_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
-       SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+       dev->ethtool_ops = &netdev_ethtool_ops;
 
        return tc589_config(link);
 }
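Several drivers in this section get the same mechanical conversion: the SET_ETHTOOL_OPS() macro is dropped in favour of assigning dev->ethtool_ops directly. The macro was essentially a thin wrapper around that assignment, so the two spellings should be equivalent; a short sketch with a hypothetical ops table:

    /* Sketch of the conversion pattern; my_ethtool_ops is illustrative. */
    static const struct ethtool_ops my_ethtool_ops = {
            /* .get_drvinfo, .get_link, ... */
    };

    static void my_setup(struct net_device *dev)
    {
            /* was: SET_ETHTOOL_OPS(dev, &my_ethtool_ops); */
            dev->ethtool_ops = &my_ethtool_ops;
    }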
index 465cc7108d8a5e5bbb2c34df44f7bd0329d02d6b..e13b04624dedcecb7285943787946f2dfadd610c 100644 (file)
@@ -2435,7 +2435,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
        dev->watchdog_timeo     = TX_TIMEOUT;
 
-       SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
+       dev->ethtool_ops = &typhoon_ethtool_ops;
 
        /* We can handle scatter gather, up to 16 entries, and
         * we can do IP checksumming (only version 4, doh...)
index 455d4c399b52168ce6f8691160991e500216ff54..1d162ccb473341af256279b29d466bc56429472b 100644 (file)
@@ -157,7 +157,7 @@ static void ax_reset_8390(struct net_device *dev)
 
        /* This check _should_not_ be necessary, omit eventually. */
        while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
-               if (jiffies - reset_start_time > 2 * HZ / 100) {
+               if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
                        netdev_warn(dev, "%s: did not complete.\n", __func__);
                        break;
                }
@@ -293,7 +293,7 @@ static void ax_block_output(struct net_device *dev, int count,
        dma_start = jiffies;
 
        while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
-               if (jiffies - dma_start > 2 * HZ / 100) {               /* 20ms */
+               if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
                        netdev_warn(dev, "timeout waiting for Tx RDC.\n");
                        ax_reset_8390(dev);
                        ax_NS8390_init(dev, 1);
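The ax88796 hunks above replace open-coded jiffies subtraction with time_after(), which stays correct when the jiffies counter wraps around. A minimal sketch of the wrap-safe timeout idiom with the same ~20 ms budget; my_hw_ready() is a hypothetical readiness test, not a function from this driver:

    /* Sketch: wrap-safe polling with a deadline computed once up front. */
    static int my_wait_ready(struct net_device *dev)
    {
            unsigned long deadline = jiffies + 2 * HZ / 100;     /* ~20 ms */

            while (!my_hw_ready(dev)) {
                    if (time_after(jiffies, deadline)) {
                            netdev_warn(dev, "timed out waiting for hardware\n");
                            return -ETIMEDOUT;
                    }
                    cpu_relax();
            }
            return 0;
    }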
index 39b26fe28d1051ff916faceb747da7a64dac711f..d7401017a3f10940f3a662bebc555d835be3ce4b 100644 (file)
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+       tristate "Beckhoff CX5020 EtherCAT master support"
+       depends on PCI
+       ---help---
+         Driver for the EtherCAT master module located on the CCAT FPGA
+         that can be found on the Beckhoff CX5020 and possibly other
+         Beckhoff CX series industrial PCs.
+
+         To compile this driver as a module, choose M here. The module
+         will be called ec_bhf.
+
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
index 545d0b3b9cb422b2fefa7122b074cd869a9085c2..35190e36c4568e6803279f878a6aa866685ca1be 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
 obj-$(CONFIG_DM9000) += davicom/
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_NET_VENDOR_DEC) += dec/
index 171d73c1d3c22de3209ca6c48b7978c35b2a38b0..40dbbf740331c49b13ac0d64ef977be6ce361993 100644 (file)
@@ -784,7 +784,7 @@ static int starfire_init_one(struct pci_dev *pdev,
 
        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
 
        netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
 
index 1517e9df5ba16c66f6f63bc10f8a0e1b460a95a3..9a6991be9749f75b4107d979cd31518cda837ec0 100644 (file)
@@ -476,7 +476,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
        dev->watchdog_timeo = 5*HZ;
 
        dev->netdev_ops = &ace_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
+       dev->ethtool_ops = &ace_ethtool_ops;
 
        /* we only display this string ONCE */
        if (!boards_found)
index 80c1ab74a4b8f36483ebe710aa56ffb3879d52bc..fdddba51473efce74edbf9c0befae1ebdfbe1824 100644 (file)
@@ -1,5 +1,6 @@
 config ALTERA_TSE
        tristate "Altera Triple-Speed Ethernet MAC support"
+       depends on HAS_DMA
        select PHYLIB
        ---help---
          This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
index d4a187e453698bbe96589921f97d5ea38bc3d5fb..3eff2fd3997e36ef128983fcaf080f595a693715 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
 altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
index 3df18669ea306580994cb877044a000aa4b7e6f9..0fb986ba32905a5dac78c926333c9765ec4678de 100644 (file)
@@ -18,6 +18,7 @@
 #include "altera_utils.h"
 #include "altera_tse.h"
 #include "altera_msgdmahw.h"
+#include "altera_msgdma.h"
 
 /* No initialization work to do for MSGDMA */
 int msgdma_initialize(struct altera_tse_private *priv)
@@ -29,21 +30,23 @@ void msgdma_uninitialize(struct altera_tse_private *priv)
 {
 }
 
+void msgdma_start_rxdma(struct altera_tse_private *priv)
+{
+}
+
 void msgdma_reset(struct altera_tse_private *priv)
 {
        int counter;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
 
        /* Reset Rx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+               msgdma_csroffs(status));
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&rxcsr->status,
+               if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -54,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Rx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
 
        /* Reset Tx mSGDMA */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
-       iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+               msgdma_csroffs(status));
+
+       csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+               msgdma_csroffs(control));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(&txcsr->status,
+               if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
                                     MSGDMA_CSR_STAT_RESETTING))
                        break;
                udelay(1);
@@ -73,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
                           "TSE Tx mSGDMA resetting bit never cleared!\n");
 
        /* clear all status bits */
-       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+       csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_disable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_disable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                     MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+       tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+                   MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->rx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct msgdma_csr *csr = priv->tx_dma_csr;
-       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+       csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 /* return 0 to indicate transmit is pending */
 int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
-       iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
-       iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
-       iowrite32(0, &desc->write_addr_lo);
-       iowrite32(0, &desc->write_addr_hi);
-       iowrite32(buffer->len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
-       iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+       csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_lo));
+       csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+               msgdma_descroffs(read_addr_hi));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+       csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+               msgdma_descroffs(stride));
+       csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+               msgdma_descroffs(control));
        return 0;
 }
 
@@ -133,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
        u32 ready = 0;
        u32 inuse;
        u32 status;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
 
        /* Get number of sent descriptors */
-       inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+       inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+                       & 0xffff;
 
        if (inuse) { /* Tx FIFO is not empty */
                ready = priv->tx_prod - priv->tx_cons - inuse - 1;
        } else {
                /* Check for buffered last packet */
-               status = ioread32(&txcsr->status);
+               status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
                if (status & MSGDMA_CSR_STAT_BUSY)
                        ready = priv->tx_prod - priv->tx_cons - 1;
                else
@@ -154,10 +159,9 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 
 /* Put buffer to the mSGDMA RX FIFO
  */
-int msgdma_add_rx_desc(struct altera_tse_private *priv,
+void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        struct tse_buffer *rxbuffer)
 {
-       struct msgdma_extended_desc *desc = priv->rx_dma_desc;
        u32 len = priv->rx_dma_buf_sz;
        dma_addr_t dma_addr = rxbuffer->dma_addr;
        u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -167,15 +171,16 @@ int msgdma_add_rx_desc(struct altera_tse_private *priv,
                        | MSGDMA_DESC_CTL_TR_ERR_IRQ
                        | MSGDMA_DESC_CTL_GO);
 
-       iowrite32(0, &desc->read_addr_lo);
-       iowrite32(0, &desc->read_addr_hi);
-       iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
-       iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
-       iowrite32(len, &desc->len);
-       iowrite32(0, &desc->burst_seq_num);
-       iowrite32(0x00010001, &desc->stride);
-       iowrite32(control, &desc->control);
-       return 1;
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+       csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_lo));
+       csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+               msgdma_descroffs(write_addr_hi));
+       csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+       csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+       csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+       csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
 }
 
 /* status is returned on upper 16 bits,
@@ -186,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
        u32 rxstatus = 0;
        u32 pktlength;
        u32 pktstatus;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
-       struct msgdma_response *rxresp =
-               (struct msgdma_response *)priv->rx_dma_resp;
-
-       if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
-               pktlength = ioread32(&rxresp->bytes_transferred);
-               pktstatus = ioread32(&rxresp->status);
+
+       if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+           & 0xffff) {
+               pktlength = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(bytes_transferred));
+               pktstatus = csrrd32(priv->rx_dma_resp,
+                                   msgdma_respoffs(status));
                rxstatus = pktstatus;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
index 7f0f5bf2bba2f42952ea69307c44a334f6784b59..42cf61c81057be8f4c3f2213dfdd9e89de7dee15 100644 (file)
@@ -25,10 +25,11 @@ void msgdma_disable_txirq(struct altera_tse_private *);
 void msgdma_clear_rxirq(struct altera_tse_private *);
 void msgdma_clear_txirq(struct altera_tse_private *);
 u32 msgdma_tx_completions(struct altera_tse_private *);
-int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
+void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
 int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
 u32 msgdma_rx_status(struct altera_tse_private *);
 int msgdma_initialize(struct altera_tse_private *);
 void msgdma_uninitialize(struct altera_tse_private *);
+void msgdma_start_rxdma(struct altera_tse_private *);
 
 #endif /*  __ALTERA_MSGDMA_H__ */
index d7b59ba4019c1fe1914b2b5c0643e464fcdff62d..e335626e1b6b5288c4f3d4cc8f45d79dcd56655d 100644 (file)
 #ifndef __ALTERA_MSGDMAHW_H__
 #define __ALTERA_MSGDMAHW_H__
 
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
-       u32 read_addr;  /* data buffer source address */
-       u32 write_addr; /* data buffer destination address */
-       u32 len;        /* the number of bytes to transfer per descriptor */
-       u32 control;    /* characteristics of the transfer */
-};
-
 /* mSGDMA extended descriptor format
  */
 struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
        u32 status;
 };
 
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
 /* mSGDMA response register bit definitions
  */
 #define MSGDMA_RESP_EARLY_TERM BIT(8)
index 0ee96639ae44e7a238ac36682868443caa7c43c3..580553d42d34fd773139cda6076f00d7bc40cce1 100644 (file)
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"
 
-static void sgdma_descrip(struct sgdma_descrip *desc,
-                         struct sgdma_descrip *ndesc,
-                         dma_addr_t ndesc_phys,
-                         dma_addr_t raddr,
-                         dma_addr_t waddr,
-                         u16 length,
-                         int generate_eop,
-                         int rfixed,
-                         int wfixed);
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
+                               dma_addr_t ndesc_phys,
+                               dma_addr_t raddr,
+                               dma_addr_t waddr,
+                               u16 length,
+                               int generate_eop,
+                               int rfixed,
+                               int wfixed);
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                             struct sgdma_descrip *desc);
+                             struct sgdma_descrip __iomem *desc);
 
 static int sgdma_async_read(struct altera_tse_private *priv);
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
 
 static int sgdma_txbusy(struct altera_tse_private *priv);
 
@@ -64,18 +64,23 @@ queue_rx_peekhead(struct altera_tse_private *priv);
 
 int sgdma_initialize(struct altera_tse_private *priv)
 {
-       priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+       priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
+                     SGDMA_CTRLREG_INTEN;
 
        priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+                     SGDMA_CTRLREG_INTEN |
                      SGDMA_CTRLREG_ILASTD;
 
+       priv->sgdmadesclen = sizeof(struct sgdma_descrip);
+
        INIT_LIST_HEAD(&priv->txlisthd);
        INIT_LIST_HEAD(&priv->rxlisthd);
 
        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;
 
-       priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+       priv->rxdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);
 
        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -84,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
                return -EINVAL;
        }
 
-       priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+       priv->txdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);
 
        if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -93,6 +99,16 @@ int sgdma_initialize(struct altera_tse_private *priv)
                return -EINVAL;
        }
 
+       /* Initialize descriptor memory to all 0's, sync memory to cache */
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
+
+       dma_sync_single_for_device(priv->device, priv->txdescphys,
+                                  priv->txdescmem, DMA_TO_DEVICE);
+
+       dma_sync_single_for_device(priv->device, priv->rxdescphys,
+                                  priv->rxdescmem, DMA_TO_DEVICE);
+
        return 0;
 }
 
@@ -112,58 +128,48 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
  */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-       u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
-       u32 txdescriplen   = priv->txdescmem;
-       u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
-       u32 rxdescriplen   = priv->rxdescmem;
-       struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
-       struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
        /* Initialize descriptor memory to 0 */
-       memset(ptxdescripmem, 0, txdescriplen);
-       memset(prxdescripmem, 0, rxdescriplen);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
-       iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-       iowrite32(0, &ptxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
 
-       iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-       iowrite32(0, &prxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 }
 
+/* For SGDMA, interrupts remain enabled after initially enabling,
+ * so no need to provide implementations for abstract enable
+ * and disable
+ */
+
 void sgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }
 
 void sgdma_enable_txirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-       priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }
 
-/* for SGDMA, RX interrupts remain enabled after enabling */
 void sgdma_disable_rxirq(struct altera_tse_private *priv)
 {
 }
 
-/* for SGDMA, TX interrupts remain enabled after enabling */
 void sgdma_disable_txirq(struct altera_tse_private *priv)
 {
 }
 
 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
 }
 
 /* transmits buffer through SGDMA. Returns number of buffers
@@ -173,28 +179,27 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
  */
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-       int pktstx = 0;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->tx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
                return 0;
 
-       sgdma_descrip(cdesc,                    /* current descriptor */
-                     ndesc,                    /* next descriptor */
-                     sgdma_txphysaddr(priv, ndesc),
-                     buffer->dma_addr,         /* address of packet to xmit */
-                     0,                        /* write addr 0 for tx dma */
-                     buffer->len,              /* length of packet */
-                     SGDMA_CONTROL_EOP,        /* Generate EOP */
-                     0,                        /* read fixed */
-                     SGDMA_CONTROL_WR_FIXED);  /* Generate SOP */
+       sgdma_setup_descrip(cdesc,                      /* current descriptor */
+                           ndesc,                      /* next descriptor */
+                           sgdma_txphysaddr(priv, ndesc),
+                           buffer->dma_addr,           /* address of packet to xmit */
+                           0,                          /* write addr 0 for tx dma */
+                           buffer->len,                /* length of packet */
+                           SGDMA_CONTROL_EOP,          /* Generate EOP */
+                           0,                          /* read fixed */
+                           SGDMA_CONTROL_WR_FIXED);    /* Generate SOP */
 
-       pktstx = sgdma_async_write(priv, cdesc);
+       sgdma_async_write(priv, cdesc);
 
        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);
@@ -208,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
        u32 ready = 0;
-       struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
 
        if (!sgdma_txbusy(priv) &&
-           ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+           ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+            & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }
@@ -219,11 +224,15 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv)
        return ready;
 }
 
-int sgdma_add_rx_desc(struct altera_tse_private *priv,
-                     struct tse_buffer *rxbuffer)
+void sgdma_start_rxdma(struct altera_tse_private *priv)
+{
+       sgdma_async_read(priv);
+}
+
+void sgdma_add_rx_desc(struct altera_tse_private *priv,
+                      struct tse_buffer *rxbuffer)
 {
        queue_rx(priv, rxbuffer);
-       return sgdma_async_read(priv);
 }
 
 /* status is returned on upper 16 bits,
@@ -231,38 +240,62 @@ int sgdma_add_rx_desc(struct altera_tse_private *priv,
  */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
-       struct sgdma_descrip *desc = NULL;
-       int pktsrx;
-       unsigned int rxstatus = 0;
-       unsigned int pktlength = 0;
-       unsigned int pktstatus = 0;
+       struct sgdma_descrip __iomem *base =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *desc = NULL;
        struct tse_buffer *rxbuffer = NULL;
+       unsigned int rxstatus = 0;
 
-       dma_sync_single_for_cpu(priv->device,
-                               priv->rxdescphys,
-                               priv->rxdescmem,
-                               DMA_BIDIRECTIONAL);
+       u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
 
        desc = &base[0];
-       if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
-           (desc->status & SGDMA_STATUS_EOP)) {
-               pktlength = desc->bytes_xferred;
-               pktstatus = desc->status & 0x3f;
-               rxstatus = pktstatus;
+       if (sts & SGDMA_STSREG_EOP) {
+               unsigned int pktlength = 0;
+               unsigned int pktstatus = 0;
+               dma_sync_single_for_cpu(priv->device,
+                                       priv->rxdescphys,
+                                       priv->sgdmadesclen,
+                                       DMA_FROM_DEVICE);
+
+               pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+               pktstatus = csrrd8(desc, sgdma_descroffs(status));
+               rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
 
-               desc->status = 0;
-
-               rxbuffer = dequeue_rx(priv);
-               if (rxbuffer == NULL)
+               if (rxstatus) {
+                       csrwr8(0, desc, sgdma_descroffs(status));
+
+                       rxbuffer = dequeue_rx(priv);
+                       if (rxbuffer == NULL)
+                               netdev_info(priv->dev,
+                                           "sgdma rx and rx queue empty!\n");
+
+                       /* Clear control */
+                       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
+                       /* clear status */
+                       csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
+
+                       /* kick the rx sgdma after reaping this descriptor */
+                       sgdma_async_read(priv);
+
+               } else {
+                       /* If the SGDMA indicated an end of packet on recv,
+                        * then it's expected that the rxstatus from the
+                        * descriptor is non-zero - meaning a valid packet
+                        * with a nonzero length, or an error has been
+                        * indicated. If not, then all we can do is signal
+                        * an error and return no packet received. Most likely
+                        * there is a system design error, or an error in the
+                        * underlying kernel (cache or cache management problem).
+                        */
                        netdev_err(priv->dev,
-                                  "sgdma rx and rx queue empty!\n");
-
-               /* kick the rx sgdma after reaping this descriptor */
-               pktsrx = sgdma_async_read(priv);
+                                  "SGDMA RX Error Info: %x, %x, %x\n",
+                                  sts, csrrd8(desc, sgdma_descroffs(status)),
+                                  rxstatus);
+               }
+       } else if (sts == 0) {
+               sgdma_async_read(priv);
        }
 
        return rxstatus;
@@ -270,38 +303,41 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 
 
 /* Private functions */
-static void sgdma_descrip(struct sgdma_descrip *desc,
-                         struct sgdma_descrip *ndesc,
-                         dma_addr_t ndesc_phys,
-                         dma_addr_t raddr,
-                         dma_addr_t waddr,
-                         u16 length,
-                         int generate_eop,
-                         int rfixed,
-                         int wfixed)
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
+                               dma_addr_t ndesc_phys,
+                               dma_addr_t raddr,
+                               dma_addr_t waddr,
+                               u16 length,
+                               int generate_eop,
+                               int rfixed,
+                               int wfixed)
 {
        /* Clear the next descriptor as not owned by hardware */
-       u32 ctrl = ndesc->control;
+
+       u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-       ndesc->control = ctrl;
+       csrwr8(ctrl, ndesc, sgdma_descroffs(control));
 
-       ctrl = 0;
        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;
 
        /* Channel is implicitly zero, initialized to 0 by default */
-
-       desc->raddr = raddr;
-       desc->waddr = waddr;
-       desc->next = lower_32_bits(ndesc_phys);
-       desc->control = ctrl;
-       desc->status = 0;
-       desc->rburst = 0;
-       desc->wburst = 0;
-       desc->bytes = length;
-       desc->bytes_xferred = 0;
+       csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+       csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+       csrwr32(0, desc, sgdma_descroffs(pad1));
+       csrwr32(0, desc, sgdma_descroffs(pad2));
+       csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+       csrwr8(ctrl, desc, sgdma_descroffs(control));
+       csrwr8(0, desc, sgdma_descroffs(status));
+       csrwr8(0, desc, sgdma_descroffs(wburst));
+       csrwr8(0, desc, sgdma_descroffs(rburst));
+       csrwr16(length, desc, sgdma_descroffs(bytes));
+       csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
 }
 
 /* If hardware is busy, don't restart async read.
@@ -312,48 +348,42 @@ static void sgdma_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
-
-       unsigned int sts = ioread32(&csr->status);
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
        struct tse_buffer *rxbuffer = NULL;
 
        if (!sgdma_rxbusy(priv)) {
                rxbuffer = queue_rx_peekhead(priv);
-               if (rxbuffer == NULL)
+               if (rxbuffer == NULL) {
+                       netdev_err(priv->dev, "no rx buffers available\n");
                        return 0;
-
-               sgdma_descrip(cdesc,            /* current descriptor */
-                             ndesc,            /* next descriptor */
-                             sgdma_rxphysaddr(priv, ndesc),
-                             0,                /* read addr 0 for rx dma */
-                             rxbuffer->dma_addr, /* write addr for rx dma */
-                             0,                /* read 'til EOP */
-                             0,                /* EOP: NA for rx dma */
-                             0,                /* read fixed: NA for rx dma */
-                             0);               /* SOP: NA for rx DMA */
-
-               /* clear control and status */
-               iowrite32(0, &csr->control);
-
-               /* If status available, clear those bits */
-               if (sts & 0xf)
-                       iowrite32(0xf, &csr->status);
+               }
+
+               sgdma_setup_descrip(cdesc,              /* current descriptor */
+                                   ndesc,              /* next descriptor */
+                                   sgdma_rxphysaddr(priv, ndesc),
+                                   0,                  /* read addr 0 for rx dma */
+                                   rxbuffer->dma_addr, /* write addr for rx dma */
+                                   0,                  /* read 'til EOP */
+                                   0,                  /* EOP: NA for rx dma */
+                                   0,                  /* read fixed: NA for rx dma */
+                                   0);                 /* SOP: NA for rx DMA */
 
                dma_sync_single_for_device(priv->device,
                                           priv->rxdescphys,
-                                          priv->rxdescmem,
-                                          DMA_BIDIRECTIONAL);
+                                          priv->sgdmadesclen,
+                                          DMA_TO_DEVICE);
 
-               iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-                         &csr->next_descrip);
+               csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(next_descrip));
 
-               iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-                         &csr->control);
+               csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(control));
 
                return 1;
        }
@@ -362,32 +392,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-                            struct sgdma_descrip *desc)
+                            struct sgdma_descrip __iomem *desc)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
        if (sgdma_txbusy(priv))
                return 0;
 
        /* clear control and status */
-       iowrite32(0, &csr->control);
-       iowrite32(0x1f, &csr->status);
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
-                                  priv->txdescmem, DMA_TO_DEVICE);
+                                  priv->sgdmadesclen, DMA_TO_DEVICE);
 
-       iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-                 &csr->next_descrip);
+       csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+               priv->tx_dma_csr,
+               sgdma_csroffs(next_descrip));
 
-       iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-                 &csr->control);
+       csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+               priv->tx_dma_csr,
+               sgdma_csroffs(control));
 
        return 1;
 }
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -396,7 +426,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
 {
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -485,8 +515,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+       return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+                      & SGDMA_STSREG_BUSY;
 }
 
 /* waits for the tx sgdma to finish its current operation, returns 0
@@ -495,13 +525,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
        int delay = 0;
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
 
        /* if DMA is busy, wait for the current transaction to finish */
-       while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+       while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+               & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);
 
-       if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+       if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+           & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
index 07d471729dc4978deee7fc592962999ec1eff1f3..584977e29ef944e1132f3ec779d7cdf1f9572160 100644 (file)
@@ -26,10 +26,11 @@ void sgdma_clear_rxirq(struct altera_tse_private *);
 void sgdma_clear_txirq(struct altera_tse_private *);
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
 u32 sgdma_tx_completions(struct altera_tse_private *);
-int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
 void sgdma_status(struct altera_tse_private *);
 u32 sgdma_rx_status(struct altera_tse_private *);
 int sgdma_initialize(struct altera_tse_private *);
 void sgdma_uninitialize(struct altera_tse_private *);
+void sgdma_start_rxdma(struct altera_tse_private *);
 
 #endif /*  __ALTERA_SGDMA_H__ */
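These header hunks change add_rx_desc() to return void and add a start_rxdma() hook for both the SGDMA and mSGDMA back ends: descriptors are now only queued, and receive DMA is kicked explicitly through the ops table. A hedged sketch of how a caller would go through such a hook via the dmaops pointer in the private struct; the call-site name my_kick_rx is illustrative:

    /* Sketch: invoke the new start_rxdma hook through the DMA ops table. */
    static void my_kick_rx(struct altera_tse_private *priv)
    {
            if (priv->dmaops->start_rxdma)
                    priv->dmaops->start_rxdma(priv);
    }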
index ba3334f353836cd0441d537ba5fda8a28bea171e..85bc33b218d946f557647d1a73f9699e3189827b 100644 (file)
 
 /* SGDMA descriptor structure */
 struct sgdma_descrip {
-       unsigned int    raddr; /* address of data to be read */
-       unsigned int    pad1;
-       unsigned int    waddr;
-       unsigned int    pad2;
-       unsigned int    next;
-       unsigned int    pad3;
-       unsigned short  bytes;
-       unsigned char   rburst;
-       unsigned char   wburst;
-       unsigned short  bytes_xferred;  /* 16 bits, bytes xferred */
+       u32     raddr; /* address of data to be read */
+       u32     pad1;
+       u32     waddr;
+       u32     pad2;
+       u32     next;
+       u32     pad3;
+       u16     bytes;
+       u8      rburst;
+       u8      wburst;
+       u16     bytes_xferred;  /* 16 bits, bytes xferred */
 
        /* bit 0: error
         * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
         * bit 6: reserved
         * bit 7: status eop for recv case
         */
-       unsigned char   status;
+       u8      status;
 
        /* bit 0: eop
         * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
         * bits 3,4,5,6: Channel (always 0)
         * bit 7: hardware owned
         */
-       unsigned char   control;
+       u8      control;
 } __packed;
 
 
@@ -101,6 +101,8 @@ struct sgdma_csr {
        u32     pad3[3];
 };
 
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
 
 #define SGDMA_STSREG_ERR       BIT(0) /* Error */
 #define SGDMA_STSREG_EOP       BIT(1) /* EOP */
index 8feeed05de0e14829718a1bec85fbb94af6699e6..2adb24d4523c915d3b7d87f1294ead36757cea50 100644 (file)
@@ -58,6 +58,8 @@
 /* MAC function configuration default settings */
 #define ALTERA_TSE_TX_IPG_LENGTH       12
 
+#define ALTERA_TSE_PAUSE_QUANTA                0xffff
+
 #define GET_BIT_VALUE(v, bit)          (((v) >> (bit)) & 0x1)
 
 /* MAC Command_Config Register Bit Definitions
@@ -355,6 +357,8 @@ struct altera_tse_mac {
        u32 reserved5[42];
 };
 
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
 /* Transmit and Receive Command Registers Bit Definitions
  */
 #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC                BIT(17)
@@ -390,10 +394,11 @@ struct altera_dmaops {
        void (*clear_rxirq)(struct altera_tse_private *);
        int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
        u32 (*tx_completions)(struct altera_tse_private *);
-       int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
+       void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
        u32 (*get_rx_status)(struct altera_tse_private *);
        int (*init_dma)(struct altera_tse_private *);
        void (*uninit_dma)(struct altera_tse_private *);
+       void (*start_rxdma)(struct altera_tse_private *);
 };
 
 /* This structure is private to each device.
@@ -453,6 +458,7 @@ struct altera_tse_private {
        u32 rxctrlreg;
        dma_addr_t rxdescphys;
        dma_addr_t txdescphys;
+       size_t sgdmadesclen;
 
        struct list_head txlisthd;
        struct list_head rxlisthd;
@@ -483,4 +489,49 @@ struct altera_tse_private {
  */
 void altera_tse_set_ethtool_ops(struct net_device *);
 
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+       return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+       void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+       writeb(val, paddr);
+}
+
 #endif /* __ALTERA_TSE_H__ */
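The inline helpers added above, together with the tse_csroffs()/sgdma_csroffs()/msgdma_csroffs() offsetof macros, replace direct dereferences of __iomem struct pointers with readl()/writel() at a computed byte offset, which keeps the __iomem annotations (and the -D__CHECK_ENDIAN__ flag added in the Makefile hunk) meaningful under sparse. A short usage sketch against a hypothetical register block; my_regs and my_csroffs are illustrative, not part of the driver:

    /* Sketch of the offsetof-based MMIO access pattern used above. */
    struct my_regs {
            u32 command_config;
            u32 mac_addr_0;
            u32 mac_addr_1;
    };

    #define my_csroffs(field) offsetof(struct my_regs, field)

    static void my_set_mac(void __iomem *base, u32 lo, u32 hi)
    {
            /* csrwr32(val, base, offs) boils down to writel(val, base + offs) */
            csrwr32(lo, base, my_csroffs(mac_addr_0));
            csrwr32(hi, base, my_csroffs(mac_addr_1));
    }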
index 319ca74f5e7480b23bc0ce63f9dca0a25ce894b2..be72e1e6452522ba9b81e58a3268580c992cfa5f 100644 (file)
@@ -77,7 +77,7 @@ static void tse_get_drvinfo(struct net_device *dev,
        struct altera_tse_private *priv = netdev_priv(dev);
        u32 rev = ioread32(&priv->mac_dev->megacore_revision);
 
-       strcpy(info->driver, "Altera TSE MAC IP Driver");
+       strcpy(info->driver, "altera_tse");
        strcpy(info->version, "v8.0");
        snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
                 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                           u64 *buf)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        u64 ext;
 
-       buf[0] = ioread32(&mac->frames_transmitted_ok);
-       buf[1] = ioread32(&mac->frames_received_ok);
-       buf[2] = ioread32(&mac->frames_check_sequence_errors);
-       buf[3] = ioread32(&mac->alignment_errors);
+       buf[0] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_transmitted_ok));
+       buf[1] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_received_ok));
+       buf[2] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_check_sequence_errors));
+       buf[3] = csrrd32(priv->mac_dev,
+                        tse_csroffs(alignment_errors));
 
        /* Extended aOctetsTransmittedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-       ext |= ioread32(&mac->octets_transmitted_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_transmitted_ok));
        buf[4] = ext;
 
        /* Extended aOctetsReceivedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-       ext |= ioread32(&mac->octets_received_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_received_ok)) << 32;
+
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_received_ok));
        buf[5] = ext;
 
-       buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-       buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-       buf[8] = ioread32(&mac->if_in_errors);
-       buf[9] = ioread32(&mac->if_out_errors);
-       buf[10] = ioread32(&mac->if_in_ucast_pkts);
-       buf[11] = ioread32(&mac->if_in_multicast_pkts);
-       buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-       buf[13] = ioread32(&mac->if_out_discards);
-       buf[14] = ioread32(&mac->if_out_ucast_pkts);
-       buf[15] = ioread32(&mac->if_out_multicast_pkts);
-       buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-       buf[17] = ioread32(&mac->ether_stats_drop_events);
+       buf[6] = csrrd32(priv->mac_dev,
+                        tse_csroffs(tx_pause_mac_ctrl_frames));
+       buf[7] = csrrd32(priv->mac_dev,
+                        tse_csroffs(rx_pause_mac_ctrl_frames));
+       buf[8] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_in_errors));
+       buf[9] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_out_errors));
+       buf[10] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_ucast_pkts));
+       buf[11] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_multicast_pkts));
+       buf[12] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_broadcast_pkts));
+       buf[13] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_discards));
+       buf[14] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_ucast_pkts));
+       buf[15] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_multicast_pkts));
+       buf[16] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_broadcast_pkts));
+       buf[17] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_drop_events));
 
        /* Extended etherStatsOctets counter */
-       ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-       ext |= ioread32(&mac->ether_stats_octets);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_ether_stats_octets)) << 32;
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(ether_stats_octets));
        buf[18] = ext;
 
-       buf[19] = ioread32(&mac->ether_stats_pkts);
-       buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-       buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-       buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-       buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-       buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-       buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-       buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-       buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-       buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-       buf[29] = ioread32(&mac->ether_stats_jabbers);
-       buf[30] = ioread32(&mac->ether_stats_fragments);
+       buf[19] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts));
+       buf[20] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_undersize_pkts));
+       buf[21] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_oversize_pkts));
+       buf[22] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_64_octets));
+       buf[23] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_65to127_octets));
+       buf[24] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_128to255_octets));
+       buf[25] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_256to511_octets));
+       buf[26] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_512to1023_octets));
+       buf[27] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1024to1518_octets));
+       buf[28] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1519tox_octets));
+       buf[29] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_jabbers));
+       buf[30] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_fragments));
 }
 
 static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,19 +213,24 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 {
        int i;
        struct altera_tse_private *priv = netdev_priv(dev);
-       u32 *tse_mac_regs = (u32 *)priv->mac_dev;
        u32 *buf = regbuf;
 
        /* Set version to a known value, so ethtool knows
         * how to do any special formatting of this data.
         * This version number will need to change if and
         * when this register table is changed.
+        *
+        * version[31:0] = 1: Dump the first 128 TSE Registers
+        *      Upper bits are all 0 by default
+        *
+        * The upper 16 bits will indicate feature presence for
+        * ethtool register decoding in a future version.
         */
 
        regs->version = 1;
 
        for (i = 0; i < TSE_NUM_REGS; i++)
-               buf[i] = ioread32(&tse_mac_regs[i]);
+               buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
 static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -231,5 +271,5 @@ static const struct ethtool_ops tse_ethtool_ops = {
 
 void altera_tse_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+       netdev->ethtool_ops = &tse_ethtool_ops;
 }
index c70a29e0b9f79115cc697df95b52757830651ea3..7330681574d20ccd02855beabedc9b01c8ae2b48 100644 (file)
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  */
 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
-       u32 data;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* get the data */
-       data = ioread32(&mdio_regs[regnum]) & 0xffff;
-       return data;
+       return csrrd32(priv->mac_dev,
+                      tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                                 u16 value)
 {
-       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+       struct net_device *ndev = bus->priv;
+       struct altera_tse_private *priv = netdev_priv(ndev);
 
        /* set MDIO address */
-       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+       csrwr32((mii_id & 0x1f), priv->mac_dev,
+               tse_csroffs(mdio_phy0_addr));
 
        /* write the data */
-       iowrite32((u32) value, &mdio_regs[regnum]);
+       csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
        return 0;
 }
 
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                mdio->irq[i] = PHY_POLL;
 
-       mdio->priv = priv->mac_dev;
+       mdio->priv = dev;
        mdio->parent = priv->device;
 
        ret = of_mdiobus_register(mdio, mdio_node);
@@ -224,6 +225,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
                dev_kfree_skb_any(rxbuffer->skb);
                return -EINVAL;
        }
+       rxbuffer->dma_addr &= (dma_addr_t)~3;
        rxbuffer->len = len;
        return 0;
 }
@@ -425,9 +427,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
                priv->dev->stats.rx_bytes += pktlength;
 
                entry = next_entry;
+
+               tse_rx_refill(priv);
        }
 
-       tse_rx_refill(priv);
        return count;
 }
 
@@ -520,7 +523,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
        struct altera_tse_private *priv;
        unsigned long int flags;
 
-
        if (unlikely(!dev)) {
                pr_err("%s: invalid dev pointer\n", __func__);
                return IRQ_NONE;
@@ -562,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int nopaged_len = skb_headlen(skb);
        enum netdev_tx ret = NETDEV_TX_OK;
        dma_addr_t dma_addr;
-       int txcomplete = 0;
 
        spin_lock_bh(&priv->tx_lock);
 
@@ -598,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dma_sync_single_for_device(priv->device, buffer->dma_addr,
                                   buffer->len, DMA_TO_DEVICE);
 
-       txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+       priv->dmaops->tx_buffer(priv, buffer);
 
        skb_tx_timestamp(skb);
 
@@ -697,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
        struct altera_tse_private *priv = netdev_priv(dev);
        struct phy_device *phydev = NULL;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-       int ret;
 
        if (priv->phy_addr != POLL_PHY) {
                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -711,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
                        netdev_err(dev, "Could not attach to PHY\n");
 
        } else {
+               int ret;
                phydev = phy_find_first(priv->mdio);
                if (phydev == NULL) {
                        netdev_err(dev, "No PHY found\n");
@@ -790,7 +791,6 @@ static int init_phy(struct net_device *dev)
 
 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        u32 msb;
        u32 lsb;
 
@@ -798,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
        lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 
        /* Set primary MAC address */
-       iowrite32(msb, &mac->mac_addr_0);
-       iowrite32(lsb, &mac->mac_addr_1);
+       csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+       csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 }
 
 /* MAC software reset.
@@ -810,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
  */
 static int reset_mac(struct altera_tse_private *priv)
 {
-       void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
        int counter;
        u32 dat;
 
-       dat = ioread32(cmd_cfg_reg);
+       dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
        dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
-       iowrite32(dat, cmd_cfg_reg);
+       csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 
        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+               if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+                                    MAC_CMDCFG_SW_RESET))
                        break;
                udelay(1);
        }
 
        if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-               dat = ioread32(cmd_cfg_reg);
+               dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
                dat &= ~MAC_CMDCFG_SW_RESET;
-               iowrite32(dat, cmd_cfg_reg);
+               csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
                return -1;
        }
        return 0;
@@ -839,42 +839,58 @@ static int reset_mac(struct altera_tse_private *priv)
 */
 static int init_mac(struct altera_tse_private *priv)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
        unsigned int cmd = 0;
        u32 frm_length;
 
        /* Setup Rx FIFO */
-       iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
-                 &mac->rx_section_empty);
-       iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
-       iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
-       iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+       csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(rx_section_empty));
+
+       csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(rx_section_full));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(rx_almost_empty));
+
+       csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(rx_almost_full));
 
        /* Setup Tx FIFO */
-       iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
-                 &mac->tx_section_empty);
-       iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
-       iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
-       iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+       csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+               priv->mac_dev, tse_csroffs(tx_section_empty));
+
+       csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+               tse_csroffs(tx_section_full));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+               tse_csroffs(tx_almost_empty));
+
+       csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+               tse_csroffs(tx_almost_full));
 
        /* MAC Address Configuration */
        tse_update_mac_addr(priv, priv->dev->dev_addr);
 
        /* MAC Function Configuration */
        frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
-       iowrite32(frm_length, &mac->frm_length);
-       iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+       csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+       csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+               tse_csroffs(tx_ipg_length));
 
        /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
         * start address
         */
-       tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
-       tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
-                                        ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+       tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+                   ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+       tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+                     ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+                     ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
        /* Set the MAC options */
-       cmd = ioread32(&mac->command_config);
-       cmd |= MAC_CMDCFG_PAD_EN;       /* Padding Removal on Receive */
+       cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
+       cmd &= ~MAC_CMDCFG_PAD_EN;      /* No padding Removal on Receive */
        cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
        cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
                                         * with CRC errors
@@ -882,7 +898,16 @@ static int init_mac(struct altera_tse_private *priv)
        cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
        cmd &= ~MAC_CMDCFG_TX_ENA;
        cmd &= ~MAC_CMDCFG_RX_ENA;
-       iowrite32(cmd, &mac->command_config);
+
+       /* Default speed and duplex setting, full/100 */
+       cmd &= ~MAC_CMDCFG_HD_ENA;
+       cmd &= ~MAC_CMDCFG_ETH_SPEED;
+       cmd &= ~MAC_CMDCFG_ENA_10;
+
+       csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
+
+       csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+               tse_csroffs(pause_quanta));
 
        if (netif_msg_hw(priv))
                dev_dbg(priv->device,
@@ -895,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
  */
 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 {
-       struct altera_tse_mac *mac = priv->mac_dev;
-       u32 value = ioread32(&mac->command_config);
+       u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 
        if (enable)
                value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
        else
                value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 
-       iowrite32(value, &mac->command_config);
+       csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 }
 
 /* Change the MTU
@@ -933,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
 static void altera_tse_set_mcfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
        struct netdev_hw_addr *ha;
 
        /* clear the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(0, &(mac->hash_table[i]));
+               csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 
        netdev_for_each_mc_addr(ha, dev) {
                unsigned int hash = 0;
@@ -955,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 
                        hash = (hash << 1) | xor_bit;
                }
-               iowrite32(1, &(mac->hash_table[hash]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
        }
 }
 
@@ -963,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 static void altera_tse_set_mcfilterall(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        int i;
 
        /* set the hash filter */
        for (i = 0; i < 64; i++)
-               iowrite32(1, &(mac->hash_table[i]));
+               csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 }
 
 /* Set or clear the multicast filter for this adaptor
@@ -976,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if (dev->flags & IFF_PROMISC)
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
 
        if (dev->flags & IFF_ALLMULTI)
                altera_tse_set_mcfilterall(dev);
@@ -996,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 static void tse_set_rx_mode(struct net_device *dev)
 {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
 
        spin_lock(&priv->mac_cfg_lock);
 
        if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
            !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
-               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+                           MAC_CMDCFG_PROMIS_EN);
        else
-               tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+               tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+                             MAC_CMDCFG_PROMIS_EN);
 
        spin_unlock(&priv->mac_cfg_lock);
 }
@@ -1085,17 +1108,19 @@ static int tse_open(struct net_device *dev)
 
        spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
-       /* Start MAC Rx/Tx */
-       spin_lock(&priv->mac_cfg_lock);
-       tse_set_mac(priv, true);
-       spin_unlock(&priv->mac_cfg_lock);
-
        if (priv->phydev)
                phy_start(priv->phydev);
 
        napi_enable(&priv->napi);
        netif_start_queue(dev);
 
+       priv->dmaops->start_rxdma(priv);
+
+       /* Start MAC Rx/Tx */
+       spin_lock(&priv->mac_cfg_lock);
+       tse_set_mac(priv, true);
+       spin_unlock(&priv->mac_cfg_lock);
+
        return 0;
 
 tx_request_irq_error:
@@ -1167,7 +1192,6 @@ static struct net_device_ops altera_tse_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-
 static int request_and_map(struct platform_device *pdev, const char *name,
                           struct resource **res, void __iomem **ptr)
 {
@@ -1235,7 +1259,7 @@ static int altera_tse_probe(struct platform_device *pdev)
                /* Get the mapped address to the SGDMA descriptor memory */
                ret = request_and_map(pdev, "s1", &dma_res, &descmap);
                if (ret)
-                       goto out_free;
+                       goto err_free_netdev;
 
                /* Start of that memory is for transmit descriptors */
                priv->tx_dma_desc = descmap;
@@ -1254,24 +1278,24 @@ static int altera_tse_probe(struct platform_device *pdev)
                if (upper_32_bits(priv->rxdescmem_busaddr)) {
                        dev_dbg(priv->device,
                                "SGDMA bus addresses greater than 32-bits\n");
-                       goto out_free;
+                       goto err_free_netdev;
                }
                if (upper_32_bits(priv->txdescmem_busaddr)) {
                        dev_dbg(priv->device,
                                "SGDMA bus addresses greater than 32-bits\n");
-                       goto out_free;
+                       goto err_free_netdev;
                }
        } else if (priv->dmaops &&
                   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
                ret = request_and_map(pdev, "rx_resp", &dma_res,
                                      &priv->rx_dma_resp);
                if (ret)
-                       goto out_free;
+                       goto err_free_netdev;
 
                ret = request_and_map(pdev, "tx_desc", &dma_res,
                                      &priv->tx_dma_desc);
                if (ret)
-                       goto out_free;
+                       goto err_free_netdev;
 
                priv->txdescmem = resource_size(dma_res);
                priv->txdescmem_busaddr = dma_res->start;
@@ -1279,13 +1303,13 @@ static int altera_tse_probe(struct platform_device *pdev)
                ret = request_and_map(pdev, "rx_desc", &dma_res,
                                      &priv->rx_dma_desc);
                if (ret)
-                       goto out_free;
+                       goto err_free_netdev;
 
                priv->rxdescmem = resource_size(dma_res);
                priv->rxdescmem_busaddr = dma_res->start;
 
        } else {
-               goto out_free;
+               goto err_free_netdev;
        }
 
        if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
@@ -1294,26 +1318,26 @@ static int altera_tse_probe(struct platform_device *pdev)
        else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
                dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
        else
-               goto out_free;
+               goto err_free_netdev;
 
        /* MAC address space */
        ret = request_and_map(pdev, "control_port", &control_port,
                              (void __iomem **)&priv->mac_dev);
        if (ret)
-               goto out_free;
+               goto err_free_netdev;
 
        /* xSGDMA Rx Dispatcher address space */
        ret = request_and_map(pdev, "rx_csr", &dma_res,
                              &priv->rx_dma_csr);
        if (ret)
-               goto out_free;
+               goto err_free_netdev;
 
 
        /* xSGDMA Tx Dispatcher address space */
        ret = request_and_map(pdev, "tx_csr", &dma_res,
                              &priv->tx_dma_csr);
        if (ret)
-               goto out_free;
+               goto err_free_netdev;
 
 
        /* Rx IRQ */
@@ -1321,7 +1345,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        if (priv->rx_irq == -ENXIO) {
                dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
                ret = -ENXIO;
-               goto out_free;
+               goto err_free_netdev;
        }
 
        /* Tx IRQ */
@@ -1329,7 +1353,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        if (priv->tx_irq == -ENXIO) {
                dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
                ret = -ENXIO;
-               goto out_free;
+               goto err_free_netdev;
        }
 
        /* get FIFO depths from device tree */
@@ -1337,14 +1361,14 @@ static int altera_tse_probe(struct platform_device *pdev)
                                 &priv->rx_fifo_depth)) {
                dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
                ret = -ENXIO;
-               goto out_free;
+               goto err_free_netdev;
        }
 
        if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
                                 &priv->rx_fifo_depth)) {
                dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
                ret = -ENXIO;
-               goto out_free;
+               goto err_free_netdev;
        }
 
        /* get hash filter settings for this instance */
@@ -1352,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
                of_property_read_bool(pdev->dev.of_node,
                                      "altr,has-hash-multicast-filter");
 
+       /* Set hash filter to not set for now until the
+        * multicast filter receive issue is debugged
+        */
+       priv->hash_filter = 0;
+
        /* get supplemental address settings for this instance */
        priv->added_unicast =
                of_property_read_bool(pdev->dev.of_node,
@@ -1393,7 +1422,7 @@ static int altera_tse_probe(struct platform_device *pdev)
              ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
                dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
                        priv->phy_addr);
-               goto out_free;
+               goto err_free_netdev;
        }
 
        /* Create/attach to MDIO bus */
@@ -1401,7 +1430,7 @@ static int altera_tse_probe(struct platform_device *pdev)
                                     atomic_add_return(1, &instance_count));
 
        if (ret)
-               goto out_free;
+               goto err_free_netdev;
 
        /* initialize netdev */
        ether_setup(ndev);
@@ -1438,7 +1467,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        ret = register_netdev(ndev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register TSE net device\n");
-               goto out_free_mdio;
+               goto err_register_netdev;
        }
 
        platform_set_drvdata(pdev, ndev);
@@ -1455,13 +1484,16 @@ static int altera_tse_probe(struct platform_device *pdev)
        ret = init_phy(ndev);
        if (ret != 0) {
                netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
-               goto out_free_mdio;
+               goto err_init_phy;
        }
        return 0;
 
-out_free_mdio:
+err_init_phy:
+       unregister_netdev(ndev);
+err_register_netdev:
+       netif_napi_del(&priv->napi);
        altera_tse_mdio_destroy(ndev);
-out_free:
+err_free_netdev:
        free_netdev(ndev);
        return ret;
 }
@@ -1480,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
        .altera_dtype = ALTERA_DTYPE_SGDMA,
        .dmamask = 32,
        .reset_dma = sgdma_reset,
@@ -1496,9 +1528,10 @@ struct altera_dmaops altera_dtype_sgdma = {
        .get_rx_status = sgdma_rx_status,
        .init_dma = sgdma_initialize,
        .uninit_dma = sgdma_uninitialize,
+       .start_rxdma = sgdma_start_rxdma,
 };
 
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
        .altera_dtype = ALTERA_DTYPE_MSGDMA,
        .dmamask = 64,
        .reset_dma = msgdma_reset,
@@ -1514,6 +1547,7 @@ struct altera_dmaops altera_dtype_msgdma = {
        .get_rx_status = msgdma_rx_status,
        .init_dma = msgdma_initialize,
        .uninit_dma = msgdma_uninitialize,
+       .start_rxdma = msgdma_start_rxdma,
 };
 
 static struct of_device_id altera_tse_ids[] = {
index 70fa13f486b2fc4ef1d6fc45bb82977d13ea1930..d7eeb1713ad2b85721a533fbcc469d22db5740ea 100644 (file)
 #include "altera_tse.h"
 #include "altera_utils.h"
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value |= bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        value &= ~bit_mask;
-       iowrite32(value, ioaddr);
+       csrwr32(value, ioaddr, offs);
 }
 
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 1 : 0;
 }
 
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-       u32 value = ioread32(ioaddr);
+       u32 value = csrrd32(ioaddr, offs);
        return (value & bit_mask) ? 0 : 1;
 }
index ce1db36d35832a974f9f958f0b57c298343c0967..baf100ccf5872c7c18ac365ddfe314308b78c9b5 100644 (file)
@@ -19,9 +19,9 @@
 #ifndef __ALTERA_UTILS_H__
 #define __ALTERA_UTILS_H__
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
 
 #endif /* __ALTERA_UTILS_H__*/
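
Editor's sketch (not part of the merged patch): the Altera TSE hunks above convert direct ioread32()/iowrite32() accesses on struct altera_tse_mac fields into offset-based csrrd32()/csrwr32() calls keyed by tse_csroffs(). The helpers themselves live in altera_tse.h and are not shown in this excerpt; their assumed shape is roughly:

/* Assumed shape of the CSR accessors used above; the in-tree definitions
 * in altera_tse.h may differ in detail.
 */
#define tse_csroffs(a)  (offsetof(struct altera_tse_mac, a))

static inline u32 csrrd32(void __iomem *mac, size_t offs)
{
        void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);

        return readl(paddr);
}

static inline void csrwr32(u32 val, void __iomem *mac, size_t offs)
{
        void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);

        writel(val, paddr);
}

Passing an offset rather than dereferencing a register-layout struct is presumably what lets tse_set_bit()/tse_clear_bit() above take a size_t offs argument and funnel every CSR access through one accessor path.
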
index 26efaaa5e73fd292de512fc428e9af84126fcbb9..068dc7cad5fa3c511c34b034c006add6284bb097 100644 (file)
@@ -1900,7 +1900,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
        /* Initialize driver entry points */
        dev->netdev_ops = &amd8111e_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &ops);
+       dev->ethtool_ops = &ops;
        dev->irq =pdev->irq;
        dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
        netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
index b08101b31b8bc547ffbfb486f87abc7a18f92686..968b7bfac8fcaa9af767c57c474a83b3229b09a0 100644 (file)
@@ -718,7 +718,6 @@ static int ariadne_init_one(struct zorro_dev *z,
        unsigned long mem_start = board + ARIADNE_RAM;
        struct resource *r1, *r2;
        struct net_device *dev;
-       struct ariadne_private *priv;
        u32 serial;
        int err;
 
@@ -738,8 +737,6 @@ static int ariadne_init_one(struct zorro_dev *z,
                return -ENOMEM;
        }
 
-       priv = netdev_priv(dev);
-
        r1->name = dev->name;
        r2->name = dev->name;
 
index a2bd91e3d302acce0d727cc3ef1eda27e56040cb..a78e4c13695980e295ead2c8d85fd6f6a351f5c8 100644 (file)
@@ -1229,7 +1229,7 @@ static int au1000_probe(struct platform_device *pdev)
        dev->base_addr = base->start;
        dev->irq = irq;
        dev->netdev_ops = &au1000_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+       dev->ethtool_ops = &au1000_ethtool_ops;
        dev->watchdog_timeo = ETH_TX_TIMEOUT;
 
        /*
index 08569fe2b182c2bef930ef582320b9ccb9cfc952..abf3b1581c82a2eacbf7074b3c9db9ca12fbef19 100644 (file)
@@ -457,7 +457,7 @@ static int nmclan_probe(struct pcmcia_device *link)
     lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
 
     dev->netdev_ops = &mace_netdev_ops;
-    SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+    dev->ethtool_ops = &netdev_ethtool_ops;
     dev->watchdog_timeo = TX_TIMEOUT;
 
     return nmclan_config(link);
index 928fac6dd10a90dca66244dba99194c9c9e2924d..53f85bf715268db94d864695321e3890454719ce 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
+#include <linux/clk.h>
 
 /* STATUS and ENABLE Register bit masks */
 #define TXINT_MASK     (1<<0)  /* Transmit interrupt */
@@ -131,6 +132,7 @@ struct arc_emac_priv {
        struct mii_bus *bus;
 
        void __iomem *regs;
+       struct clk *clk;
 
        struct napi_struct napi;
        struct net_device_stats stats;
index eeecc29cf5b7d2695739d819fe900e798bf38996..18e2faccebb0dcb98bc19bdc333561776aec6b95 100644 (file)
@@ -13,6 +13,7 @@
  *             Vineet Gupta
  */
 
+#include <linux/crc32.h>
 #include <linux/etherdevice.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -362,6 +363,15 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void arc_emac_poll_controller(struct net_device *dev)
+{
+       disable_irq(dev->irq);
+       arc_emac_intr(dev->irq, dev);
+       enable_irq(dev->irq);
+}
+#endif
+
 /**
  * arc_emac_open - Open the network device.
  * @ndev:      Pointer to the network device.
@@ -450,6 +460,41 @@ static int arc_emac_open(struct net_device *ndev)
        return 0;
 }
 
+/**
+ * arc_emac_set_rx_mode - Change the receive filtering mode.
+ * @ndev:      Pointer to the network device.
+ *
+ * This function enables/disables promiscuous or all-multicast mode
+ * and updates the multicast filtering list of the network device.
+ */
+static void arc_emac_set_rx_mode(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+
+       if (ndev->flags & IFF_PROMISC) {
+               arc_reg_or(priv, R_CTRL, PROM_MASK);
+       } else {
+               arc_reg_clr(priv, R_CTRL, PROM_MASK);
+
+               if (ndev->flags & IFF_ALLMULTI) {
+                       arc_reg_set(priv, R_LAFL, ~0);
+                       arc_reg_set(priv, R_LAFH, ~0);
+               } else {
+                       struct netdev_hw_addr *ha;
+                       unsigned int filter[2] = { 0, 0 };
+                       int bit;
+
+                       netdev_for_each_mc_addr(ha, ndev) {
+                               bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
+                               filter[bit >> 5] |= 1 << (bit & 31);
+                       }
+
+                       arc_reg_set(priv, R_LAFL, filter[0]);
+                       arc_reg_set(priv, R_LAFH, filter[1]);
+               }
+       }
+}
+
 /**
  * arc_emac_stop - Close the network device.
  * @ndev:      Pointer to the network device.
@@ -574,6 +619,18 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+static void arc_emac_set_address_internal(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       unsigned int addr_low, addr_hi;
+
+       addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
+       addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
+
+       arc_reg_set(priv, R_ADDRL, addr_low);
+       arc_reg_set(priv, R_ADDRH, addr_hi);
+}
+
 /**
  * arc_emac_set_address - Set the MAC address for this device.
  * @ndev:      Pointer to net_device structure.
@@ -587,9 +644,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
  */
 static int arc_emac_set_address(struct net_device *ndev, void *p)
 {
-       struct arc_emac_priv *priv = netdev_priv(ndev);
        struct sockaddr *addr = p;
-       unsigned int addr_low, addr_hi;
 
        if (netif_running(ndev))
                return -EBUSY;
@@ -599,11 +654,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
 
        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
-       addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
-       addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
-
-       arc_reg_set(priv, R_ADDRL, addr_low);
-       arc_reg_set(priv, R_ADDRH, addr_hi);
+       arc_emac_set_address_internal(ndev);
 
        return 0;
 }
@@ -614,6 +665,10 @@ static const struct net_device_ops arc_emac_netdev_ops = {
        .ndo_start_xmit         = arc_emac_tx,
        .ndo_set_mac_address    = arc_emac_set_address,
        .ndo_get_stats          = arc_emac_stats,
+       .ndo_set_rx_mode        = arc_emac_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = arc_emac_poll_controller,
+#endif
 };
 
 static int arc_emac_probe(struct platform_device *pdev)
@@ -643,13 +698,6 @@ static int arc_emac_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       /* Get CPU clock frequency from device tree */
-       if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
-                                &clock_frequency)) {
-               dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
-               return -EINVAL;
-       }
-
        /* Get IRQ from device tree */
        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (!irq) {
@@ -677,17 +725,36 @@ static int arc_emac_probe(struct platform_device *pdev)
        priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
        if (IS_ERR(priv->regs)) {
                err = PTR_ERR(priv->regs);
-               goto out;
+               goto out_netdev;
        }
        dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
 
+       priv->clk = of_clk_get(pdev->dev.of_node, 0);
+       if (IS_ERR(priv->clk)) {
+               /* Get CPU clock frequency from device tree */
+               if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+                                       &clock_frequency)) {
+                       dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
+                       err = -EINVAL;
+                       goto out_netdev;
+               }
+       } else {
+               err = clk_prepare_enable(priv->clk);
+               if (err) {
+                       dev_err(&pdev->dev, "failed to enable clock\n");
+                       goto out_clkget;
+               }
+
+               clock_frequency = clk_get_rate(priv->clk);
+       }
+
        id = arc_reg_get(priv, R_ID);
 
        /* Check for EMAC revision 5 or 7, magic number */
        if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
                dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
                err = -ENODEV;
-               goto out;
+               goto out_clken;
        }
        dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
 
@@ -702,7 +769,7 @@ static int arc_emac_probe(struct platform_device *pdev)
                               ndev->name, ndev);
        if (err) {
                dev_err(&pdev->dev, "could not allocate IRQ\n");
-               goto out;
+               goto out_clken;
        }
 
        /* Get MAC address from device tree */
@@ -713,6 +780,7 @@ static int arc_emac_probe(struct platform_device *pdev)
        else
                eth_hw_addr_random(ndev);
 
+       arc_emac_set_address_internal(ndev);
        dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
 
        /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
@@ -722,7 +790,7 @@ static int arc_emac_probe(struct platform_device *pdev)
        if (!priv->rxbd) {
                dev_err(&pdev->dev, "failed to allocate data buffers\n");
                err = -ENOMEM;
-               goto out;
+               goto out_clken;
        }
 
        priv->txbd = priv->rxbd + RX_BD_NUM;
@@ -734,7 +802,7 @@ static int arc_emac_probe(struct platform_device *pdev)
        err = arc_mdio_probe(pdev, priv);
        if (err) {
                dev_err(&pdev->dev, "failed to probe MII bus\n");
-               goto out;
+               goto out_clken;
        }
 
        priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
@@ -742,7 +810,7 @@ static int arc_emac_probe(struct platform_device *pdev)
        if (!priv->phy_dev) {
                dev_err(&pdev->dev, "of_phy_connect() failed\n");
                err = -ENODEV;
-               goto out;
+               goto out_mdio;
        }
 
        dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
@@ -752,14 +820,25 @@ static int arc_emac_probe(struct platform_device *pdev)
 
        err = register_netdev(ndev);
        if (err) {
-               netif_napi_del(&priv->napi);
                dev_err(&pdev->dev, "failed to register network device\n");
-               goto out;
+               goto out_netif_api;
        }
 
        return 0;
 
-out:
+out_netif_api:
+       netif_napi_del(&priv->napi);
+       phy_disconnect(priv->phy_dev);
+       priv->phy_dev = NULL;
+out_mdio:
+       arc_mdio_remove(priv);
+out_clken:
+       if (!IS_ERR(priv->clk))
+               clk_disable_unprepare(priv->clk);
+out_clkget:
+       if (!IS_ERR(priv->clk))
+               clk_put(priv->clk);
+out_netdev:
        free_netdev(ndev);
        return err;
 }
@@ -774,6 +853,12 @@ static int arc_emac_remove(struct platform_device *pdev)
        arc_mdio_remove(priv);
        unregister_netdev(ndev);
        netif_napi_del(&priv->napi);
+
+       if (!IS_ERR(priv->clk)) {
+               clk_disable_unprepare(priv->clk);
+               clk_put(priv->clk);
+       }
+
        free_netdev(ndev);
 
        return 0;
index 17bb9ce96260df20eba44a9f215778a62c28373e..49faa97a30c364185f7e988a7e64be5529a62382 100644 (file)
@@ -1302,7 +1302,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        netdev->netdev_ops = &alx_netdev_ops;
-       SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+       netdev->ethtool_ops = &alx_ethtool_ops;
        netdev->irq = pdev->irq;
        netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
 
index 859ea844ba0ff7c292994a59446951680a2e7b79..ecacaaeb2b92929caeeb12849b06c3d6b53707c9 100644 (file)
@@ -305,5 +305,5 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
 
 void atl1c_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
+       netdev->ethtool_ops = &atl1c_ethtool_ops;
 }
index 82b23861bf5598698f3a21232e37eaf693f83c6e..206e9b7be43123911b80ab1d84478bd4412079fe 100644 (file)
@@ -388,5 +388,5 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
 
 void atl1e_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
+       netdev->ethtool_ops = &atl1e_ethtool_ops;
 }
index 78befb522a528268fae32c68649ead0a01263366..2587fed7b02cbc3e93674ec73e053b0f50c28c46 100644 (file)
@@ -1396,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        atl2_setup_pcicmd(pdev);
 
        netdev->netdev_ops = &atl2_netdev_ops;
-       SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
+       netdev->ethtool_ops = &atl2_ethtool_ops;
        netdev->watchdog_timeo = 5 * HZ;
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
index 85dbddd03722b20a861f53cba7fe00b6cb66f3db..3e488094b0731811459c66dcb0517d00cb7dfbbe 100644 (file)
@@ -150,4 +150,15 @@ config BGMAC
          In case of using this driver on BCM4706 it's also requires to enable
          BCMA_DRIVER_GMAC_CMN to make it work.
 
+config SYSTEMPORT
+       tristate "Broadcom SYSTEMPORT internal MAC support"
+       depends on OF
+       select MII
+       select PHYLIB
+       select FIXED_PHY if SYSTEMPORT=y
+       help
+         This driver supports the built-in Ethernet MACs found in the
+         Broadcom BCM7xxx Set Top Box family chipset using an internal
+         Ethernet switch.
+
 endif # NET_VENDOR_BROADCOM
index fd639a0d4c7d64b2b7db5eb084087502e3c6d63a..e2a958a657e0bb8f816d205e0792d3fdfbfc70a4 100644 (file)
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
 obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
index 05ba6258901794ab51842ddc8d630f4d1286798d..ca5a20a48b14cdb48ccd75aaa1c34715a8dcfee3 100644 (file)
@@ -2380,7 +2380,7 @@ static int b44_init_one(struct ssb_device *sdev,
        netif_napi_add(dev, &bp->napi, b44_poll, 64);
        dev->watchdog_timeo = B44_TX_TIMEOUT;
        dev->irq = sdev->irq;
-       SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+       dev->ethtool_ops = &b44_ethtool_ops;
 
        err = ssb_bus_powerup(sdev->bus, 0);
        if (err) {
index a7d11f5565d69342ad296471a9a5d44f8d7c51d5..3e8d1a88ed3d7b597298100798e5286449ffafe8 100644 (file)
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
 
 };
 
-#define BCM_ENET_STATS_LEN     \
-       (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
+#define BCM_ENET_STATS_LEN     ARRAY_SIZE(bcm_enet_gstrings_stats)
 
 static const u32 unused_mib_regs[] = {
        ETH_MIB_TX_ALL_OCTETS,
@@ -1898,7 +1897,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
        dev->netdev_ops = &bcm_enet_ops;
        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
 
-       SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
+       dev->ethtool_ops = &bcm_enet_ethtool_ops;
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        ret = register_netdev(dev);
@@ -2784,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
        /* register netdevice */
        dev->netdev_ops = &bcm_enetsw_ops;
        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
-       SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
+       dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        spin_lock_init(&priv->enetsw_mdio_lock);
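
Editor's sketch (not part of the merged patch): the BCM_ENET_STATS_LEN hunk above replaces an open-coded sizeof division with the kernel's ARRAY_SIZE() macro, and the new bcmsysport driver below uses the same idiom for BCM_SYSPORT_STATS_LEN. Ignoring the extra compile-time array check the kernel version adds, ARRAY_SIZE() boils down to:

/* Simplified view of ARRAY_SIZE(); the real <linux/kernel.h> definition
 * also rejects non-array arguments at compile time.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
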
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644 (file)
index 0000000..dc708a8
--- /dev/null
@@ -0,0 +1,1649 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmsysport.h"
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{                                                                      \
+       u32 reg = __raw_readl(priv->base + offset + off);               \
+       return reg;                                                     \
+}                                                                      \
+static inline void name##_writel(struct bcm_sysport_priv *priv,                \
+                                 u32 val, u32 off)                     \
+{                                                                      \
+       __raw_writel(val, priv->base + offset + off);                   \
+}                                                                      \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
+/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
+ * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
+  */
+#define BCM_SYSPORT_INTR_L2(which)     \
+static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
+                                               u32 mask)               \
+{                                                                      \
+       intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
+       priv->irq##which##_mask &= ~(mask);                             \
+}                                                                      \
+static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
+                                               u32 mask)               \
+{                                                                      \
+       intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET);      \
+       priv->irq##which##_mask |= (mask);                              \
+}                                                                      \
+
+BCM_SYSPORT_INTR_L2(0)
+BCM_SYSPORT_INTR_L2(1)
+
+/* Register accesses to GISB/RBUS registers are expensive (few hundred
+ * nanoseconds), so keep the check for 64-bits explicit here to save
+ * one register write per-packet on 32-bits platforms.
+ */
+static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
+                                    void __iomem *d,
+                                    dma_addr_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+                       d + DESC_ADDR_HI_STATUS_LEN);
+#endif
+       __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+}
+
+static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
+                                               struct dma_desc *desc,
+                                               unsigned int port)
+{
+       /* Ports are latched, so write upper address first */
+       tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
+       tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
+}
+
+/* Ethtool operations */
+static int bcm_sysport_set_settings(struct net_device *dev,
+                                   struct ethtool_cmd *cmd)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_get_settings(struct net_device *dev,
+                                       struct ethtool_cmd *cmd)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_set_rx_csum(struct net_device *dev,
+                                       netdev_features_t wanted)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+       reg = rxchk_readl(priv, RXCHK_CONTROL);
+       if (priv->rx_csum_en)
+               reg |= RXCHK_EN;
+       else
+               reg &= ~RXCHK_EN;
+
+       /* If UniMAC forwards CRC, we need to skip over it to get
+        * a valid CHK bit to be set in the per-packet status word
+        */
+       if (priv->rx_csum_en && priv->crc_fwd)
+               reg |= RXCHK_SKIP_FCS;
+       else
+               reg &= ~RXCHK_SKIP_FCS;
+
+       rxchk_writel(priv, reg, RXCHK_CONTROL);
+
+       return 0;
+}
+
+static int bcm_sysport_set_tx_csum(struct net_device *dev,
+                                       netdev_features_t wanted)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       /* Hardware transmit checksum requires us to enable the Transmit status
+        * block prepended to the packet contents
+        */
+       priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+       reg = tdma_readl(priv, TDMA_CONTROL);
+       if (priv->tsb_en)
+               reg |= TSB_EN;
+       else
+               reg &= ~TSB_EN;
+       tdma_writel(priv, reg, TDMA_CONTROL);
+
+       return 0;
+}
+
+static int bcm_sysport_set_features(struct net_device *dev,
+                                       netdev_features_t features)
+{
+       netdev_features_t changed = features ^ dev->features;
+       netdev_features_t wanted = dev->wanted_features;
+       int ret = 0;
+
+       if (changed & NETIF_F_RXCSUM)
+               ret = bcm_sysport_set_rx_csum(dev, wanted);
+       if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+               ret = bcm_sysport_set_tx_csum(dev, wanted);
+
+       return ret;
+}
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
+       /* general stats */
+       STAT_NETDEV(rx_packets),
+       STAT_NETDEV(tx_packets),
+       STAT_NETDEV(rx_bytes),
+       STAT_NETDEV(tx_bytes),
+       STAT_NETDEV(rx_errors),
+       STAT_NETDEV(tx_errors),
+       STAT_NETDEV(rx_dropped),
+       STAT_NETDEV(tx_dropped),
+       STAT_NETDEV(multicast),
+       /* UniMAC RSV counters */
+       STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+       STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+       STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+       STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+       STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+       STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+       STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+       STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+       STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+       STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+       STAT_MIB_RX("rx_pkts", mib.rx.pkt),
+       STAT_MIB_RX("rx_bytes", mib.rx.bytes),
+       STAT_MIB_RX("rx_multicast", mib.rx.mca),
+       STAT_MIB_RX("rx_broadcast", mib.rx.bca),
+       STAT_MIB_RX("rx_fcs", mib.rx.fcs),
+       STAT_MIB_RX("rx_control", mib.rx.cf),
+       STAT_MIB_RX("rx_pause", mib.rx.pf),
+       STAT_MIB_RX("rx_unknown", mib.rx.uo),
+       STAT_MIB_RX("rx_align", mib.rx.aln),
+       STAT_MIB_RX("rx_outrange", mib.rx.flr),
+       STAT_MIB_RX("rx_code", mib.rx.cde),
+       STAT_MIB_RX("rx_carrier", mib.rx.fcr),
+       STAT_MIB_RX("rx_oversize", mib.rx.ovr),
+       STAT_MIB_RX("rx_jabber", mib.rx.jbr),
+       STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
+       STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
+       STAT_MIB_RX("rx_unicast", mib.rx.uc),
+       STAT_MIB_RX("rx_ppp", mib.rx.ppp),
+       STAT_MIB_RX("rx_crc", mib.rx.rcrc),
+       /* UniMAC TSV counters */
+       STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+       STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+       STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+       STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+       STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+       STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+       STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+       STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+       STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+       STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+       STAT_MIB_TX("tx_pkts", mib.tx.pkts),
+       STAT_MIB_TX("tx_multicast", mib.tx.mca),
+       STAT_MIB_TX("tx_broadcast", mib.tx.bca),
+       STAT_MIB_TX("tx_pause", mib.tx.pf),
+       STAT_MIB_TX("tx_control", mib.tx.cf),
+       STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
+       STAT_MIB_TX("tx_oversize", mib.tx.ovr),
+       STAT_MIB_TX("tx_defer", mib.tx.drf),
+       STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
+       STAT_MIB_TX("tx_single_col", mib.tx.scl),
+       STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
+       STAT_MIB_TX("tx_late_col", mib.tx.lcl),
+       STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
+       STAT_MIB_TX("tx_frags", mib.tx.frg),
+       STAT_MIB_TX("tx_total_col", mib.tx.ncl),
+       STAT_MIB_TX("tx_jabber", mib.tx.jbr),
+       STAT_MIB_TX("tx_bytes", mib.tx.bytes),
+       STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
+       STAT_MIB_TX("tx_unicast", mib.tx.uc),
+       /* UniMAC RUNT counters */
+       STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+       STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+       STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+       STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+       /* RXCHK misc statistics */
+       STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
+       STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
+                       RXCHK_OTHER_DISC_CNTR),
+       /* RBUF misc statistics */
+       STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
+       STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+};
+
+#define BCM_SYSPORT_STATS_LEN  ARRAY_SIZE(bcm_sysport_gstrings_stats)
+
+static void bcm_sysport_get_drvinfo(struct net_device *dev,
+                                       struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->version, "0.1", sizeof(info->version));
+       strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+       info->n_stats = BCM_SYSPORT_STATS_LEN;
+}
+
+static u32 bcm_sysport_get_msglvl(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       return priv->msg_enable;
+}
+
+static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       priv->msg_enable = enable;
+}
+
+static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
+{
+       switch (string_set) {
+       case ETH_SS_STATS:
+               return BCM_SYSPORT_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void bcm_sysport_get_strings(struct net_device *dev,
+                                       u32 stringset, u8 *data)
+{
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+                       memcpy(data + i * ETH_GSTRING_LEN,
+                               bcm_sysport_gstrings_stats[i].stat_string,
+                               ETH_GSTRING_LEN);
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
+{
+       int i, j = 0;
+
+       for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+               const struct bcm_sysport_stats *s;
+               u8 offset = 0;
+               u32 val = 0;
+               char *p;
+
+               s = &bcm_sysport_gstrings_stats[i];
+               switch (s->type) {
+               case BCM_SYSPORT_STAT_NETDEV:
+                       continue;
+               case BCM_SYSPORT_STAT_MIB_RX:
+               case BCM_SYSPORT_STAT_MIB_TX:
+               case BCM_SYSPORT_STAT_RUNT:
+                       if (s->type != BCM_SYSPORT_STAT_MIB_RX)
+                               offset = UMAC_MIB_STAT_OFFSET;
+                       val = umac_readl(priv, UMAC_MIB_START + j + offset);
+                       break;
+               case BCM_SYSPORT_STAT_RXCHK:
+                       val = rxchk_readl(priv, s->reg_offset);
+                       if (val == ~0)
+                               rxchk_writel(priv, 0, s->reg_offset);
+                       break;
+               case BCM_SYSPORT_STAT_RBUF:
+                       val = rbuf_readl(priv, s->reg_offset);
+                       if (val == ~0)
+                               rbuf_writel(priv, 0, s->reg_offset);
+                       break;
+               }
+
+               j += s->stat_sizeof;
+               p = (char *)priv + s->stat_offset;
+               *(u32 *)p = val;
+       }
+
+       netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
+}
+
+static void bcm_sysport_get_stats(struct net_device *dev,
+                                       struct ethtool_stats *stats, u64 *data)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       int i;
+
+       if (netif_running(dev))
+               bcm_sysport_update_mib_counters(priv);
+
+       for (i =  0; i < BCM_SYSPORT_STATS_LEN; i++) {
+               const struct bcm_sysport_stats *s;
+               char *p;
+
+               s = &bcm_sysport_gstrings_stats[i];
+               if (s->type == BCM_SYSPORT_STAT_NETDEV)
+                       p = (char *)&dev->stats;
+               else
+                       p = (char *)priv;
+               p += s->stat_offset;
+               data[i] = *(u32 *)p;
+       }
+}
+
+static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
+{
+       dev_kfree_skb_any(cb->skb);
+       cb->skb = NULL;
+       dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+                                struct bcm_sysport_cb *cb)
+{
+       struct device *kdev = &priv->pdev->dev;
+       struct net_device *ndev = priv->netdev;
+       dma_addr_t mapping;
+       int ret;
+
+       cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+       if (!cb->skb) {
+               netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
+               return -ENOMEM;
+       }
+
+       mapping = dma_map_single(kdev, cb->skb->data,
+                               RX_BUF_LENGTH, DMA_FROM_DEVICE);
+       ret = dma_mapping_error(kdev, mapping);
+       if (ret) {
+               bcm_sysport_free_cb(cb);
+               netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
+               return ret;
+       }
+
+       dma_unmap_addr_set(cb, dma_addr, mapping);
+       dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
+       priv->rx_bd_assign_index++;
+       priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+       priv->rx_bd_assign_ptr = priv->rx_bds +
+               (priv->rx_bd_assign_index * DESC_SIZE);
+
+       netif_dbg(priv, rx_status, ndev, "RX refill\n");
+
+       return 0;
+}
+
+static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
+{
+       struct bcm_sysport_cb *cb;
+       int ret = 0;
+       unsigned int i;
+
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+               if (cb->skb)
+                       continue;
+
+               ret = bcm_sysport_rx_refill(priv, cb);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/* Poll the hardware for up to budget packets to process */
+static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
+                                       unsigned int budget)
+{
+       struct device *kdev = &priv->pdev->dev;
+       struct net_device *ndev = priv->netdev;
+       unsigned int processed = 0, to_process;
+       struct bcm_sysport_cb *cb;
+       struct sk_buff *skb;
+       unsigned int p_index;
+       u16 len, status;
+       struct rsb *rsb;
+
+       /* Determine how much we should process since last call */
+       p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+       p_index &= RDMA_PROD_INDEX_MASK;
+
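+       /* The producer index is a free-running 16-bit counter, account for
+        * its wrap-around relative to our consumer index.
+        */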
+       if (p_index < priv->rx_c_index)
+               to_process = (RDMA_CONS_INDEX_MASK + 1) -
+                       priv->rx_c_index + p_index;
+       else
+               to_process = p_index - priv->rx_c_index;
+
+       netif_dbg(priv, rx_status, ndev,
+                       "p_index=%d rx_c_index=%d to_process=%d\n",
+                       p_index, priv->rx_c_index, to_process);
+
+       while ((processed < to_process) &&
+               (processed < budget)) {
+
+               cb = &priv->rx_cbs[priv->rx_read_ptr];
+               skb = cb->skb;
+               dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+                               RX_BUF_LENGTH, DMA_FROM_DEVICE);
+
+               /* Extract the Receive Status Block prepended */
+               rsb = (struct rsb *)skb->data;
+               len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
+               status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
+                       DESC_STATUS_MASK;
+
+               processed++;
+               priv->rx_read_ptr++;
+               if (priv->rx_read_ptr == priv->num_rx_bds)
+                       priv->rx_read_ptr = 0;
+
+               netif_dbg(priv, rx_status, ndev,
+                               "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+                               p_index, priv->rx_c_index, priv->rx_read_ptr,
+                               len, status);
+
+               if (unlikely(!skb)) {
+                       netif_err(priv, rx_err, ndev, "out of memory!\n");
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
+                       goto refill;
+               }
+
+               if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
+                       netif_err(priv, rx_status, ndev, "fragmented packet!\n");
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
+                       bcm_sysport_free_cb(cb);
+                       goto refill;
+               }
+
+               if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
+                       netif_err(priv, rx_err, ndev, "error packet\n");
+                       if (status & RX_STATUS_OVFLOW)
+                               ndev->stats.rx_over_errors++;
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
+                       bcm_sysport_free_cb(cb);
+                       goto refill;
+               }
+
+               skb_put(skb, len);
+
+               /* Hardware validated our checksum */
+               if (likely(status & DESC_L4_CSUM))
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               /* The hardware prepends 2 bytes between the Ethernet and IP
+                * headers, and the Receive Status Block sits in front of the
+                * frame; strip all of this off the SKB.
+                */
+               skb_pull(skb, sizeof(*rsb) + 2);
+               len -= (sizeof(*rsb) + 2);
+
+               /* UniMAC may forward CRC */
+               if (priv->crc_fwd) {
+                       skb_trim(skb, len - ETH_FCS_LEN);
+                       len -= ETH_FCS_LEN;
+               }
+
+               skb->protocol = eth_type_trans(skb, ndev);
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += len;
+
+               napi_gro_receive(&priv->napi, skb);
+refill:
+               bcm_sysport_rx_refill(priv, cb);
+       }
+
+       return processed;
+}
+
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+                                       struct bcm_sysport_cb *cb,
+                                       unsigned int *bytes_compl,
+                                       unsigned int *pkts_compl)
+{
+       struct device *kdev = &priv->pdev->dev;
+       struct net_device *ndev = priv->netdev;
+
+       if (cb->skb) {
+               ndev->stats.tx_bytes += cb->skb->len;
+               *bytes_compl += cb->skb->len;
+               dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+                               dma_unmap_len(cb, dma_len),
+                               DMA_TO_DEVICE);
+               ndev->stats.tx_packets++;
+               (*pkts_compl)++;
+               bcm_sysport_free_cb(cb);
+       /* SKB fragment */
+       } else if (dma_unmap_addr(cb, dma_addr)) {
+               ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+               dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
+                               dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+               dma_unmap_addr_set(cb, dma_addr, 0);
+       }
+}
+
+/* Reclaim queued SKBs for transmission completion, lockless version */
+static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+                                            struct bcm_sysport_tx_ring *ring)
+{
+       struct net_device *ndev = priv->netdev;
+       unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       struct bcm_sysport_cb *cb;
+       struct netdev_queue *txq;
+       u32 hw_ind;
+
+       txq = netdev_get_tx_queue(ndev, ring->index);
+
+       /* Compute how many descriptors have been processed since last call */
+       hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+       c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+       ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+
+       last_c_index = ring->c_index;
+       num_tx_cbs = ring->size;
+
+       c_index &= (num_tx_cbs - 1);
+
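+       /* Number of descriptors completed since the last call, accounting
+        * for the consumer index wrapping around the ring.
+        */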
+       if (c_index >= last_c_index)
+               last_tx_cn = c_index - last_c_index;
+       else
+               last_tx_cn = num_tx_cbs - last_c_index + c_index;
+
+       netif_dbg(priv, tx_done, ndev,
+                       "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+                       ring->index, c_index, last_tx_cn, last_c_index);
+
+       while (last_tx_cn-- > 0) {
+               cb = ring->cbs + last_c_index;
+               bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+
+               ring->desc_count++;
+               last_c_index++;
+               last_c_index &= (num_tx_cbs - 1);
+       }
+
+       ring->c_index = c_index;
+
+       if (netif_tx_queue_stopped(txq) && pkts_compl)
+               netif_tx_wake_queue(txq);
+
+       netif_dbg(priv, tx_done, ndev,
+                       "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+                       ring->index, ring->c_index, pkts_compl, bytes_compl);
+
+       return pkts_compl;
+}
+
+/* Locked version of the per-ring TX reclaim routine */
+static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+                                          struct bcm_sysport_tx_ring *ring)
+{
+       unsigned int released;
+
+       spin_lock(&ring->lock);
+       released = __bcm_sysport_tx_reclaim(priv, ring);
+       spin_unlock(&ring->lock);
+
+       return released;
+}
+
+static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+{
+       struct bcm_sysport_tx_ring *ring =
+               container_of(napi, struct bcm_sysport_tx_ring, napi);
+       unsigned int work_done = 0;
+
+       work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
+
+       if (work_done < budget) {
+               napi_complete(napi);
+               /* re-enable TX interrupt */
+               intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+       }
+
+       return work_done;
+}
+
+static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
+{
+       unsigned int q;
+
+       for (q = 0; q < priv->netdev->num_tx_queues; q++)
+               bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
+}
+
+static int bcm_sysport_poll(struct napi_struct *napi, int budget)
+{
+       struct bcm_sysport_priv *priv =
+               container_of(napi, struct bcm_sysport_priv, napi);
+       unsigned int work_done = 0;
+
+       work_done = bcm_sysport_desc_rx(priv, budget);
+
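+       /* Tell the hardware how far we have consumed into the RX ring */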
+       priv->rx_c_index += work_done;
+       priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
+       rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+       if (work_done < budget) {
+               napi_complete(napi);
+               /* re-enable RX interrupts */
+               intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
+       }
+
+       return work_done;
+}
+
+
+/* RX and misc interrupt routine */
+static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
+                         ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+       intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+       if (unlikely(priv->irq0_stat == 0)) {
+               netdev_warn(priv->netdev, "spurious RX interrupt\n");
+               return IRQ_NONE;
+       }
+
+       if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+               if (likely(napi_schedule_prep(&priv->napi))) {
+                       /* disable RX interrupts */
+                       intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
+                       __napi_schedule(&priv->napi);
+               }
+       }
+
+       /* TX ring is full, perform a full reclaim since we do not know
+        * which one would trigger this interrupt
+        */
+       if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+               bcm_sysport_tx_reclaim_all(priv);
+
+       return IRQ_HANDLED;
+}
+
+/* TX interrupt service routine */
+static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct bcm_sysport_tx_ring *txr;
+       unsigned int ring;
+
+       priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
+                               ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+       if (unlikely(priv->irq1_stat == 0)) {
+               netdev_warn(priv->netdev, "spurious TX interrupt\n");
+               return IRQ_NONE;
+       }
+
+       for (ring = 0; ring < dev->num_tx_queues; ring++) {
+               if (!(priv->irq1_stat & BIT(ring)))
+                       continue;
+
+               txr = &priv->tx_rings[ring];
+
+               if (likely(napi_schedule_prep(&txr->napi))) {
+                       intrl2_1_mask_set(priv, BIT(ring));
+                       __napi_schedule(&txr->napi);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
+                                             struct net_device *dev)
+{
+       struct sk_buff *nskb;
+       struct tsb *tsb;
+       u32 csum_info;
+       u8 ip_proto;
+       u16 csum_start;
+       u16 ip_ver;
+
+       /* Re-allocate SKB if needed */
+       if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
+               nskb = skb_realloc_headroom(skb, sizeof(*tsb));
+               dev_kfree_skb(skb);
+               if (!nskb) {
+                       dev->stats.tx_errors++;
+                       dev->stats.tx_dropped++;
+                       return NULL;
+               }
+               skb = nskb;
+       }
+
+       tsb = (struct tsb *)skb_push(skb, sizeof(*tsb));
+       /* Zero-out TSB by default */
+       memset(tsb, 0, sizeof(*tsb));
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               ip_ver = ntohs(skb->protocol);
+               switch (ip_ver) {
+               case ETH_P_IP:
+                       ip_proto = ip_hdr(skb)->protocol;
+                       break;
+               case ETH_P_IPV6:
+                       ip_proto = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+                       return skb;
+               }
+
+               /* Get the checksum offset and the L4 (transport) offset */
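+               /* skb_push() above moved skb->data back by sizeof(*tsb), so
+                * subtract it to make the offsets relative to the packet
+                * itself.
+                */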
+               csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+               csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
+               csum_info |= (csum_start << L4_PTR_SHIFT);
+
+               if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+                       csum_info |= L4_LENGTH_VALID;
+                       if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+                               csum_info |= L4_UDP;
+               } else
+                       csum_info = 0;
+
+               tsb->l4_ptr_dest_map = csum_info;
+       }
+
+       return skb;
+}
+
+static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+                                   struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       struct bcm_sysport_tx_ring *ring;
+       struct bcm_sysport_cb *cb;
+       struct netdev_queue *txq;
+       struct dma_desc *desc;
+       unsigned int skb_len;
+       dma_addr_t mapping;
+       u32 len_status;
+       u16 queue;
+       int ret;
+
+       queue = skb_get_queue_mapping(skb);
+       txq = netdev_get_tx_queue(dev, queue);
+       ring = &priv->tx_rings[queue];
+
+       /* lock against tx reclaim in BH context */
+       spin_lock(&ring->lock);
+       if (unlikely(ring->desc_count == 0)) {
+               netif_tx_stop_queue(txq);
+               netdev_err(dev, "queue %d awake and ring full!\n", queue);
+               ret = NETDEV_TX_BUSY;
+               goto out;
+       }
+
+       /* Insert TSB and checksum infos */
+       if (priv->tsb_en) {
+               skb = bcm_sysport_insert_tsb(skb, dev);
+               if (!skb) {
+                       ret = NETDEV_TX_OK;
+                       goto out;
+               }
+       }
+
+       /* The Ethernet switch we are interfaced with needs packets to be at
+        * least 64 bytes (including FCS) otherwise they will be discarded when
+        * they enter the switch port logic. When Broadcom tags are enabled, we
+        * need to make sure that packets are at least 68 bytes
+        * (including FCS and tag) because the length verification is done after
+        * the Broadcom tag is stripped off the ingress packet.
+        */
+       if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+
+       skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
+                       ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+
+       mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(kdev, mapping)) {
+               netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+                               skb->data, skb_len);
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+
+       /* Remember the SKB for future freeing */
+       cb = &ring->cbs[ring->curr_desc];
+       cb->skb = skb;
+       dma_unmap_addr_set(cb, dma_addr, mapping);
+       dma_unmap_len_set(cb, dma_len, skb_len);
+
+       /* Fetch a descriptor entry from our pool */
+       desc = ring->desc_cpu;
+
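+       /* Pack the upper address bits, length and status flags into the
+        * descriptor's addr_status_len word (see the DESC_* definitions).
+        */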
+       desc->addr_lo = lower_32_bits(mapping);
+       len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
+       len_status |= (skb_len << DESC_LEN_SHIFT);
+       len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
+                       DESC_STATUS_SHIFT;
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
+
+       ring->curr_desc++;
+       if (ring->curr_desc == ring->size)
+               ring->curr_desc = 0;
+       ring->desc_count--;
+
+       /* Ensure write completion of the descriptor status/length
+        * in DRAM before the System Port WRITE_PORT register latches
+        * the value
+        */
+       wmb();
+       desc->addr_status_len = len_status;
+       wmb();
+
+       /* Write this descriptor address to the RING write port */
+       tdma_port_write_desc_addr(priv, desc, ring->index);
+
+       /* Check ring space and update SW control flow */
+       if (ring->desc_count == 0)
+               netif_tx_stop_queue(txq);
+
+       netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
+                       ring->index, ring->desc_count, ring->curr_desc);
+
+       ret = NETDEV_TX_OK;
+out:
+       spin_unlock(&ring->lock);
+       return ret;
+}
+
+static void bcm_sysport_tx_timeout(struct net_device *dev)
+{
+       netdev_warn(dev, "transmit timeout!\n");
+
+       dev->trans_start = jiffies;
+       dev->stats.tx_errors++;
+
+       netif_tx_wake_all_queues(dev);
+}
+
+/* phylib adjust link callback */
+static void bcm_sysport_adj_link(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+       unsigned int changed = 0;
+       u32 cmd_bits = 0, reg;
+
+       if (priv->old_link != phydev->link) {
+               changed = 1;
+               priv->old_link = phydev->link;
+       }
+
+       if (priv->old_duplex != phydev->duplex) {
+               changed = 1;
+               priv->old_duplex = phydev->duplex;
+       }
+
+       switch (phydev->speed) {
+       case SPEED_2500:
+               cmd_bits = CMD_SPEED_2500;
+               break;
+       case SPEED_1000:
+               cmd_bits = CMD_SPEED_1000;
+               break;
+       case SPEED_100:
+               cmd_bits = CMD_SPEED_100;
+               break;
+       case SPEED_10:
+               cmd_bits = CMD_SPEED_10;
+               break;
+       default:
+               break;
+       }
+       cmd_bits <<= CMD_SPEED_SHIFT;
+
+       if (phydev->duplex == DUPLEX_HALF)
+               cmd_bits |= CMD_HD_EN;
+
+       if (priv->old_pause != phydev->pause) {
+               changed = 1;
+               priv->old_pause = phydev->pause;
+       }
+
+       if (!phydev->pause)
+               cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+       if (changed) {
+               reg = umac_readl(priv, UMAC_CMD);
+               reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+                       CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
+                       CMD_TX_PAUSE_IGNORE);
+               reg |= cmd_bits;
+               umac_writel(priv, reg, UMAC_CMD);
+
+               phy_print_status(priv->phydev);
+       }
+}
+
+static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+                                   unsigned int index)
+{
+       struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+       struct device *kdev = &priv->pdev->dev;
+       size_t size;
+       void *p;
+       u32 reg;
+
+       /* Simple descriptors partitioning for now */
+       size = 256;
+
+       /* We just need one DMA descriptor which is DMA-able, since writing to
+        * the port will allocate a new descriptor in its internal linked-list
+        */
+       p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+                               GFP_KERNEL);
+       if (!p) {
+               netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
+               return -ENOMEM;
+       }
+
+       ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
+       if (!ring->cbs) {
+               netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+               return -ENOMEM;
+       }
+
+       /* Initialize SW view of the ring */
+       spin_lock_init(&ring->lock);
+       ring->priv = priv;
+       netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+       ring->index = index;
+       ring->size = size;
+       ring->alloc_size = ring->size;
+       ring->desc_cpu = p;
+       ring->desc_count = ring->size;
+       ring->curr_desc = 0;
+
+       /* Initialize HW ring */
+       tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
+       tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
+       tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
+       tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
+       tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+       tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+
+       /* Program the number of descriptors as MAX_THRESHOLD and half of
+        * its size for the hysteresis trigger
+        */
+       tdma_writel(priv, ring->size |
+                       1 << RING_HYST_THRESH_SHIFT,
+                       TDMA_DESC_RING_MAX_HYST(index));
+
+       /* Enable the ring queue in the arbiter */
+       reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
+       reg |= (1 << index);
+       tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
+
+       napi_enable(&ring->napi);
+
+       netif_dbg(priv, hw, priv->netdev,
+                       "TDMA cfg, size=%d, desc_cpu=%p\n",
+                       ring->size, ring->desc_cpu);
+
+       return 0;
+}
+
+static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+                                       unsigned int index)
+{
+       struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+       struct device *kdev = &priv->pdev->dev;
+       u32 reg;
+
+       /* Caller should stop the TDMA engine */
+       reg = tdma_readl(priv, TDMA_STATUS);
+       if (!(reg & TDMA_DISABLED))
+               netdev_warn(priv->netdev, "TDMA not stopped!\n");
+
+       napi_disable(&ring->napi);
+       netif_napi_del(&ring->napi);
+
+       bcm_sysport_tx_reclaim(priv, ring);
+
+       kfree(ring->cbs);
+       ring->cbs = NULL;
+
+       if (ring->desc_dma) {
+               dma_free_coherent(kdev, sizeof(struct dma_desc), ring->desc_cpu,
+                                 ring->desc_dma);
+               ring->desc_dma = 0;
+       }
+       ring->size = 0;
+       ring->alloc_size = 0;
+
+       netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
+}
+
+/* RDMA helper */
+static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
+                                       unsigned int enable)
+{
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       reg = rdma_readl(priv, RDMA_CONTROL);
+       if (enable)
+               reg |= RDMA_EN;
+       else
+               reg &= ~RDMA_EN;
+       rdma_writel(priv, reg, RDMA_CONTROL);
+
+       /* Poll for RDMA enabling/disabling completion */
+       do {
+               reg = rdma_readl(priv, RDMA_STATUS);
+               if (!!(reg & RDMA_DISABLED) == !enable)
+                       return 0;
+               usleep_range(1000, 2000);
+       } while (timeout-- > 0);
+
+       netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
+
+       return -ETIMEDOUT;
+}
+
+/* TDMA helper */
+static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
+                                       unsigned int enable)
+{
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       reg = tdma_readl(priv, TDMA_CONTROL);
+       if (enable)
+               reg |= TDMA_EN;
+       else
+               reg &= ~TDMA_EN;
+       tdma_writel(priv, reg, TDMA_CONTROL);
+
+       /* Poll for TDMA enabling/disabling completion */
+       do {
+               reg = tdma_readl(priv, TDMA_STATUS);
+               if (!!(reg & TDMA_DISABLED) == !enable)
+                       return 0;
+
+               usleep_range(1000, 2000);
+       } while (timeout-- > 0);
+
+       netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
+
+       return -ETIMEDOUT;
+}
+
+static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
+{
+       u32 reg;
+       int ret;
+
+       /* Initialize SW view of the RX ring */
+       priv->num_rx_bds = NUM_RX_DESC;
+       priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
+       priv->rx_bd_assign_ptr = priv->rx_bds;
+       priv->rx_bd_assign_index = 0;
+       priv->rx_c_index = 0;
+       priv->rx_read_ptr = 0;
+       priv->rx_cbs = kzalloc(priv->num_rx_bds *
+                               sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+       if (!priv->rx_cbs) {
+               netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+               return -ENOMEM;
+       }
+
+       ret = bcm_sysport_alloc_rx_bufs(priv);
+       if (ret) {
+               netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
+               return ret;
+       }
+
+       /* Initialize HW, ensure RDMA is disabled */
+       reg = rdma_readl(priv, RDMA_STATUS);
+       if (!(reg & RDMA_DISABLED))
+               rdma_enable_set(priv, 0);
+
+       rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
+       rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
+       rdma_writel(priv, 0, RDMA_PROD_INDEX);
+       rdma_writel(priv, 0, RDMA_CONS_INDEX);
+       rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
+                         RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
+       /* Operate the queue in ring mode */
+       rdma_writel(priv, 0, RDMA_START_ADDR_HI);
+       rdma_writel(priv, 0, RDMA_START_ADDR_LO);
+       rdma_writel(priv, 0, RDMA_END_ADDR_HI);
+       rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+
+       rdma_writel(priv, 1, RDMA_MBDONE_INTR);
+
+       netif_dbg(priv, hw, priv->netdev,
+                       "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+                       priv->num_rx_bds, priv->rx_bds);
+
+       return 0;
+}
+
+static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
+{
+       struct bcm_sysport_cb *cb;
+       unsigned int i;
+       u32 reg;
+
+       /* Caller should ensure RDMA is disabled */
+       reg = rdma_readl(priv, RDMA_STATUS);
+       if (!(reg & RDMA_DISABLED))
+               netdev_warn(priv->netdev, "RDMA not stopped!\n");
+
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = &priv->rx_cbs[i];
+               if (dma_unmap_addr(cb, dma_addr))
+                       dma_unmap_single(&priv->pdev->dev,
+                                       dma_unmap_addr(cb, dma_addr),
+                                       RX_BUF_LENGTH, DMA_FROM_DEVICE);
+               bcm_sysport_free_cb(cb);
+       }
+
+       kfree(priv->rx_cbs);
+       priv->rx_cbs = NULL;
+
+       netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
+}
+
+static void bcm_sysport_set_rx_mode(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       reg = umac_readl(priv, UMAC_CMD);
+       if (dev->flags & IFF_PROMISC)
+               reg |= CMD_PROMISC;
+       else
+               reg &= ~CMD_PROMISC;
+       umac_writel(priv, reg, UMAC_CMD);
+
+       /* No support for ALLMULTI */
+       if (dev->flags & IFF_ALLMULTI)
+               return;
+}
+
+static inline void umac_enable_set(struct bcm_sysport_priv *priv,
+                                       unsigned int enable)
+{
+       u32 reg;
+
+       reg = umac_readl(priv, UMAC_CMD);
+       if (enable)
+               reg |= CMD_RX_EN | CMD_TX_EN;
+       else
+               reg &= ~(CMD_RX_EN | CMD_TX_EN);
+       umac_writel(priv, reg, UMAC_CMD);
+
+       /* UniMAC stops on a packet boundary, wait for a full-sized packet
+        * to be processed (1 msec).
+        */
+       if (enable == 0)
+               usleep_range(1000, 2000);
+}
+
+static inline int umac_reset(struct bcm_sysport_priv *priv)
+{
+       unsigned int timeout = 0;
+       u32 reg;
+       int ret = 0;
+
+       umac_writel(priv, 0, UMAC_CMD);
+       while (timeout++ < 1000) {
+               reg = umac_readl(priv, UMAC_CMD);
+               if (!(reg & CMD_SW_RESET))
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout == 1000) {
+               dev_err(&priv->pdev->dev,
+                       "timeout waiting for MAC to come out of reset\n");
+               ret = -ETIMEDOUT;
+       }
+
+       return ret;
+}
+
+static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
+                               unsigned char *addr)
+{
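+       /* UMAC_MAC0 holds the first four bytes of the address, UMAC_MAC1 the
+        * remaining two.
+        */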
+       umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+                       (addr[2] << 8) | addr[3], UMAC_MAC0);
+       umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
+static void topctrl_flush(struct bcm_sysport_priv *priv)
+{
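+       /* Assert the RX/TX flush controls, give the FIFOs 1ms to drain and
+        * then de-assert them.
+        */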
+       topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+       topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+       mdelay(1);
+       topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+       topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+}
+
+static int bcm_sysport_open(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       u32 reg;
+       int ret;
+
+       /* Reset UniMAC */
+       ret = umac_reset(priv);
+       if (ret) {
+               netdev_err(dev, "UniMAC reset failed\n");
+               return ret;
+       }
+
+       /* Flush TX and RX FIFOs at TOPCTRL level */
+       topctrl_flush(priv);
+
+       /* Disable the UniMAC RX/TX */
+       umac_enable_set(priv, 0);
+
+       /* Enable RBUF 2-byte alignment and Receive Status Block */
+       reg = rbuf_readl(priv, RBUF_CONTROL);
+       reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+       rbuf_writel(priv, reg, RBUF_CONTROL);
+
+       /* Set maximum frame length */
+       umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+       /* Set MAC address */
+       umac_set_hw_addr(priv, dev->dev_addr);
+
+       /* Read CRC forward */
+       priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+
+       priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+                                       0, priv->phy_interface);
+       if (!priv->phydev) {
+               netdev_err(dev, "could not attach to PHY\n");
+               return -ENODEV;
+       }
+
+       /* Reset housekeeping link status */
+       priv->old_duplex = -1;
+       priv->old_link = -1;
+       priv->old_pause = -1;
+
+       /* mask all interrupts before requesting the IRQ lines */
+       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+       ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
+       if (ret) {
+               netdev_err(dev, "failed to request RX interrupt\n");
+               goto out_phy_disconnect;
+       }
+
+       ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
+       if (ret) {
+               netdev_err(dev, "failed to request TX interrupt\n");
+               goto out_free_irq0;
+       }
+
+       /* Initialize both hardware and software ring */
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               ret = bcm_sysport_init_tx_ring(priv, i);
+               if (ret) {
+                       netdev_err(dev, "failed to initialize TX ring %d\n",
+                                       i);
+                       goto out_free_tx_ring;
+               }
+       }
+
+       /* Initialize linked-list */
+       tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+       /* Initialize RX ring */
+       ret = bcm_sysport_init_rx_ring(priv);
+       if (ret) {
+               netdev_err(dev, "failed to initialize RX ring\n");
+               goto out_free_rx_ring;
+       }
+
+       /* Turn on RDMA */
+       ret = rdma_enable_set(priv, 1);
+       if (ret)
+               goto out_free_rx_ring;
+
+       /* Enable RX interrupt and TX ring full interrupt */
+       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+       /* Turn on TDMA */
+       ret = tdma_enable_set(priv, 1);
+       if (ret)
+               goto out_clear_rx_int;
+
+       /* Enable NAPI */
+       napi_enable(&priv->napi);
+
+       /* Turn on UniMAC TX/RX */
+       umac_enable_set(priv, 1);
+
+       phy_start(priv->phydev);
+
+       /* Enable TX interrupts for the 32 TXQs */
+       intrl2_1_mask_clear(priv, 0xffffffff);
+
+       /* Last call before we start the real business */
+       netif_tx_start_all_queues(dev);
+
+       return 0;
+
+out_clear_rx_int:
+       intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+out_free_rx_ring:
+       bcm_sysport_fini_rx_ring(priv);
+out_free_tx_ring:
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       free_irq(priv->irq1, dev);
+out_free_irq0:
+       free_irq(priv->irq0, dev);
+out_phy_disconnect:
+       phy_disconnect(priv->phydev);
+       return ret;
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       u32 reg;
+       int ret;
+
+       /* stop all software from updating hardware */
+       netif_tx_stop_all_queues(dev);
+       napi_disable(&priv->napi);
+       phy_stop(priv->phydev);
+
+       /* mask all interrupts */
+       intrl2_0_mask_set(priv, 0xffffffff);
+       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       intrl2_1_mask_set(priv, 0xffffffff);
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+       /* Disable UniMAC RX */
+       reg = umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_RX_EN;
+       umac_writel(priv, reg, UMAC_CMD);
+
+       ret = tdma_enable_set(priv, 0);
+       if (ret) {
+               netdev_err(dev, "timeout disabling TDMA\n");
+               return ret;
+       }
+
+       /* Wait for a maximum packet size to be drained */
+       usleep_range(2000, 3000);
+
+       ret = rdma_enable_set(priv, 0);
+       if (ret) {
+               netdev_err(dev, "timeout disabling RDMA\n");
+               return ret;
+       }
+
+       /* Disable UniMAC TX */
+       reg = umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_TX_EN;
+       umac_writel(priv, reg, UMAC_CMD);
+
+       /* Free RX/TX rings SW structures */
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       bcm_sysport_fini_rx_ring(priv);
+
+       free_irq(priv->irq0, dev);
+       free_irq(priv->irq1, dev);
+
+       /* Disconnect from PHY */
+       phy_disconnect(priv->phydev);
+
+       return 0;
+}
+
+static struct ethtool_ops bcm_sysport_ethtool_ops = {
+       .get_settings           = bcm_sysport_get_settings,
+       .set_settings           = bcm_sysport_set_settings,
+       .get_drvinfo            = bcm_sysport_get_drvinfo,
+       .get_msglevel           = bcm_sysport_get_msglvl,
+       .set_msglevel           = bcm_sysport_set_msglvl,
+       .get_link               = ethtool_op_get_link,
+       .get_strings            = bcm_sysport_get_strings,
+       .get_ethtool_stats      = bcm_sysport_get_stats,
+       .get_sset_count         = bcm_sysport_get_sset_count,
+};
+
+static const struct net_device_ops bcm_sysport_netdev_ops = {
+       .ndo_start_xmit         = bcm_sysport_xmit,
+       .ndo_tx_timeout         = bcm_sysport_tx_timeout,
+       .ndo_open               = bcm_sysport_open,
+       .ndo_stop               = bcm_sysport_stop,
+       .ndo_set_features       = bcm_sysport_set_features,
+       .ndo_set_rx_mode        = bcm_sysport_set_rx_mode,
+};
+
+#define REV_FMT        "v%2x.%02x"
+
+static int bcm_sysport_probe(struct platform_device *pdev)
+{
+       struct bcm_sysport_priv *priv;
+       struct device_node *dn;
+       struct net_device *dev;
+       const void *macaddr;
+       struct resource *r;
+       u32 txq, rxq;
+       int ret;
+
+       dn = pdev->dev.of_node;
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       /* Read the Transmit/Receive Queue properties */
+       if (of_property_read_u32(dn, "systemport,num-txq", &txq))
+               txq = TDMA_NUM_RINGS;
+       if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
+               rxq = 1;
+
+       dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
+       if (!dev)
+               return -ENOMEM;
+
+       /* Initialize private members */
+       priv = netdev_priv(dev);
+
+       priv->irq0 = platform_get_irq(pdev, 0);
+       priv->irq1 = platform_get_irq(pdev, 1);
+       if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+               dev_err(&pdev->dev, "invalid interrupts\n");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       priv->base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(priv->base)) {
+               ret = PTR_ERR(priv->base);
+               goto err;
+       }
+
+       priv->netdev = dev;
+       priv->pdev = pdev;
+
+       priv->phy_interface = of_get_phy_mode(dn);
+       /* Default to GMII interface mode */
+       if (priv->phy_interface < 0)
+               priv->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+       /* In the case of a fixed PHY, the DT node associated
+        * to the PHY is the Ethernet MAC DT node.
+        */
+       if (of_phy_is_fixed_link(dn)) {
+               ret = of_phy_register_fixed_link(dn);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to register fixed PHY\n");
+                       goto err;
+               }
+
+               priv->phy_dn = dn;
+       }
+
+       /* Initialize netdevice members */
+       macaddr = of_get_mac_address(dn);
+       if (!macaddr || !is_valid_ether_addr(macaddr)) {
+               dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+               random_ether_addr(dev->dev_addr);
+       } else {
+               ether_addr_copy(dev->dev_addr, macaddr);
+       }
+
+       SET_NETDEV_DEV(dev, &pdev->dev);
+       dev_set_drvdata(&pdev->dev, dev);
+       dev->ethtool_ops = &bcm_sysport_ethtool_ops;
+       dev->netdev_ops = &bcm_sysport_netdev_ops;
+       netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+
+       /* HW supported features, none enabled by default */
+       dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+                               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+       /* Set the needed headroom once and for all */
+       BUILD_BUG_ON(sizeof(struct tsb) != 8);
+       dev->needed_headroom += sizeof(struct tsb);
+
+       /* We are interfaced to a switch which handles the multicast
+        * filtering for us, so we do not support programming any
+        * multicast hash table in this Ethernet MAC.
+        */
+       dev->flags &= ~IFF_MULTICAST;
+
+       ret = register_netdev(dev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register net_device\n");
+               goto err;
+       }
+
+       priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+       dev_info(&pdev->dev,
+               "Broadcom SYSTEMPORT" REV_FMT
+               " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+               (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+               priv->base, priv->irq0, priv->irq1, txq, rxq);
+
+       return 0;
+err:
+       free_netdev(dev);
+       return ret;
+}
+
+static int bcm_sysport_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+
+       /* Not much to do, ndo_close has been called
+        * and we use managed allocations
+        */
+       unregister_netdev(dev);
+       free_netdev(dev);
+       dev_set_drvdata(&pdev->dev, NULL);
+
+       return 0;
+}
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+       { .compatible = "brcm,systemport-v1.00" },
+       { .compatible = "brcm,systemport" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver bcm_sysport_driver = {
+       .probe  = bcm_sysport_probe,
+       .remove = bcm_sysport_remove,
+       .driver = {
+               .name = "brcm-systemport",
+               .owner = THIS_MODULE,
+               .of_match_table = bcm_sysport_of_match,
+       },
+};
+module_platform_driver(bcm_sysport_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
+MODULE_ALIAS("platform:brcm-systemport");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644 (file)
index 0000000..73fd04a
--- /dev/null
@@ -0,0 +1,678 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BCM_SYSPORT_H
+#define __BCM_SYSPORT_H
+
+#include <linux/if_vlan.h>
+
+/* Receive/transmit descriptor format */
+#define DESC_ADDR_HI_STATUS_LEN        0x00
+#define  DESC_ADDR_HI_SHIFT    0
+#define  DESC_ADDR_HI_MASK     0xff
+#define  DESC_STATUS_SHIFT     8
+#define  DESC_STATUS_MASK      0x3ff
+#define  DESC_LEN_SHIFT                18
+#define  DESC_LEN_MASK         0x7fff
+#define DESC_ADDR_LO           0x04
+
+/* HW supports 40-bit addressing: upper address bits share the first word */
+#define DESC_SIZE              (WORDS_PER_DESC * sizeof(u32))
+
+/* Default RX buffer allocation size */
+#define RX_BUF_LENGTH          2048
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526;
+ * ENET_PAD(10) rounds this up to 1536, a multiple of 256 bytes
+ */
+#define ENET_BRCM_TAG_LEN      4
+#define ENET_PAD               10
+#define UMAC_MAX_MTU_SIZE      (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+                                ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+
+/* Transmit status block */
+struct tsb {
+       u32 pcp_dei_vid;
+#define PCP_DEI_MASK           0xf
+#define VID_SHIFT              4
+#define VID_MASK               0xfff
+       u32 l4_ptr_dest_map;
+#define L4_CSUM_PTR_MASK       0x1ff
+#define L4_PTR_SHIFT           9
+#define L4_PTR_MASK            0x1ff
+#define L4_UDP                 (1 << 18)
+#define L4_LENGTH_VALID                (1 << 19)
+#define DEST_MAP_SHIFT         20
+#define DEST_MAP_MASK          0x1ff
+};
+
+/* Receive status block uses the same
+ * definitions as the DMA descriptor
+ */
+struct rsb {
+       u32 rx_status_len;
+       u32 brcm_egress_tag;
+};
+
+/* Common Receive/Transmit status bits */
+#define DESC_L4_CSUM           (1 << 7)
+#define DESC_SOP               (1 << 8)
+#define DESC_EOP               (1 << 9)
+
+/* Receive Status bits */
+#define RX_STATUS_UCAST                        0
+#define RX_STATUS_BCAST                        0x04
+#define RX_STATUS_MCAST                        0x08
+#define RX_STATUS_L2_MCAST             0x0c
+#define RX_STATUS_ERR                  (1 << 4)
+#define RX_STATUS_OVFLOW               (1 << 5)
+#define RX_STATUS_PARSE_FAIL           (1 << 6)
+
+/* Transmit Status bits */
+#define TX_STATUS_VLAN_NO_ACT          0x00
+#define TX_STATUS_VLAN_PCP_TSB         0x01
+#define TX_STATUS_VLAN_QUEUE           0x02
+#define TX_STATUS_VLAN_VID_TSB         0x03
+#define TX_STATUS_OWR_CRC              (1 << 2)
+#define TX_STATUS_APP_CRC              (1 << 3)
+#define TX_STATUS_BRCM_TAG_NO_ACT      0
+#define TX_STATUS_BRCM_TAG_ZERO                0x10
+#define TX_STATUS_BRCM_TAG_ONE_QUEUE   0x20
+#define TX_STATUS_BRCM_TAG_ONE_TSB     0x30
+#define TX_STATUS_SKIP_BYTES           (1 << 6)
+
+/* Specific register definitions */
+#define SYS_PORT_TOPCTRL_OFFSET                0
+#define REV_CNTL                       0x00
+#define  REV_MASK                      0xffff
+
+#define RX_FLUSH_CNTL                  0x04
+#define  RX_FLUSH                      (1 << 0)
+
+#define TX_FLUSH_CNTL                  0x08
+#define  TX_FLUSH                      (1 << 0)
+
+#define MISC_CNTL                      0x0c
+#define  SYS_CLK_SEL                   (1 << 0)
+#define  TDMA_EOP_SEL                  (1 << 1)
+
+/* Level-2 Interrupt controller offsets and defines */
+#define SYS_PORT_INTRL2_0_OFFSET       0x200
+#define SYS_PORT_INTRL2_1_OFFSET       0x240
+#define INTRL2_CPU_STATUS              0x00
+#define INTRL2_CPU_SET                 0x04
+#define INTRL2_CPU_CLEAR               0x08
+#define INTRL2_CPU_MASK_STATUS         0x0c
+#define INTRL2_CPU_MASK_SET            0x10
+#define INTRL2_CPU_MASK_CLEAR          0x14
+
+/* Level-2 instance 0 interrupt bits */
+#define INTRL2_0_GISB_ERR              (1 << 0)
+#define INTRL2_0_RBUF_OVFLOW           (1 << 1)
+#define INTRL2_0_TBUF_UNDFLOW          (1 << 2)
+#define INTRL2_0_MPD                   (1 << 3)
+#define INTRL2_0_BRCM_MATCH_TAG                (1 << 4)
+#define INTRL2_0_RDMA_MBDONE           (1 << 5)
+#define INTRL2_0_OVER_MAX_THRESH       (1 << 6)
+#define INTRL2_0_BELOW_HYST_THRESH     (1 << 7)
+#define INTRL2_0_FREE_LIST_EMPTY       (1 << 8)
+#define INTRL2_0_TX_RING_FULL          (1 << 9)
+#define INTRL2_0_DESC_ALLOC_ERR                (1 << 10)
+#define INTRL2_0_UNEXP_PKTSIZE_ACK     (1 << 11)
+
+/* RXCHK offset and defines */
+#define SYS_PORT_RXCHK_OFFSET          0x300
+
+#define RXCHK_CONTROL                  0x00
+#define  RXCHK_EN                      (1 << 0)
+#define  RXCHK_SKIP_FCS                        (1 << 1)
+#define  RXCHK_BAD_CSUM_DIS            (1 << 2)
+#define  RXCHK_BRCM_TAG_EN             (1 << 3)
+#define  RXCHK_BRCM_TAG_MATCH_SHIFT    4
+#define  RXCHK_BRCM_TAG_MATCH_MASK     0xff
+#define  RXCHK_PARSE_TNL               (1 << 12)
+#define  RXCHK_VIOL_EN                 (1 << 13)
+#define  RXCHK_VIOL_DIS                        (1 << 14)
+#define  RXCHK_INCOM_PKT               (1 << 15)
+#define  RXCHK_V6_DUPEXT_EN            (1 << 16)
+#define  RXCHK_V6_DUPEXT_DIS           (1 << 17)
+#define  RXCHK_ETHERTYPE_DIS           (1 << 18)
+#define  RXCHK_L2_HDR_DIS              (1 << 19)
+#define  RXCHK_L3_HDR_DIS              (1 << 20)
+#define  RXCHK_MAC_RX_ERR_DIS          (1 << 21)
+#define  RXCHK_PARSE_AUTH              (1 << 22)
+
+#define RXCHK_BRCM_TAG0                        0x04
+#define RXCHK_BRCM_TAG(i)              ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG0_MASK           0x24
+#define RXCHK_BRCM_TAG_MASK(i)         ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MATCH_STATUS    0x44
+#define RXCHK_ETHERTYPE                        0x48
+#define RXCHK_BAD_CSUM_CNTR            0x4C
+#define RXCHK_OTHER_DISC_CNTR          0x50
+
+/* TXCHCK offsets and defines */
+#define SYS_PORT_TXCHK_OFFSET          0x380
+#define TXCHK_PKT_RDY_THRESH           0x00
+
+/* Receive buffer offset and defines */
+#define SYS_PORT_RBUF_OFFSET           0x400
+
+#define RBUF_CONTROL                   0x00
+#define  RBUF_RSB_EN                   (1 << 0)
+#define  RBUF_4B_ALGN                  (1 << 1)
+#define  RBUF_BRCM_TAG_STRIP           (1 << 2)
+#define  RBUF_BAD_PKT_DISC             (1 << 3)
+#define  RBUF_RESUME_THRESH_SHIFT      4
+#define  RBUF_RESUME_THRESH_MASK       0xff
+#define  RBUF_OK_TO_SEND_SHIFT         12
+#define  RBUF_OK_TO_SEND_MASK          0xff
+#define  RBUF_CRC_REPLACE              (1 << 20)
+#define  RBUF_OK_TO_SEND_MODE          (1 << 21)
+#define  RBUF_RSB_SWAP                 (1 << 22)
+#define  RBUF_ACPI_EN                  (1 << 23)
+
+#define RBUF_PKT_RDY_THRESH            0x04
+
+#define RBUF_STATUS                    0x08
+#define  RBUF_WOL_MODE                 (1 << 0)
+#define  RBUF_MPD                      (1 << 1)
+#define  RBUF_ACPI                     (1 << 2)
+
+#define RBUF_OVFL_DISC_CNTR            0x0c
+#define RBUF_ERR_PKT_CNTR              0x10
+
+/* Transmit buffer offset and defines */
+#define SYS_PORT_TBUF_OFFSET           0x600
+
+#define TBUF_CONTROL                   0x00
+#define  TBUF_BP_EN                    (1 << 0)
+#define  TBUF_MAX_PKT_THRESH_SHIFT     1
+#define  TBUF_MAX_PKT_THRESH_MASK      0x1f
+#define  TBUF_FULL_THRESH_SHIFT                8
+#define  TBUF_FULL_THRESH_MASK         0x1f
+
+/* UniMAC offset and defines */
+#define SYS_PORT_UMAC_OFFSET           0x800
+
+#define UMAC_CMD                       0x008
+#define  CMD_TX_EN                     (1 << 0)
+#define  CMD_RX_EN                     (1 << 1)
+#define  CMD_SPEED_SHIFT               2
+#define  CMD_SPEED_10                  0
+#define  CMD_SPEED_100                 1
+#define  CMD_SPEED_1000                        2
+#define  CMD_SPEED_2500                        3
+#define  CMD_SPEED_MASK                        3
+#define  CMD_PROMISC                   (1 << 4)
+#define  CMD_PAD_EN                    (1 << 5)
+#define  CMD_CRC_FWD                   (1 << 6)
+#define  CMD_PAUSE_FWD                 (1 << 7)
+#define  CMD_RX_PAUSE_IGNORE           (1 << 8)
+#define  CMD_TX_ADDR_INS               (1 << 9)
+#define  CMD_HD_EN                     (1 << 10)
+#define  CMD_SW_RESET                  (1 << 13)
+#define  CMD_LCL_LOOP_EN               (1 << 15)
+#define  CMD_AUTO_CONFIG               (1 << 22)
+#define  CMD_CNTL_FRM_EN               (1 << 23)
+#define  CMD_NO_LEN_CHK                        (1 << 24)
+#define  CMD_RMT_LOOP_EN               (1 << 25)
+#define  CMD_PRBL_EN                   (1 << 27)
+#define  CMD_TX_PAUSE_IGNORE           (1 << 28)
+#define  CMD_TX_RX_EN                  (1 << 29)
+#define  CMD_RUNT_FILTER_DIS           (1 << 30)
+
+#define UMAC_MAC0                      0x00c
+#define UMAC_MAC1                      0x010
+#define UMAC_MAX_FRAME_LEN             0x014
+
+#define UMAC_TX_FLUSH                  0x334
+
+#define UMAC_MIB_START                 0x400
+
+/* There is a 0xC gap between the end of the RX stats and the beginning of the
+ * TX stats, and another between the end of the TX stats and the RX RUNT block
+ */
+#define UMAC_MIB_STAT_OFFSET           0xc
+
+#define UMAC_MIB_CTRL                  0x580
+#define  MIB_RX_CNT_RST                        (1 << 0)
+#define  MIB_RUNT_CNT_RST              (1 << 1)
+#define  MIB_TX_CNT_RST                        (1 << 2)
+#define UMAC_MDF_CTRL                  0x650
+#define UMAC_MDF_ADDR                  0x654
+
+/* Receive DMA offset and defines */
+#define SYS_PORT_RDMA_OFFSET           0x2000
+
+#define RDMA_CONTROL                   0x1000
+#define  RDMA_EN                       (1 << 0)
+#define  RDMA_RING_CFG                 (1 << 1)
+#define  RDMA_DISC_EN                  (1 << 2)
+#define  RDMA_BUF_DATA_OFFSET_SHIFT    4
+#define  RDMA_BUF_DATA_OFFSET_MASK     0x3ff
+
+#define RDMA_STATUS                    0x1004
+#define  RDMA_DISABLED                 (1 << 0)
+#define  RDMA_DESC_RAM_INIT_BUSY       (1 << 1)
+#define  RDMA_BP_STATUS                        (1 << 2)
+
+#define RDMA_SCB_BURST_SIZE            0x1008
+
+#define RDMA_RING_BUF_SIZE             0x100c
+#define  RDMA_RING_SIZE_SHIFT          16
+
+#define RDMA_WRITE_PTR_HI              0x1010
+#define RDMA_WRITE_PTR_LO              0x1014
+#define RDMA_PROD_INDEX                        0x1018
+#define  RDMA_PROD_INDEX_MASK          0xffff
+
+#define RDMA_CONS_INDEX                        0x101c
+#define  RDMA_CONS_INDEX_MASK          0xffff
+
+#define RDMA_START_ADDR_HI             0x1020
+#define RDMA_START_ADDR_LO             0x1024
+#define RDMA_END_ADDR_HI               0x1028
+#define RDMA_END_ADDR_LO               0x102c
+
+#define RDMA_MBDONE_INTR               0x1030
+#define  RDMA_INTR_THRESH_MASK         0xff
+#define  RDMA_TIMEOUT_SHIFT            16
+#define  RDMA_TIMEOUT_MASK             0xffff
+
+#define RDMA_XON_XOFF_THRESH           0x1034
+#define  RDMA_XON_XOFF_THRESH_MASK     0xffff
+#define  RDMA_XOFF_THRESH_SHIFT                16
+
+#define RDMA_READ_PTR_HI               0x1038
+#define RDMA_READ_PTR_LO               0x103c
+
+#define RDMA_OVERRIDE                  0x1040
+#define  RDMA_LE_MODE                  (1 << 0)
+#define  RDMA_REG_MODE                 (1 << 1)
+
+#define RDMA_TEST                      0x1044
+#define  RDMA_TP_OUT_SEL               (1 << 0)
+#define  RDMA_MEM_SEL                  (1 << 1)
+
+#define RDMA_DEBUG                     0x1048
+
+/* Transmit DMA offset and defines */
+#define TDMA_NUM_RINGS                 32      /* rings = queues */
+#define TDMA_PORT_SIZE                 DESC_SIZE /* two 32-bits words */
+
+#define SYS_PORT_TDMA_OFFSET           0x4000
+#define TDMA_WRITE_PORT_OFFSET         0x0000
+#define TDMA_WRITE_PORT_HI(i)          (TDMA_WRITE_PORT_OFFSET + \
+                                       (i) * TDMA_PORT_SIZE)
+#define TDMA_WRITE_PORT_LO(i)          (TDMA_WRITE_PORT_OFFSET + \
+                                       sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_OFFSET          (TDMA_WRITE_PORT_OFFSET + \
+                                       (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_HI(i)           (TDMA_READ_PORT_OFFSET + \
+                                       (i) * TDMA_PORT_SIZE)
+#define TDMA_READ_PORT_LO(i)           (TDMA_READ_PORT_OFFSET + \
+                                       sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_CMD_OFFSET      (TDMA_READ_PORT_OFFSET + \
+                                       (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_CMD(i)          (TDMA_READ_PORT_CMD_OFFSET + \
+                                       (i) * sizeof(u32))
+
+#define TDMA_DESC_RING_00_BASE         (TDMA_READ_PORT_CMD_OFFSET + \
+                                       (TDMA_NUM_RINGS * sizeof(u32)))
+
+/* Register offsets and defines relatives to a specific ring number */
+#define RING_HEAD_TAIL_PTR             0x00
+#define  RING_HEAD_MASK                        0x7ff
+#define  RING_TAIL_SHIFT               11
+#define  RING_TAIL_MASK                        0x7ff
+#define  RING_FLUSH                    (1 << 24)
+#define  RING_EN                       (1 << 25)
+
+#define RING_COUNT                     0x04
+#define  RING_COUNT_MASK               0x7ff
+#define  RING_BUFF_DONE_SHIFT          11
+#define  RING_BUFF_DONE_MASK           0x7ff
+
+#define RING_MAX_HYST                  0x08
+#define  RING_MAX_THRESH_MASK          0x7ff
+#define  RING_HYST_THRESH_SHIFT                11
+#define  RING_HYST_THRESH_MASK         0x7ff
+
+#define RING_INTR_CONTROL              0x0c
+#define  RING_INTR_THRESH_MASK         0x7ff
+#define  RING_EMPTY_INTR_EN            (1 << 15)
+#define  RING_TIMEOUT_SHIFT            16
+#define  RING_TIMEOUT_MASK             0xffff
+
+#define RING_PROD_CONS_INDEX           0x10
+#define  RING_PROD_INDEX_MASK          0xffff
+#define  RING_CONS_INDEX_SHIFT         16
+#define  RING_CONS_INDEX_MASK          0xffff
+
+#define RING_MAPPING                   0x14
+#define  RING_QID_MASK                 0x3
+#define  RING_PORT_ID_SHIFT            3
+#define  RING_PORT_ID_MASK             0x7
+#define  RING_IGNORE_STATUS            (1 << 6)
+#define  RING_FAILOVER_EN              (1 << 7)
+#define  RING_CREDIT_SHIFT             8
+#define  RING_CREDIT_MASK              0xffff
+
+#define RING_PCP_DEI_VID               0x18
+#define  RING_VID_MASK                 0x7ff
+#define  RING_DEI                      (1 << 12)
+#define  RING_PCP_SHIFT                        13
+#define  RING_PCP_MASK                 0x7
+#define  RING_PKT_SIZE_ADJ_SHIFT       16
+#define  RING_PKT_SIZE_ADJ_MASK                0xf
+
+#define TDMA_DESC_RING_SIZE            28
+
+/* Definition for a given TX ring base address */
+#define TDMA_DESC_RING_BASE(i)         (TDMA_DESC_RING_00_BASE + \
+                                       ((i) * TDMA_DESC_RING_SIZE))
+
+/* Ring indexed register addresses */
+#define TDMA_DESC_RING_HEAD_TAIL_PTR(i)        (TDMA_DESC_RING_BASE(i) + \
+                                       RING_HEAD_TAIL_PTR)
+#define TDMA_DESC_RING_COUNT(i)                (TDMA_DESC_RING_BASE(i) + \
+                                       RING_COUNT)
+#define TDMA_DESC_RING_MAX_HYST(i)     (TDMA_DESC_RING_BASE(i) + \
+                                       RING_MAX_HYST)
+#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
+                                       RING_INTR_CONTROL)
+#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
+                                       (TDMA_DESC_RING_BASE(i) + \
+                                       RING_PROD_CONS_INDEX)
+#define TDMA_DESC_RING_MAPPING(i)      (TDMA_DESC_RING_BASE(i) + \
+                                       RING_MAPPING)
+#define TDMA_DESC_RING_PCP_DEI_VID(i)  (TDMA_DESC_RING_BASE(i) + \
+                                       RING_PCP_DEI_VID)
+
+#define TDMA_CONTROL                   0x600
+#define  TDMA_EN                       (1 << 0)
+#define  TSB_EN                                (1 << 1)
+#define  TSB_SWAP                      (1 << 2)
+#define  ACB_ALGO                      (1 << 3)
+#define  BUF_DATA_OFFSET_SHIFT         4
+#define  BUF_DATA_OFFSET_MASK          0x3ff
+#define  VLAN_EN                       (1 << 14)
+#define  SW_BRCM_TAG                   (1 << 15)
+#define  WNC_KPT_SIZE_UPDATE           (1 << 16)
+#define  SYNC_PKT_SIZE                 (1 << 17)
+#define  ACH_TXDONE_DELAY_SHIFT                18
+#define  ACH_TXDONE_DELAY_MASK         0xff
+
+#define TDMA_STATUS                    0x604
+#define  TDMA_DISABLED                 (1 << 0)
+#define  TDMA_LL_RAM_INIT_BUSY         (1 << 1)
+
+#define TDMA_SCB_BURST_SIZE            0x608
+#define TDMA_OVER_MAX_THRESH_STATUS    0x60c
+#define TDMA_OVER_HYST_THRESH_STATUS   0x610
+#define TDMA_TPID                      0x614
+
+#define TDMA_FREE_LIST_HEAD_TAIL_PTR   0x618
+#define  TDMA_FREE_HEAD_MASK           0x7ff
+#define  TDMA_FREE_TAIL_SHIFT          11
+#define  TDMA_FREE_TAIL_MASK           0x7ff
+
+#define TDMA_FREE_LIST_COUNT           0x61c
+#define  TDMA_FREE_LIST_COUNT_MASK     0x7ff
+
+#define TDMA_TIER2_ARB_CTRL            0x620
+#define  TDMA_ARB_MODE_RR              0
+#define  TDMA_ARB_MODE_WEIGHT_RR       0x1
+#define  TDMA_ARB_MODE_STRICT          0x2
+#define  TDMA_ARB_MODE_DEFICIT_RR      0x3
+#define  TDMA_CREDIT_SHIFT             4
+#define  TDMA_CREDIT_MASK              0xffff
+
+#define TDMA_TIER1_ARB_0_CTRL          0x624
+#define  TDMA_ARB_EN                   (1 << 0)
+
+#define TDMA_TIER1_ARB_0_QUEUE_EN      0x628
+#define TDMA_TIER1_ARB_1_CTRL          0x62c
+#define TDMA_TIER1_ARB_1_QUEUE_EN      0x630
+#define TDMA_TIER1_ARB_2_CTRL          0x634
+#define TDMA_TIER1_ARB_2_QUEUE_EN      0x638
+#define TDMA_TIER1_ARB_3_CTRL          0x63c
+#define TDMA_TIER1_ARB_3_QUEUE_EN      0x640
+
+#define TDMA_SCB_ENDIAN_OVERRIDE       0x644
+#define  TDMA_LE_MODE                  (1 << 0)
+#define  TDMA_REG_MODE                 (1 << 1)
+
+#define TDMA_TEST                      0x648
+#define  TDMA_TP_OUT_SEL               (1 << 0)
+#define  TDMA_MEM_TM                   (1 << 1)
+
+#define TDMA_DEBUG                     0x64c
+
+/* Transmit/Receive descriptor */
+struct dma_desc {
+       u32     addr_status_len;
+       u32     addr_lo;
+};
+
+/* Number of Receive hardware descriptor words */
+#define NUM_HW_RX_DESC_WORDS           1024
+/* Real number of usable descriptors */
+#define NUM_RX_DESC                    (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+
+/* Internal linked-list RAM has up to 1536 entries */
+#define NUM_TX_DESC                    1536
+
+#define WORDS_PER_DESC                 (sizeof(struct dma_desc) / sizeof(u32))
+
+/* Rx/Tx common counter group */
+struct bcm_sysport_pkt_counters {
+       u32     cnt_64;         /* RO Received/Transmitted 64 bytes packet */
+       u32     cnt_127;        /* RO Rx/Tx 65-127 bytes packet */
+       u32     cnt_255;        /* RO Rx/Tx 128-255 bytes packet */
+       u32     cnt_511;        /* RO Rx/Tx 256-511 bytes packet */
+       u32     cnt_1023;       /* RO Rx/Tx 512-1023 bytes packet */
+       u32     cnt_1518;       /* RO Rx/Tx 1024-1518 bytes packet */
+       u32     cnt_mgv;        /* RO Rx/Tx 1519-1522 good VLAN packet */
+       u32     cnt_2047;       /* RO Rx/Tx 1522-2047 bytes packet */
+       u32     cnt_4095;       /* RO Rx/Tx 2048-4095 bytes packet */
+       u32     cnt_9216;       /* RO Rx/Tx 4096-9216 bytes packet */
+};
+
+/* RSV, Receive Status Vector */
+struct bcm_sysport_rx_counters {
+       struct  bcm_sysport_pkt_counters pkt_cnt;
+       u32     pkt;            /* RO (0x428) Received pkt count */
+       u32     bytes;          /* RO Received byte count */
+       u32     mca;            /* RO # of Received multicast pkt */
+       u32     bca;            /* RO # of Received broadcast pkt */
+       u32     fcs;            /* RO # of Received FCS error */
+       u32     cf;             /* RO # of Received control frame pkt */
+       u32     pf;             /* RO # of Received pause frame pkt */
+       u32     uo;             /* RO # of unknown op code pkt */
+       u32     aln;            /* RO # of alignment error count */
+       u32     flr;            /* RO # of frame length out of range count */
+       u32     cde;            /* RO # of code error pkt */
+       u32     fcr;            /* RO # of carrier sense error pkt */
+       u32     ovr;            /* RO # of oversize pkt */
+       u32     jbr;            /* RO # of jabber count */
+       u32     mtue;           /* RO # of MTU error pkt */
+       u32     pok;            /* RO # of Received good pkt */
+       u32     uc;             /* RO # of unicast pkt */
+       u32     ppp;            /* RO # of PPP pkt */
+       u32     rcrc;           /* RO (0x470) # of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcm_sysport_tx_counters {
+       struct bcm_sysport_pkt_counters pkt_cnt;
+       u32     pkts;           /* RO (0x4a8) Transmitted pkt */
+       u32     mca;            /* RO # of xmited multicast pkt */
+       u32     bca;            /* RO # of xmited broadcast pkt */
+       u32     pf;             /* RO # of xmited pause frame count */
+       u32     cf;             /* RO # of xmited control frame count */
+       u32     fcs;            /* RO # of xmited FCS error count */
+       u32     ovr;            /* RO # of xmited oversize pkt */
+       u32     drf;            /* RO # of xmited deferral pkt */
+       u32     edf;            /* RO # of xmited excessive deferral pkt */
+       u32     scl;            /* RO # of xmited single collision pkt */
+       u32     mcl;            /* RO # of xmited multiple collision pkt */
+       u32     lcl;            /* RO # of xmited late collision pkt */
+       u32     ecl;            /* RO # of xmited excessive collision pkt */
+       u32     frg;            /* RO # of xmited fragments pkt */
+       u32     ncl;            /* RO # of xmited total collision count */
+       u32     jbr;            /* RO # of xmited jabber count */
+       u32     bytes;          /* RO # of xmited byte count */
+       u32     pok;            /* RO # of xmited good pkt */
+       u32     uc;             /* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcm_sysport_mib {
+       struct bcm_sysport_rx_counters rx;
+       struct bcm_sysport_tx_counters tx;
+       u32 rx_runt_cnt;
+       u32 rx_runt_fcs;
+       u32 rx_runt_fcs_align;
+       u32 rx_runt_bytes;
+       u32 rxchk_bad_csum;
+       u32 rxchk_other_pkt_disc;
+       u32 rbuf_ovflow_cnt;
+       u32 rbuf_err_cnt;
+};
+
+/* HW maintains a large list of counters */
+enum bcm_sysport_stat_type {
+       BCM_SYSPORT_STAT_NETDEV = -1,
+       BCM_SYSPORT_STAT_MIB_RX,
+       BCM_SYSPORT_STAT_MIB_TX,
+       BCM_SYSPORT_STAT_RUNT,
+       BCM_SYSPORT_STAT_RXCHK,
+       BCM_SYSPORT_STAT_RBUF,
+};
+
+/* Macros to help define ethtool statistics */
+#define STAT_NETDEV(m) { \
+       .stat_string = __stringify(m), \
+       .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+       .stat_offset = offsetof(struct net_device_stats, m), \
+       .type = BCM_SYSPORT_STAT_NETDEV, \
+}
+
+#define STAT_MIB(str, m, _type) { \
+       .stat_string = str, \
+       .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+       .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+       .type = _type, \
+}
+
+#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
+#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
+#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+
+#define STAT_RXCHK(str, m, ofs) { \
+       .stat_string = str, \
+       .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+       .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+       .type = BCM_SYSPORT_STAT_RXCHK, \
+       .reg_offset = ofs, \
+}
+
+#define STAT_RBUF(str, m, ofs) { \
+       .stat_string = str, \
+       .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+       .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+       .type = BCM_SYSPORT_STAT_RBUF, \
+       .reg_offset = ofs, \
+}
+
+struct bcm_sysport_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int stat_sizeof;
+       int stat_offset;
+       enum bcm_sysport_stat_type type;
+       /* reg offset from UMAC base for misc counters */
+       u16 reg_offset;
+};
+
+/* Software housekeeping helper structure */
+struct bcm_sysport_cb {
+       struct sk_buff  *skb;           /* SKB for RX packets */
+       void __iomem    *bd_addr;       /* Buffer descriptor PHYS addr */
+
+       DEFINE_DMA_UNMAP_ADDR(dma_addr);
+       DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* Software view of the TX ring */
+struct bcm_sysport_tx_ring {
+       spinlock_t      lock;           /* Ring lock for tx reclaim/xmit */
+       struct napi_struct napi;        /* NAPI per tx queue */
+       dma_addr_t      desc_dma;       /* DMA cookie */
+       unsigned int    index;          /* Ring index */
+       unsigned int    size;           /* Ring current size */
+       unsigned int    alloc_size;     /* Ring one-time allocated size */
+       unsigned int    desc_count;     /* Number of descriptors */
+       unsigned int    curr_desc;      /* Current descriptor */
+       unsigned int    c_index;        /* Last consumer index */
+       unsigned int    p_index;        /* Current producer index */
+       struct bcm_sysport_cb *cbs;     /* Transmit control blocks */
+       struct dma_desc *desc_cpu;      /* CPU view of the descriptor */
+       struct bcm_sysport_priv *priv;  /* private context backpointer */
+};
+
+/* Driver private structure */
+struct bcm_sysport_priv {
+       void __iomem            *base;
+       u32                     irq0_stat;
+       u32                     irq0_mask;
+       u32                     irq1_stat;
+       u32                     irq1_mask;
+       struct napi_struct      napi ____cacheline_aligned;
+       struct net_device       *netdev;
+       struct platform_device  *pdev;
+       int                     irq0;
+       int                     irq1;
+
+       /* Transmit rings */
+       struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+
+       /* Receive queue */
+       void __iomem            *rx_bds;
+       void __iomem            *rx_bd_assign_ptr;
+       unsigned int            rx_bd_assign_index;
+       struct bcm_sysport_cb   *rx_cbs;
+       unsigned int            num_rx_bds;
+       unsigned int            rx_read_ptr;
+       unsigned int            rx_c_index;
+
+       /* PHY device */
+       struct device_node      *phy_dn;
+       struct phy_device       *phydev;
+       phy_interface_t         phy_interface;
+       int                     old_pause;
+       int                     old_link;
+       int                     old_duplex;
+
+       /* Misc fields */
+       unsigned int            rx_csum_en:1;
+       unsigned int            tsb_en:1;
+       unsigned int            crc_fwd:1;
+       u16                     rev;
+
+       /* MIB related fields */
+       struct bcm_sysport_mib  mib;
+
+       /* Ethtool */
+       u32                     msg_enable;
+};
+#endif /* __BCM_SYSPORT_H */
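
The STAT_NETDEV/STAT_MIB_RX/STAT_MIB_TX/STAT_RUNT helpers in the header above are meant to be expanded into a table of struct bcm_sysport_stats entries that the driver's ethtool code can walk. The C sketch below shows one way such a table could look; the entry names and the particular counters chosen are illustrative assumptions based only on the structures visible in this header, not the table the driver actually ships.

/* Minimal sketch (not the driver's actual table): expanding the STAT_*
 * helpers into an ethtool statistics array.  Every referenced field exists
 * in the structures defined in the header above; the string names are
 * illustrative assumptions.
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* Generic counters taken from struct net_device_stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_errors),
	/* Hardware MIB counters mirrored in struct bcm_sysport_priv.mib */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	/* Runt counters kept in software */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
};

Keeping the enum bcm_sysport_stat_type in each entry lets the ethtool code decide at runtime whether a value comes from struct net_device_stats, from the MIB mirror in struct bcm_sysport_priv, or from a register read at reg_offset.
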
index 0297a79a38e16312c7fe24bfd7d1992b980a8f85..05c6af6c418fa45690d085885b0cca330b58c210 100644 (file)
@@ -1436,7 +1436,7 @@ static int bgmac_probe(struct bcma_device *core)
                return -ENOMEM;
        net_dev->netdev_ops = &bgmac_netdev_ops;
        net_dev->irq = core->irq;
-       SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+       net_dev->ethtool_ops = &bgmac_ethtool_ops;
        bgmac = netdev_priv(net_dev);
        bgmac->net_dev = net_dev;
        bgmac->core = core;
index 4d8f8aba0ea5d93be5e288a31da7fc12903af71f..4cab09d3f80729a2bd843cf6b0b6490f0a8e95ad 100644 (file)
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  */
index 9261d5313b5be2bd361612640535fbf9c2810438..d18441ebe944cd3697f5988221d0e051dac73d3f 100644 (file)
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
index 3448cc033ca55c33d4f8b002b2828d88b6185cd5..571427c7226b11f4c22669ce8d8f14c81950a10d 100644 (file)
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
index 97ea5421dd96f41bc3daf7a899ef9f1ddaab0931..51a952c51cb1a5fda4fbb3807fc9d73da8710087 100644 (file)
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Dmitry Kravkov
  *
  */
index 804b8f64463e80a1fcb45f51bda976b4d8544062..c6939ecb02c572fd41fa5d2c814ed5ad932c26e7 100644 (file)
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Dmitry Kravkov
  *
  */
index b6de05e3149b5604d818d5496cbbc23ab7bf64e8..5203a8924edf765376b53ac80669ff7223fb7b44 100644 (file)
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
@@ -3506,8 +3506,6 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
 
 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
 {
-       if (IS_PF(bp))
-               SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
-       else /* vf */
-               SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
+       netdev->ethtool_ops = (IS_PF(bp)) ?
+               &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
 }
index f572ae164fce4d49317ca752e2cd0eaac1895ae3..8aafd9b5d6a2b107f67a8cce344b4bb479ccc85c 100644 (file)
@@ -6,8 +6,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
  * Based on the original idea of John Wright <john.wright@hp.com>.
  */
 
index c2dfea7968f452defdca6fd4b1ea68758381038e..bd90e50bd8e662d4731b61f82ba9fe06071429a1 100644 (file)
@@ -7,9 +7,9 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
- * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Modified by: Vladislav Zolotarov
  */
 
 #ifndef BNX2X_INIT_H
index 8ab0dd90096085b33f4ee831f6487c3766b164af..5669ed2e87d0039ab02c3bc70636359e49c811e6 100644 (file)
@@ -8,8 +8,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
  */
 
 #ifndef BNX2X_INIT_OPS_H
index a78edaccceee92d8f2439ac40f3b3ba887ec0000..ff2bdd80f0aa8eb6fe8bebd9c36dce9834cf751e 100644 (file)
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR      (0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR      (0x08)
 #define BCM_5710_UNDI_FW_MF_VERS       (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p)     (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f)     (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 {
        u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* Reset should be performed after BRB is emptied */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
                u32 timer_count = 1000;
+               bool need_write = true;
 
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                         * cleaning methods - might be redundant but harmless.
                         */
                        if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-                               bnx2x_prev_unload_undi_mf(bp);
+                               if (need_write) {
+                                       bnx2x_prev_unload_undi_mf(bp);
+                                       need_write = false;
+                               }
                        } else if (prev_undi) {
                                /* If UNDI resides in memory,
                                 * manually increment it
@@ -13233,6 +13237,8 @@ static void __bnx2x_remove(struct pci_dev *pdev,
                                iounmap(bp->doorbells);
 
                        bnx2x_release_firmware(bp);
+               } else {
+                       bnx2x_vf_pci_dealloc(bp);
                }
                bnx2x_free_mem_bp(bp);
 
index 31297266b743e27fa527da4636bc893b1c64cc56..736264b5fc3bef7e72ed83cca3a2704d88189324 100644 (file)
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Vladislav Zolotarov
  *
  */
index 80f6c790ed88097ed17b3c3f259179e86451eff2..718ecd2946616195cc92d53bd2f6498d525f14fe 100644 (file)
@@ -12,7 +12,7 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Vladislav Zolotarov
  *
  */
index 5c523b32db70126720dbf0b2914dcbb1a3391a2b..a93c7af7afe610fa2b45667653f3ecd6c985f3c6 100644 (file)
@@ -12,9 +12,9 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- *            Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ *            Ariel Elior <ariel.elior@qlogic.com>
  *
  */
 #include "bnx2x.h"
@@ -427,7 +427,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
        if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
            (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
             vf_vlan_rules_cnt(vf))) {
-               BNX2X_ERR("No credits for vlan\n");
+               BNX2X_ERR("No credits for vlan [%d >= %d]\n",
+                         atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
+                         vf_vlan_rules_cnt(vf));
                return -ENOMEM;
        }
 
@@ -610,6 +612,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
                }
 
                /* add new mcasts */
+               mcast.mcast_list_len = mc_num;
                rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
                if (rc)
                        BNX2X_ERR("Faled to add multicasts\n");
@@ -837,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
        return 0;
 }
 
+static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
+                                         struct bnx2x_virtf *vf,
+                                         int new)
+{
+       int num = vf_vlan_rules_cnt(vf);
+       int diff = new - num;
+       bool rc = true;
+
+       DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
+          vf->abs_vfid, new, num);
+
+       if (diff > 0)
+               rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
+       else if (diff < 0)
+               rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
+
+       if (rc)
+               vf_vlan_rules_cnt(vf) = new;
+       else
+               DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
+                  vf->abs_vfid);
+}
+
 /* must be called after the number of PF queues and the number of VFs are
  * both known
  */
@@ -854,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
        resc->num_mac_filters = 1;
 
        /* divvy up vlan rules */
+       bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
        vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
        vlan_count = 1 << ilog2(vlan_count);
-       resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+       bnx2x_iov_re_set_vlan_filters(bp, vf,
+                                     vlan_count / BNX2X_NR_VIRTFN(bp));
 
        /* no real limitation */
        resc->num_mc_filters = 0;
@@ -1478,10 +1506,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
                bnx2x_iov_static_resc(bp, vf);
 
                /* queues are initialized during VF-ACQUIRE */
-
-               /* reserve the vf vlan credit */
-               bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
-
                vf->filter_state = 0;
                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
 
@@ -1912,11 +1936,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
        u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
        u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 
+       /* Save a vlan filter for the Hypervisor */
        return ((req_resc->num_rxqs <= rxq_cnt) &&
                (req_resc->num_txqs <= txq_cnt) &&
                (req_resc->num_sbs <= vf_sb_count(vf))   &&
                (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
-               (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+               (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
 }
 
 /* CORE VF API */
@@ -1972,14 +1997,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
        vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
        if (resc->num_mac_filters)
                vf_mac_rules_cnt(vf) = resc->num_mac_filters;
-       if (resc->num_vlan_filters)
-               vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+       /* Add an additional vlan filter credit for the hypervisor */
+       bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
 
        DP(BNX2X_MSG_IOV,
           "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
           vf_sb_count(vf), vf_rxq_count(vf),
           vf_txq_count(vf), vf_mac_rules_cnt(vf),
-          vf_vlan_rules_cnt(vf));
+          vf_vlan_rules_visible_cnt(vf));
 
        /* Initialize the queues */
        if (!vf->vfqs) {
@@ -2551,7 +2576,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
 
        ivi->vf = vfidx;
        ivi->qos = 0;
-       ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+       ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
+       ivi->min_tx_rate = 0;
        ivi->spoofchk = 1; /*always enabled */
        if (vf->state == VF_ENABLED) {
                /* mac and vlan are in vlan_mac objects */
@@ -2670,7 +2696,7 @@ out:
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
        }
 
-       return 0;
+       return rc;
 }
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
@@ -2896,6 +2922,14 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
        return bp->regview + PXP_VF_ADDR_DB_START;
 }
 
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
+{
+       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+                      sizeof(struct bnx2x_vf_mbx_msg));
+       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
+                      sizeof(union pf_vf_bulletin));
+}
+
 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 {
        mutex_init(&bp->vf2pf_mutex);
@@ -2915,10 +2949,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
        return 0;
 
 alloc_mem_err:
-       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
-                      sizeof(struct bnx2x_vf_mbx_msg));
-       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
-                      sizeof(union pf_vf_bulletin));
+       bnx2x_vf_pci_dealloc(bp);
        return -ENOMEM;
 }
 
index 8bf764570eef773eafa87ffd0fca26592e4d64ef..96c575e147a5b14da2b67e9ea25a044a4a882d07 100644 (file)
@@ -12,9 +12,9 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- *            Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ *            Ariel Elior <ariel.elior@qlogic.com>
  */
 #ifndef BNX2X_SRIOV_H
 #define BNX2X_SRIOV_H
@@ -159,6 +159,8 @@ struct bnx2x_virtf {
 #define vf_mac_rules_cnt(vf)           ((vf)->alloc_resc.num_mac_filters)
 #define vf_vlan_rules_cnt(vf)          ((vf)->alloc_resc.num_vlan_filters)
 #define vf_mc_rules_cnt(vf)            ((vf)->alloc_resc.num_mc_filters)
+       /* Hide a single vlan filter credit for the hypervisor */
+#define vf_vlan_rules_visible_cnt(vf)  (vf_vlan_rules_cnt(vf) - 1)
 
        u8 sb_count;    /* actual number of SBs */
        u8 igu_base_id; /* base igu status block id */
@@ -502,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
 void bnx2x_timer_sriov(struct bnx2x *bp);
 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
 int bnx2x_vf_pci_alloc(struct bnx2x *bp);
 int bnx2x_enable_sriov(struct bnx2x *bp);
 void bnx2x_disable_sriov(struct bnx2x *bp);
@@ -568,6 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
        return NULL;
 }
 
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
 static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
index 3b75070411aab83136ac2c245ba9c65682aefebe..ca47665f94bf76fd6c9b2c1eb920b5aba7743124 100644 (file)
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
index f35845006cdd8a74c57d99d069939548188256bb..2beceaefdeea7aa5ac53f6a3028cd0fbcacbc51a 100644 (file)
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
index 0622884596b2f478ec4a2789c17fdd0938544995..d712d0ddd719bd4dd3a37b25736dfdccf156084c 100644 (file)
@@ -12,9 +12,9 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- *            Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ *            Ariel Elior <ariel.elior@qlogic.com>
  */
 
 #include "bnx2x.h"
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-       return 0;
+       return rc;
 }
 
 /* request pf to config rss table for vf queues*/
@@ -1163,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_sbs = vf_sb_count(vf);
                resc->num_mac_filters = vf_mac_rules_cnt(vf);
-               resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+               resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
                resc->num_mc_filters = 0;
 
                if (status == PFVF_STATUS_SUCCESS) {
index c922b81170e5bc20c4ff69d34d16d40f853c9d8a..e21e706762c9964ad917e69b7149700a458f74fc 100644 (file)
@@ -12,8 +12,8 @@
  * license other than the GPL, without Broadcom's express prior written
  * consent.
  *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Ariel Elior <ariel.elior@qlogic.com>
  */
 #ifndef VF_PF_IF_H
 #define VF_PF_IF_H
index 0966bd04375f1aa0384d4196b368bdf9345dc1f9..5ba1cfbd60da3555878fa8fd467c3a9a36c03642 100644 (file)
@@ -2481,7 +2481,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, dev);
        ether_addr_copy(dev->dev_addr, macaddr);
        dev->watchdog_timeo = 2 * HZ;
-       SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+       dev->ethtool_ops = &bcmgenet_ethtool_ops;
        dev->netdev_ops = &bcmgenet_netdev_ops;
        netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
 
index 4608673beaff9f7682d669e4cb7a540fed0c0f04..add8d8596084054ca1e059a360be4a1d24501122 100644 (file)
@@ -298,6 +298,7 @@ int bcmgenet_mii_config(struct net_device *dev)
 static int bcmgenet_mii_probe(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device_node *dn = priv->pdev->dev.of_node;
        struct phy_device *phydev;
        unsigned int phy_flags;
        int ret;
@@ -307,15 +308,19 @@ static int bcmgenet_mii_probe(struct net_device *dev)
                return 0;
        }
 
-       if (priv->phy_dn)
-               phydev = of_phy_connect(dev, priv->phy_dn,
-                                       bcmgenet_mii_setup, 0,
-                                       priv->phy_interface);
-       else
-               phydev = of_phy_connect_fixed_link(dev,
-                                       bcmgenet_mii_setup,
-                                       priv->phy_interface);
+       /* In the case of a fixed PHY, the DT node associated
+        * with the PHY is the Ethernet MAC DT node.
+        */
+       if (of_phy_is_fixed_link(dn)) {
+               ret = of_phy_register_fixed_link(dn);
+               if (ret)
+                       return ret;
+
+               priv->phy_dn = dn;
+       }
 
+       phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0,
+                               priv->phy_interface);
        if (!phydev) {
                pr_err("could not attach to PHY\n");
                return -ENODEV;
index e5d95c5ce1ad8df29075dcc243650a0e15aa896d..3b74da5f48a14c9f5ef9e97be29ed235ac07a22d 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2013 Broadcom Corporation.
+ * Copyright (C) 2005-2014 Broadcom Corporation.
  *
  * Firmware is:
  *     Derived from proprietary unpublished source code,
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    136
+#define TG3_MIN_NUM                    137
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "Jan 03, 2014"
+#define DRV_MODULE_RELDATE     "May 11, 2014"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
        return 0;
 }
 
-#define NVRAM_CMD_TIMEOUT 10000
+#define NVRAM_CMD_TIMEOUT 100
 
 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 {
@@ -7871,9 +7871,7 @@ tg3_tso_bug_end:
        return NETDEV_TX_OK;
 }
 
-/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
- * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
- */
+/* hard_start_xmit for all devices */
 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
@@ -7884,6 +7882,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int last;
+       struct iphdr *iph = NULL;
+       struct tcphdr *tcph = NULL;
+       __sum16 tcp_csum = 0, ip_csum = 0;
+       __be16 ip_tot_len = 0;
 
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -7915,7 +7917,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
-               struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;
 
                if (skb_cow_head(skb, 0))
@@ -7927,27 +7928,31 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
                if (!skb_is_gso_v6(skb)) {
+                       if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+                           tg3_flag(tp, TSO_BUG))
+                               return tg3_tso_bug(tp, skb);
+
+                       ip_csum = iph->check;
+                       ip_tot_len = iph->tot_len;
                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }
 
-               if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-                   tg3_flag(tp, TSO_BUG))
-                       return tg3_tso_bug(tp, skb);
-
                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);
 
+               tcph = tcp_hdr(skb);
+               tcp_csum = tcph->check;
+
                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
-                       tcp_hdr(skb)->check = 0;
+                       tcph->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
-               } else
-                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                                iph->daddr, 0,
-                                                                IPPROTO_TCP,
-                                                                0);
+               } else {
+                       tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                        0, IPPROTO_TCP, 0);
+               }
 
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
@@ -8047,6 +8052,18 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
+               if (mss) {
+                       /* If it's a TSO packet, do GSO instead of
+                        * allocating and copying to a large linear SKB
+                        */
+                       if (ip_tot_len) {
+                               iph->check = ip_csum;
+                               iph->tot_len = ip_tot_len;
+                       }
+                       tcph->check = tcp_csum;
+                       return tg3_tso_bug(tp, skb);
+               }
+
                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
@@ -11876,9 +11893,9 @@ static int tg3_get_eeprom_len(struct net_device *dev)
 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
 {
        struct tg3 *tp = netdev_priv(dev);
-       int ret;
+       int ret, cpmu_restore = 0;
        u8  *pd;
-       u32 i, offset, len, b_offset, b_count;
+       u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
        __be32 val;
 
        if (tg3_flag(tp, NO_NVRAM))
@@ -11890,6 +11907,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 
        eeprom->magic = TG3_EEPROM_MAGIC;
 
+       /* Override clock, link aware and link idle modes */
+       if (tg3_flag(tp, CPMU_PRESENT)) {
+               cpmu_val = tr32(TG3_CPMU_CTRL);
+               if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
+                               CPMU_CTRL_LINK_IDLE_MODE)) {
+                       tw32(TG3_CPMU_CTRL, cpmu_val &
+                                           ~(CPMU_CTRL_LINK_AWARE_MODE |
+                                            CPMU_CTRL_LINK_IDLE_MODE));
+                       cpmu_restore = 1;
+               }
+       }
+       tg3_override_clk(tp);
+
        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
@@ -11900,7 +11930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                }
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
                if (ret)
-                       return ret;
+                       goto eeprom_done;
                memcpy(data, ((char *)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
@@ -11912,10 +11942,20 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read_be32(tp, offset + i, &val);
                if (ret) {
+                       if (i)
+                               i -= 4;
                        eeprom->len += i;
-                       return ret;
+                       goto eeprom_done;
                }
                memcpy(pd + i, &val, 4);
+               if (need_resched()) {
+                       if (signal_pending(current)) {
+                               eeprom->len += i;
+                               ret = -EINTR;
+                               goto eeprom_done;
+                       }
+                       cond_resched();
+               }
        }
        eeprom->len += i;
 
@@ -11926,11 +11966,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read_be32(tp, b_offset, &val);
                if (ret)
-                       return ret;
+                       goto eeprom_done;
                memcpy(pd, &val, b_count);
                eeprom->len += b_count;
        }
-       return 0;
+       ret = 0;
+
+eeprom_done:
+       /* Restore clock, link aware and link idle modes */
+       tg3_restore_clk(tp);
+       if (cpmu_restore)
+               tw32(TG3_CPMU_CTRL, cpmu_val);
+
+       return ret;
 }
 
 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
index 04321e5a356e45a0f7fc642f3817035d983dbd90..461accaf0aa40242c3756880dd6659371cdfe5f0 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2013 Broadcom Corporation.
+ * Copyright (C) 2007-2014 Broadcom Corporation.
  */
 
 #ifndef _T3_H
index f9e150825bb58bf0ef9e3568a1084cb8ebc61d37..adca62b728371d9b9e1e70e68c8870c02398eb00 100644 (file)
@@ -1137,5 +1137,5 @@ static const struct ethtool_ops bnad_ethtool_ops = {
 void
 bnad_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+       netdev->ethtool_ops = &bnad_ethtool_ops;
 }
index 7e49c43b7af3501f953cac4796e4769da542c29d..9e089d24466e65fb6c8b01c08f2ece882e23aaaa 100644 (file)
@@ -4,7 +4,7 @@
 
 config NET_CADENCE
        bool "Cadence devices"
-       depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
+       depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
        default y
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
 
 config MACB
        tristate "Cadence MACB/GEM support"
-       depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
+       depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
        select PHYLIB
        ---help---
          The Cadence MACB ethernet interface is found on many Atmel AT32 and
index ca97005e24b41217849beaf4b9a578fbaf1f2027..e9daa072ebb4f2c03af453255a9c374bde6ff47c 100644 (file)
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
 {
        unsigned int            entry;
        struct sk_buff          *skb;
-       struct macb_dma_desc    *desc;
        dma_addr_t              paddr;
 
        while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
-               u32 addr, ctrl;
-
                entry = macb_rx_ring_wrap(bp->rx_prepared_head);
-               desc = &bp->rx_ring[entry];
 
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
-               addr = desc->addr;
-               ctrl = desc->ctrl;
                bp->rx_prepared_head++;
 
-               if ((addr & MACB_BIT(RX_USED)))
-                       continue;
-
                if (bp->rx_skbuff[entry] == NULL) {
                        /* allocate sk_buff for this free entry in ring */
                        skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
                if (!(addr & MACB_BIT(RX_USED)))
                        break;
 
-               desc->addr &= ~MACB_BIT(RX_USED);
                bp->rx_tail++;
                count++;
 
@@ -891,16 +881,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
        if (work_done < budget) {
                napi_complete(napi);
 
-               /*
-                * We've done what we can to clean the buffers. Make sure we
-                * get notified when new packets arrive.
-                */
-               macb_writel(bp, IER, MACB_RX_INT_FLAGS);
-
                /* Packets received while interrupts were disabled */
                status = macb_readl(bp, RSR);
-               if (unlikely(status))
+               if (status) {
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_BIT(RCOMP));
                        napi_reschedule(napi);
+               } else {
+                       macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+               }
        }
 
        /* TODO: Handle errors */
@@ -951,6 +940,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
                        macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
                        schedule_work(&bp->tx_error_task);
+
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+
                        break;
                }
 
@@ -968,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                                bp->hw_stats.gem.rx_overruns++;
                        else
                                bp->hw_stats.macb.rx_overruns++;
+
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
                }
 
                if (status & MACB_BIT(HRESP)) {
@@ -977,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                         * (work queue?)
                         */
                        netdev_err(dev, "DMA bus error: HRESP not OK\n");
+
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_BIT(HRESP));
                }
 
                status = macb_readl(bp, ISR);
@@ -1113,7 +1112,7 @@ static void gem_free_rx_buffers(struct macb *bp)
 
                desc = &bp->rx_ring[i];
                addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-               dma_unmap_single(&bp->pdev->dev, addr, skb->len,
+               dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = NULL;
index 521dfea44b837d57bc7a7297ac973cd4d8098d3e..25d6b2a10e4e6f7fb6b09d25706e11e2783bbea4 100644 (file)
@@ -1737,7 +1737,7 @@ static int xgmac_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, ndev);
        ether_setup(ndev);
        ndev->netdev_ops = &xgmac_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+       ndev->ethtool_ops = &xgmac_ethtool_ops;
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
 
index d40c994a4f6a2c807965e44eaf81f2be0053fb50..570222c3341070445b3b0c224205872f286a3cfd 100644 (file)
@@ -67,13 +67,13 @@ config CHELSIO_T3
          will be called cxgb3.
 
 config CHELSIO_T4
-       tristate "Chelsio Communications T4 Ethernet support"
+       tristate "Chelsio Communications T4/T5 Ethernet support"
        depends on PCI
        select FW_LOADER
        select MDIO
        ---help---
-         This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
-         adapters.
+         This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+         adapters and T5 based 40Gb Ethernet adapters.
 
          For general information about Chelsio and our products, visit
          our website at <http://www.chelsio.com>.
@@ -87,11 +87,12 @@ config CHELSIO_T4
          will be called cxgb4.
 
 config CHELSIO_T4VF
-       tristate "Chelsio Communications T4 Virtual Function Ethernet support"
+       tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
        depends on PCI
        ---help---
-         This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
-         adapters with PCI-E SR-IOV Virtual Functions.
+         This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+         adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
+         Functions.
 
          For general information about Chelsio and our products, visit
          our website at <http://www.chelsio.com>.
index 0fe7ff750d77e1618a9fd49a59bbf9577ed9190d..c1b2c1dbf015accf381c1f010610dcac625f618e 100644 (file)
@@ -1100,7 +1100,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
 
-               SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+               netdev->ethtool_ops = &t1_ethtool_ops;
        }
 
        if (t1_init_sw_modules(adapter, bi) < 0) {
index 07bbb711b7e5a716aba3e8d2e3e958e2ce8fa506..3ed50794724892979a969e7eefa33e6de2f074aa 100644 (file)
@@ -3291,7 +3291,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        netdev->features |= NETIF_F_HIGHDMA;
 
                netdev->netdev_ops = &cxgb_netdev_ops;
-               SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+               netdev->ethtool_ops = &cxgb_ethtool_ops;
        }
 
        pci_set_drvdata(pdev, adapter);
index c0a9dd55f4e55215bb0e42902c12c97b241d0d3f..b0cbb2b7fd484f95ec36574feef7981753a552e9 100644 (file)
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
                if (ether_addr_equal(dev->dev_addr, mac)) {
                        rcu_read_lock();
                        if (vlan && vlan != VLAN_VID_MASK) {
-                               dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
+                               dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
                        } else if (netif_is_bond_slave(dev)) {
                                struct net_device *upper_dev;
 
index 6fe58913403ab24f61ddf7f7e8d02714e040d250..266a5bc6aedff349f6089aeb839bff668531c6d2 100644 (file)
@@ -2252,12 +2252,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
                cmd->port = PORT_FIBRE;
-       else if (p->port_type == FW_PORT_TYPE_SFP) {
-               if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
-                   p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+       else if (p->port_type == FW_PORT_TYPE_SFP ||
+                p->port_type == FW_PORT_TYPE_QSFP_10G ||
+                p->port_type == FW_PORT_TYPE_QSFP) {
+               if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+                   p->mod_type == FW_PORT_MOD_TYPE_SR ||
+                   p->mod_type == FW_PORT_MOD_TYPE_ER ||
+                   p->mod_type == FW_PORT_MOD_TYPE_LRM)
+                       cmd->port = PORT_FIBRE;
+               else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+                        p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        cmd->port = PORT_DA;
                else
-                       cmd->port = PORT_FIBRE;
+                       cmd->port = PORT_OTHER;
        } else
                cmd->port = PORT_OTHER;
 
@@ -4061,7 +4068,7 @@ static int update_root_dev_clip(struct net_device *dev)
 
        /* Parse all bond and vlan devices layered on top of the physical dev */
        for (i = 0; i < VLAN_N_VID; i++) {
-               root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
+               root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
                if (!root_dev)
                        continue;
 
@@ -5870,6 +5877,8 @@ static void print_port_info(const struct net_device *dev)
                spd = " 2.5 GT/s";
        else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
                spd = " 5 GT/s";
+       else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
+               spd = " 8 GT/s";
 
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
                bufp += sprintf(bufp, "100/");
@@ -6074,7 +6083,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                netdev->priv_flags |= IFF_UNICAST_FLT;
 
                netdev->netdev_ops = &cxgb4_netdev_ops;
-               SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+               netdev->ethtool_ops = &cxgb_ethtool_ops;
        }
 
        pci_set_drvdata(pdev, adapter);
index ca95cf2954eb33f62719130a8b0432fbb324c2b6..cced1a3d5181337ea5560918370d5b3ebcaa5237 100644 (file)
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                return handle_trace_pkt(q->adap, si);
 
        pkt = (const struct cpl_rx_pkt *)rsp;
-       csum_ok = pkt->csum_calc && !pkt->err_vec;
+       csum_ok = pkt->csum_calc && !pkt->err_vec &&
+                 (q->netdev->features & NETIF_F_RXCSUM);
        if ((pkt->l2info & htonl(RXF_TCP)) &&
            (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
                do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 
        rxq->stats.pkts++;
 
-       if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
-           (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+       if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
                if (!pkt->ip_frag) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        rxq->stats.rx_cso++;
index 52859288de7b4d5b8c550c9a1e53fe5845225da0..ff1cdd1788b5f62efdf03ffd2f501b65054383a9 100644 (file)
@@ -2664,7 +2664,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
                netdev->priv_flags |= IFF_UNICAST_FLT;
 
                netdev->netdev_ops = &cxgb4vf_netdev_ops;
-               SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
+               netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
 
                /*
                 * Initialize the hardware/software state for the port.
index 9cfa4b4bb089d398a1b687a71d32f0856d15fa47..adebbf849cdbfab8c3f639a97ae80077aec84493 100644 (file)
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 {
        struct sk_buff *skb;
        const struct cpl_rx_pkt *pkt = (void *)rsp;
-       bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+       bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+                      (rspq->netdev->features & NETIF_F_RXCSUM);
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
        /*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        skb_record_rx_queue(skb, rspq->idx);
        rxq->stats.pkts++;
 
-       if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
-           !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+       if (csum_ok && !pkt->err_vec &&
+           (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
                if (!pkt->ip_frag)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else {
index e35c8e0202adda08dec6d53586bdd9f7da89a2e9..f23ef321606ca1326b02507139f953274964b84d 100644 (file)
@@ -43,6 +43,8 @@
 #define ENIC_CQ_MAX            (ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX          (ENIC_CQ_MAX + 2)
 
+#define ENIC_AIC_LARGE_PKT_DIFF        3
+
 struct enic_msix_entry {
        int requested;
        char devname[IFNAMSIZ];
@@ -50,6 +52,33 @@ struct enic_msix_entry {
        void *devid;
 };
 
+/* Store only the lower range.  Higher range is given by fw. */
+struct enic_intr_mod_range {
+       u32 small_pkt_range_start;
+       u32 large_pkt_range_start;
+};
+
+struct enic_intr_mod_table {
+       u32 rx_rate;
+       u32 range_percent;
+};
+
+#define ENIC_MAX_LINK_SPEEDS           3
+#define ENIC_LINK_SPEED_10G            10000
+#define ENIC_LINK_SPEED_4G             4000
+#define ENIC_LINK_40G_INDEX            2
+#define ENIC_LINK_10G_INDEX            1
+#define ENIC_LINK_4G_INDEX             0
+#define ENIC_RX_COALESCE_RANGE_END     125
+#define ENIC_AIC_TS_BREAK              100
+
+struct enic_rx_coal {
+       u32 small_pkt_range_start;
+       u32 large_pkt_range_start;
+       u32 range_end;
+       u32 use_adaptive_rx_coalesce;
+};
+
 /* priv_flags */
 #define ENIC_SRIOV_ENABLED             (1 << 0)
 
@@ -92,6 +121,7 @@ struct enic {
        unsigned int mc_count;
        unsigned int uc_count;
        u32 port_mtu;
+       struct enic_rx_coal rx_coalesce_setting;
        u32 rx_coalesce_usecs;
        u32 tx_coalesce_usecs;
 #ifdef CONFIG_PCI_IOV
index 47e3562f48667232ad16be0e57cda9c618fac430..1882db230e139e506bfb1efc5248ac211ec76c2b 100644 (file)
@@ -79,6 +79,17 @@ static const struct enic_stat enic_rx_stats[] = {
 static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
 static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
 
+void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
+{
+       int i;
+       int intr;
+
+       for (i = 0; i < enic->rq_count; i++) {
+               intr = enic_msix_rq_intr(enic, i);
+               vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+       }
+}
+
 static int enic_get_settings(struct net_device *netdev,
        struct ethtool_cmd *ecmd)
 {
@@ -178,9 +189,14 @@ static int enic_get_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
 {
        struct enic *enic = netdev_priv(netdev);
+       struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
        ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
        ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+       if (rxcoal->use_adaptive_rx_coalesce)
+               ecmd->use_adaptive_rx_coalesce = 1;
+       ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
+       ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
 
        return 0;
 }
@@ -191,17 +207,31 @@ static int enic_set_coalesce(struct net_device *netdev,
        struct enic *enic = netdev_priv(netdev);
        u32 tx_coalesce_usecs;
        u32 rx_coalesce_usecs;
+       u32 rx_coalesce_usecs_low;
+       u32 rx_coalesce_usecs_high;
+       u32 coalesce_usecs_max;
        unsigned int i, intr;
+       struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
+       coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
        tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
-               vnic_dev_get_intr_coal_timer_max(enic->vdev));
+                                 coalesce_usecs_max);
        rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
-               vnic_dev_get_intr_coal_timer_max(enic->vdev));
+                                 coalesce_usecs_max);
+
+       rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
+                                     coalesce_usecs_max);
+       rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
+                                      coalesce_usecs_max);
 
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;
+               if (ecmd->use_adaptive_rx_coalesce      ||
+                   ecmd->rx_coalesce_usecs_low         ||
+                   ecmd->rx_coalesce_usecs_high)
+                       return -EOPNOTSUPP;
 
                intr = enic_legacy_io_intr();
                vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -210,6 +240,10 @@ static int enic_set_coalesce(struct net_device *netdev,
        case VNIC_DEV_INTR_MODE_MSI:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;
+               if (ecmd->use_adaptive_rx_coalesce      ||
+                   ecmd->rx_coalesce_usecs_low         ||
+                   ecmd->rx_coalesce_usecs_high)
+                       return -EOPNOTSUPP;
 
                vnic_intr_coalescing_timer_set(&enic->intr[0],
                        tx_coalesce_usecs);
@@ -221,12 +255,27 @@ static int enic_set_coalesce(struct net_device *netdev,
                                tx_coalesce_usecs);
                }
 
-               for (i = 0; i < enic->rq_count; i++) {
-                       intr = enic_msix_rq_intr(enic, i);
-                       vnic_intr_coalescing_timer_set(&enic->intr[intr],
-                               rx_coalesce_usecs);
+               if (rxcoal->use_adaptive_rx_coalesce) {
+                       if (!ecmd->use_adaptive_rx_coalesce) {
+                               rxcoal->use_adaptive_rx_coalesce = 0;
+                               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+                       }
+               } else {
+                       if (ecmd->use_adaptive_rx_coalesce)
+                               rxcoal->use_adaptive_rx_coalesce = 1;
+                       else
+                               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
                }
 
+               if (ecmd->rx_coalesce_usecs_high) {
+                       if (rx_coalesce_usecs_high <
+                           (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+                               return -EINVAL;
+                       rxcoal->range_end = rx_coalesce_usecs_high;
+                       rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+                       rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+                                                       ENIC_AIC_LARGE_PKT_DIFF;
+               }
                break;
        default:
                break;
@@ -253,5 +302,5 @@ static const struct ethtool_ops enic_ethtool_ops = {
 
 void enic_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+       netdev->ethtool_ops = &enic_ethtool_ops;
 }
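
The set_coalesce path above enforces two rules: the adaptive RX knobs (use_adaptive_rx_coalesce and the low/high bounds) are accepted only in MSI-X mode, and when rx_coalesce_usecs_high is supplied it must exceed rx_coalesce_usecs_low by at least ENIC_AIC_LARGE_PKT_DIFF so the small-packet and large-packet ranges cannot collapse. A standalone sketch of that validation, assuming the surrounding driver state is reduced to plain parameters:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define ENIC_AIC_LARGE_PKT_DIFF 3

/* Returns 0 on success or a negative errno, mirroring the checks made by
 * enic_set_coalesce(); 'msix' says whether the device runs in MSI-X mode. */
static int check_rx_coalesce(bool msix, bool use_adaptive,
                             uint32_t usecs_low, uint32_t usecs_high)
{
        if (!msix && (use_adaptive || usecs_low || usecs_high))
                return -EOPNOTSUPP;     /* adaptive knobs require MSI-X */

        if (usecs_high &&
            usecs_high < usecs_low + ENIC_AIC_LARGE_PKT_DIFF)
                return -EINVAL;         /* ranges would overlap */

        return 0;
}
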
index 2945718ce8068e4355628852ed200d539c9c2273..d5a220d5bad1e811cd2e2afd8b70708fd34c2b50 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/prefetch.h>
 #include <net/ip6_checksum.h>
+#include <linux/ktime.h>
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -72,6 +73,35 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, enic_id_table);
 
+#define ENIC_LARGE_PKT_THRESHOLD               1000
+#define ENIC_MAX_COALESCE_TIMERS               10
+/*  Interrupt moderation table, which will be used to decide the
+ *  coalescing timer values
+ *  {rx_rate in Mbps, mapping percentage of the range}
+ */
+struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
+       {4000,  0},
+       {4400, 10},
+       {5060, 20},
+       {5230, 30},
+       {5540, 40},
+       {5820, 50},
+       {6120, 60},
+       {6435, 70},
+       {6745, 80},
+       {7000, 90},
+       {0xFFFFFFFF, 100}
+};
+
+/* This table helps the driver pick different ranges for the rx coalescing
+ * timer depending on the link speed.
+ */
+struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+       {0,  0}, /* 0  - 4  Gbps */
+       {0,  3}, /* 4  - 10 Gbps */
+       {3,  6}, /* 10 - 40 Gbps */
+};
+
 int enic_is_dynamic(struct enic *enic)
 {
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -979,6 +1009,15 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
        return 0;
 }
 
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+                                     u32 pkt_len)
+{
+       if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
+               pkt_size->large_pkt_bytes_cnt += pkt_len;
+       else
+               pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
 static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
@@ -986,6 +1025,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct enic *enic = vnic_dev_priv(rq->vdev);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
 
        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1056,6 +1096,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                        napi_gro_receive(&enic->napi[q_number], skb);
                else
                        netif_receive_skb(skb);
+               if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+                       enic_intr_update_pkt_size(&cq->pkt_size_counter,
+                                                 bytes_written);
        } else {
 
                /* Buffer overflow
@@ -1134,6 +1177,64 @@ static int enic_poll(struct napi_struct *napi, int budget)
        return rq_work_done;
 }
 
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+       unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       u32 timer = cq->tobe_rx_coal_timeval;
+
+       if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+               vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+               cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+       }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+       struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+       int index;
+       u32 timer;
+       u32 range_start;
+       u32 traffic;
+       u64 delta;
+       ktime_t now = ktime_get();
+
+       delta = ktime_us_delta(now, cq->prev_ts);
+       if (delta < ENIC_AIC_TS_BREAK)
+               return;
+       cq->prev_ts = now;
+
+       traffic = pkt_size_counter->large_pkt_bytes_cnt +
+                 pkt_size_counter->small_pkt_bytes_cnt;
+       /* The table takes Mbps
+        * traffic *= 8    => bits
+        * traffic *= (10^6 / delta)    => bps
+        * traffic /= 10^6     => Mbps
+        *
+        * Combining, traffic *= (8 / delta)
+        */
+
+       traffic <<= 3;
+       traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
+
+       for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+               if (traffic < mod_table[index].rx_rate)
+                       break;
+       range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+                      pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+                     rx_coal->small_pkt_range_start :
+                     rx_coal->large_pkt_range_start;
+       timer = range_start + ((rx_coal->range_end - range_start) *
+                              mod_table[index].range_percent / 100);
+       /* Damping */
+       cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+       pkt_size_counter->large_pkt_bytes_cnt = 0;
+       pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
+
 static int enic_poll_msix(struct napi_struct *napi, int budget)
 {
        struct net_device *netdev = napi->dev;
@@ -1171,6 +1272,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 
        if (err)
                work_done = work_to_do;
+       if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+               /* Refresh the intr coalescing timer value based on
+                * the observed traffic.  This is supported only in
+                * MSI-X mode.
+                */
+               enic_calc_int_moderation(enic, &enic->rq[rq]);
 
        if (work_done < work_to_do) {
 
@@ -1179,6 +1287,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
                 */
 
                napi_complete(napi);
+               if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+                       enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
        }
 
@@ -1314,6 +1424,42 @@ static void enic_synchronize_irqs(struct enic *enic)
        }
 }
 
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+       unsigned int speed;
+       int index = -1;
+       struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+       /* If intr mode is not MSIX, do not do adaptive coalescing */
+       if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
+               netdev_info(enic->netdev, "INTR mode is not MSIX, not initializing adaptive coalescing");
+               return;
+       }
+
+       /* 1. Read the link speed from fw
+        * 2. Pick the default range for the speed
+        * 3. Update it in enic->rx_coalesce_setting
+        */
+       speed = vnic_dev_port_speed(enic->vdev);
+       if (ENIC_LINK_SPEED_10G < speed)
+               index = ENIC_LINK_40G_INDEX;
+       else if (ENIC_LINK_SPEED_4G < speed)
+               index = ENIC_LINK_10G_INDEX;
+       else
+               index = ENIC_LINK_4G_INDEX;
+
+       rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+       rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+       rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+       /* Start with the value provided by UCSM */
+       for (index = 0; index < enic->rq_count; index++)
+               enic->cq[index].cur_rx_coal_timeval =
+                               enic->config.intr_timer_usec;
+
+       rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
 static int enic_dev_notify_set(struct enic *enic)
 {
        int err;
@@ -2231,6 +2377,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        enic->notify_timer.function = enic_notify_timer;
        enic->notify_timer.data = (unsigned long)enic;
 
+       enic_set_rx_coal_setting(enic);
        INIT_WORK(&enic->reset, enic_reset);
        INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
 
@@ -2250,6 +2397,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+       /* The rx coalesce time is already initialized; it is used
+        * when adaptive coalescing is turned off.
+        */
        enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
 
        if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
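
To make the arithmetic in enic_calc_int_moderation() concrete: the byte counters accumulated since the last sample are converted to an approximate rate in Mbps (bytes * 8 / delta_usecs), that rate selects a row of the moderation table, and the row's percentage interpolates a timer value inside either the small-packet or large-packet range; the result is then damped against the previous value. A standalone sketch of the computation, using a trimmed-down table whose values are illustrative rather than the driver's full table:

#include <stdint.h>

struct mod_entry { uint32_t rx_rate_mbps; uint32_t range_percent; };

static const struct mod_entry table[] = {
        { 4000, 0 }, { 5500, 50 }, { 7000, 90 }, { UINT32_MAX, 100 },
};

/* bytes: traffic seen since the last sample; delta_us: elapsed microseconds;
 * range_start/range_end: coalescing bounds in usecs; prev: previous timer. */
static uint32_t calc_coal_timer(uint64_t bytes, uint64_t delta_us,
                                uint32_t range_start, uint32_t range_end,
                                uint32_t prev)
{
        uint64_t mbps = delta_us ? (bytes * 8) / delta_us : 0;
        unsigned int i;
        uint32_t timer;

        for (i = 0; table[i].rx_rate_mbps != UINT32_MAX; i++)
                if (mbps < table[i].rx_rate_mbps)
                        break;

        timer = range_start +
                (range_end - range_start) * table[i].range_percent / 100;

        return (timer + prev) / 2;      /* damping, as in the driver */
}
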
index 579315cbe803c91130c978b5e21538240aff3018..4e6aa65857f70a0b7236decf382c3018c137517c 100644 (file)
@@ -50,6 +50,11 @@ struct vnic_cq_ctrl {
        u32 pad10;
 };
 
+struct vnic_rx_bytes_counter {
+       unsigned int small_pkt_bytes_cnt;
+       unsigned int large_pkt_bytes_cnt;
+};
+
 struct vnic_cq {
        unsigned int index;
        struct vnic_dev *vdev;
@@ -58,6 +63,10 @@ struct vnic_cq {
        unsigned int to_clean;
        unsigned int last_color;
        unsigned int interrupt_offset;
+       struct vnic_rx_bytes_counter pkt_size_counter;
+       unsigned int cur_rx_coal_timeval;
+       unsigned int tobe_rx_coal_timeval;
+       ktime_t prev_ts;
 };
 
 static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
index 1642de78aac84c86b51cbae27c16d9748d70fb19..861660841ce281c23a5790e62942a779998b5209 100644 (file)
@@ -1703,7 +1703,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #ifdef CONFIG_TULIP_NAPI
        netif_napi_add(dev, &tp->napi, tulip_poll, 16);
 #endif
-       SET_ETHTOOL_OPS(dev, &ops);
+       dev->ethtool_ops = &ops;
 
        if (register_netdev(dev))
                goto err_out_free_ring;
index 4fb756d219f700bfb82a37d62e9cb078fca22024..2324f2ddfd4821b991006dffedf213a1dad61fcf 100644 (file)
@@ -227,7 +227,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
 #if 0
        dev->features = NETIF_F_IP_CSUM;
 #endif
index d9e5ca0d48c125c88e55b319975fcab5e0a4ad99..433c1e18544250bd76dddb3e1e243d8b90ce3b73 100644 (file)
@@ -577,7 +577,7 @@ static int sundance_probe1(struct pci_dev *pdev,
 
        /* The chip-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644 (file)
index 0000000..056b44b
--- /dev/null
@@ -0,0 +1,706 @@
+/*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
+ * It can be found on Beckhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC    20000
+
+#define INFO_BLOCK_SIZE                0x10
+#define INFO_BLOCK_TYPE                0x0
+#define INFO_BLOCK_REV         0x2
+#define INFO_BLOCK_BLK_CNT     0x4
+#define INFO_BLOCK_TX_CHAN     0x4
+#define INFO_BLOCK_RX_CHAN     0x5
+#define INFO_BLOCK_OFFSET      0x8
+
+#define EC_MII_OFFSET          0x4
+#define EC_FIFO_OFFSET         0x8
+#define EC_MAC_OFFSET          0xc
+
+#define MAC_FRAME_ERR_CNT      0x0
+#define MAC_RX_ERR_CNT         0x1
+#define MAC_CRC_ERR_CNT                0x2
+#define MAC_LNK_LST_ERR_CNT    0x3
+#define MAC_TX_FRAME_CNT       0x10
+#define MAC_RX_FRAME_CNT       0x14
+#define MAC_TX_FIFO_LVL                0x20
+#define MAC_DROPPED_FRMS       0x28
+#define MAC_CONNECTED_CCAT_FLAG        0x78
+
+#define MII_MAC_ADDR           0x8
+#define MII_MAC_FILT_FLAG      0xe
+#define MII_LINK_STATUS                0xf
+
+#define FIFO_TX_REG            0x0
+#define FIFO_TX_RESET          0x8
+#define FIFO_RX_REG            0x10
+#define FIFO_RX_ADDR_VALID     (1u << 31)
+#define FIFO_RX_RESET          0x18
+
+#define DMA_CHAN_OFFSET                0x1000
+#define DMA_CHAN_SIZE          0x8
+
+#define DMA_WINDOW_SIZE_MASK   0xfffffffc
+
+static struct pci_device_id ids[] = {
+       { PCI_DEVICE(0x15ec, 0x5000), },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK   0xffffffu
+#define RXHDR_NEXT_VALID       (1u << 31)
+       __le32 next;
+#define RXHDR_NEXT_RECV_FLAG   0x1
+       __le32 recv;
+#define RXHDR_LEN_MASK         0xfffu
+       __le16 len;
+       __le16 port;
+       __le32 reserved;
+       u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE       0x7e8
+struct rx_desc {
+       struct rx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+       __le16 len;
+#define TX_HDR_PORT_0          0x1
+#define TX_HDR_PORT_1          0x2
+       u8 port;
+       u8 ts_enable;
+#define TX_HDR_SENT            0x1
+       __le32 sent;
+       u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+       struct tx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE              64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+       u8 *buf;
+       size_t len;
+       dma_addr_t buf_phys;
+
+       u8 *alloc;
+       size_t alloc_len;
+       dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+       struct net_device *net_dev;
+
+       struct pci_dev *dev;
+
+       void __iomem *io;
+       void __iomem *dma_io;
+
+       struct hrtimer hrtimer;
+
+       int tx_dma_chan;
+       int rx_dma_chan;
+       void __iomem *ec_io;
+       void __iomem *fifo_io;
+       void __iomem *mii_io;
+       void __iomem *mac_io;
+
+       struct bhf_dma rx_buf;
+       struct rx_desc *rx_descs;
+       int rx_dnext;
+       int rx_dcount;
+
+       struct bhf_dma tx_buf;
+       struct tx_desc *tx_descs;
+       int tx_dcount;
+       int tx_dnext;
+
+       u64 stat_rx_bytes;
+       u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID     0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       dev_dbg(dev, "Frame error counter: %d\n",
+               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+       dev_dbg(dev, "RX error counter: %d\n",
+               ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+       dev_dbg(dev, "CRC error counter: %d\n",
+               ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+       dev_dbg(dev, "TX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+       dev_dbg(dev, "RX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+       dev_dbg(dev, "TX fifo level: %d\n",
+               ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+       dev_dbg(dev, "Dropped frames: %d\n",
+               ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+       dev_dbg(dev, "Connected with CCAT slot: %d\n",
+               ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+       dev_dbg(dev, "Link status: %d\n",
+               ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+       iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+       iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+       iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+       iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+       iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+       iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+       iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+       u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+       u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+       iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+       dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+       return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+       if (unlikely(netif_queue_stopped(priv->net_dev))) {
+               /* Make sure that we perceive changes to tx_dnext. */
+               smp_rmb();
+
+               if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+                       netif_wake_queue(priv->net_dev);
+       }
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+       return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+       iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+                 priv->fifo_io + FIFO_RX_REG);
+}
+
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+       struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       while (ec_bhf_pkt_received(desc)) {
+               int pkt_size = (le16_to_cpu(desc->header.len) &
+                              RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+               u8 *data = desc->data;
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+               dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+               if (skb) {
+                       memcpy(skb_put(skb, pkt_size), data, pkt_size);
+                       skb->protocol = eth_type_trans(skb, priv->net_dev);
+                       dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+                       priv->stat_rx_bytes += pkt_size;
+
+                       netif_rx(skb);
+               } else {
+                       dev_err_ratelimited(dev,
+                               "Couldn't allocate an sk_buff for a packet of size %u\n",
+                               pkt_size);
+               }
+
+               desc->header.recv = 0;
+
+               ec_bhf_add_rx_desc(priv, desc);
+
+               priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+               desc = &priv->rx_descs[priv->rx_dnext];
+       }
+
+}
+
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+       struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+                                               hrtimer);
+       ec_bhf_process_rx(priv);
+       ec_bhf_process_tx(priv);
+
+       if (!netif_running(priv->net_dev))
+               return HRTIMER_NORESTART;
+
+       hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+       return HRTIMER_RESTART;
+}
+
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+       unsigned block_count, i;
+       void __iomem *ec_info;
+
+       dev_dbg(dev, "Info block:\n");
+       dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+       dev_dbg(dev, "Revision of function: %x\n",
+               (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+       block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+       dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+       for (i = 0; i < block_count; i++) {
+               u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+                                   INFO_BLOCK_TYPE);
+               if (type == ETHERCAT_MASTER_ID)
+                       break;
+       }
+       if (i == block_count) {
+               dev_err(dev, "EtherCAT master with DMA block not found\n");
+               return -ENODEV;
+       }
+       dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+       ec_info = priv->io + i * INFO_BLOCK_SIZE;
+       dev_dbg(dev, "EtherCAT master revision: %d\n",
+               ioread16(ec_info + INFO_BLOCK_REV));
+
+       priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+       dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+               priv->tx_dma_chan);
+
+       priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+       dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+                priv->rx_dma_chan);
+
+       priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+       priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+       priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+       priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+       dev_dbg(dev,
+               "EtherCAT block address: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+               priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+       return 0;
+}
+
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+                                    struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct tx_desc *desc;
+       unsigned len;
+
+       dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+       desc = &priv->tx_descs[priv->tx_dnext];
+
+       skb_copy_and_csum_dev(skb, desc->data);
+       len = skb->len;
+
+       memset(&desc->header, 0, sizeof(desc->header));
+       desc->header.len = cpu_to_le16(len);
+       desc->header.port = TX_HDR_PORT_0;
+
+       ec_bhf_send_packet(priv, desc);
+
+       priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+       if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+               /* Make sure that updates to tx_dnext are perceived
+                * by the timer routine.
+                */
+               smp_wmb();
+
+               netif_stop_queue(net_dev);
+
+               dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+               ec_bhf_print_status(priv);
+       }
+
+       priv->stat_tx_bytes += len;
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+                               struct bhf_dma *buf,
+                               int channel,
+                               int size)
+{
+       int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+       struct device *dev = PRIV_TO_DEV(priv);
+       u32 mask;
+
+       iowrite32(0xffffffff, priv->dma_io + offset);
+
+       mask = ioread32(priv->dma_io + offset);
+       mask &= DMA_WINDOW_SIZE_MASK;
+       dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+       /* We want to allocate a chunk of memory that is:
+        * - aligned to the mask we just read
+        * - at most 2^mask bytes in size
+        * To ensure both, we allocate a buffer of
+        * 2 * 2^mask bytes.
+        */
+       buf->len = min_t(int, ~mask + 1, size);
+       buf->alloc_len = 2 * buf->len;
+
+       dev_dbg(dev, "Allocating %d bytes for channel %d",
+               (int)buf->alloc_len, channel);
+       buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+                                       GFP_KERNEL);
+       if (buf->alloc == NULL) {
+               dev_info(dev, "Failed to allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+       buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+       iowrite32(0, priv->dma_io + offset + 4);
+       iowrite32(buf->buf_phys, priv->dma_io + offset);
+       dev_dbg(dev, "Buffer: %x and read from dev: %x",
+               (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+       return 0;
+}
+
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+       int i = 0;
+
+       priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+       priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+       priv->tx_dnext = 0;
+
+       for (i = 0; i < priv->tx_dcount; i++)
+               priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+       int i;
+
+       priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+       priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+       priv->rx_dnext = 0;
+
+       for (i = 0; i < priv->rx_dcount; i++) {
+               struct rx_desc *desc = &priv->rx_descs[i];
+               u32 next;
+
+               if (i != priv->rx_dcount - 1)
+                       next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+               else
+                       next = 0;
+               next |= RXHDR_NEXT_VALID;
+               desc->header.next = cpu_to_le32(next);
+               desc->header.recv = 0;
+               ec_bhf_add_rx_desc(priv, desc);
+       }
+}
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+       int err = 0;
+
+       dev_info(dev, "Opening device\n");
+
+       ec_bhf_reset(priv);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct rx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate rx buffer\n");
+               goto out;
+       }
+       ec_bhf_setup_rx_descs(priv);
+
+       dev_info(dev, "RX buffer allocated, address: %x\n",
+                (unsigned)priv->rx_buf.buf_phys);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct tx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate tx buffer\n");
+               goto error_rx_free;
+       }
+       dev_dbg(dev, "TX buffer allocated, address: %x\n",
+               (unsigned)priv->tx_buf.buf_phys);
+
+       iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+       ec_bhf_setup_tx_descs(priv);
+
+       netif_start_queue(net_dev);
+
+       hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       priv->hrtimer.function = ec_bhf_timer_fun;
+       hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+                     HRTIMER_MODE_REL);
+
+       dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+       ec_bhf_print_status(priv);
+
+       return 0;
+
+error_rx_free:
+       dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+                         priv->rx_buf.alloc_len);
+out:
+       return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       hrtimer_cancel(&priv->hrtimer);
+
+       ec_bhf_reset(priv);
+
+       netif_tx_disable(net_dev);
+
+       dma_free_coherent(dev, priv->tx_buf.alloc_len,
+                         priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+       dma_free_coherent(dev, priv->rx_buf.alloc_len,
+                         priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+       return 0;
+}
+
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+                struct rtnl_link_stats64 *stats)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+       stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+       stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+       stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+       stats->tx_bytes = priv->stat_tx_bytes;
+       stats->rx_bytes = priv->stat_rx_bytes;
+
+       return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+       .ndo_start_xmit         = ec_bhf_start_xmit,
+       .ndo_open               = ec_bhf_open,
+       .ndo_stop               = ec_bhf_stop,
+       .ndo_get_stats64        = ec_bhf_get_stats,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr
+};
+
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+       struct net_device *net_dev;
+       struct ec_bhf_priv *priv;
+       void __iomem *dma_io;
+       void __iomem *io;
+       int err = 0;
+
+       err = pci_enable_device(dev);
+       if (err)
+               return err;
+
+       pci_set_master(dev);
+
+       err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               err = -EIO;
+               goto err_disable_dev;
+       }
+
+       err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               goto err_disable_dev;
+       }
+
+       err = pci_request_regions(dev, "ec_bhf");
+       if (err) {
+               dev_err(&dev->dev, "Failed to request pci memory regions\n");
+               goto err_disable_dev;
+       }
+
+       io = pci_iomap(dev, 0, 0);
+       if (!io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+               err = -EIO;
+               goto err_release_regions;
+       }
+
+       dma_io = pci_iomap(dev, 2, 0);
+       if (!dma_io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+               err = -EIO;
+               goto err_unmap;
+       }
+
+       net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+       if (net_dev == NULL) {
+               err = -ENOMEM;
+               goto err_unmap_dma_io;
+       }
+
+       pci_set_drvdata(dev, net_dev);
+       SET_NETDEV_DEV(net_dev, &dev->dev);
+
+       net_dev->features = 0;
+       net_dev->flags |= IFF_NOARP;
+
+       net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+       priv = netdev_priv(net_dev);
+       priv->net_dev = net_dev;
+       priv->io = io;
+       priv->dma_io = dma_io;
+       priv->dev = dev;
+
+       err = ec_bhf_setup_offsets(priv);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+       dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+               net_dev->dev_addr);
+
+       err = register_netdev(net_dev);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       return 0;
+
+err_free_net_dev:
+       free_netdev(net_dev);
+err_unmap_dma_io:
+       pci_iounmap(dev, dma_io);
+err_unmap:
+       pci_iounmap(dev, io);
+err_release_regions:
+       pci_release_regions(dev);
+err_disable_dev:
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+
+       return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+       struct net_device *net_dev = pci_get_drvdata(dev);
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       unregister_netdev(net_dev);
+       free_netdev(net_dev);
+
+       pci_iounmap(dev, priv->dma_io);
+       pci_iounmap(dev, priv->io);
+       pci_release_regions(dev);
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+       .name           = "ec_bhf",
+       .id_table       = ids,
+       .probe          = ec_bhf_probe,
+       .remove         = ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+       return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+       pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
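
The DMA setup in ec_bhf_alloc_dma_mem() above relies on a common trick: write all-ones to the channel's address register, read back what the hardware actually latched to discover the window mask, then over-allocate twice the window so an address aligned to that mask is guaranteed to fall inside the allocation. A standalone sketch of the alignment arithmetic only; the allocation itself is out of scope here since dma_alloc_coherent() is kernel-only, and the field names below are simplified:

#include <stddef.h>
#include <stdint.h>

struct window {
        size_t   len;        /* usable window length                  */
        size_t   alloc_len;  /* how much to actually allocate         */
        uint32_t buf_phys;   /* aligned bus address inside the alloc  */
};

/* mask: value read back after writing 0xffffffff to the channel register,
 * ANDed with the window-size mask; want: requested buffer size;
 * alloc_phys: bus address returned by the (elided) allocation. */
static struct window plan_dma_window(uint32_t mask, size_t want,
                                     uint32_t alloc_phys)
{
        struct window w;

        w.len = (size_t)(~mask) + 1;               /* window size from mask  */
        if (want < w.len)
                w.len = want;
        w.alloc_len = 2 * w.len;                   /* over-allocate to align */
        w.buf_phys = (alloc_phys + w.len) & mask;  /* aligned start          */
        return w;
}
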
index 2f67c7c8d41337ba5240f087b56606f842360936..2e7c5553955e739ba717568325a2c1be67a15c47 100644 (file)
@@ -374,6 +374,7 @@ enum vf_state {
 #define BE_FLAGS_LINK_STATUS_INIT              1
 #define BE_FLAGS_WORKER_SCHEDULED              (1 << 3)
 #define BE_FLAGS_VLAN_PROMISC                  (1 << 4)
+#define BE_FLAGS_MCAST_PROMISC                 (1 << 5)
 #define BE_FLAGS_NAPI_ENABLED                  (1 << 9)
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD            (1 << 11)
 #define BE_FLAGS_VXLAN_OFFLOADS                        (1 << 12)
@@ -455,7 +456,7 @@ struct be_adapter {
        struct be_drv_stats drv_stats;
        struct be_aic_obj aic_obj[MAX_EVT_QS];
        u16 vlans_added;
-       u8 vlan_tag[VLAN_N_VID];
+       unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
        u8 vlan_prio_bmap;      /* Available Priority BitMap */
        u16 recommended_prio;   /* Recommended Priority */
        struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
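
The be.h change above replaces the per-VID byte array vlan_tag[VLAN_N_VID] with a packed bitmap, so tracking a VLAN costs one bit instead of one byte and membership checks become bit operations. A minimal user-space model of the same bookkeeping, written with plain C bit arithmetic rather than the kernel's set_bit()/test_bit() helpers, which are assumed rather than shown:

#include <stdbool.h>

#define VLAN_N_VID       4096
#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];

static void vid_add(unsigned int vid)
{
        vids[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

static bool vid_present(unsigned int vid)
{
        return vids[vid / BITS_PER_LONG] & (1UL << (vid % BITS_PER_LONG));
}
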
index 07e78e89a348ecc1d84bff5f1767c48be9a12b00..a568f7d1a24cc7cf579466d254e19438ad4596a2 100644 (file)
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
        }
 };
 
-static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
-                          u8 subsystem)
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
 {
        int i;
        int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -120,21 +119,28 @@ static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
        return (void *)addr;
 }
 
-static int be_mcc_compl_process(struct be_adapter *adapter,
-                               struct be_mcc_compl *compl)
+static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
 {
-       u16 compl_status, extd_status;
-       struct be_cmd_resp_hdr *resp_hdr;
-       u8 opcode = 0, subsystem = 0;
-
-       /* Just swap the status to host endian; mcc tag is opaquely copied
-        * from mcc_wrb */
-       be_dws_le_to_cpu(compl, 4);
-
-       compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
-                               CQE_STATUS_COMPL_MASK;
+       if (base_status == MCC_STATUS_NOT_SUPPORTED ||
+           base_status == MCC_STATUS_ILLEGAL_REQUEST ||
+           addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+           (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
+           (base_status == MCC_STATUS_ILLEGAL_FIELD ||
+            addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
+               return true;
+       else
+               return false;
+}
 
-       resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+/* Placeholder for all the async MCC cmds wherein the caller is not in a busy
+ * loop (has not issued be_mcc_notify_wait())
+ */
+static void be_async_cmd_process(struct be_adapter *adapter,
+                                struct be_mcc_compl *compl,
+                                struct be_cmd_resp_hdr *resp_hdr)
+{
+       enum mcc_base_status base_status = base_status(compl->status);
+       u8 opcode = 0, subsystem = 0;
 
        if (resp_hdr) {
                opcode = resp_hdr->opcode;
@@ -144,61 +150,86 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
        if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
            subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
                complete(&adapter->et_cmd_compl);
-               return 0;
+               return;
        }
 
-       if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
-            (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
-           (subsystem == CMD_SUBSYSTEM_COMMON)) {
-               adapter->flash_status = compl_status;
+       if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
+            opcode == OPCODE_COMMON_WRITE_OBJECT) &&
+           subsystem == CMD_SUBSYSTEM_COMMON) {
+               adapter->flash_status = compl->status;
                complete(&adapter->et_cmd_compl);
+               return;
        }
 
-       if (compl_status == MCC_STATUS_SUCCESS) {
-               if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
-                    (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
-                   (subsystem == CMD_SUBSYSTEM_ETH)) {
-                       be_parse_stats(adapter);
-                       adapter->stats_cmd_sent = false;
-               }
-               if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
-                   subsystem == CMD_SUBSYSTEM_COMMON) {
+       if ((opcode == OPCODE_ETH_GET_STATISTICS ||
+            opcode == OPCODE_ETH_GET_PPORT_STATS) &&
+           subsystem == CMD_SUBSYSTEM_ETH &&
+           base_status == MCC_STATUS_SUCCESS) {
+               be_parse_stats(adapter);
+               adapter->stats_cmd_sent = false;
+               return;
+       }
+
+       if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
+           subsystem == CMD_SUBSYSTEM_COMMON) {
+               if (base_status == MCC_STATUS_SUCCESS) {
                        struct be_cmd_resp_get_cntl_addnl_attribs *resp =
-                               (void *)resp_hdr;
+                                                       (void *)resp_hdr;
                        adapter->drv_stats.be_on_die_temperature =
-                               resp->on_die_temperature;
-               }
-       } else {
-               if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
+                                               resp->on_die_temperature;
+               } else {
                        adapter->be_get_temp_freq = 0;
+               }
+               return;
+       }
+}
+
+static int be_mcc_compl_process(struct be_adapter *adapter,
+                               struct be_mcc_compl *compl)
+{
+       enum mcc_base_status base_status;
+       enum mcc_addl_status addl_status;
+       struct be_cmd_resp_hdr *resp_hdr;
+       u8 opcode = 0, subsystem = 0;
+
+       /* Just swap the status to host endian; mcc tag is opaquely copied
+        * from mcc_wrb */
+       be_dws_le_to_cpu(compl, 4);
+
+       base_status = base_status(compl->status);
+       addl_status = addl_status(compl->status);
+
+       resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+       if (resp_hdr) {
+               opcode = resp_hdr->opcode;
+               subsystem = resp_hdr->subsystem;
+       }
+
+       be_async_cmd_process(adapter, compl, resp_hdr);
 
-               if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
-                       compl_status == MCC_STATUS_ILLEGAL_REQUEST)
-                       goto done;
+       if (base_status != MCC_STATUS_SUCCESS &&
+           !be_skip_err_log(opcode, base_status, addl_status)) {
 
-               if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+               if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
                        dev_warn(&adapter->pdev->dev,
                                 "VF is not privileged to issue opcode %d-%d\n",
                                 opcode, subsystem);
                } else {
-                       extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
-                                       CQE_STATUS_EXTD_MASK;
                        dev_err(&adapter->pdev->dev,
                                "opcode %d-%d failed:status %d-%d\n",
-                               opcode, subsystem, compl_status, extd_status);
-
-                       if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
-                               return extd_status;
+                               opcode, subsystem, base_status, addl_status);
                }
        }
-done:
-       return compl_status;
+       return compl->status;
 }
 
 /* Link state evt is a string of bytes; no need for endian swapping */
 static void be_async_link_state_process(struct be_adapter *adapter,
-               struct be_async_event_link_state *evt)
+                                       struct be_mcc_compl *compl)
 {
+       struct be_async_event_link_state *evt =
+                       (struct be_async_event_link_state *)compl;
+
        /* When link status changes, link speed must be re-queried from FW */
        adapter->phy.link_speed = -1;
 
@@ -221,8 +252,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
 
 /* Grp5 CoS Priority evt */
 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
-               struct be_async_event_grp5_cos_priority *evt)
+                                              struct be_mcc_compl *compl)
 {
+       struct be_async_event_grp5_cos_priority *evt =
+                       (struct be_async_event_grp5_cos_priority *)compl;
+
        if (evt->valid) {
                adapter->vlan_prio_bmap = evt->available_priority_bmap;
                adapter->recommended_prio &= ~VLAN_PRIO_MASK;
@@ -233,8 +267,11 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 
 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
-               struct be_async_event_grp5_qos_link_speed *evt)
+                                           struct be_mcc_compl *compl)
 {
+       struct be_async_event_grp5_qos_link_speed *evt =
+                       (struct be_async_event_grp5_qos_link_speed *)compl;
+
        if (adapter->phy.link_speed >= 0 &&
            evt->physical_port == adapter->port_num)
                adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
@@ -242,8 +279,11 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
 
 /*Grp5 PVID evt*/
 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
-               struct be_async_event_grp5_pvid_state *evt)
+                                            struct be_mcc_compl *compl)
 {
+       struct be_async_event_grp5_pvid_state *evt =
+                       (struct be_async_event_grp5_pvid_state *)compl;
+
        if (evt->enabled) {
                adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
                dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
@@ -253,26 +293,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
 }
 
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
-               u32 trailer, struct be_mcc_compl *evt)
+                                     struct be_mcc_compl *compl)
 {
-       u8 event_type = 0;
-
-       event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
-               ASYNC_TRAILER_EVENT_TYPE_MASK;
+       u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+                               ASYNC_EVENT_TYPE_MASK;
 
        switch (event_type) {
        case ASYNC_EVENT_COS_PRIORITY:
-               be_async_grp5_cos_priority_process(adapter,
-               (struct be_async_event_grp5_cos_priority *)evt);
-       break;
+               be_async_grp5_cos_priority_process(adapter, compl);
+               break;
        case ASYNC_EVENT_QOS_SPEED:
-               be_async_grp5_qos_speed_process(adapter,
-               (struct be_async_event_grp5_qos_link_speed *)evt);
-       break;
+               be_async_grp5_qos_speed_process(adapter, compl);
+               break;
        case ASYNC_EVENT_PVID_STATE:
-               be_async_grp5_pvid_state_process(adapter,
-               (struct be_async_event_grp5_pvid_state *)evt);
-       break;
+               be_async_grp5_pvid_state_process(adapter, compl);
+               break;
        default:
                dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
                         event_type);
@@ -281,13 +316,13 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
 }
 
 static void be_async_dbg_evt_process(struct be_adapter *adapter,
-               u32 trailer, struct be_mcc_compl *cmp)
+                                    struct be_mcc_compl *cmp)
 {
        u8 event_type = 0;
        struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
 
-       event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
-               ASYNC_TRAILER_EVENT_TYPE_MASK;
+       event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+                       ASYNC_EVENT_TYPE_MASK;
 
        switch (event_type) {
        case ASYNC_DEBUG_EVENT_TYPE_QNQ:
@@ -302,25 +337,33 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
        }
 }
 
-static inline bool is_link_state_evt(u32 trailer)
+static inline bool is_link_state_evt(u32 flags)
 {
-       return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-               ASYNC_TRAILER_EVENT_CODE_MASK) ==
-                               ASYNC_EVENT_CODE_LINK_STATE;
+       return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+                       ASYNC_EVENT_CODE_LINK_STATE;
 }
 
-static inline bool is_grp5_evt(u32 trailer)
+static inline bool is_grp5_evt(u32 flags)
 {
-       return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-               ASYNC_TRAILER_EVENT_CODE_MASK) ==
-                               ASYNC_EVENT_CODE_GRP_5);
+       return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+                       ASYNC_EVENT_CODE_GRP_5;
 }
 
-static inline bool is_dbg_evt(u32 trailer)
+static inline bool is_dbg_evt(u32 flags)
 {
-       return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
-               ASYNC_TRAILER_EVENT_CODE_MASK) ==
-                               ASYNC_EVENT_CODE_QNQ);
+       return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+                       ASYNC_EVENT_CODE_QNQ;
+}
+
+static void be_mcc_event_process(struct be_adapter *adapter,
+                                struct be_mcc_compl *compl)
+{
+       if (is_link_state_evt(compl->flags))
+               be_async_link_state_process(adapter, compl);
+       else if (is_grp5_evt(compl->flags))
+               be_async_grp5_evt_process(adapter, compl);
+       else if (is_dbg_evt(compl->flags))
+               be_async_dbg_evt_process(adapter, compl);
 }
 
 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
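
As the helpers above show, the async completion's flags word carries both an event code and an event type in separate bit fields; the refactor simply reads them straight from compl->flags instead of passing a separate trailer word around. A small standalone sketch of that decoding, with placeholder shift and mask values, since the real ASYNC_EVENT_* definitions live in be_cmds.h and are not part of this hunk:

#include <stdint.h>

/* Placeholder field layout for this sketch only. */
#define ASYNC_EVENT_CODE_SHIFT  8
#define ASYNC_EVENT_CODE_MASK   0xff
#define ASYNC_EVENT_TYPE_SHIFT  16
#define ASYNC_EVENT_TYPE_MASK   0xff

static uint8_t async_event_code(uint32_t flags)
{
        return (flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK;
}

static uint8_t async_event_type(uint32_t flags)
{
        return (flags >> ASYNC_EVENT_TYPE_SHIFT) & ASYNC_EVENT_TYPE_MASK;
}
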
@@ -362,21 +405,13 @@ int be_process_mcc(struct be_adapter *adapter)
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
        spin_lock(&adapter->mcc_cq_lock);
+
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
-                       /* Interpret flags as an async trailer */
-                       if (is_link_state_evt(compl->flags))
-                               be_async_link_state_process(adapter,
-                               (struct be_async_event_link_state *) compl);
-                       else if (is_grp5_evt(compl->flags))
-                               be_async_grp5_evt_process(adapter,
-                               compl->flags, compl);
-                       else if (is_dbg_evt(compl->flags))
-                               be_async_dbg_evt_process(adapter,
-                               compl->flags, compl);
+                       be_mcc_event_process(adapter, compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
-                               status = be_mcc_compl_process(adapter, compl);
-                               atomic_dec(&mcc_obj->q.used);
+                       status = be_mcc_compl_process(adapter, compl);
+                       atomic_dec(&mcc_obj->q.used);
                }
                be_mcc_compl_use(compl);
                num++;
@@ -436,7 +471,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
        if (status == -EIO)
                goto out;
 
-       status = resp->status;
+       status = (resp->base_status |
+                 ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
+                  CQE_ADDL_STATUS_SHIFT));
 out:
        return status;
 }
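
The new return path above packs both halves of the completion status into a single int: the base status sits in the low bits and the additional status is shifted above it, so callers can keep comparing against plain MCC_STATUS_* codes while still being able to recover the extended code. A tiny pack/unpack sketch with placeholder shift and mask values, since the real CQE_ADDL_STATUS_* constants come from be_cmds.h:

#include <stdint.h>

/* Placeholder layout for this sketch only. */
#define CQE_ADDL_STATUS_SHIFT  16
#define CQE_ADDL_STATUS_MASK   0xff

static int pack_status(uint16_t base, uint16_t addl)
{
        return base | ((addl & CQE_ADDL_STATUS_MASK) << CQE_ADDL_STATUS_SHIFT);
}

static uint16_t unpack_base_status(int status)
{
        return status & 0xffff;
}

static uint16_t unpack_addl_status(int status)
{
        return (status >> CQE_ADDL_STATUS_SHIFT) & CQE_ADDL_STATUS_MASK;
}
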
@@ -560,10 +597,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
-               sliport_err1 = ioread32(adapter->db +
-                                       SLIPORT_ERROR1_OFFSET);
-               sliport_err2 = ioread32(adapter->db +
-                                       SLIPORT_ERROR2_OFFSET);
+               sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
+               sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
 
                if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
                    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +665,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
                if (stage == POST_STAGE_ARMFW_RDY)
                        return 0;
 
-               dev_info(dev, "Waiting for POST, %ds elapsed\n",
-                        timeout);
+               dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
                if (msleep_interruptible(2000)) {
                        dev_err(dev, "Waiting for POST aborted\n");
                        return -EINTR;
@@ -649,8 +683,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
        return &wrb->payload.sgl[0];
 }
 
-static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
-                                unsigned long addr)
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
 {
        wrb->tag0 = addr & 0xFFFFFFFF;
        wrb->tag1 = upper_32_bits(addr);
@@ -659,8 +692,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
 /* Don't touch the hdr after it's prepared */
 /* mem will be NULL for embedded commands */
 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
-                               u8 subsystem, u8 opcode, int cmd_len,
-                               struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
+                                  u8 subsystem, u8 opcode, int cmd_len,
+                                  struct be_mcc_wrb *wrb,
+                                  struct be_dma_mem *mem)
 {
        struct be_sge *sge;
 
@@ -683,7 +717,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 }
 
 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
-                       struct be_dma_mem *mem)
+                                     struct be_dma_mem *mem)
 {
        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        u64 dma = (u64)mem->dma;
@@ -868,7 +902,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
+                              NULL);
 
        /* Support for EQ_CREATEv2 is available only from SH-R onwards */
        if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +952,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
+                              NULL);
        req->type = MAC_ADDRESS_TYPE_NETWORK;
        if (permanent) {
                req->permanent = 1;
@@ -940,7 +976,7 @@ err:
 
 /* Uses synchronous MCCQ */
 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-               u32 if_id, u32 *pmac_id, u32 domain)
+                   u32 if_id, u32 *pmac_id, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
@@ -956,7 +992,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
+                              NULL);
 
        req->hdr.domain = domain;
        req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1049,7 @@ err:
 
 /* Uses Mbox */
 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
-               struct be_queue_info *eq, bool no_delay, int coalesce_wm)
+                    struct be_queue_info *eq, bool no_delay, int coalesce_wm)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
@@ -1028,17 +1065,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
+                              NULL);
 
        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 
        if (BEx_chip(adapter)) {
                AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
-                                                               coalesce_wm);
+                             coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
-                                                               ctxt, no_delay);
+                             ctxt, no_delay);
                AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
-                                               __ilog2_u32(cq->len/256));
+                             __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1091,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
                        AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
                                      ctxt, coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
-                                                               no_delay);
+                             no_delay);
                AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
-                                               __ilog2_u32(cq->len/256));
+                             __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
-                                                               ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
-                                                               ctxt, eq->id);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
        }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1124,8 @@ static u32 be_encoded_q_len(int q_len)
 }
 
 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
-                               struct be_queue_info *mccq,
-                               struct be_queue_info *cq)
+                                 struct be_queue_info *mccq,
+                                 struct be_queue_info *cq)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1141,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
+                              NULL);
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (BEx_chip(adapter)) {
                AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
-                                               be_encoded_q_len(mccq->len));
+                             be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
        } else {
                req->hdr.version = 1;
@@ -1145,8 +1182,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
 }
 
 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
-                               struct be_queue_info *mccq,
-                               struct be_queue_info *cq)
+                                 struct be_queue_info *mccq,
+                                 struct be_queue_info *cq)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1199,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
+                              NULL);
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 
        AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
-                       be_encoded_q_len(mccq->len));
+                     be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1225,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
 }
 
 int be_cmd_mccq_create(struct be_adapter *adapter,
-                       struct be_queue_info *mccq,
-                       struct be_queue_info *cq)
+                      struct be_queue_info *mccq, struct be_queue_info *cq)
 {
        int status;
 
@@ -1213,7 +1250,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
        req = embedded_payload(&wrb);
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-                               OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
+                              OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
 
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
@@ -1250,8 +1287,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
 /* Uses MCC */
 int be_cmd_rxq_create(struct be_adapter *adapter,
-               struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
-               u32 if_id, u32 rss, u8 *rss_id)
+                     struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+                     u32 if_id, u32 rss, u8 *rss_id)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1305,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-                               OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
+                              OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
 
        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1332,7 @@ err:
  * Uses Mbox
  */
 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-               int queue_type)
+                    int queue_type)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1371,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
        }
 
        be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
-                               NULL);
+                              NULL);
        req->id = cpu_to_le16(q->id);
 
        status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1398,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-                       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
+                              OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
        req->id = cpu_to_le16(q->id);
 
        status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1421,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 
        req = embedded_payload(&wrb);
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
+                              OPCODE_COMMON_NTWK_INTERFACE_CREATE,
+                              sizeof(*req), &wrb, NULL);
        req->hdr.domain = domain;
        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
@@ -1422,7 +1460,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
+                              sizeof(*req), wrb, NULL);
        req->hdr.domain = domain;
        req->interface_id = cpu_to_le32(interface_id);
 
@@ -1452,7 +1491,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
        hdr = nonemb_cmd->va;
 
        be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
-               OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
+                              OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
+                              nonemb_cmd);
 
        /* BE2 is the only chip that does not support version 1 of the cmd */
        if (BE2_chip(adapter))
@@ -1472,7 +1512,7 @@ err:
 
 /* Lancer Stats */
 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
-                               struct be_dma_mem *nonemb_cmd)
+                              struct be_dma_mem *nonemb_cmd)
 {
 
        struct be_mcc_wrb *wrb;
@@ -1493,8 +1533,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
        req = nonemb_cmd->va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-                       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
-                       nonemb_cmd);
+                              OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
+                              wrb, nonemb_cmd);
 
        req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
        req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1593,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+                              sizeof(*req), wrb, NULL);
 
        /* BE2 is the only chip that does not support version 1 of the cmd */
        if (!BE2_chip(adapter))
@@ -1598,8 +1639,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
-               wrb, NULL);
+                              OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
+                              sizeof(*req), wrb, NULL);
 
        be_mcc_notify(adapter);
 
@@ -1625,7 +1666,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
+                              NULL);
        req->fat_operation = cpu_to_le32(QUERY_FAT);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
@@ -1655,8 +1697,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 
        get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
        get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
-                       get_fat_cmd.size,
-                       &get_fat_cmd.dma);
+                                             get_fat_cmd.size,
+                                             &get_fat_cmd.dma);
        if (!get_fat_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
@@ -1679,8 +1721,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 
                payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
                be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                               OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
-                               &get_fat_cmd);
+                                      OPCODE_COMMON_MANAGE_FAT, payload_len,
+                                      wrb, &get_fat_cmd);
 
                req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
                req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1733,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
                if (!status) {
                        struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
                        memcpy(buf + offset,
-                               resp->data_buffer,
-                               le32_to_cpu(resp->read_log_length));
+                              resp->data_buffer,
+                              le32_to_cpu(resp->read_log_length));
                } else {
                        dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
                        goto err;
@@ -1702,14 +1744,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
        }
 err:
        pci_free_consistent(adapter->pdev, get_fat_cmd.size,
-                       get_fat_cmd.va,
-                       get_fat_cmd.dma);
+                           get_fat_cmd.va, get_fat_cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
 }
 
 /* Uses synchronous mcc */
 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-                       char *fw_on_flash)
+                     char *fw_on_flash)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1767,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+                              NULL);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1801,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+                              NULL);
 
        req->num_eq = cpu_to_le32(num);
        for (i = 0; i < num; i++) {
@@ -1777,7 +1820,7 @@ err:
 
 /* Uses synchronous mcc */
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
-                      u32 num, bool promiscuous)
+                      u32 num)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1836,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+                              wrb, NULL);
 
        req->interface_id = if_id;
-       req->promiscuous = promiscuous;
        req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
        req->num_vlan = num;
-       if (!promiscuous) {
-               memcpy(req->normal_vlan, vtag_array,
-                       req->num_vlan * sizeof(vtag_array[0]));
-       }
+       memcpy(req->normal_vlan, vtag_array,
+              req->num_vlan * sizeof(vtag_array[0]));
 
        status = be_mcc_notify_wait(adapter);
-
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -1827,18 +1867,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
        }
        memset(req, 0, sizeof(*req));
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                               OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
-                               wrb, mem);
+                              OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+                              wrb, mem);
 
        req->if_id = cpu_to_le32(adapter->if_handle);
        if (flags & IFF_PROMISC) {
                req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-                                       BE_IF_FLAGS_VLAN_PROMISCUOUS |
-                                       BE_IF_FLAGS_MCAST_PROMISCUOUS);
+                                                BE_IF_FLAGS_VLAN_PROMISCUOUS |
+                                                BE_IF_FLAGS_MCAST_PROMISCUOUS);
                if (value == ON)
-                       req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-                                               BE_IF_FLAGS_VLAN_PROMISCUOUS |
-                                               BE_IF_FLAGS_MCAST_PROMISCUOUS);
+                       req->if_flags =
+                               cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+                                           BE_IF_FLAGS_VLAN_PROMISCUOUS |
+                                           BE_IF_FLAGS_MCAST_PROMISCUOUS);
        } else if (flags & IFF_ALLMULTI) {
                req->if_flags_mask = req->if_flags =
                                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1908,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
        }
 
        if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
-            req->if_flags_mask) {
+           req->if_flags_mask) {
                dev_warn(&adapter->pdev->dev,
                         "Cannot set rx filter flags 0x%x\n",
                         req->if_flags_mask);
@@ -1905,7 +1946,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+                              wrb, NULL);
 
        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1980,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+                              wrb, NULL);
 
        status = be_mcc_notify_wait(adapter);
        if (!status) {
@@ -1968,7 +2011,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+                              sizeof(*req), wrb, NULL);
 
        status = be_mbox_notify_wait(adapter);
        if (!status) {
@@ -2011,7 +2055,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
+                              NULL);
 
        status = be_mbox_notify_wait(adapter);
 
@@ -2020,7 +2065,7 @@ int be_cmd_reset_function(struct be_adapter *adapter)
 }
 
 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-                       u32 rss_hash_opts, u16 table_size, u8 *rss_hkey)
+                     u32 rss_hash_opts, u16 table_size, u8 *rss_hkey)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rss_config *req;
@@ -2029,35 +2074,38 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
        if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
                return 0;
 
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
+       spin_lock_bh(&adapter->mcc_lock);
 
-       wrb = wrb_from_mbox(adapter);
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-               OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
 
        req->if_id = cpu_to_le32(adapter->if_handle);
        req->enable_rss = cpu_to_le16(rss_hash_opts);
        req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
 
-       if (lancer_chip(adapter) || skyhawk_chip(adapter))
+       if (!BEx_chip(adapter))
                req->hdr.version = 1;
 
        memcpy(req->cpu_table, rsstable, table_size);
        memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
        be_dws_cpu_to_le(req->hash, sizeof(req->hash));
 
-       status = be_mbox_notify_wait(adapter);
-
-       mutex_unlock(&adapter->mbox_lock);
+       status = be_mcc_notify_wait(adapter);
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
 
 /* Uses sync mcc */
 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
-                       u8 bcn, u8 sts, u8 state)
+                           u8 bcn, u8 sts, u8 state)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_enable_disable_beacon *req;
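
The be_cmd_rss_config() hunk above moves the command off the mailbox (mbox_lock, wrb_from_mbox, be_mbox_notify_wait) and onto the synchronous MCC queue used by most runtime commands. The sketch below only outlines that lock / reserve-WRB / notify-and-wait flow; the helper names are stand-ins, not the driver's functions.

#include <stdio.h>

/* Stand-ins for the driver's MCC-queue primitives; the real code uses
 * spin_lock_bh(&adapter->mcc_lock), wrb_from_mccq() and be_mcc_notify_wait(). */
static void mcc_lock(void)   { /* spin_lock_bh() in the driver */ }
static void mcc_unlock(void) { /* spin_unlock_bh() in the driver */ }
static void *get_wrb(void)   { static char wrb[256]; return wrb; }
static int notify_and_wait(void *wrb) { (void)wrb; return 0; }

/* Flow of a synchronous MCC command, as now used by be_cmd_rss_config():
 * take the lock, reserve a WRB, build the request, wait for completion. */
static int issue_sync_mcc_cmd(void)
{
	void *wrb;
	int status;

	mcc_lock();
	wrb = get_wrb();
	if (!wrb) {
		status = -1;		/* -EBUSY in the driver */
		goto out;
	}
	/* ...fill the request payload and header here... */
	status = notify_and_wait(wrb);
out:
	mcc_unlock();
	return status;
}

int main(void)
{
	printf("status = %d\n", issue_sync_mcc_cmd());
	return 0;
}
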
@@ -2073,7 +2121,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_ENABLE_DISABLE_BEACON,
+                              sizeof(*req), wrb, NULL);
 
        req->port_num = port_num;
        req->beacon_state = state;
@@ -2104,7 +2153,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
+                              wrb, NULL);
 
        req->port_num = port_num;
 
@@ -2143,20 +2193,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                               OPCODE_COMMON_WRITE_OBJECT,
-                               sizeof(struct lancer_cmd_req_write_object), wrb,
-                               NULL);
+                              OPCODE_COMMON_WRITE_OBJECT,
+                              sizeof(struct lancer_cmd_req_write_object), wrb,
+                              NULL);
 
        ctxt = &req->context;
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
-                       write_length, ctxt, data_size);
+                     write_length, ctxt, data_size);
 
        if (data_size == 0)
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
-                               eof, ctxt, 1);
+                             eof, ctxt, 1);
        else
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
-                               eof, ctxt, 0);
+                             eof, ctxt, 0);
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
        req->write_offset = cpu_to_le32(data_offset);
@@ -2164,8 +2214,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req->descriptor_count = cpu_to_le32(1);
        req->buf_len = cpu_to_le32(data_size);
        req->addr_low = cpu_to_le32((cmd->dma +
-                               sizeof(struct lancer_cmd_req_write_object))
-                               & 0xFFFFFFFF);
+                                    sizeof(struct lancer_cmd_req_write_object))
+                                   & 0xFFFFFFFF);
        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object)));
 
@@ -2194,8 +2244,8 @@ err_unlock:
 }
 
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
-               u32 data_size, u32 data_offset, const char *obj_name,
-               u32 *data_read, u32 *eof, u8 *addn_status)
+                          u32 data_size, u32 data_offset, const char *obj_name,
+                          u32 *data_read, u32 *eof, u8 *addn_status)
 {
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_read_object *req;
@@ -2213,9 +2263,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_READ_OBJECT,
-                       sizeof(struct lancer_cmd_req_read_object), wrb,
-                       NULL);
+                              OPCODE_COMMON_READ_OBJECT,
+                              sizeof(struct lancer_cmd_req_read_object), wrb,
+                              NULL);
 
        req->desired_read_len = cpu_to_le32(data_size);
        req->read_offset = cpu_to_le32(data_offset);
@@ -2241,7 +2291,7 @@ err_unlock:
 }
 
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
-                       u32 flash_type, u32 flash_opcode, u32 buf_size)
+                         u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
@@ -2258,7 +2308,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req = cmd->va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
+                              OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+                              cmd);
 
        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2281,7 +2332,7 @@ err_unlock:
 }
 
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-                        int offset)
+                         u16 optype, int offset)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_read_flash_crc *req;
@@ -2300,7 +2351,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
                               OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
                               wrb, NULL);
 
-       req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
+       req->params.op_type = cpu_to_le32(optype);
        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
        req->params.offset = cpu_to_le32(offset);
        req->params.data_buf_size = cpu_to_le32(0x4);
@@ -2315,7 +2366,7 @@ err:
 }
 
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
-                               struct be_dma_mem *nonemb_cmd)
+                           struct be_dma_mem *nonemb_cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2331,8 +2382,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
        req = nonemb_cmd->va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-               OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
-               nonemb_cmd);
+                              OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+                              wrb, nonemb_cmd);
        memcpy(req->magic_mac, mac, ETH_ALEN);
 
        status = be_mcc_notify_wait(adapter);
@@ -2360,8 +2411,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-                       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
-                       NULL);
+                              OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+                              wrb, NULL);
 
        req->src_port = port_num;
        req->dest_port = port_num;
@@ -2375,7 +2426,8 @@ err:
 }
 
 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
-               u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+                        u32 loopback_type, u32 pkt_size, u32 num_pkts,
+                        u64 pattern)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_loopback_test *req;
@@ -2393,7 +2445,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-                       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
+                              OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+                              NULL);
 
        req->hdr.timeout = cpu_to_le32(15);
        req->pattern = cpu_to_le64(pattern);
@@ -2418,7 +2471,7 @@ err:
 }
 
 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
-                               u32 byte_cnt, struct be_dma_mem *cmd)
+                       u32 byte_cnt, struct be_dma_mem *cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_ddrdma_test *req;
@@ -2434,7 +2487,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
        }
        req = cmd->va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-                       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
+                              OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+                              cmd);
 
        req->pattern = cpu_to_le64(pattern);
        req->byte_count = cpu_to_le32(byte_cnt);
@@ -2462,7 +2516,7 @@ err:
 }
 
 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
-                               struct be_dma_mem *nonemb_cmd)
+                           struct be_dma_mem *nonemb_cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_seeprom_read *req;
@@ -2478,8 +2532,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
        req = nonemb_cmd->va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
-                       nonemb_cmd);
+                              OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+                              nonemb_cmd);
 
        status = be_mcc_notify_wait(adapter);
 
@@ -2507,8 +2561,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                goto err;
        }
        cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-                                       &cmd.dma);
+       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -2518,8 +2571,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
        req = cmd.va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
-                       wrb, &cmd);
+                              OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+                              wrb, &cmd);
 
        status = be_mcc_notify_wait(adapter);
        if (!status) {
@@ -2541,8 +2594,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                                BE_SUPPORTED_SPEED_1GBPS;
                }
        }
-       pci_free_consistent(adapter->pdev, cmd.size,
-                               cmd.va, cmd.dma);
+       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -2565,7 +2617,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
 
        req->hdr.domain = domain;
        req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2594,10 +2646,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
        attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
-                                               &attribs_cmd.dma);
+                                             &attribs_cmd.dma);
        if (!attribs_cmd.va) {
-               dev_err(&adapter->pdev->dev,
-                               "Memory allocation failure\n");
+               dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
                goto err;
        }
@@ -2610,8 +2661,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        req = attribs_cmd.va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                        OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
-                       &attribs_cmd);
+                              OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+                              wrb, &attribs_cmd);
 
        status = be_mbox_notify_wait(adapter);
        if (!status) {
@@ -2646,7 +2697,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+                              sizeof(*req), wrb, NULL);
 
        req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
                                CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2759,12 +2811,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
        memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
        get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
        get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
-                       get_mac_list_cmd.size,
-                       &get_mac_list_cmd.dma);
+                                                  get_mac_list_cmd.size,
+                                                  &get_mac_list_cmd.dma);
 
        if (!get_mac_list_cmd.va) {
                dev_err(&adapter->pdev->dev,
-                               "Memory allocation failure during GET_MAC_LIST\n");
+                       "Memory allocation failure during GET_MAC_LIST\n");
                return -ENOMEM;
        }
 
@@ -2828,18 +2880,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
                /* If no active mac_id found, return first mac addr */
                *pmac_id_valid = false;
                memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
-                                                               ETH_ALEN);
+                      ETH_ALEN);
        }
 
 out:
        spin_unlock_bh(&adapter->mcc_lock);
        pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
-                       get_mac_list_cmd.va, get_mac_list_cmd.dma);
+                           get_mac_list_cmd.va, get_mac_list_cmd.dma);
        return status;
 }
 
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
-                         u32 if_handle, bool active, u32 domain)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+                         u8 *mac, u32 if_handle, bool active, u32 domain)
 {
 
        if (!active)
@@ -2889,7 +2941,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_mac_list);
        cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
-                       &cmd.dma, GFP_KERNEL);
+                                   &cmd.dma, GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -2903,8 +2955,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
        req = cmd.va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                               OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
-                               wrb, &cmd);
+                              OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+                              wrb, &cmd);
 
        req->hdr.domain = domain;
        req->mac_count = mac_count;
@@ -2914,8 +2966,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
        status = be_mcc_notify_wait(adapter);
 
 err:
-       dma_free_coherent(&adapter->pdev->dev, cmd.size,
-                               cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
@@ -2960,7 +3011,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+                              NULL);
 
        req->hdr.domain = domain;
        AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3006,7 +3058,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+                              NULL);
 
        req->hdr.domain = domain;
        AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3024,10 +3077,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
        if (!status) {
                struct be_cmd_resp_get_hsw_config *resp =
                                                embedded_payload(wrb);
-               be_dws_le_to_cpu(&resp->context,
-                                               sizeof(resp->context));
+               be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
                vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
-                                                       pvid, &resp->context);
+                                   pvid, &resp->context);
                if (pvid)
                        *pvid = le16_to_cpu(vid);
                if (mode)
@@ -3059,11 +3111,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-                                              &cmd.dma);
+       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
        if (!cmd.va) {
-               dev_err(&adapter->pdev->dev,
-                               "Memory allocation failure\n");
+               dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
                goto err;
        }
@@ -3346,8 +3396,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-                                     &cmd.dma);
+       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -3393,7 +3442,7 @@ err:
 
 /* Uses mbox */
 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
-                                       u8 domain, struct be_dma_mem *cmd)
+                                         u8 domain, struct be_dma_mem *cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_profile_config *req;
@@ -3421,7 +3470,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
 
 /* Uses sync mcc */
 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
-                                       u8 domain, struct be_dma_mem *cmd)
+                                         u8 domain, struct be_dma_mem *cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_profile_config *req;
@@ -3481,8 +3530,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        resp = cmd.va;
        desc_count = le32_to_cpu(resp->desc_count);
 
-       pcie =  be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
-                                desc_count);
+       pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+                               desc_count);
        if (pcie)
                res->max_vfs = le16_to_cpu(pcie->num_vfs);
 
@@ -3545,33 +3594,47 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
        nic->cq_count = 0xFFFF;
        nic->toe_conn_count = 0xFFFF;
        nic->eq_count = 0xFFFF;
+       nic->iface_count = 0xFFFF;
        nic->link_param = 0xFF;
+       nic->channel_id_param = cpu_to_le16(0xF000);
        nic->acpi_params = 0xFF;
        nic->wol_param = 0x0F;
-       nic->bw_min = 0xFFFFFFFF;
+       nic->tunnel_iface_count = 0xFFFF;
+       nic->direct_tenant_iface_count = 0xFFFF;
        nic->bw_max = 0xFFFFFFFF;
 }
 
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
+                     u8 domain)
 {
-       if (lancer_chip(adapter)) {
-               struct be_nic_res_desc nic_desc;
+       struct be_nic_res_desc nic_desc;
+       u32 bw_percent;
+       u16 version = 0;
+
+       if (BE3_chip(adapter))
+               return be_cmd_set_qos(adapter, max_rate / 10, domain);
 
-               be_reset_nic_desc(&nic_desc);
+       be_reset_nic_desc(&nic_desc);
+       nic_desc.pf_num = adapter->pf_number;
+       nic_desc.vf_num = domain;
+       if (lancer_chip(adapter)) {
                nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
                nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
                nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
                                        (1 << NOSV_SHIFT);
-               nic_desc.pf_num = adapter->pf_number;
-               nic_desc.vf_num = domain;
-               nic_desc.bw_max = cpu_to_le32(bps);
-
-               return be_cmd_set_profile_config(adapter, &nic_desc,
-                                                RESOURCE_DESC_SIZE_V0,
-                                                0, domain);
+               nic_desc.bw_max = cpu_to_le32(max_rate / 10);
        } else {
-               return be_cmd_set_qos(adapter, bps, domain);
+               version = 1;
+               nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+               nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+               nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+               bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
+               nic_desc.bw_max = cpu_to_le32(bw_percent);
        }
+
+       return be_cmd_set_profile_config(adapter, &nic_desc,
+                                        nic_desc.hdr.desc_len,
+                                        version, domain);
 }
 
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
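
For chips other than BE3 and Lancer, be_cmd_config_qos() now programs the rate limit as a percentage of link speed, with max_rate == 0 meaning no limit (100%). A small standalone check of that arithmetic, using illustrative numbers:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the bw_percent computation in the reworked be_cmd_config_qos():
 * a max_rate of 0 means "no limit" and maps to 100%. */
static uint32_t rate_to_percent(uint32_t max_rate, uint16_t link_speed)
{
	return max_rate ? (max_rate * 100) / link_speed : 100;
}

int main(void)
{
	/* Illustrative values: capping a function at 2500 on a 10000 link -> 25%. */
	printf("%u%%\n", (unsigned int)rate_to_percent(2500, 10000));
	printf("%u%%\n", (unsigned int)rate_to_percent(0, 10000));	/* unlimited */
	return 0;
}
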
@@ -3856,7 +3919,7 @@ err:
 }
 
 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
-                       int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+                   int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
 {
        struct be_adapter *adapter = netdev_priv(netdev_handle);
        struct be_mcc_wrb *wrb;
index 4ea79b9c67e934e9db0b414486aada60c8f959af..d4616ffb7238a6ac8bd764100132cfda9235df86 100644 (file)
@@ -50,7 +50,7 @@ struct be_mcc_wrb {
 #define CQE_FLAGS_CONSUMED_MASK        (1 << 27)
 
 /* Completion Status */
-enum {
+enum mcc_base_status {
        MCC_STATUS_SUCCESS = 0,
        MCC_STATUS_FAILED = 1,
        MCC_STATUS_ILLEGAL_REQUEST = 2,
@@ -60,12 +60,25 @@ enum {
        MCC_STATUS_NOT_SUPPORTED = 66
 };
 
-#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES    0x16
+/* Additional status */
+enum mcc_addl_status {
+       MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
+       MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
+       MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+};
+
+#define CQE_BASE_STATUS_MASK           0xFFFF
+#define CQE_BASE_STATUS_SHIFT          0       /* bits 0 - 15 */
+#define CQE_ADDL_STATUS_MASK           0xFF
+#define CQE_ADDL_STATUS_SHIFT          16      /* bits 16 - 31 */
 
-#define CQE_STATUS_COMPL_MASK          0xFFFF
-#define CQE_STATUS_COMPL_SHIFT         0       /* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK           0xFFFF
-#define CQE_STATUS_EXTD_SHIFT          16      /* bits 16 - 31 */
+#define base_status(status)            \
+               ((enum mcc_base_status) \
+                       (status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0))
+#define addl_status(status)            \
+               ((enum mcc_addl_status) \
+                       (status > 0 ? (status >> CQE_ADDL_STATUS_SHIFT) & \
+                                       CQE_ADDL_STATUS_MASK : 0))
 
 struct be_mcc_compl {
        u32 status;             /* dword 0 */
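
The base_status()/addl_status() macros introduced above split the combined status value back into its two fields. A standalone illustration of the decode, reusing the patch's mask and shift values but dropping the enum casts for brevity:

#include <stdio.h>

#define CQE_BASE_STATUS_MASK	0xFFFF
#define CQE_ADDL_STATUS_MASK	0xFF
#define CQE_ADDL_STATUS_SHIFT	16

/* Simplified forms of the base_status()/addl_status() macros added above,
 * without the enum casts. */
#define base_status(s)	((s) > 0 ? ((s) & CQE_BASE_STATUS_MASK) : 0)
#define addl_status(s)	((s) > 0 ? (((s) >> CQE_ADDL_STATUS_SHIFT) & CQE_ADDL_STATUS_MASK) : 0)

int main(void)
{
	int status = 0x00160001;	/* addl 0x16, base 0x1 (illustrative) */

	printf("base=0x%x addl=0x%x\n",
	       (unsigned int)base_status(status),
	       (unsigned int)addl_status(status));
	return 0;
}
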
@@ -74,13 +87,13 @@ struct be_mcc_compl {
        u32 flags;              /* dword 3 */
 };
 
-/* When the async bit of mcc_compl is set, the last 4 bytes of
- * mcc_compl is interpreted as follows:
+/* When the async bit of mcc_compl flags is set, flags
+ * is interpreted as follows:
  */
-#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8       /* bits 8 - 15 */
-#define ASYNC_TRAILER_EVENT_CODE_MASK  0xFF
-#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
-#define ASYNC_TRAILER_EVENT_TYPE_MASK  0xFF
+#define ASYNC_EVENT_CODE_SHIFT         8       /* bits 8 - 15 */
+#define ASYNC_EVENT_CODE_MASK          0xFF
+#define ASYNC_EVENT_TYPE_SHIFT         16
+#define ASYNC_EVENT_TYPE_MASK          0xFF
 #define ASYNC_EVENT_CODE_LINK_STATE    0x1
 #define ASYNC_EVENT_CODE_GRP_5         0x5
 #define ASYNC_EVENT_QOS_SPEED          0x1
@@ -89,10 +102,6 @@ struct be_mcc_compl {
 #define ASYNC_EVENT_CODE_QNQ           0x6
 #define ASYNC_DEBUG_EVENT_TYPE_QNQ     1
 
-struct be_async_event_trailer {
-       u32 code;
-};
-
 enum {
        LINK_DOWN       = 0x0,
        LINK_UP         = 0x1
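
With struct be_async_event_trailer gone, the async event code and type are read straight from the flags dword using the renamed ASYNC_EVENT_* masks. A standalone sketch of that extraction (the 0x101 sample value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define ASYNC_EVENT_CODE_SHIFT		8	/* bits 8 - 15 */
#define ASYNC_EVENT_CODE_MASK		0xFF
#define ASYNC_EVENT_TYPE_SHIFT		16
#define ASYNC_EVENT_TYPE_MASK		0xFF
#define ASYNC_EVENT_CODE_LINK_STATE	0x1

/* Pull the event code and type out of the flags dword of an async
 * completion, the way the renamed ASYNC_EVENT_* macros are meant to be used. */
static void decode_async_flags(uint32_t flags)
{
	uint32_t code = (flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK;
	uint32_t type = (flags >> ASYNC_EVENT_TYPE_SHIFT) & ASYNC_EVENT_TYPE_MASK;

	printf("event code 0x%x, type 0x%x%s\n",
	       (unsigned int)code, (unsigned int)type,
	       code == ASYNC_EVENT_CODE_LINK_STATE ? " (link state)" : "");
}

int main(void)
{
	decode_async_flags(0x101);	/* made-up link-state event */
	return 0;
}
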
@@ -100,7 +109,7 @@ enum {
 #define LINK_STATUS_MASK                       0x1
 #define LOGICAL_LINK_STATUS_MASK               0x2
 
-/* When the event code of an async trailer is link-state, the mcc_compl
+/* When the event code of compl->flags is link-state, the mcc_compl
  * must be interpreted as follows
  */
 struct be_async_event_link_state {
@@ -110,10 +119,10 @@ struct be_async_event_link_state {
        u8 port_speed;
        u8 port_fault;
        u8 rsvd0[7];
-       struct be_async_event_trailer trailer;
+       u32 flags;
 } __packed;
 
-/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
+/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED
  * the mcc_compl must be interpreted as follows
  */
 struct be_async_event_grp5_qos_link_speed {
@@ -121,10 +130,10 @@ struct be_async_event_grp5_qos_link_speed {
        u8 rsvd[5];
        u16 qos_link_speed;
        u32 event_tag;
-       struct be_async_event_trailer trailer;
+       u32 flags;
 } __packed;
 
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
  * CoS-Priority, the mcc_compl must be interpreted as follows
  */
 struct be_async_event_grp5_cos_priority {
@@ -134,10 +143,10 @@ struct be_async_event_grp5_cos_priority {
        u8 valid;
        u8 rsvd0;
        u8 event_tag;
-       struct be_async_event_trailer trailer;
+       u32 flags;
 } __packed;
 
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
  * PVID state, the mcc_compl must be interpreted as follows
  */
 struct be_async_event_grp5_pvid_state {
@@ -146,7 +155,7 @@ struct be_async_event_grp5_pvid_state {
        u16 tag;
        u32 event_tag;
        u32 rsvd1;
-       struct be_async_event_trailer trailer;
+       u32 flags;
 } __packed;
 
 /* async event indicating outer VLAN tag in QnQ */
@@ -156,7 +165,7 @@ struct be_async_event_qnq {
        u16 vlan_tag;
        u32 event_tag;
        u8 rsvd1[4];
-       struct be_async_event_trailer trailer;
+       u32 flags;
 } __packed;
 
 struct be_mcc_mailbox {
@@ -258,8 +267,8 @@ struct be_cmd_resp_hdr {
        u8 opcode;              /* dword 0 */
        u8 subsystem;           /* dword 0 */
        u8 rsvd[2];             /* dword 0 */
-       u8 status;              /* dword 1 */
-       u8 add_status;          /* dword 1 */
+       u8 base_status;         /* dword 1 */
+       u8 addl_status;         /* dword 1 */
        u8 rsvd1[2];            /* dword 1 */
        u32 response_length;    /* dword 2 */
        u32 actual_resp_len;    /* dword 3 */
@@ -1186,7 +1195,8 @@ struct be_cmd_read_flash_crc {
        struct flashrom_params params;
        u8 crc[4];
        u8 rsvd[4];
-};
+} __packed;
+
 /**************** Lancer Firmware Flash ************/
 struct amap_lancer_write_obj_context {
        u8 write_length[24];
@@ -1891,16 +1901,20 @@ struct be_nic_res_desc {
        u16 cq_count;
        u16 toe_conn_count;
        u16 eq_count;
-       u32 rsvd5;
+       u16 vlan_id;
+       u16 iface_count;
        u32 cap_flags;
        u8 link_param;
-       u8 rsvd6[3];
+       u8 rsvd6;
+       u16 channel_id_param;
        u32 bw_min;
        u32 bw_max;
        u8 acpi_params;
        u8 wol_param;
        u16 rsvd7;
-       u32 rsvd8[7];
+       u16 tunnel_iface_count;
+       u16 direct_tenant_iface_count;
+       u32 rsvd8[6];
 } __packed;
 
 /************ Multi-Channel type ***********/
@@ -2060,7 +2074,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
                      char *fw_on_flash);
 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
-                      u32 num, bool promiscuous);
+                      u32 num);
 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2084,7 +2098,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                           u32 data_size, u32 data_offset, const char *obj_name,
                           u32 *data_read, u32 *eof, u8 *addn_status);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-                        int offset);
+                         u16 optype, int offset);
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
                            struct be_dma_mem *nonemb_cmd);
 int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2101,7 +2115,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
                        u8 loopback_type, u8 enable);
 int be_cmd_get_phy_info(struct be_adapter *adapter);
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate,
+                     u16 link_speed, u8 domain);
 void be_detect_error(struct be_adapter *adapter);
 int be_cmd_get_die_temperature(struct be_adapter *adapter);
 int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
index 6f3494e41511f67918c68e550a70d355ad99bc2f..970ae337daace7966c96baf80a92072c996de91e 100644 (file)
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
        {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
        {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
        {DRVSTAT_RX_INFO(rx_compl)},
+       {DRVSTAT_RX_INFO(rx_compl_err)},
        {DRVSTAT_RX_INFO(rx_mcast_pkts)},
        /* Number of page allocation failures while posting receive buffers
         * to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
 #define BE_NO_LOOPBACK 0xff
 
 static void be_get_drvinfo(struct net_device *netdev,
-                               struct ethtool_drvinfo *drvinfo)
+                          struct ethtool_drvinfo *drvinfo)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
        drvinfo->eedump_len = 0;
 }
 
-static u32
-lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
 {
        u32 data_read = 0, eof;
        u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
        memset(&data_len_cmd, 0, sizeof(data_len_cmd));
        /* data_offset and data_size should be 0 to get reg len */
        status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
-                               file_name, &data_read, &eof, &addn_status);
+                                       file_name, &data_read, &eof,
+                                       &addn_status);
 
        return data_read;
 }
 
-static int
-lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
-               u32 buf_len, void *buf)
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+                               u32 buf_len, void *buf)
 {
        struct be_dma_mem read_cmd;
        u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 
        read_cmd.size = LANCER_READ_FILE_CHUNK;
        read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
-                       &read_cmd.dma);
+                                          &read_cmd.dma);
 
        if (!read_cmd.va) {
                dev_err(&adapter->pdev->dev,
-                               "Memory allocation failure while reading dump\n");
+                       "Memory allocation failure while reading dump\n");
                return -ENOMEM;
        }
 
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
                                LANCER_READ_FILE_CHUNK);
                chunk_size = ALIGN(chunk_size, 4);
                status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
-                               total_read_len, file_name, &read_len,
-                               &eof, &addn_status);
+                                               total_read_len, file_name,
+                                               &read_len, &eof, &addn_status);
                if (!status) {
                        memcpy(buf + total_read_len, read_cmd.va, read_len);
                        total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
                }
        }
        pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
-                       read_cmd.dma);
+                           read_cmd.dma);
 
        return status;
 }
 
-static int
-be_get_reg_len(struct net_device *netdev)
+static int be_get_reg_len(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
        if (be_physfn(adapter)) {
                if (lancer_chip(adapter))
                        log_size = lancer_cmd_get_file_len(adapter,
-                                       LANCER_FW_DUMP_FILE);
+                                                          LANCER_FW_DUMP_FILE);
                else
                        be_cmd_get_reg_len(adapter, &log_size);
        }
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
                memset(buf, 0, regs->len);
                if (lancer_chip(adapter))
                        lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
-                                       regs->len, buf);
+                                            regs->len, buf);
                else
                        be_cmd_get_regs(adapter, regs->len, buf);
        }
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
        return 0;
 }
 
-static void
-be_get_ethtool_stats(struct net_device *netdev,
-               struct ethtool_stats *stats, uint64_t *data)
+static void be_get_ethtool_stats(struct net_device *netdev,
+                                struct ethtool_stats *stats, uint64_t *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
        }
 }
 
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
-               uint8_t *data)
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+                               uint8_t *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
        adapter->rx_fc = ecmd->rx_pause;
 
        status = be_cmd_set_flow_control(adapter,
-                                       adapter->tx_fc, adapter->rx_fc);
+                                        adapter->tx_fc, adapter->rx_fc);
        if (status)
                dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
 
        return status;
 }
 
-static int
-be_set_phys_id(struct net_device *netdev,
-              enum ethtool_phys_id_state state)
+static int be_set_phys_id(struct net_device *netdev,
+                         enum ethtool_phys_id_state state)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
        return status;
 }
 
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        memset(&wol->sopass, 0, sizeof(wol->sopass));
 }
 
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        return 0;
 }
 
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
+static int be_test_ddr_dma(struct be_adapter *adapter)
 {
        int ret, i;
        struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
 
        for (i = 0; i < 2; i++) {
                ret = be_cmd_ddr_dma_test(adapter, pattern[i],
-                                       4096, &ddrdma_cmd);
+                                         4096, &ddrdma_cmd);
                if (ret != 0)
                        goto err;
        }
@@ -773,20 +766,17 @@ err:
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
-                               u64 *status)
+                           u64 *status)
 {
-       be_cmd_set_loopback(adapter, adapter->hba_port_num,
-                               loopback_type, 1);
+       be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
        *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
-                               loopback_type, 1500,
-                               2, 0xabc);
-       be_cmd_set_loopback(adapter, adapter->hba_port_num,
-                               BE_NO_LOOPBACK, 1);
+                                      loopback_type, 1500, 2, 0xabc);
+       be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
        return *status;
 }
 
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+                        u64 *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
        memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
        if (test->flags & ETH_TEST_FL_OFFLINE) {
-               if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
-                                    &data[0]) != 0)
+               if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
                        test->flags |= ETH_TEST_FL_FAILED;
 
-               if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
-                                    &data[1]) != 0)
+               if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
                        test->flags |= ETH_TEST_FL_FAILED;
 
                if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
        }
 }
 
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
        return be_load_fw(adapter, efl->data);
 }
 
-static int
-be_get_eeprom_len(struct net_device *netdev)
+static int be_get_eeprom_len(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
        if (lancer_chip(adapter)) {
                if (be_physfn(adapter))
                        return lancer_cmd_get_file_len(adapter,
-                                       LANCER_VPD_PF_FILE);
+                                                      LANCER_VPD_PF_FILE);
                else
                        return lancer_cmd_get_file_len(adapter,
-                                       LANCER_VPD_VF_FILE);
+                                                      LANCER_VPD_VF_FILE);
        } else {
                return BE_READ_SEEPROM_LEN;
        }
 }
 
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
-                       uint8_t *data)
+static int be_read_eeprom(struct net_device *netdev,
+                         struct ethtool_eeprom *eeprom, uint8_t *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
        if (lancer_chip(adapter)) {
                if (be_physfn(adapter))
                        return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
-                                       eeprom->len, data);
+                                                   eeprom->len, data);
                else
                        return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
-                                       eeprom->len, data);
+                                                   eeprom->len, data);
        }
 
        eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
 }
 
 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
-                     u32 *rule_locs)
+                       u32 *rule_locs)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
index 3bd198550edbb95a64602e28535e8eb56758a220..8840c64aaeca7daca310d0a5a38dfdbed790fb12 100644
 #define OPTYPE_FCOE_FW_ACTIVE          10
 #define OPTYPE_FCOE_FW_BACKUP          11
 #define OPTYPE_NCSI_FW                 13
+#define OPTYPE_REDBOOT_DIR             18
+#define OPTYPE_REDBOOT_CONFIG          19
+#define OPTYPE_SH_PHY_FW               21
+#define OPTYPE_FLASHISM_JUMPVECTOR     22
+#define OPTYPE_UFI_DIR                 23
 #define OPTYPE_PHY_FW                  99
 #define TN_8022                                13
 
-#define ILLEGAL_IOCTL_REQ              2
 #define FLASHROM_OPER_PHY_FLASH                9
 #define FLASHROM_OPER_PHY_SAVE         10
 #define FLASHROM_OPER_FLASH            1
 #define IMAGE_FIRMWARE_BACKUP_FCoE     178
 #define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
 #define IMAGE_FIRMWARE_PHY             192
+#define IMAGE_REDBOOT_DIR              208
+#define IMAGE_REDBOOT_CONFIG           209
+#define IMAGE_UFI_DIR                  210
 #define IMAGE_BOOT_CODE                        224
 
 /************* Rx Packet Type Encoding **************/
@@ -534,7 +541,8 @@ struct flash_section_entry {
        u32 image_size;
        u32 cksum;
        u32 entry_point;
-       u32 rsvd0;
+       u16 optype;
+       u16 rsvd0;
        u32 rsvd1;
        u8 ver_data[32];
 } __packed;
index a3c6a27d13fa59f29563e61602a47a1fd1863f0b..6822b3d76d85960f3c9b48b3cd4d8c4bad44d87b 100644
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 }
 
 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
-               u16 len, u16 entry_size)
+                         u16 len, u16 entry_size)
 {
        struct be_dma_mem *mem = &q->dma_mem;
 
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
        u32 reg, enabled;
 
        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
-                               &reg);
+                             &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
 
        if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
                return;
 
        pci_write_config_dword(adapter->pdev,
-                       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+                              PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
 }
 
 static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
 }
 
 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
-               bool arm, bool clear_int, u16 num_popped)
+                        bool arm, bool clear_int, u16 num_popped)
 {
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
-       val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
-                       DB_EQ_RING_ID_EXT_MASK_SHIFT);
+       val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
 
        if (adapter->eeh_error)
                return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
-       if (be_roce_supported(adapter))  {
+       if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
 {
 
        struct be_drv_stats *drvs = &adapter->drv_stats;
-       struct lancer_pport_stats *pport_stats =
-                                       pport_stats_from_cmd(adapter);
+       struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
 
        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
 }
 
 static void populate_erx_stats(struct be_adapter *adapter,
-                       struct be_rx_obj *rxo,
-                       u32 erx_stat)
+                              struct be_rx_obj *rxo, u32 erx_stat)
 {
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
 }
 
 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
-                                       struct rtnl_link_stats64 *stats)
+                                               struct rtnl_link_stats64 *stats)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 }
 
 static void be_tx_stats_update(struct be_tx_obj *txo,
-                       u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+                              u32 wrb_cnt, u32 copied, u32 gso_segs,
+                              bool stopped)
 {
        struct be_tx_stats *stats = tx_stats(txo);
 
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
 
 /* Determine number of WRB entries needed to xmit data in an skb */
 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
-                                                               bool *dummy)
+                          bool *dummy)
 {
        int cnt = (skb->len > skb->data_len);
 
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
 }
 
 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
-                                       struct sk_buff *skb)
+                                    struct sk_buff *skb)
 {
        u8 vlan_prio;
        u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
 }
 
 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
-               struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
+                        struct sk_buff *skb, u32 wrb_cnt, u32 len,
+                        bool skip_hw_vlan)
 {
        u16 vlan_tag, proto;
 
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 }
 
 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
-               bool unmap_single)
+                         bool unmap_single)
 {
        dma_addr_t dma;
 
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
 }
 
 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
-               struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
-               bool skip_hw_vlan)
+                       struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+                       bool skip_hw_vlan)
 {
        dma_addr_t busaddr;
        int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag =
-                       &skb_shinfo(skb)->frags[i];
+               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
 }
 
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
-                               struct sk_buff *skb)
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
 {
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
 }
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
-                       *skip_hw_vlan = true;
+               *skip_hw_vlan = true;
 
        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
-                       new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
-                                       (ETH_HLEN + ETH_FCS_LEN))) {
+           new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
-                       "MTU must be between %d and %d bytes\n",
-                       BE_MIN_MTU,
-                       (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+                        "MTU must be between %d and %d bytes\n",
+                        BE_MIN_MTU,
+                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
-                       netdev->mtu, new_mtu);
+                netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
 }
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
 static int be_vid_config(struct be_adapter *adapter)
 {
        u16 vids[BE_NUM_VLANS_SUPPORTED];
-       u16 num = 0, i;
+       u16 num = 0, i = 0;
        int status = 0;
 
        /* No need to further configure vids if in promiscuous mode */
@@ -1109,16 +1105,14 @@ static int be_vid_config(struct be_adapter *adapter)
                goto set_vlan_promisc;
 
        /* Construct VLAN Table to give to HW */
-       for (i = 0; i < VLAN_N_VID; i++)
-               if (adapter->vlan_tag[i])
-                       vids[num++] = cpu_to_le16(i);
-
-       status = be_cmd_vlan_config(adapter, adapter->if_handle,
-                                   vids, num, 0);
+       for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+               vids[num++] = cpu_to_le16(i);
 
+       status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
-               if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+               if (addl_status(status) ==
+                               MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
@@ -1160,16 +1154,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (lancer_chip(adapter) && vid == 0)
                return status;
 
-       if (adapter->vlan_tag[vid])
+       if (test_bit(vid, adapter->vids))
                return status;
 
-       adapter->vlan_tag[vid] = 1;
+       set_bit(vid, adapter->vids);
        adapter->vlans_added++;
 
        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
-               adapter->vlan_tag[vid] = 0;
+               clear_bit(vid, adapter->vids);
        }
 
        return status;
@@ -1184,12 +1178,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (lancer_chip(adapter) && vid == 0)
                goto ret;
 
-       adapter->vlan_tag[vid] = 0;
+       clear_bit(vid, adapter->vids);
        status = be_vid_config(adapter);
        if (!status)
                adapter->vlans_added--;
        else
-               adapter->vlan_tag[vid] = 1;
+               set_bit(vid, adapter->vids);
 ret:
        return status;
 }
@@ -1197,7 +1191,7 @@ ret:
 static void be_clear_promisc(struct be_adapter *adapter)
 {
        adapter->promiscuous = false;
-       adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+       adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
 
        be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
 }
@@ -1222,10 +1216,8 @@ static void be_set_rx_mode(struct net_device *netdev)
 
        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
-           netdev_mc_count(netdev) > be_max_mc(adapter)) {
-               be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
-               goto done;
-       }
+           netdev_mc_count(netdev) > be_max_mc(adapter))
+               goto set_mcast_promisc;
 
        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
@@ -1251,13 +1243,22 @@ static void be_set_rx_mode(struct net_device *netdev)
        }
 
        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
-
-       /* Set to MCAST promisc mode if setting MULTICAST address fails */
-       if (status) {
-               dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
-               dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
-               be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+       if (!status) {
+               if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+                       adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
+               goto done;
        }
+
+set_mcast_promisc:
+       if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+               return;
+
+       /* Set to MCAST promisc mode if setting MULTICAST address fails
+        * or if num configured exceeds what we support
+        */
+       status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+       if (!status)
+               adapter->flags |= BE_FLAGS_MCAST_PROMISC;
 done:
        return;
 }
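
The rx-mode rework falls back to ALLMULTI once, remembers that state in BE_FLAGS_MCAST_PROMISC so the fallback is not re-issued on every rx-mode update, and drops back to exact filtering when the multicast list can be programmed again. A compact sketch of that state machine, assuming hypothetical stand-ins for the firmware calls:

#include <stdbool.h>
#include <stdio.h>

#define HW_MC_SLOTS 64                 /* stand-in for be_max_mc(adapter) */

static bool mcast_promisc;             /* mirrors BE_FLAGS_MCAST_PROMISC */

/* Fake firmware calls standing in for be_cmd_rx_filter(). */
static int program_mc_list(int count) { return count <= HW_MC_SLOTS ? 0 : -1; }
static int enable_allmulti(void)      { return 0; }

static void set_rx_mode(int mc_count)
{
	if (mc_count <= HW_MC_SLOTS && !program_mc_list(mc_count)) {
		mcast_promisc = false;  /* exact filtering is back in effect */
		return;
	}

	if (mcast_promisc)              /* already in ALLMULTI, nothing to redo */
		return;

	if (!enable_allmulti())
		mcast_promisc = true;
}

int main(void)
{
	set_rx_mode(200);               /* too many groups -> ALLMULTI */
	printf("mcast promisc: %d\n", mcast_promisc);
	set_rx_mode(10);                /* list fits again -> exact filtering */
	printf("mcast promisc: %d\n", mcast_promisc);
	return 0;
}
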
@@ -1287,7 +1288,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 
        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
-                               mac, vf);
+                       mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
@@ -1295,7 +1296,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 }
 
 static int be_get_vf_config(struct net_device *netdev, int vf,
-                       struct ifla_vf_info *vi)
+                           struct ifla_vf_info *vi)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1307,7 +1308,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
                return -EINVAL;
 
        vi->vf = vf;
-       vi->tx_rate = vf_cfg->tx_rate;
+       vi->max_tx_rate = vf_cfg->tx_rate;
+       vi->min_tx_rate = 0;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
@@ -1316,8 +1318,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
        return 0;
 }
 
-static int be_set_vf_vlan(struct net_device *netdev,
-                       int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,11 +1349,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
        return status;
 }
 
-static int be_set_vf_tx_rate(struct net_device *netdev,
-                       int vf, int rate)
+static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
+                            int min_tx_rate, int max_tx_rate)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       int status = 0;
+       struct device *dev = &adapter->pdev->dev;
+       int percent_rate, status = 0;
+       u16 link_speed = 0;
+       u8 link_status;
 
        if (!sriov_enabled(adapter))
                return -EPERM;
@@ -1360,18 +1364,50 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
        if (vf >= adapter->num_vfs)
                return -EINVAL;
 
-       if (rate < 100 || rate > 10000) {
-               dev_err(&adapter->pdev->dev,
-                       "tx rate must be between 100 and 10000 Mbps\n");
+       if (min_tx_rate)
                return -EINVAL;
+
+       if (!max_tx_rate)
+               goto config_qos;
+
+       status = be_cmd_link_status_query(adapter, &link_speed,
+                                         &link_status, 0);
+       if (status)
+               goto err;
+
+       if (!link_status) {
+               dev_err(dev, "TX-rate setting not allowed when link is down\n");
+               status = -EPERM;
+               goto err;
        }
 
-       status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
+       if (max_tx_rate < 100 || max_tx_rate > link_speed) {
+               dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
+                       link_speed);
+               status = -EINVAL;
+               goto err;
+       }
+
+       /* On Skyhawk the QOS setting must be done only as a % value */
+       percent_rate = link_speed / 100;
+       if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
+               dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
+                       percent_rate);
+               status = -EINVAL;
+               goto err;
+       }
+
+config_qos:
+       status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
        if (status)
-               dev_err(&adapter->pdev->dev,
-                               "tx rate %d on VF %d failed\n", rate, vf);
-       else
-               adapter->vf_cfg[vf].tx_rate = rate;
+               goto err;
+
+       adapter->vf_cfg[vf].tx_rate = max_tx_rate;
+       return 0;
+
+err:
+       dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
+               max_tx_rate, vf);
        return status;
 }
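
The new max_tx_rate path rejects rates outside the 100 Mbps..link_speed range and, on Skyhawk, additionally requires the rate to be a whole percentage of the link speed. A small worked check of that rule (validate_vf_rate is a hypothetical helper, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the checks added to be_set_vf_tx_rate();
 * 'skyhawk' stands in for skyhawk_chip(adapter). */
static int validate_vf_rate(int max_tx_rate, int link_speed, bool skyhawk)
{
	int percent_rate = link_speed / 100;   /* 1% of link speed, in Mbps */

	if (max_tx_rate < 100 || max_tx_rate > link_speed)
		return -1;                     /* out of range */

	if (skyhawk && (max_tx_rate % percent_rate))
		return -1;                     /* must be a whole % of the link speed */

	return 0;
}

int main(void)
{
	/* On a 10G link only multiples of 100 Mbps pass the Skyhawk check. */
	printf("%d\n", validate_vf_rate(2500, 10000, true));   /* 0  (25% of link) */
	printf("%d\n", validate_vf_rate(2550, 10000, true));   /* -1 (not a whole %) */
	printf("%d\n", validate_vf_rate(2550, 10000, false));  /* 0  (non-Skyhawk) */
	return 0;
}
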
 static int be_set_vf_link_state(struct net_device *netdev, int vf,
@@ -1469,7 +1505,7 @@ modify_eqd:
 }
 
 static void be_rx_stats_update(struct be_rx_obj *rxo,
-               struct be_rx_compl_info *rxcp)
+                              struct be_rx_compl_info *rxcp)
 {
        struct be_rx_stats *stats = rx_stats(rxo);
 
@@ -1566,7 +1602,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
-               skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
+               skb_frag_size_set(&skb_shinfo(skb)->frags[0],
+                                 curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
@@ -1725,8 +1762,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
        if (rxcp->vlanf) {
                rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
                                          compl);
-               rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
-                                              compl);
+               rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
+                                              vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
        rxcp->tunneled =
@@ -1757,8 +1794,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
        if (rxcp->vlanf) {
                rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
                                          compl);
-               rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
-                                              compl);
+               rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+                                              vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
        rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1836,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);
 
                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
-                   !adapter->vlan_tag[rxcp->vlan_tag])
+                   !test_bit(rxcp->vlan_tag, adapter->vids))
                        rxcp->vlanf = 0;
        }
 
@@ -1915,7 +1952,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 }
 
 static u16 be_tx_compl_process(struct be_adapter *adapter,
-               struct be_tx_obj *txo, u16 last_index)
+                              struct be_tx_obj *txo, u16 last_index)
 {
        struct be_queue_info *txq = &txo->q;
        struct be_eth_wrb *wrb;
@@ -2122,7 +2159,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 
                eq = &eqo->q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
-                                       sizeof(struct be_eq_entry));
+                                   sizeof(struct be_eq_entry));
                if (rc)
                        return rc;
 
@@ -2155,7 +2192,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
 
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
-                       sizeof(struct be_mcc_compl)))
+                          sizeof(struct be_mcc_compl)))
                goto err;
 
        /* Use the default EQ for MCC completions */
@@ -2275,7 +2312,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
                rxo->adapter = adapter;
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
-                               sizeof(struct be_eth_rx_compl));
+                                   sizeof(struct be_eth_rx_compl));
                if (rc)
                        return rc;
 
@@ -2339,7 +2376,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
 }
 
 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
-                       int budget, int polling)
+                        int budget, int polling)
 {
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2402,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
                 * promiscuous mode on some skews
                 */
                if (unlikely(rxcp->port != adapter->port_num &&
-                               !lancer_chip(adapter))) {
+                            !lancer_chip(adapter))) {
                        be_rx_compl_discard(rxo, rxcp);
                        goto loop_continue;
                }
@@ -2405,8 +2442,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
                if (!txcp)
                        break;
                num_wrbs += be_tx_compl_process(adapter, txo,
-                               AMAP_GET_BITS(struct amap_eth_tx_compl,
-                                       wrb_index, txcp));
+                                               AMAP_GET_BITS(struct
+                                                             amap_eth_tx_compl,
+                                                             wrb_index, txcp));
        }
 
        if (work_done) {
@@ -2416,7 +2454,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
                /* As Tx wrbs have been freed up, wake up netdev queue
                 * if it was stopped due to lack of tx wrbs.  */
                if (__netif_subqueue_stopped(adapter->netdev, idx) &&
-                       atomic_read(&txo->q.used) < txo->q.len / 2) {
+                   atomic_read(&txo->q.used) < txo->q.len / 2) {
                        netif_wake_subqueue(adapter->netdev, idx);
                }
 
@@ -2510,9 +2548,9 @@ void be_detect_error(struct be_adapter *adapter)
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                        sliport_err1 = ioread32(adapter->db +
-                                       SLIPORT_ERROR1_OFFSET);
+                                               SLIPORT_ERROR1_OFFSET);
                        sliport_err2 = ioread32(adapter->db +
-                                       SLIPORT_ERROR2_OFFSET);
+                                               SLIPORT_ERROR2_OFFSET);
                        adapter->hw_error = true;
                        /* Do not log error messages if its a FW reset */
                        if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2569,13 @@ void be_detect_error(struct be_adapter *adapter)
                }
        } else {
                pci_read_config_dword(adapter->pdev,
-                               PCICFG_UE_STATUS_LOW, &ue_lo);
+                                     PCICFG_UE_STATUS_LOW, &ue_lo);
                pci_read_config_dword(adapter->pdev,
-                               PCICFG_UE_STATUS_HIGH, &ue_hi);
+                                     PCICFG_UE_STATUS_HIGH, &ue_hi);
                pci_read_config_dword(adapter->pdev,
-                               PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
+                                     PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
                pci_read_config_dword(adapter->pdev,
-                               PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+                                     PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
 
                ue_lo = (ue_lo & ~ue_lo_mask);
                ue_hi = (ue_hi & ~ue_hi_mask);
@@ -2624,7 +2662,7 @@ fail:
 }
 
 static inline int be_msix_vec_get(struct be_adapter *adapter,
-                               struct be_eq_obj *eqo)
+                                 struct be_eq_obj *eqo)
 {
        return adapter->msix_entries[eqo->msix_idx].vector;
 }
@@ -2648,7 +2686,7 @@ err_msix:
        for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
                free_irq(be_msix_vec_get(adapter, eqo), eqo);
        dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
-               status);
+                status);
        be_msix_disable(adapter);
        return status;
 }
@@ -2821,8 +2859,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
        }
 
        get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
-       rc = be_cmd_rss_config(adapter, rss->rsstable,
-                              rss->rss_flags,
+       rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
                               128, rss_hkey);
        if (rc) {
                rss->rss_flags = RSS_ENABLE_NONE;
@@ -2903,7 +2940,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
 
        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
-                       PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+                                               PCICFG_PM_CONTROL_OFFSET,
+                                               PCICFG_PM_CONTROL_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-lan\n");
@@ -2912,7 +2950,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
                        return status;
                }
                status = be_cmd_enable_magic_wol(adapter,
-                               adapter->netdev->dev_addr, &cmd);
+                                                adapter->netdev->dev_addr,
+                                                &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
@@ -2951,7 +2990,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
 
                if (status)
                        dev_err(&adapter->pdev->dev,
-                       "Mac address assignment failed for VF %d\n", vf);
+                               "Mac address assignment failed for VF %d\n",
+                               vf);
                else
                        memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
@@ -3093,9 +3133,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
 
                /* If a FW profile exists, then cap_flags are updated */
                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
-                          BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
-               status = be_cmd_if_create(adapter, cap_flags, en_flags,
-                                         &vf_cfg->if_handle, vf + 1);
+                                       BE_IF_FLAGS_BROADCAST |
+                                       BE_IF_FLAGS_MULTICAST);
+               status =
+                   be_cmd_if_create(adapter, cap_flags, en_flags,
+                                    &vf_cfg->if_handle, vf + 1);
                if (status)
                        goto err;
        }
@@ -3126,7 +3168,6 @@ static int be_vf_setup(struct be_adapter *adapter)
        struct be_vf_cfg *vf_cfg;
        int status, old_vfs, vf;
        u32 privileges;
-       u16 lnk_speed;
 
        old_vfs = pci_num_vf(adapter->pdev);
        if (old_vfs) {
@@ -3182,16 +3223,9 @@ static int be_vf_setup(struct be_adapter *adapter)
                                         vf);
                }
 
-               /* BE3 FW, by default, caps VF TX-rate to 100mbps.
-                * Allow full available bandwidth
-                */
-               if (BE3_chip(adapter) && !old_vfs)
-                       be_cmd_config_qos(adapter, 1000, vf + 1);
-
-               status = be_cmd_link_status_query(adapter, &lnk_speed,
-                                                 NULL, vf + 1);
-               if (!status)
-                       vf_cfg->tx_rate = lnk_speed;
+               /* Allow full available bandwidth */
+               if (!old_vfs)
+                       be_cmd_config_qos(adapter, 0, 0, vf + 1);
 
                if (!old_vfs) {
                        be_cmd_enable_vf(adapter, vf + 1);
@@ -3597,35 +3631,7 @@ static void be_netpoll(struct net_device *netdev)
 }
 #endif
 
-#define FW_FILE_HDR_SIGN       "ServerEngines Corp. "
-static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
-
-static bool be_flash_redboot(struct be_adapter *adapter,
-                       const u8 *p, u32 img_start, int image_size,
-                       int hdr_size)
-{
-       u32 crc_offset;
-       u8 flashed_crc[4];
-       int status;
-
-       crc_offset = hdr_size + img_start + image_size - 4;
-
-       p += crc_offset;
-
-       status = be_cmd_get_flash_crc(adapter, flashed_crc,
-                       (image_size - 4));
-       if (status) {
-               dev_err(&adapter->pdev->dev,
-               "could not get crc from flash, not flashing redboot\n");
-               return false;
-       }
-
-       /*update redboot only if crc does not match*/
-       if (!memcmp(flashed_crc, p, 4))
-               return false;
-       else
-               return true;
-}
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
 
 static bool phy_flashing_required(struct be_adapter *adapter)
 {
@@ -3656,8 +3662,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
 }
 
 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
-                                        int header_size,
-                                        const struct firmware *fw)
+                                               int header_size,
+                                               const struct firmware *fw)
 {
        struct flash_section_info *fsec = NULL;
        const u8 *p = fw->data;
@@ -3672,12 +3678,35 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
        return NULL;
 }
 
+static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
+                             u32 img_offset, u32 img_size, int hdr_size,
+                             u16 img_optype, bool *crc_match)
+{
+       u32 crc_offset;
+       int status;
+       u8 crc[4];
+
+       status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
+       if (status)
+               return status;
+
+       crc_offset = hdr_size + img_offset + img_size - 4;
+
+       /* Skip flashing, if crc of flashed region matches */
+       if (!memcmp(crc, p + crc_offset, 4))
+               *crc_match = true;
+       else
+               *crc_match = false;
+
+       return status;
+}
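
be_check_flash_crc() above compares the CRC stored in the last four bytes of an image section in the UFI file against the CRC the firmware reports for the flashed region, so callers can skip sections that are already up to date. A stripped-down illustration with the firmware query faked (names here are not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for be_cmd_get_flash_crc(): pretend the flashed image ends here. */
static int read_flashed_crc(uint8_t crc[4])
{
	static const uint8_t on_flash[4] = { 0xde, 0xad, 0xbe, 0xef };

	memcpy(crc, on_flash, 4);
	return 0;
}

/* Same idea as be_check_flash_crc(): the CRC lives in the section's last 4 bytes. */
static int crc_matches(const uint8_t *fw, uint32_t img_offset, uint32_t img_size,
		       uint32_t hdr_size, bool *match)
{
	uint8_t crc[4];
	int status = read_flashed_crc(crc);

	if (status)
		return status;

	*match = !memcmp(crc, fw + hdr_size + img_offset + img_size - 4, 4);
	return 0;
}

int main(void)
{
	uint8_t fw[32] = { 0 };
	bool match;

	fw[28] = 0xde; fw[29] = 0xad; fw[30] = 0xbe; fw[31] = 0xef;
	crc_matches(fw, 0, 32, 0, &match);
	printf("skip flashing: %d\n", match);   /* 1 -> section unchanged */
	return 0;
}
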
+
 static int be_flash(struct be_adapter *adapter, const u8 *img,
-               struct be_dma_mem *flash_cmd, int optype, int img_size)
+                   struct be_dma_mem *flash_cmd, int optype, int img_size)
 {
-       u32 total_bytes = 0, flash_op, num_bytes = 0;
-       int status = 0;
        struct be_cmd_write_flashrom *req = flash_cmd->va;
+       u32 total_bytes, flash_op, num_bytes;
+       int status;
 
        total_bytes = img_size;
        while (total_bytes) {
@@ -3700,32 +3729,28 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
                memcpy(req->data_buf, img, num_bytes);
                img += num_bytes;
                status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
-                                               flash_op, num_bytes);
-               if (status) {
-                       if (status == ILLEGAL_IOCTL_REQ &&
-                           optype == OPTYPE_PHY_FW)
-                               break;
-                       dev_err(&adapter->pdev->dev,
-                               "cmd to write to flash rom failed.\n");
+                                              flash_op, num_bytes);
+               if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
+                   optype == OPTYPE_PHY_FW)
+                       break;
+               else if (status)
                        return status;
-               }
        }
        return 0;
 }
 
 /* For BE2, BE3 and BE3-R */
 static int be_flash_BEx(struct be_adapter *adapter,
-                        const struct firmware *fw,
-                        struct be_dma_mem *flash_cmd,
-                        int num_of_images)
-
+                       const struct firmware *fw,
+                       struct be_dma_mem *flash_cmd, int num_of_images)
 {
-       int status = 0, i, filehdr_size = 0;
        int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
-       const u8 *p = fw->data;
-       const struct flash_comp *pflashcomp;
-       int num_comp, redboot;
+       struct device *dev = &adapter->pdev->dev;
        struct flash_section_info *fsec = NULL;
+       int status, i, filehdr_size, num_comp;
+       const struct flash_comp *pflashcomp;
+       bool crc_match;
+       const u8 *p;
 
        struct flash_comp gen3_flash_types[] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
@@ -3782,8 +3807,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
        /* Get flash section info*/
        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
        if (!fsec) {
-               dev_err(&adapter->pdev->dev,
-                       "Invalid Cookie. UFI corrupted ?\n");
+               dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
                return -1;
        }
        for (i = 0; i < num_comp; i++) {
@@ -3799,23 +3823,32 @@ static int be_flash_BEx(struct be_adapter *adapter,
                                continue;
 
                if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
-                       redboot = be_flash_redboot(adapter, fw->data,
-                               pflashcomp[i].offset, pflashcomp[i].size,
-                               filehdr_size + img_hdrs_size);
-                       if (!redboot)
+                       status = be_check_flash_crc(adapter, fw->data,
+                                                   pflashcomp[i].offset,
+                                                   pflashcomp[i].size,
+                                                   filehdr_size +
+                                                   img_hdrs_size,
+                                                   OPTYPE_REDBOOT, &crc_match);
+                       if (status) {
+                               dev_err(dev,
+                                       "Could not get CRC for 0x%x region\n",
+                                       pflashcomp[i].optype);
+                               continue;
+                       }
+
+                       if (crc_match)
                                continue;
                }
 
-               p = fw->data;
-               p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
+               p = fw->data + filehdr_size + pflashcomp[i].offset +
+                       img_hdrs_size;
                if (p + pflashcomp[i].size > fw->data + fw->size)
                        return -1;
 
                status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
-                                       pflashcomp[i].size);
+                                 pflashcomp[i].size);
                if (status) {
-                       dev_err(&adapter->pdev->dev,
-                               "Flashing section type %d failed.\n",
+                       dev_err(dev, "Flashing section type 0x%x failed\n",
                                pflashcomp[i].img_type);
                        return status;
                }
@@ -3823,80 +3856,142 @@ static int be_flash_BEx(struct be_adapter *adapter,
        return 0;
 }
 
+static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
+{
+       u32 img_type = le32_to_cpu(fsec_entry.type);
+       u16 img_optype = le16_to_cpu(fsec_entry.optype);
+
+       if (img_optype != 0xFFFF)
+               return img_optype;
+
+       switch (img_type) {
+       case IMAGE_FIRMWARE_iSCSI:
+               img_optype = OPTYPE_ISCSI_ACTIVE;
+               break;
+       case IMAGE_BOOT_CODE:
+               img_optype = OPTYPE_REDBOOT;
+               break;
+       case IMAGE_OPTION_ROM_ISCSI:
+               img_optype = OPTYPE_BIOS;
+               break;
+       case IMAGE_OPTION_ROM_PXE:
+               img_optype = OPTYPE_PXE_BIOS;
+               break;
+       case IMAGE_OPTION_ROM_FCoE:
+               img_optype = OPTYPE_FCOE_BIOS;
+               break;
+       case IMAGE_FIRMWARE_BACKUP_iSCSI:
+               img_optype = OPTYPE_ISCSI_BACKUP;
+               break;
+       case IMAGE_NCSI:
+               img_optype = OPTYPE_NCSI_FW;
+               break;
+       case IMAGE_FLASHISM_JUMPVECTOR:
+               img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
+               break;
+       case IMAGE_FIRMWARE_PHY:
+               img_optype = OPTYPE_SH_PHY_FW;
+               break;
+       case IMAGE_REDBOOT_DIR:
+               img_optype = OPTYPE_REDBOOT_DIR;
+               break;
+       case IMAGE_REDBOOT_CONFIG:
+               img_optype = OPTYPE_REDBOOT_CONFIG;
+               break;
+       case IMAGE_UFI_DIR:
+               img_optype = OPTYPE_UFI_DIR;
+               break;
+       default:
+               break;
+       }
+
+       return img_optype;
+}
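
be_get_img_optype() prefers the per-section optype that newer UFI files carry and only falls back to the legacy image-type mapping when the field is left at 0xFFFF. A condensed sketch of the same precedence rule, using placeholder constants and only one of the mappings:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real constants live in the driver's headers. */
enum { IMG_BOOT_CODE = 224, OPT_REDBOOT = 2, OPT_UNKNOWN = 0xFFFF };

static uint16_t pick_optype(uint16_t sec_optype, uint32_t img_type)
{
	/* Newer UFI files carry an explicit per-section optype; it wins. */
	if (sec_optype != OPT_UNKNOWN)
		return sec_optype;

	/* Legacy files leave the field at 0xFFFF: map from the image type. */
	switch (img_type) {
	case IMG_BOOT_CODE:
		return OPT_REDBOOT;
	default:
		return OPT_UNKNOWN;     /* caller skips unrecognized sections */
	}
}

int main(void)
{
	printf("%u\n", pick_optype(0xFFFF, IMG_BOOT_CODE)); /* legacy file: 2 */
	printf("%u\n", pick_optype(21, IMG_BOOT_CODE));     /* explicit optype wins: 21 */
	return 0;
}
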
+
 static int be_flash_skyhawk(struct be_adapter *adapter,
-               const struct firmware *fw,
-               struct be_dma_mem *flash_cmd, int num_of_images)
+                           const struct firmware *fw,
+                           struct be_dma_mem *flash_cmd, int num_of_images)
 {
-       int status = 0, i, filehdr_size = 0;
-       int img_offset, img_size, img_optype, redboot;
        int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
-       const u8 *p = fw->data;
+       struct device *dev = &adapter->pdev->dev;
        struct flash_section_info *fsec = NULL;
+       u32 img_offset, img_size, img_type;
+       int status, i, filehdr_size;
+       bool crc_match, old_fw_img;
+       u16 img_optype;
+       const u8 *p;
 
        filehdr_size = sizeof(struct flash_file_hdr_g3);
        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
        if (!fsec) {
-               dev_err(&adapter->pdev->dev,
-                       "Invalid Cookie. UFI corrupted ?\n");
+               dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
                return -1;
        }
 
        for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
                img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
                img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+               img_type   = le32_to_cpu(fsec->fsec_entry[i].type);
+               img_optype = be_get_img_optype(fsec->fsec_entry[i]);
+               old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
 
-               switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
-               case IMAGE_FIRMWARE_iSCSI:
-                       img_optype = OPTYPE_ISCSI_ACTIVE;
-                       break;
-               case IMAGE_BOOT_CODE:
-                       img_optype = OPTYPE_REDBOOT;
-                       break;
-               case IMAGE_OPTION_ROM_ISCSI:
-                       img_optype = OPTYPE_BIOS;
-                       break;
-               case IMAGE_OPTION_ROM_PXE:
-                       img_optype = OPTYPE_PXE_BIOS;
-                       break;
-               case IMAGE_OPTION_ROM_FCoE:
-                       img_optype = OPTYPE_FCOE_BIOS;
-                       break;
-               case IMAGE_FIRMWARE_BACKUP_iSCSI:
-                       img_optype = OPTYPE_ISCSI_BACKUP;
-                       break;
-               case IMAGE_NCSI:
-                       img_optype = OPTYPE_NCSI_FW;
-                       break;
-               default:
+               if (img_optype == 0xFFFF)
                        continue;
+               /* Don't bother verifying CRC if an old FW image is being
+                * flashed
+                */
+               if (old_fw_img)
+                       goto flash;
+
+               status = be_check_flash_crc(adapter, fw->data, img_offset,
+                                           img_size, filehdr_size +
+                                           img_hdrs_size, img_optype,
+                                           &crc_match);
+               /* The current FW image on the card does not recognize the new
+                * FLASH op_type. The FW download is partially complete.
+                * Reboot the server now to enable FW image to recognize the
+                * new FLASH op_type. To complete the remaining process,
+                * download the same FW again after the reboot.
+                */
+               if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+                   base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
+                       dev_err(dev, "Flash incomplete. Reset the server\n");
+                       dev_err(dev, "Download FW image again after reset\n");
+                       return -EAGAIN;
+               } else if (status) {
+                       dev_err(dev, "Could not get CRC for 0x%x region\n",
+                               img_optype);
+                       return -EFAULT;
                }
 
-               if (img_optype == OPTYPE_REDBOOT) {
-                       redboot = be_flash_redboot(adapter, fw->data,
-                                       img_offset, img_size,
-                                       filehdr_size + img_hdrs_size);
-                       if (!redboot)
-                               continue;
-               }
+               if (crc_match)
+                       continue;
 
-               p = fw->data;
-               p += filehdr_size + img_offset + img_hdrs_size;
+flash:
+               p = fw->data + filehdr_size + img_offset + img_hdrs_size;
                if (p + img_size > fw->data + fw->size)
                        return -1;
 
                status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
-               if (status) {
-                       dev_err(&adapter->pdev->dev,
-                               "Flashing section type %d failed.\n",
-                               fsec->fsec_entry[i].type);
-                       return status;
+               /* For old FW images ignore ILLEGAL_FIELD error or errors on
+                * UFI_DIR region
+                */
+               if (old_fw_img &&
+                   (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
+                    (img_optype == OPTYPE_UFI_DIR &&
+                     base_status(status) == MCC_STATUS_FAILED))) {
+                       continue;
+               } else if (status) {
+                       dev_err(dev, "Flashing section type 0x%x failed\n",
+                               img_type);
+                       return -EFAULT;
                }
        }
        return 0;
 }
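The loop above (be_flash_skyhawk) now makes three per-section decisions: skip sections whose op_type was not recognized, skip the CRC comparison entirely when an old-format FW image is being flashed, and treat ILLEGAL_REQUEST/ILLEGAL_FIELD from the CRC query as "reboot, then flash the same image again". A compilable sketch of that control flow follows; the helper names, status codes and return convention are stand-ins, not the be2net API.

/* Hedged sketch of the per-section decision flow added above.
 * check_flash_crc(), flash_section() and the ST_* values are illustrative
 * stand-ins for be_check_flash_crc()/be_flash() and the MCC_STATUS_* codes;
 * only the branching mirrors the driver.
 */
#include <stdbool.h>

enum status { ST_OK, ST_ILLEGAL_REQUEST, ST_ILLEGAL_FIELD, ST_FAIL };

static enum status check_flash_crc(bool *crc_match) { *crc_match = false; return ST_OK; }
static enum status flash_section(void) { return ST_OK; }

/* returns 0 = done/skipped, 1 = reboot-and-reflash needed, -1 = hard error */
static int flash_one_section(unsigned int img_optype, bool old_fw_img)
{
        bool crc_match;
        enum status st;

        if (img_optype == 0xFFFF)       /* unknown section type: skip it */
                return 0;

        if (!old_fw_img) {              /* old images skip the CRC check */
                st = check_flash_crc(&crc_match);
                if (st == ST_ILLEGAL_REQUEST || st == ST_ILLEGAL_FIELD)
                        return 1;       /* running FW predates this op_type */
                if (st != ST_OK)
                        return -1;
                if (crc_match)          /* section already up to date */
                        return 0;
        }

        return flash_section() == ST_OK ? 0 : -1;
}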
 
 static int lancer_fw_download(struct be_adapter *adapter,
-                               const struct firmware *fw)
+                             const struct firmware *fw)
 {
 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
@@ -3962,7 +4057,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
        }
 
        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
-                               flash_cmd.dma);
+                         flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. "
@@ -3983,9 +4078,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
                        goto lancer_fw_exit;
                }
        } else if (change_status != LANCER_NO_RESET_NEEDED) {
-                       dev_err(&adapter->pdev->dev,
-                               "System reboot required for new FW"
-                               " to be active\n");
+               dev_err(&adapter->pdev->dev,
+                       "System reboot required for new FW to be active\n");
        }
 
        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4049,7 +4143,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
                        switch (ufi_type) {
                        case UFI_TYPE4:
                                status = be_flash_skyhawk(adapter, fw,
-                                                       &flash_cmd, num_imgs);
+                                                         &flash_cmd, num_imgs);
                                break;
                        case UFI_TYPE3R:
                                status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4119,8 +4213,7 @@ fw_exit:
        return status;
 }
 
-static int be_ndo_bridge_setlink(struct net_device *dev,
-                                   struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
 {
        struct be_adapter *adapter = netdev_priv(dev);
        struct nlattr *attr, *br_spec;
@@ -4162,8 +4255,7 @@ err:
 }
 
 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                   struct net_device *dev,
-                                   u32 filter_mask)
+                                struct net_device *dev, u32 filter_mask)
 {
        struct be_adapter *adapter = netdev_priv(dev);
        int status = 0;
@@ -4261,7 +4353,7 @@ static const struct net_device_ops be_netdev_ops = {
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
-       .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
+       .ndo_set_vf_rate        = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
        .ndo_set_vf_link_state  = be_set_vf_link_state,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4308,7 +4400,7 @@ static void be_netdev_init(struct net_device *netdev)
 
        netdev->netdev_ops = &be_netdev_ops;
 
-       SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+       netdev->ethtool_ops = &be_ethtool_ops;
 }
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -4877,7 +4969,7 @@ static void be_shutdown(struct pci_dev *pdev)
 }
 
 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
-                               pci_channel_state_t state)
+                                           pci_channel_state_t state)
 {
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;
@@ -4956,6 +5048,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
        if (status)
                goto err;
 
+       /* On some BE3 FW versions, after a HW reset,
+        * interrupts will remain disabled for each function.
+        * So, explicitly enable interrupts
+        */
+       be_intr_set(adapter, true);
+
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
index 68069eabc4f855c0636d12b69854e44919ffb863..c77fa4a6984458648a97960deeef2d0a54b6637c 100644 (file)
@@ -1210,7 +1210,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
+       netdev->ethtool_ops = &ftgmac100_ethtool_ops;
        netdev->netdev_ops = &ftgmac100_netdev_ops;
        netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
 
index 8be5b40c0a121331f1d1bbc0712499eb4bec8160..4ff1adc6bfcab9132b1dbf673626f868430bbcc3 100644 (file)
@@ -1085,7 +1085,7 @@ static int ftmac100_probe(struct platform_device *pdev)
        }
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
-       SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+       netdev->ethtool_ops = &ftmac100_ethtool_ops;
        netdev->netdev_ops = &ftmac100_netdev_ops;
 
        platform_set_drvdata(pdev, netdev);
index 8d69e439f0c518d4b3e46c9ae21d85e4013b7e06..4d989b2df42042d9e39ad5fb216f979b53b76678 100644 (file)
@@ -1255,6 +1255,49 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        return 0;
 }
 
+static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       int ret;
+
+       if (enable) {
+               ret = clk_prepare_enable(fep->clk_ahb);
+               if (ret)
+                       return ret;
+               ret = clk_prepare_enable(fep->clk_ipg);
+               if (ret)
+                       goto failed_clk_ipg;
+               if (fep->clk_enet_out) {
+                       ret = clk_prepare_enable(fep->clk_enet_out);
+                       if (ret)
+                               goto failed_clk_enet_out;
+               }
+               if (fep->clk_ptp) {
+                       ret = clk_prepare_enable(fep->clk_ptp);
+                       if (ret)
+                               goto failed_clk_ptp;
+               }
+       } else {
+               clk_disable_unprepare(fep->clk_ahb);
+               clk_disable_unprepare(fep->clk_ipg);
+               if (fep->clk_enet_out)
+                       clk_disable_unprepare(fep->clk_enet_out);
+               if (fep->clk_ptp)
+                       clk_disable_unprepare(fep->clk_ptp);
+       }
+
+       return 0;
+failed_clk_ptp:
+       if (fep->clk_enet_out)
+               clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+       clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+       clk_disable_unprepare(fep->clk_ahb);
+
+       return ret;
+}
+
 static int fec_enet_mii_probe(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1364,7 +1407,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * Reference Manual has an error on this, and gets fixed on i.MX6Q
         * document.
         */
-       fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
+       fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
                fep->phy_speed--;
        fep->phy_speed <<= 1;
@@ -1773,6 +1816,11 @@ fec_enet_open(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        int ret;
 
+       pinctrl_pm_select_default_state(&fep->pdev->dev);
+       ret = fec_enet_clk_enable(ndev, true);
+       if (ret)
+               return ret;
+
        /* I should reset the ring buffers here, but I don't yet know
         * a simple way to do that.
         */
@@ -1811,6 +1859,8 @@ fec_enet_close(struct net_device *ndev)
                phy_disconnect(fep->phy_dev);
        }
 
+       fec_enet_clk_enable(ndev, false);
+       pinctrl_pm_select_sleep_state(&fep->pdev->dev);
        fec_enet_free_buffers(ndev);
 
        return 0;
@@ -2114,6 +2164,9 @@ fec_probe(struct platform_device *pdev)
                fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
 #endif
 
+       /* Select default pin state */
+       pinctrl_pm_select_default_state(&pdev->dev);
+
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fep->hwp = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(fep->hwp)) {
@@ -2164,26 +2217,10 @@ fec_probe(struct platform_device *pdev)
                fep->bufdesc_ex = 0;
        }
 
-       ret = clk_prepare_enable(fep->clk_ahb);
+       ret = fec_enet_clk_enable(ndev, true);
        if (ret)
                goto failed_clk;
 
-       ret = clk_prepare_enable(fep->clk_ipg);
-       if (ret)
-               goto failed_clk_ipg;
-
-       if (fep->clk_enet_out) {
-               ret = clk_prepare_enable(fep->clk_enet_out);
-               if (ret)
-                       goto failed_clk_enet_out;
-       }
-
-       if (fep->clk_ptp) {
-               ret = clk_prepare_enable(fep->clk_ptp);
-               if (ret)
-                       goto failed_clk_ptp;
-       }
-
        fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
                ret = regulator_enable(fep->reg_phy);
@@ -2225,6 +2262,8 @@ fec_probe(struct platform_device *pdev)
 
        /* Carrier starts down, phylib will bring it up */
        netif_carrier_off(ndev);
+       fec_enet_clk_enable(ndev, false);
+       pinctrl_pm_select_sleep_state(&pdev->dev);
 
        ret = register_netdev(ndev);
        if (ret)
@@ -2244,15 +2283,7 @@ failed_init:
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
 failed_regulator:
-       if (fep->clk_ptp)
-               clk_disable_unprepare(fep->clk_ptp);
-failed_clk_ptp:
-       if (fep->clk_enet_out)
-               clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-       clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
-       clk_disable_unprepare(fep->clk_ahb);
+       fec_enet_clk_enable(ndev, false);
 failed_clk:
 failed_ioremap:
        free_netdev(ndev);
@@ -2272,14 +2303,9 @@ fec_drv_remove(struct platform_device *pdev)
        del_timer_sync(&fep->time_keep);
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
-       if (fep->clk_ptp)
-               clk_disable_unprepare(fep->clk_ptp);
        if (fep->ptp_clock)
                ptp_clock_unregister(fep->ptp_clock);
-       if (fep->clk_enet_out)
-               clk_disable_unprepare(fep->clk_enet_out);
-       clk_disable_unprepare(fep->clk_ipg);
-       clk_disable_unprepare(fep->clk_ahb);
+       fec_enet_clk_enable(ndev, false);
        free_netdev(ndev);
 
        return 0;
@@ -2296,12 +2322,8 @@ fec_suspend(struct device *dev)
                fec_stop(ndev);
                netif_device_detach(ndev);
        }
-       if (fep->clk_ptp)
-               clk_disable_unprepare(fep->clk_ptp);
-       if (fep->clk_enet_out)
-               clk_disable_unprepare(fep->clk_enet_out);
-       clk_disable_unprepare(fep->clk_ipg);
-       clk_disable_unprepare(fep->clk_ahb);
+       fec_enet_clk_enable(ndev, false);
+       pinctrl_pm_select_sleep_state(&fep->pdev->dev);
 
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
@@ -2322,25 +2344,10 @@ fec_resume(struct device *dev)
                        return ret;
        }
 
-       ret = clk_prepare_enable(fep->clk_ahb);
+       pinctrl_pm_select_default_state(&fep->pdev->dev);
+       ret = fec_enet_clk_enable(ndev, true);
        if (ret)
-               goto failed_clk_ahb;
-
-       ret = clk_prepare_enable(fep->clk_ipg);
-       if (ret)
-               goto failed_clk_ipg;
-
-       if (fep->clk_enet_out) {
-               ret = clk_prepare_enable(fep->clk_enet_out);
-               if (ret)
-                       goto failed_clk_enet_out;
-       }
-
-       if (fep->clk_ptp) {
-               ret = clk_prepare_enable(fep->clk_ptp);
-               if (ret)
-                       goto failed_clk_ptp;
-       }
+               goto failed_clk;
 
        if (netif_running(ndev)) {
                fec_restart(ndev, fep->full_duplex);
@@ -2349,14 +2356,7 @@ fec_resume(struct device *dev)
 
        return 0;
 
-failed_clk_ptp:
-       if (fep->clk_enet_out)
-               clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-       clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
-       clk_disable_unprepare(fep->clk_ahb);
-failed_clk_ahb:
+failed_clk:
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
        return ret;
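The FEC hunks above replace four open-coded copies of the clock bring-up and tear-down sequence (probe, ndo_open/ndo_close, suspend/resume, remove) with one fec_enet_clk_enable() helper that rolls back already-enabled clocks in reverse order when a later clk_prepare_enable() fails. A minimal standalone sketch of that enable-with-rollback pattern, with dummy handles standing in for clk_ahb/clk_ipg/clk_enet_out/clk_ptp:

/* Sketch of the enable-with-rollback pattern used by fec_enet_clk_enable().
 * clk_on()/clk_off() stand in for clk_prepare_enable()/clk_disable_unprepare();
 * the struct and field names are illustrative only.
 */
struct clocks { int ahb, ipg, enet_out, ptp; };  /* 0 = off, 1 = on */

static int clk_on(int *c)  { *c = 1; return 0; } /* real clk_prepare_enable() can fail */
static void clk_off(int *c) { *c = 0; }

static int clks_enable(struct clocks *c, int enable)
{
        int ret;

        if (!enable) {
                clk_off(&c->ahb);
                clk_off(&c->ipg);
                clk_off(&c->enet_out);
                clk_off(&c->ptp);
                return 0;
        }

        ret = clk_on(&c->ahb);
        if (ret)
                return ret;
        ret = clk_on(&c->ipg);
        if (ret)
                goto err_ipg;
        ret = clk_on(&c->enet_out);
        if (ret)
                goto err_enet_out;
        ret = clk_on(&c->ptp);
        if (ret)
                goto err_ptp;
        return 0;

err_ptp:                        /* unwind in reverse order */
        clk_off(&c->enet_out);
err_enet_out:
        clk_off(&c->ipg);
err_ipg:
        clk_off(&c->ahb);
        return ret;
}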
index dc80db41d6b3397388b0210283c4c7fd3ce07680..cfaf17b70f3fc5d6ab7a11a81266d67267646f33 100644 (file)
@@ -791,10 +791,6 @@ static int fs_init_phy(struct net_device *dev)
 
        phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
                                iface);
-       if (!phydev) {
-               phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
-                                                  iface);
-       }
        if (!phydev) {
                dev_err(&dev->dev, "Could not attach to PHY\n");
                return -ENODEV;
@@ -1029,9 +1025,16 @@ static int fs_enet_probe(struct platform_device *ofdev)
        fpi->use_napi = 1;
        fpi->napi_weight = 17;
        fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
-       if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
-                                                 NULL)))
-               goto out_free_fpi;
+       if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
+               err = of_phy_register_fixed_link(ofdev->dev.of_node);
+               if (err)
+                       goto out_free_fpi;
+
+               /* In the case of a fixed PHY, the DT node associated
+                * to the PHY is the Ethernet MAC DT node.
+                */
+               fpi->phy_node = ofdev->dev.of_node;
+       }
 
        if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
                phy_connection_type = of_get_property(ofdev->dev.of_node,
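This fs_enet change, and the matching gianfar and ucc_geth hunks below, drop of_phy_connect_fixed_link() in favour of the newer fixed-link DT binding: if no phy-handle is given but the MAC node carries a fixed-link description, the driver registers the fixed PHY and then uses the MAC's own device_node as the PHY node for of_phy_connect(). A short sketch of that probe-time pattern, assuming the usual <linux/of_mdio.h> helpers and a hypothetical wrapper function:

/* Sketch of the fixed-link probe pattern adopted above. of_phy_is_fixed_link(),
 * of_phy_register_fixed_link() and of_parse_phandle() are the real kernel
 * helpers; example_get_phy_node() is only illustrative.
 */
#include <linux/of.h>
#include <linux/of_mdio.h>

static int example_get_phy_node(struct device_node *np,
                                struct device_node **phy_node)
{
        *phy_node = of_parse_phandle(np, "phy-handle", 0);
        if (*phy_node)
                return 0;

        if (of_phy_is_fixed_link(np)) {
                int err = of_phy_register_fixed_link(np);

                if (err)
                        return err;
                /* For a fixed link the PHY "node" is the MAC node itself */
                *phy_node = np;
        }
        return 0;
}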
index 9125d9abf0998d31e3179bd9c712af487855d5a9..282674027c92b012aedd35d980b3e69a4b96fd4a 100644 (file)
@@ -121,6 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
 static irqreturn_t gfar_transmit(int irq, void *dev_id);
 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 static void adjust_link(struct net_device *dev);
+static noinline void gfar_update_link_state(struct gfar_private *priv);
 static int init_phy(struct net_device *dev);
 static int gfar_probe(struct platform_device *ofdev);
 static int gfar_remove(struct platform_device *ofdev);
@@ -888,6 +889,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 
        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 
+       /* In the case of a fixed PHY, the DT node associated
+        * to the PHY is the Ethernet MAC DT node.
+        */
+       if (of_phy_is_fixed_link(np)) {
+               err = of_phy_register_fixed_link(np);
+               if (err)
+                       goto err_grp_init;
+
+               priv->phy_node = np;
+       }
+
        /* Find the TBI PHY.  If it's not there, we don't support SGMII */
        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
 
@@ -1659,9 +1671,6 @@ static int init_phy(struct net_device *dev)
 
        priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
                                      interface);
-       if (!priv->phydev)
-               priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
-                                                        interface);
        if (!priv->phydev) {
                dev_err(&dev->dev, "could not attach to PHY\n");
                return -ENODEV;
@@ -3076,41 +3085,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
        return IRQ_HANDLED;
 }
 
-static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
-{
-       struct phy_device *phydev = priv->phydev;
-       u32 val = 0;
-
-       if (!phydev->duplex)
-               return val;
-
-       if (!priv->pause_aneg_en) {
-               if (priv->tx_pause_en)
-                       val |= MACCFG1_TX_FLOW;
-               if (priv->rx_pause_en)
-                       val |= MACCFG1_RX_FLOW;
-       } else {
-               u16 lcl_adv, rmt_adv;
-               u8 flowctrl;
-               /* get link partner capabilities */
-               rmt_adv = 0;
-               if (phydev->pause)
-                       rmt_adv = LPA_PAUSE_CAP;
-               if (phydev->asym_pause)
-                       rmt_adv |= LPA_PAUSE_ASYM;
-
-               lcl_adv = mii_advertise_flowctrl(phydev->advertising);
-
-               flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-               if (flowctrl & FLOW_CTRL_TX)
-                       val |= MACCFG1_TX_FLOW;
-               if (flowctrl & FLOW_CTRL_RX)
-                       val |= MACCFG1_RX_FLOW;
-       }
-
-       return val;
-}
-
 /* Called every time the controller might need to be made
  * aware of new link state.  The PHY code conveys this
  * information through variables in the phydev structure, and this
@@ -3120,83 +3094,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
 static void adjust_link(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct phy_device *phydev = priv->phydev;
-       int new_state = 0;
-
-       if (test_bit(GFAR_RESETTING, &priv->state))
-               return;
-
-       if (phydev->link) {
-               u32 tempval1 = gfar_read(&regs->maccfg1);
-               u32 tempval = gfar_read(&regs->maccfg2);
-               u32 ecntrl = gfar_read(&regs->ecntrl);
-
-               /* Now we make sure that we can be in full duplex mode.
-                * If not, we operate in half-duplex mode.
-                */
-               if (phydev->duplex != priv->oldduplex) {
-                       new_state = 1;
-                       if (!(phydev->duplex))
-                               tempval &= ~(MACCFG2_FULL_DUPLEX);
-                       else
-                               tempval |= MACCFG2_FULL_DUPLEX;
-
-                       priv->oldduplex = phydev->duplex;
-               }
-
-               if (phydev->speed != priv->oldspeed) {
-                       new_state = 1;
-                       switch (phydev->speed) {
-                       case 1000:
-                               tempval =
-                                   ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
-
-                               ecntrl &= ~(ECNTRL_R100);
-                               break;
-                       case 100:
-                       case 10:
-                               tempval =
-                                   ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
-
-                               /* Reduced mode distinguishes
-                                * between 10 and 100
-                                */
-                               if (phydev->speed == SPEED_100)
-                                       ecntrl |= ECNTRL_R100;
-                               else
-                                       ecntrl &= ~(ECNTRL_R100);
-                               break;
-                       default:
-                               netif_warn(priv, link, dev,
-                                          "Ack!  Speed (%d) is not 10/100/1000!\n",
-                                          phydev->speed);
-                               break;
-                       }
-
-                       priv->oldspeed = phydev->speed;
-               }
-
-               tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-               tempval1 |= gfar_get_flowctrl_cfg(priv);
-
-               gfar_write(&regs->maccfg1, tempval1);
-               gfar_write(&regs->maccfg2, tempval);
-               gfar_write(&regs->ecntrl, ecntrl);
-
-               if (!priv->oldlink) {
-                       new_state = 1;
-                       priv->oldlink = 1;
-               }
-       } else if (priv->oldlink) {
-               new_state = 1;
-               priv->oldlink = 0;
-               priv->oldspeed = 0;
-               priv->oldduplex = -1;
-       }
 
-       if (new_state && netif_msg_link(priv))
-               phy_print_status(phydev);
+       if (unlikely(phydev->link != priv->oldlink ||
+                    phydev->duplex != priv->oldduplex ||
+                    phydev->speed != priv->oldspeed))
+               gfar_update_link_state(priv);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -3442,6 +3345,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
        return IRQ_HANDLED;
 }
 
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+       struct phy_device *phydev = priv->phydev;
+       u32 val = 0;
+
+       if (!phydev->duplex)
+               return val;
+
+       if (!priv->pause_aneg_en) {
+               if (priv->tx_pause_en)
+                       val |= MACCFG1_TX_FLOW;
+               if (priv->rx_pause_en)
+                       val |= MACCFG1_RX_FLOW;
+       } else {
+               u16 lcl_adv, rmt_adv;
+               u8 flowctrl;
+               /* get link partner capabilities */
+               rmt_adv = 0;
+               if (phydev->pause)
+                       rmt_adv = LPA_PAUSE_CAP;
+               if (phydev->asym_pause)
+                       rmt_adv |= LPA_PAUSE_ASYM;
+
+               lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+               flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+               if (flowctrl & FLOW_CTRL_TX)
+                       val |= MACCFG1_TX_FLOW;
+               if (flowctrl & FLOW_CTRL_RX)
+                       val |= MACCFG1_RX_FLOW;
+       }
+
+       return val;
+}
+
+static noinline void gfar_update_link_state(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       struct phy_device *phydev = priv->phydev;
+
+       if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
+               return;
+
+       if (phydev->link) {
+               u32 tempval1 = gfar_read(&regs->maccfg1);
+               u32 tempval = gfar_read(&regs->maccfg2);
+               u32 ecntrl = gfar_read(&regs->ecntrl);
+
+               if (phydev->duplex != priv->oldduplex) {
+                       if (!(phydev->duplex))
+                               tempval &= ~(MACCFG2_FULL_DUPLEX);
+                       else
+                               tempval |= MACCFG2_FULL_DUPLEX;
+
+                       priv->oldduplex = phydev->duplex;
+               }
+
+               if (phydev->speed != priv->oldspeed) {
+                       switch (phydev->speed) {
+                       case 1000:
+                               tempval =
+                                   ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+                               ecntrl &= ~(ECNTRL_R100);
+                               break;
+                       case 100:
+                       case 10:
+                               tempval =
+                                   ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+                               /* Reduced mode distinguishes
+                                * between 10 and 100
+                                */
+                               if (phydev->speed == SPEED_100)
+                                       ecntrl |= ECNTRL_R100;
+                               else
+                                       ecntrl &= ~(ECNTRL_R100);
+                               break;
+                       default:
+                               netif_warn(priv, link, priv->ndev,
+                                          "Ack!  Speed (%d) is not 10/100/1000!\n",
+                                          phydev->speed);
+                               break;
+                       }
+
+                       priv->oldspeed = phydev->speed;
+               }
+
+               tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+               tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+               gfar_write(&regs->maccfg1, tempval1);
+               gfar_write(&regs->maccfg2, tempval);
+               gfar_write(&regs->ecntrl, ecntrl);
+
+               if (!priv->oldlink)
+                       priv->oldlink = 1;
+
+       } else if (priv->oldlink) {
+               priv->oldlink = 0;
+               priv->oldspeed = 0;
+               priv->oldduplex = -1;
+       }
+
+       if (netif_msg_link(priv))
+               phy_print_status(phydev);
+}
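gfar_update_link_state() above keeps the existing pause handling: with pause autonegotiation enabled, the local and link-partner advertisements are resolved to the standard 802.3 full-duplex result before MACCFG1_TX_FLOW/MACCFG1_RX_FLOW are set (the driver uses mii_advertise_flowctrl() and mii_resolve_flowctrl_fdx() for this). A standalone sketch of that resolution table, with local stand-ins for the advertisement and flow-control bits:

/* Standalone sketch of the 802.3 pause resolution gfar_get_flowctrl_cfg()
 * relies on. The bit names are local stand-ins for LPA_PAUSE_* / FLOW_CTRL_*.
 */
#define ADV_PAUSE_CAP   0x1     /* symmetric pause advertised */
#define ADV_PAUSE_ASYM  0x2     /* asymmetric pause advertised */
#define FC_TX           0x1     /* we may send pause frames */
#define FC_RX           0x2     /* we honour received pause frames */

static unsigned int resolve_flowctrl_fdx(unsigned int lcl, unsigned int rmt)
{
        if (lcl & rmt & ADV_PAUSE_CAP)
                return FC_TX | FC_RX;   /* both sides symmetric */
        if (lcl & rmt & ADV_PAUSE_ASYM) {
                if (lcl & ADV_PAUSE_CAP)
                        return FC_RX;   /* we only receive pause */
                if (rmt & ADV_PAUSE_CAP)
                        return FC_TX;   /* we only send pause */
        }
        return 0;
}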
+
 static struct of_device_id gfar_match[] =
 {
        {
index 891dbee6e6c14d2394cc2dff00092f448faf3dc2..76d70708f864af66b4a525e671f021e0d30a2b7d 100644 (file)
@@ -533,6 +533,9 @@ static int gfar_spauseparam(struct net_device *dev,
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 oldadv, newadv;
 
+       if (!phydev)
+               return -ENODEV;
+
        if (!(phydev->supported & SUPPORTED_Pause) ||
            (!(phydev->supported & SUPPORTED_Asym_Pause) &&
             (epause->rx_pause != epause->tx_pause)))
index c8299c31b21f9f5c52dc380f9b867597c1b8bcdf..fab39e2954410106f9c26304f73c29169c1d35a1 100644 (file)
@@ -1728,9 +1728,6 @@ static int init_phy(struct net_device *dev)
 
        phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
                                priv->phy_interface);
-       if (!phydev)
-               phydev = of_phy_connect_fixed_link(dev, &adjust_link,
-                                                  priv->phy_interface);
        if (!phydev) {
                dev_err(&dev->dev, "Could not attach to PHY\n");
                return -ENODEV;
@@ -3790,6 +3787,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
        ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
 
        ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
+       if (!ug_info->phy_node) {
+               /* In the case of a fixed PHY, the DT node associated
+                * to the PHY is the Ethernet MAC DT node.
+                */
+               if (of_phy_is_fixed_link(np)) {
+                       err = of_phy_register_fixed_link(np);
+                       if (err)
+                               return err;
+               }
+               ug_info->phy_node = np;
+       }
 
        /* Find the TBI PHY node.  If it's not there, we don't support SGMII */
        ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
index 413329eff2ffc05f80197d05d16a9c4332821b6b..cc83350d56ba1c05aa7eb619b607635f76de6400 100644 (file)
@@ -417,5 +417,5 @@ static const struct ethtool_ops uec_ethtool_ops = {
 
 void uec_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops);
+       netdev->ethtool_ops = &uec_ethtool_ops;
 }
index 7becab1aa3e43b41c8f4b942cce18d9c584152fd..cfe7a74317307f8ef1ef39acd2c78c97e903d423 100644 (file)
@@ -256,7 +256,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
     dev->netdev_ops = &fjn_netdev_ops;
     dev->watchdog_timeo = TX_TIMEOUT;
 
-    SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+    dev->ethtool_ops = &netdev_ethtool_ops;
 
     return fmvj18x_config(link);
 } /* fmvj18x_attach */
index 95837b99a464865a7ca47273cb49ce9b3200136b..6055e3eaf49c860eff05450d4135f11b9d52a0a4 100644 (file)
@@ -278,5 +278,5 @@ static const struct ethtool_ops ehea_ethtool_ops = {
 
 void ehea_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
+       netdev->ethtool_ops = &ehea_ethtool_ops;
 }
index 538903bf13bce736161a96af324a2b9ebc9aecd9..a0b418e007a0d09303a32838d8acf1a06ca9d061 100644 (file)
@@ -28,6 +28,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/device.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
@@ -3273,7 +3274,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
                return -EINVAL;
        }
 
-       adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+       adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                ret = -ENOMEM;
                dev_err(&dev->dev, "no mem for ehea_adapter\n");
@@ -3359,7 +3360,6 @@ out_kill_eq:
 
 out_free_ad:
        list_del(&adapter->list);
-       kfree(adapter);
 
 out:
        ehea_update_firmware_handles();
@@ -3386,7 +3386,6 @@ static int ehea_remove(struct platform_device *dev)
        ehea_destroy_eq(adapter->neq);
        ehea_remove_adapter_mr(adapter);
        list_del(&adapter->list);
-       kfree(adapter);
 
        ehea_update_firmware_handles();
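The ehea kzalloc() to devm_kzalloc() switch is why the kfree(adapter) calls disappear from both the probe error path and ehea_remove(): device-managed memory is released automatically when probe fails or the device is unbound. A minimal sketch of the same conversion in a hypothetical platform driver (devm_kzalloc() is the real API; the rest is illustrative):

/* Sketch of the kzalloc() -> devm_kzalloc() conversion done for ehea.
 * my_adapter/my_probe are hypothetical names.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_adapter { int dummy; };

static int my_probe(struct platform_device *pdev)
{
        struct my_adapter *adapter;

        adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
        if (!adapter)
                return -ENOMEM;

        platform_set_drvdata(pdev, adapter);
        /* no kfree() needed in error paths or in .remove(): the memory is
         * freed when the device is unbound */
        return 0;
}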
 
index 9b03033bb5576f52d7c6f687f9e1a99c8e7a21a5..a0820f72b25c88bf9141354231813864c8159fec 100644 (file)
@@ -103,12 +103,14 @@ out_nomem:
 
 static void hw_queue_dtor(struct hw_queue *queue)
 {
-       int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+       int pages_per_kpage;
        int i, nr_pages;
 
        if (!queue || !queue->queue_pages)
                return;
 
+       pages_per_kpage = PAGE_SIZE / queue->pagesize;
+
        nr_pages = queue->queue_length / queue->pagesize;
 
        for (i = 0; i < nr_pages; i += pages_per_kpage)
index ae342fdb42c8e79853507007ad2ea6c8d3681fad..87bd953cc2eeaef7f6af65902841645e98f7eda5 100644 (file)
@@ -2879,7 +2879,7 @@ static int emac_probe(struct platform_device *ofdev)
                dev->commac.ops = &emac_commac_sg_ops;
        } else
                ndev->netdev_ops = &emac_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+       ndev->ethtool_ops = &emac_ethtool_ops;
 
        netif_carrier_off(ndev);
 
index 25045ae071711f03cee9ccabf9898cf2abebbc21..5727779a7df27477bee22c8bad9ac8253b083aea 100644 (file)
@@ -2245,7 +2245,7 @@ static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         */
        dev->netdev_ops = &ipg_netdev_ops;
        SET_NETDEV_DEV(dev, &pdev->dev);
-       SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
+       dev->ethtool_ops = &ipg_ethtool_ops;
 
        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
index b56461ce674c7832152cdab4c0b4c3b27789d9a4..9d979d7debef0fd1cb3ec80ca3dd8d908b6cd33b 100644 (file)
@@ -2854,7 +2854,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        netdev->hw_features |= NETIF_F_RXALL;
 
        netdev->netdev_ops = &e100_netdev_ops;
-       SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+       netdev->ethtool_ops = &e100_ethtool_ops;
        netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
index 73a8aeefb92a46d13a4c73be6cade2c5d694e00f..341889a4ef7f93f76fb4d845a592f015f7bd2d70 100644 (file)
@@ -1905,5 +1905,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 
 void e1000_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+       netdev->ethtool_ops = &e1000_ethtool_ops;
 }
index c1d3fdb296a05e4f1cd86205bce12669e41ceb6b..e9b07ccc0ebaebc0b1b0993625485cae93b0b447 100644 (file)
@@ -4877,10 +4877,10 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
         * since the test for a multicast frame will test positive on
         * a broadcast frame.
         */
-       if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+       if (is_broadcast_ether_addr(mac_addr))
                /* Broadcast packet */
                stats->bprc++;
-       else if (*mac_addr & 0x01)
+       else if (is_multicast_ether_addr(mac_addr))
                /* Multicast packet */
                stats->mprc++;
 
index a5f6b11d6992e63aa9af8da9f52540697455b2d1..08f22f348800ddd37aaf2d2d87eb4ce0e9c7f3b3 100644 (file)
@@ -1365,6 +1365,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
        .setup_led              = e1000e_setup_led_generic,
        .config_collision_dist  = e1000e_config_collision_dist_generic,
        .rar_set                = e1000e_rar_set_generic,
+       .rar_get_count          = e1000e_rar_get_count_generic,
 };
 
 static const struct e1000_phy_operations es2_phy_ops = {
index e0aa7f1efb08ceb50d9049128dcbdb66f442b17d..218481e509f99f4e32deba736ae4117365c650e3 100644 (file)
@@ -1896,6 +1896,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
        .config_collision_dist  = e1000e_config_collision_dist_generic,
        .read_mac_addr          = e1000_read_mac_addr_82571,
        .rar_set                = e1000e_rar_set_generic,
+       .rar_get_count          = e1000e_rar_get_count_generic,
 };
 
 static const struct e1000_phy_operations e82_phy_ops_igp = {
index 1471c5464a89e72d87aa571d4a1b15d791a3f015..7785240a0da1a1b409cfbc3d4b18ff63b5f2966a 100644 (file)
@@ -265,10 +265,10 @@ struct e1000_adapter {
        u32 tx_hwtstamp_timeouts;
 
        /* Rx */
-       bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
-                         int work_to_do) ____cacheline_aligned_in_smp;
-       void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
-                             gfp_t gfp);
+       bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+                        int work_to_do) ____cacheline_aligned_in_smp;
+       void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+                            gfp_t gfp);
        struct e1000_ring *rx_ring;
 
        u32 rx_int_delay;
@@ -391,6 +391,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
  * 25MHz       46-bit  2^46 / 10^9 / 3600 = 19.55 hours
  */
 #define E1000_SYSTIM_OVERFLOW_PERIOD   (HZ * 60 * 60 * 4)
+#define E1000_MAX_82574_SYSTIM_REREADS 50
+#define E1000_82574_SYSTIM_EPSILON     (1ULL << 35ULL)
 
 /* hardware capability, feature, and workaround flags */
 #define FLAG_HAS_AMT                      (1 << 0)
@@ -573,35 +575,8 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
 
 #define er32(reg)      __er32(hw, E1000_##reg)
 
-/**
- * __ew32_prepare - prepare to write to MAC CSR register on certain parts
- * @hw: pointer to the HW structure
- *
- * When updating the MAC CSR registers, the Manageability Engine (ME) could
- * be accessing the registers at the same time.  Normally, this is handled in
- * h/w by an arbiter but on some parts there is a bug that acknowledges Host
- * accesses later than it should which could result in the register to have
- * an incorrect value.  Workaround this by checking the FWSM register which
- * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
- * and try again a number of times.
- **/
-static inline s32 __ew32_prepare(struct e1000_hw *hw)
-{
-       s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
-
-       while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
-               udelay(50);
-
-       return i;
-}
-
-static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
-{
-       if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-               __ew32_prepare(hw);
-
-       writel(val, hw->hw_addr + reg);
-}
+s32 __ew32_prepare(struct e1000_hw *hw);
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
 
 #define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
 
index cad250bc1b99fc81d51fb8956eee74c9acc3bc7e..e9a48bb5caacb8e73e5ebb8eeb8baafbbf2f8679 100644 (file)
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
                }
        } else if (!pm_runtime_suspended(netdev->dev.parent)) {
                u32 status = er32(STATUS);
+
                if (status & E1000_STATUS_LU) {
                        if (status & E1000_STATUS_SPEED_1000)
                                speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
                              reg + (offset << 2), val,
                              (test[pat] & write & mask));
                        *data = reg;
-                       return 1;
+                       return true;
                }
        }
-       return 0;
+       return false;
 }
 
 static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
                              int reg, u32 mask, u32 write)
 {
        u32 val;
+
        __ew32(&adapter->hw, reg, write & mask);
        val = __er32(&adapter->hw, reg);
        if ((write & mask) != (val & mask)) {
                e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
                      reg, (val & mask), (write & mask));
                *data = reg;
-               return 1;
+               return true;
        }
-       return 0;
+       return false;
 }
 
 #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                       \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
        *data = 0;
        if (hw->phy.media_type == e1000_media_type_internal_serdes) {
                int i = 0;
+
                hw->mac.serdes_has_link = false;
 
                /* On some blade server designs, link establishment
@@ -2315,5 +2318,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+       netdev->ethtool_ops = &e1000_ethtool_ops;
 }
index 6b3de5f39a97862e2f5742b14a24530ba6ecc33f..72f5475c4b9093d075e608070c4415a4e0ad5afd 100644 (file)
@@ -469,8 +469,9 @@ struct e1000_mac_operations {
        s32  (*setup_led)(struct e1000_hw *);
        void (*write_vfta)(struct e1000_hw *, u32, u32);
        void (*config_collision_dist)(struct e1000_hw *);
-       void (*rar_set)(struct e1000_hw *, u8 *, u32);
+       int  (*rar_set)(struct e1000_hw *, u8 *, u32);
        s32  (*read_mac_addr)(struct e1000_hw *);
+       u32  (*rar_get_count)(struct e1000_hw *);
 };
 
 /* When to use various PHY register access functions:
index 9866f264f55e33a8e564757730ada0d6ab7c6a92..8894ab8ed6bd82de2c0f720939d31a906096a12c 100644 (file)
@@ -139,8 +139,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
@@ -186,7 +187,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
        u16 phy_reg = 0;
        u32 phy_id = 0;
-       s32 ret_val;
+       s32 ret_val = 0;
        u16 retry_count;
        u32 mac_reg = 0;
 
@@ -217,11 +218,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
        /* In case the PHY needs to be in mdio slow mode,
         * set slow mode and try to get the PHY id again.
         */
-       hw->phy.ops.release(hw);
-       ret_val = e1000_set_mdio_slow_mode_hv(hw);
-       if (!ret_val)
-               ret_val = e1000e_get_phy_id(hw);
-       hw->phy.ops.acquire(hw);
+       if (hw->mac.type < e1000_pch_lpt) {
+               hw->phy.ops.release(hw);
+               ret_val = e1000_set_mdio_slow_mode_hv(hw);
+               if (!ret_val)
+                       ret_val = e1000e_get_phy_id(hw);
+               hw->phy.ops.acquire(hw);
+       }
 
        if (ret_val)
                return false;
@@ -702,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
                mac->ops.rar_set = e1000_rar_set_pch_lpt;
                mac->ops.setup_physical_interface =
                    e1000_setup_copper_link_pch_lpt;
+               mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
        }
 
        /* Enable PCS Lock-loss workaround for ICH8 */
@@ -842,6 +846,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
                }
        }
 
+       if (hw->phy.type == e1000_phy_82579) {
+               ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+                                                   &data);
+               if (ret_val)
+                       goto release;
+
+               data &= ~I82579_LPI_100_PLL_SHUT;
+               ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+                                                    data);
+       }
+
        /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
        ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
        if (ret_val)
@@ -1314,14 +1329,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        return ret_val;
        }
 
-       /* When connected at 10Mbps half-duplex, 82579 parts are excessively
+       /* When connected at 10Mbps half-duplex, some parts are excessively
         * aggressive resulting in many collisions. To avoid this, increase
         * the IPG and reduce Rx latency in the PHY.
         */
-       if ((hw->mac.type == e1000_pch2lan) && link) {
+       if (((hw->mac.type == e1000_pch2lan) ||
+            (hw->mac.type == e1000_pch_lpt)) && link) {
                u32 reg;
+
                reg = er32(STATUS);
                if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+                       u16 emi_addr;
+
                        reg = er32(TIPG);
                        reg &= ~E1000_TIPG_IPGT_MASK;
                        reg |= 0xFF;
@@ -1332,8 +1351,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        if (ret_val)
                                return ret_val;
 
-                       ret_val =
-                           e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+                       if (hw->mac.type == e1000_pch2lan)
+                               emi_addr = I82579_RX_CONFIG;
+                       else
+                               emi_addr = I217_RX_CONFIG;
+
+                       ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
 
                        hw->phy.ops.release(hw);
 
@@ -1614,9 +1637,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
        u32 fwsm;
 
        fwsm = er32(FWSM);
-       return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
+       return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
                ((fwsm & E1000_FWSM_MODE_MASK) ==
-                (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
+                (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
 }
 
 /**
@@ -1647,7 +1670,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
  *  Use SHRA[0-3] in place of those reserved for ME.
  **/
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 {
        u32 rar_low, rar_high;
 
@@ -1669,7 +1692,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
                e1e_flush();
                ew32(RAH(index), rar_high);
                e1e_flush();
-               return;
+               return 0;
        }
 
        /* RAR[1-6] are owned by manageability.  Skip those and program the
@@ -1692,7 +1715,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
                /* verify the register updates */
                if ((er32(SHRAL(index - 1)) == rar_low) &&
                    (er32(SHRAH(index - 1)) == rar_high))
-                       return;
+                       return 0;
 
                e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
                      (index - 1), er32(FWSM));
@@ -1700,6 +1723,43 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 
 out:
        e_dbg("Failed to write receive address at index %d\n", index);
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_rar_get_count_pch_lpt - Get the number of available SHRA
+ *  @hw: pointer to the HW structure
+ *
+ *  Get the number of available receive registers that the Host can
+ *  program. SHRA[0-10] are the shared receive address registers
+ *  that are shared between the Host and manageability engine (ME).
+ *  ME can reserve any number of addresses and the host needs to be
+ *  able to tell how many available registers it has access to.
+ **/
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
+{
+       u32 wlock_mac;
+       u32 num_entries;
+
+       wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+       wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+       switch (wlock_mac) {
+       case 0:
+               /* All SHRA[0..10] and RAR[0] available */
+               num_entries = hw->mac.rar_entry_count;
+               break;
+       case 1:
+               /* Only RAR[0] available */
+               num_entries = 1;
+               break;
+       default:
+               /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
+               num_entries = wlock_mac + 1;
+               break;
+       }
+
+       return num_entries;
 }
 
 /**
@@ -1713,7 +1773,7 @@ out:
  *  contain the MAC address. SHRA[0-10] are the shared receive address
  *  registers that are shared between the Host and manageability engine (ME).
  **/
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
 {
        u32 rar_low, rar_high;
        u32 wlock_mac;
@@ -1735,7 +1795,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
                e1e_flush();
                ew32(RAH(index), rar_high);
                e1e_flush();
-               return;
+               return 0;
        }
 
        /* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1767,12 +1827,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
                        /* verify the register updates */
                        if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
                            (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
-                               return;
+                               return 0;
                }
        }
 
 out:
        e_dbg("Failed to write receive address at index %d\n", index);
+       return -E1000_ERR_CONFIG;
 }
 
 /**
@@ -2493,51 +2554,44 @@ release:
  *  e1000_k1_gig_workaround_lv - K1 Si workaround
  *  @hw:   pointer to the HW structure
  *
- *  Workaround to set the K1 beacon duration for 82579 parts
+ *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
+ *  Disable K1 in 1000Mbps and 100Mbps
  **/
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
        u16 status_reg = 0;
-       u32 mac_reg;
-       u16 phy_reg;
 
        if (hw->mac.type != e1000_pch2lan)
                return 0;
 
-       /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+       /* Set K1 beacon duration based on 10Mbs speed */
        ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
        if (ret_val)
                return ret_val;
 
        if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
            == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
-               mac_reg = er32(FEXTNVM4);
-               mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
-
-               ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
-               if (ret_val)
-                       return ret_val;
-
-               if (status_reg & HV_M_STATUS_SPEED_1000) {
+               if (status_reg &
+                   (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
                        u16 pm_phy_reg;
 
-                       mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
-                       phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
-                       /* LV 1G Packet drop issue wa  */
+                       /* LV 1G/100 Packet drop issue wa  */
                        ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
                        if (ret_val)
                                return ret_val;
-                       pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
+                       pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
                        ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
                        if (ret_val)
                                return ret_val;
                } else {
+                       u32 mac_reg;
+
+                       mac_reg = er32(FEXTNVM4);
+                       mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
                        mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
-                       phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+                       ew32(FEXTNVM4, mac_reg);
                }
-               ew32(FEXTNVM4, mac_reg);
-               ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
        }
 
        return ret_val;
@@ -4963,6 +5017,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
        /* id_led_init dependent on mac type */
        .config_collision_dist  = e1000e_config_collision_dist_generic,
        .rar_set                = e1000e_rar_set_generic,
+       .rar_get_count          = e1000e_rar_get_count_generic,
 };
 
 static const struct e1000_phy_operations ich8_phy_ops = {
index bead50f9187b527291596da67351339b64482707..5515126c81c199b5e44bd909ac9ea3f336cf4fcd 100644 (file)
 #define I82577_MSE_THRESHOLD   0x0887  /* 82577 Mean Square Error Threshold */
 #define I82579_MSE_LINK_DOWN   0x2411  /* MSE count before dropping link */
 #define I82579_RX_CONFIG               0x3412  /* Receive configuration */
+#define I82579_LPI_PLL_SHUT            0x4412  /* LPI PLL Shut Enable */
 #define I82579_EEE_PCS_STATUS          0x182E  /* IEEE MMD Register 3.1 >> 8 */
 #define I82579_EEE_CAPABILITY          0x0410  /* IEEE MMD Register 3.20 */
 #define I82579_EEE_ADVERTISEMENT       0x040E  /* IEEE MMD Register 7.60 */
 #define I82579_EEE_LP_ABILITY          0x040F  /* IEEE MMD Register 7.61 */
 #define I82579_EEE_100_SUPPORTED       (1 << 1)        /* 100BaseTx EEE */
 #define I82579_EEE_1000_SUPPORTED      (1 << 2)        /* 1000BaseTx EEE */
+#define I82579_LPI_100_PLL_SHUT        (1 << 2)        /* 100M LPI PLL Shut Enabled */
 #define I217_EEE_PCS_STATUS    0x9401  /* IEEE MMD Register 3.1 */
 #define I217_EEE_CAPABILITY    0x8000  /* IEEE MMD Register 3.20 */
 #define I217_EEE_ADVERTISEMENT 0x8001  /* IEEE MMD Register 7.60 */
 #define I217_EEE_LP_ABILITY    0x8002  /* IEEE MMD Register 7.61 */
+#define I217_RX_CONFIG         0xB20C  /* Receive configuration */
 
 #define E1000_EEE_RX_LPI_RCVD  0x0400  /* Tx LP idle received */
 #define E1000_EEE_TX_LPI_RCVD  0x0800  /* Rx LP idle received */
index baa0a466d1d05ca533999b9ffcac3846a476ab5f..8c386f3a15ebb6649e347f5316555827b151819b 100644 (file)
@@ -211,6 +211,11 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
        return 0;
 }
 
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
+{
+       return hw->mac.rar_entry_count;
+}
+
 /**
  *  e1000e_rar_set_generic - Set receive address register
  *  @hw: pointer to the HW structure
@@ -220,7 +225,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
  *  Sets the receive address array register at index to the address passed
  *  in by addr.
  **/
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
 {
        u32 rar_low, rar_high;
 
@@ -244,6 +249,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
        e1e_flush();
        ew32(RAH(index), rar_high);
        e1e_flush();
+
+       return 0;
 }
 
 /**
index 4e81c2825b7a1ca60b9a870ce61737e29d4e7972..0513d90cdeeaa980b3fb226eaac6feebe481c4f3 100644 (file)
@@ -61,7 +61,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw);
 void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
 
 void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
 void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
 
 #endif
index d50c91e5052808b9485a44538e4b0821b9b30e88..201cc93f36256d5776e882399d3afdf594b81192 100644 (file)
@@ -123,6 +123,36 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
        {0, NULL}
 };
 
+/**
+ * __ew32_prepare - prepare to write to MAC CSR register on certain parts
+ * @hw: pointer to the HW structure
+ *
+ * When updating the MAC CSR registers, the Manageability Engine (ME) could
+ * be accessing the registers at the same time.  Normally, this is handled in
+ * h/w by an arbiter but on some parts there is a bug that acknowledges Host
+ * accesses later than it should which could result in the register to have
+ * an incorrect value.  Workaround this by checking the FWSM register which
+ * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
+ * and try again a number of times.
+ **/
+s32 __ew32_prepare(struct e1000_hw *hw)
+{
+       s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
+
+       while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
+               udelay(50);
+
+       return i;
+}
+
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+       if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+               __ew32_prepare(hw);
+
+       writel(val, hw->hw_addr + reg);
+}
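Moving __ew32_prepare()/__ew32() out of e1000.h (they are now only declared there) does not change the workaround itself: on parts with the ME arbiter bug, the driver polls FWSM bit 24 a bounded number of times before each MAC CSR write. A generic sketch of that bounded-poll-then-write pattern; the register accessors, busy bit and retry count are placeholders, not the e1000e register map.

/* Sketch of the "wait out the Manageability Engine, then write" pattern.
 * read_reg()/raw_write_reg()/delay_us() are stubs for illustration only.
 */
#define FWSM_BUSY       (1u << 24)
#define MAX_TRIES       2048

static unsigned int read_reg(unsigned long reg)  { (void)reg; return 0; }
static void raw_write_reg(unsigned long reg, unsigned int val) { (void)reg; (void)val; }
static void delay_us(unsigned int us) { (void)us; }

static void guarded_write(unsigned long fwsm, unsigned long reg,
                          unsigned int val)
{
        int tries = MAX_TRIES;

        /* bounded wait: give up after MAX_TRIES polls rather than hanging */
        while ((read_reg(fwsm) & FWSM_BUSY) && --tries)
                delay_us(50);

        raw_write_reg(reg, val);
}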
+
 /**
  * e1000_regdump - register printout routine
  * @hw: pointer to the HW structure
@@ -599,6 +629,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
 
        if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
                u32 rctl = er32(RCTL);
+
                ew32(RCTL, rctl & ~E1000_RCTL_EN);
                e_err("ME firmware caused invalid RDT - resetting\n");
                schedule_work(&adapter->reset_task);
@@ -615,6 +646,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
 
        if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
                u32 tctl = er32(TCTL);
+
                ew32(TCTL, tctl & ~E1000_TCTL_EN);
                e_err("ME firmware caused invalid TDT - resetting\n");
                schedule_work(&adapter->reset_task);
@@ -1165,7 +1197,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
                dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
                adapter->tx_hwtstamp_skb = NULL;
                adapter->tx_hwtstamp_timeouts++;
-               e_warn("clearing Tx timestamp hang");
+               e_warn("clearing Tx timestamp hang\n");
        } else {
                /* reschedule to check later */
                schedule_work(&adapter->tx_hwtstamp_work);
@@ -1198,6 +1230,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
               (count < tx_ring->count)) {
                bool cleaned = false;
+
                rmb();          /* read buffer_info after eop_desc */
                for (; !cleaned; count++) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1786,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
                    adapter->flags & FLAG_RX_NEEDS_RESTART) {
                        /* disable receives */
                        u32 rctl = er32(RCTL);
+
                        ew32(RCTL, rctl & ~E1000_RCTL_EN);
                        adapter->flags |= FLAG_RESTART_NOW;
                }
@@ -1960,6 +1994,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
        /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
        if (hw->mac.type == e1000_82574) {
                u32 rfctl = er32(RFCTL);
+
                rfctl |= E1000_RFCTL_ACK_DIS;
                ew32(RFCTL, rfctl);
        }
@@ -2204,6 +2239,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
 
        if (adapter->msix_entries) {
                int i;
+
                for (i = 0; i < adapter->num_vectors; i++)
                        synchronize_irq(adapter->msix_entries[i].vector);
        } else {
@@ -2921,6 +2957,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 
        if (adapter->flags2 & FLAG2_DMA_BURST) {
                u32 txdctl = er32(TXDCTL(0));
+
                txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
                            E1000_TXDCTL_WTHRESH);
                /* set up some performance related parameters to encourage the
@@ -3239,6 +3276,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
                if (adapter->flags & FLAG_IS_ICH) {
                        u32 rxdctl = er32(RXDCTL(0));
+
                        ew32(RXDCTL(0), rxdctl | 0x3);
                }
 
@@ -3303,9 +3341,11 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       unsigned int rar_entries = hw->mac.rar_entry_count;
+       unsigned int rar_entries;
        int count = 0;
 
+       rar_entries = hw->mac.ops.rar_get_count(hw);
+
        /* save a rar entry for our hardware address */
        rar_entries--;
 
@@ -3324,9 +3364,13 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
                 * combining
                 */
                netdev_for_each_uc_addr(ha, netdev) {
+                       int rval;
+
                        if (!rar_entries)
                                break;
-                       hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+                       rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+                       if (rval < 0)
+                               return -ENOMEM;
                        count++;
                }
        }
@@ -4085,12 +4129,37 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
        struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
                                                     cc);
        struct e1000_hw *hw = &adapter->hw;
-       cycle_t systim;
+       cycle_t systim, systim_next;
 
        /* latch SYSTIMH on read of SYSTIML */
        systim = (cycle_t)er32(SYSTIML);
        systim |= (cycle_t)er32(SYSTIMH) << 32;
 
+       if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
+               u64 incvalue, time_delta, rem, temp;
+               int i;
+
+               /* Errata for 82574/82583: bad bits may be read from SYSTIMH/L.
+                * Check that the time is incrementing at a reasonable rate
+                * and that the delta is a multiple of incvalue.
+                */
+               incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+               for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+                       /* latch SYSTIMH on read of SYSTIML */
+                       systim_next = (cycle_t)er32(SYSTIML);
+                       systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+                       time_delta = systim_next - systim;
+                       temp = time_delta;
+                       rem = do_div(temp, incvalue);
+
+                       systim = systim_next;
+
+                       if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
+                           (rem == 0))
+                               break;
+               }
+       }
        return systim;
 }
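
To make the errata check above concrete: a reread is accepted only when the delta
between two consecutive SYSTIM latches is small and an exact multiple of the
increment programmed in TIMINCA, i.e. the counter advanced by whole ticks. A
minimal, userspace-style sketch of that acceptance test (the driver itself uses
do_div() on the 64-bit values; the helper name here is illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    /* Accept a SYSTIM reread only if the counter moved by a small,
     * whole number of TIMINCA ticks since the previous latch.
     */
    static bool systim_delta_ok(uint64_t time_delta, uint64_t incvalue,
                                uint64_t epsilon)
    {
            if (!incvalue)
                    return false;   /* guard against division by zero */
            return time_delta < epsilon && (time_delta % incvalue) == 0;
    }
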
 
@@ -4491,7 +4560,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
        e1000_get_phy_info(hw);
 
        /* Enable EEE on 82579 after link up */
-       if (hw->phy.type == e1000_phy_82579)
+       if (hw->phy.type >= e1000_phy_82579)
                e1000_set_eee_pchlan(hw);
 }
 
@@ -4695,6 +4764,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
        /* Correctable ECC Errors */
        if (hw->mac.type == e1000_pch_lpt) {
                u32 pbeccsts = er32(PBECCSTS);
+
                adapter->corr_errors +=
                    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
                adapter->uncorr_errors +=
@@ -4808,6 +4878,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
            (adapter->flags & FLAG_RESTART_NOW)) {
                struct e1000_hw *hw = &adapter->hw;
                u32 rctl = er32(RCTL);
+
                ew32(RCTL, rctl | E1000_RCTL_EN);
                adapter->flags &= ~FLAG_RESTART_NOW;
        }
@@ -4930,6 +5001,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                        if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
                            !txb2b) {
                                u32 tarc0;
+
                                tarc0 = er32(TARC(0));
                                tarc0 &= ~SPEED_MODE_BIT;
                                ew32(TARC(0), tarc0);
@@ -5170,7 +5242,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
        __be16 protocol;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
-               return 0;
+               return false;
 
        if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
                protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5287,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
                i = 0;
        tx_ring->next_to_use = i;
 
-       return 1;
+       return true;
 }
 
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -5687,7 +5759,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+       int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
        if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -6209,6 +6281,7 @@ static int __e1000_resume(struct pci_dev *pdev)
                e1e_wphy(&adapter->hw, BM_WUS, ~0);
        } else {
                u32 wus = er32(WUS);
+
                if (wus) {
                        e_info("MAC Wakeup cause - %s\n",
                               wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -6235,6 +6308,7 @@ static int __e1000_resume(struct pci_dev *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_thaw(struct device *dev)
 {
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -6255,7 +6329,6 @@ static int e1000e_pm_thaw(struct device *dev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -7027,7 +7100,7 @@ static const struct pci_error_handlers e1000_err_handler = {
        .resume = e1000_io_resume,
 };
 
-static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+static const struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7144,6 +7217,7 @@ static struct pci_driver e1000_driver = {
 static int __init e1000_init_module(void)
 {
        int ret;
+
        pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
                e1000e_driver_version);
        pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
index a9a976f04bffe957e22b0852bd8918c6d6f63bd0..b1f212b7baf7e71f0b2a3a160463898019778a00 100644 (file)
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
                /* Loop to allow for up to whole page write of eeprom */
                while (widx < words) {
                        u16 word_out = data[widx];
+
                        word_out = (word_out >> 8) | (word_out << 8);
                        e1000_shift_out_eec_bits(hw, word_out, 16);
                        widx++;
index d0ac0f3249c886415d308c4a0cd376feda3d44db..aa1923f7ebdd2e56dd0ebd436feb69164426ad7c 100644 (file)
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 
                if (num_IntMode > bd) {
                        unsigned int int_mode = IntMode[bd];
+
                        e1000_validate_option(&int_mode, &opt, adapter);
                        adapter->int_mode = int_mode;
                } else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 
                if (num_SmartPowerDownEnable > bd) {
                        unsigned int spd = SmartPowerDownEnable[bd];
+
                        e1000_validate_option(&spd, &opt, adapter);
                        if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
                                adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 
                if (num_CrcStripping > bd) {
                        unsigned int crc_stripping = CrcStripping[bd];
+
                        e1000_validate_option(&crc_stripping, &opt, adapter);
                        if (crc_stripping == OPTION_ENABLED) {
                                adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 
                if (num_KumeranLockLoss > bd) {
                        unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+
                        e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
                        enabled = kmrn_lock_loss;
                }
index 00b3fc98bf309bf3d371a984f0da7245caf07013..b2005e13fb01583a10f58aa37339feb2336191bd 100644 (file)
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
                    (hw->phy.addr == 2) &&
                    !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
                        u16 data2 = 0x7EFF;
+
                        ret_val = e1000_access_phy_debug_regs_hv(hw,
                                                                 (1 << 6) | 0x3,
                                                                 &data2, false);
index 3841bccf058c7aa0fe3b2f90e2c70c38e9b6f209..537d2780b408b3cdc9e0fc546d45c04444cc86e2 100644 (file)
@@ -164,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
 #define HV_M_STATUS_AUTONEG_COMPLETE   0x1000
 #define HV_M_STATUS_SPEED_MASK         0x0300
 #define HV_M_STATUS_SPEED_1000         0x0200
+#define HV_M_STATUS_SPEED_100          0x0100
 #define HV_M_STATUS_LINK_UP            0x0040
 
 #define IGP01E1000_PHY_PCS_INIT_REG    0x00B4
index beb7b4393a6c26fc46c917a798a6fd7bfaee28ea..581898f23223e2ddcde4ddf0226025276caeea91 100644 (file)
 #define STRINGIFY(foo)  #foo
 #define XSTRINGIFY(bar) STRINGIFY(bar)
 
-#ifndef ARCH_HAS_PREFETCH
-#define prefetch(X)
-#endif
-
 #define I40E_RX_DESC(R, i)                     \
        ((ring_is_16byte_desc_enabled(R))       \
                ? (union i40e_32byte_rx_desc *) \
@@ -329,9 +325,7 @@ struct i40e_pf {
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_caps;
        struct sk_buff *ptp_tx_skb;
-       struct work_struct ptp_tx_work;
        struct hwtstamp_config tstamp_config;
-       unsigned long ptp_tx_start;
        unsigned long last_rx_ptp_check;
        spinlock_t tmreg_lock; /* Used to protect the device time registers. */
        u64 ptp_base_adj;
@@ -540,6 +534,15 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
                (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
 }
 
+/**
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the pf struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+       return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
 /* needed by i40e_ethtool.c */
 int i40e_up(struct i40e_vsi *vsi);
 void i40e_down(struct i40e_vsi *vsi);
index 34415d342ece381a97755b051f68945b254303b8..ba2811be8be42a037787f86b0583a5acb20892bf 100644 (file)
@@ -587,8 +587,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
 
-       if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
-           hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+       if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }
index f8dfb4b7e99cd6b05caa72a6da868dd887a97a64..2329e2ff2deb25b68cd121ae365e30059a8d4c14 100644 (file)
@@ -133,7 +133,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
  **/
 bool i40e_check_asq_alive(struct i40e_hw *hw)
 {
-       return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+       if (hw->aq.asq.len)
+               return !!(rd32(hw, hw->aq.asq.len) &
+                         I40E_PF_ATQLEN_ATQENABLE_MASK);
+       else
+               return false;
 }
 
 /**
@@ -789,6 +793,9 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
 {
        u32 reg;
 
+       if (i40e_check_asq_alive(hw))
+               i40e_aq_clear_pxe_mode(hw, NULL);
+
        /* Clear single descriptor fetch/write-back mode */
        reg = rd32(hw, I40E_GLLAN_RCTL_0);
 
@@ -906,6 +913,33 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 
 /* Admin command wrappers */
 
+/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_clear_pxe *cmd =
+               (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_clear_pxe_mode);
+
+       cmd->rx_cnt = 0x2;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+       return status;
+}
+
 /**
  * i40e_aq_set_link_restart_an
  * @hw: pointer to the hw struct
@@ -975,6 +1009,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
        hw_link_info->an_info = resp->an_info;
        hw_link_info->ext_info = resp->ext_info;
        hw_link_info->loopback = resp->loopback;
+       hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+       hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+       if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+               hw_link_info->crc_enable = true;
+       else
+               hw_link_info->crc_enable = false;
 
        if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
                hw_link_info->lse_enable = true;
@@ -1300,6 +1341,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
        struct i40e_aqc_driver_version *cmd =
                (struct i40e_aqc_driver_version *)&desc.params.raw;
        i40e_status status;
+       u16 len;
 
        if (dv == NULL)
                return I40E_ERR_PARAM;
@@ -1311,7 +1353,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
        cmd->driver_minor_ver = dv->minor_version;
        cmd->driver_build_ver = dv->build_version;
        cmd->driver_subbuild_ver = dv->subbuild_version;
-       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       len = 0;
+       while (len < sizeof(dv->driver_string) &&
+              (dv->driver_string[len] < 0x80) &&
+              dv->driver_string[len])
+               len++;
+       status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+                                      len, cmd_details);
 
        return status;
 }
@@ -1900,6 +1949,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                }
        }
 
+       /* Software override ensuring FCoE is disabled in NPAR or MFP
+        * mode, since it is not supported in those modes.
+        */
+       if (p->npar_enable || p->mfp_mode_1)
+               p->fcoe = false;
+
        /* additional HW specific goodies that might
         * someday be HW version specific
         */
@@ -2094,8 +2149,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
  * @cmd_details: pointer to command details structure or NULL
  **/
 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
-                               u16 udp_port, u8 header_len,
-                               u8 protocol_index, u8 *filter_index,
+                               u16 udp_port, u8 protocol_index,
+                               u8 *filter_index,
                                struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
index 3c37386fd138fdeb3941170e5789687d90385fd5..1aaec400b28ecb3120b7236a516b13fb59f160b9 100644 (file)
@@ -1744,10 +1744,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
        } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
                i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
-       } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
-               i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
-       } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
-               i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
        } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
                if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
                        int ret;
@@ -1967,8 +1963,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
                dev_info(&pf->pdev->dev, "  fd-atr off\n");
                dev_info(&pf->pdev->dev, "  fd-atr on\n");
-               dev_info(&pf->pdev->dev, "  fd-sb off\n");
-               dev_info(&pf->pdev->dev, "  fd-sb on\n");
                dev_info(&pf->pdev->dev, "  lldp start\n");
                dev_info(&pf->pdev->dev, "  lldp stop\n");
                dev_info(&pf->pdev->dev, "  lldp get local\n");
index 0cf47c958081ad6996c61e55a77be7a45f60609d..354181d1761226136b15f4a41080127c4b8f4494 100644 (file)
@@ -112,7 +112,6 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("rx_oversize", stats.rx_oversize),
        I40E_PF_STAT("rx_jabber", stats.rx_jabber),
        I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
-       I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
        I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
        /* LPI stats */
        I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
@@ -122,8 +121,9 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 };
 
 #define I40E_QUEUE_STATS_LEN(n) \
-  ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
-    ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+       (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+           * 2 /* Tx and Rx together */                                     \
+           * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
 #define I40E_GLOBAL_STATS_LEN  ARRAY_SIZE(i40e_gstrings_stats)
 #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
 #define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
@@ -633,6 +633,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                                   struct ethtool_stats *stats, u64 *data)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_ring *tx_ring, *rx_ring;
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        int i = 0;
@@ -650,8 +651,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
        }
        rcu_read_lock();
        for (j = 0; j < vsi->num_queue_pairs; j++) {
-               struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
-               struct i40e_ring *rx_ring;
+               tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
 
                if (!tx_ring)
                        continue;
@@ -1009,14 +1009,13 @@ static int i40e_get_coalesce(struct net_device *netdev,
        ec->rx_max_coalesced_frames_irq = vsi->work_limit;
 
        if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
-               ec->rx_coalesce_usecs = 1;
-       else
-               ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+               ec->use_adaptive_rx_coalesce = 1;
 
        if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-               ec->tx_coalesce_usecs = 1;
-       else
-               ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+               ec->use_adaptive_tx_coalesce = 1;
+
+       ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+       ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
 
        return 0;
 }
@@ -1035,37 +1034,27 @@ static int i40e_set_coalesce(struct net_device *netdev,
        if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
                vsi->work_limit = ec->tx_max_coalesced_frames_irq;
 
-       switch (ec->rx_coalesce_usecs) {
-       case 0:
-               vsi->rx_itr_setting = 0;
-               break;
-       case 1:
-               vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
-                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
-               break;
-       default:
-               if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-                   (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
-                       return -EINVAL;
+       if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
                vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-               break;
-       }
+       else
+               return -EINVAL;
 
-       switch (ec->tx_coalesce_usecs) {
-       case 0:
-               vsi->tx_itr_setting = 0;
-               break;
-       case 1:
-               vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
-                                      ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-               break;
-       default:
-               if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-                   (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
-                       return -EINVAL;
+       if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
                vsi->tx_itr_setting = ec->tx_coalesce_usecs;
-               break;
-       }
+       else
+               return -EINVAL;
+
+       if (ec->use_adaptive_rx_coalesce)
+               vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+       if (ec->use_adaptive_tx_coalesce)
+               vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
        vector = vsi->base_vector;
        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
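
For reference, the reworked get/set coalesce paths above rely on a simple
encoding: each of vsi->rx_itr_setting and vsi->tx_itr_setting holds the
interval in microseconds with the I40E_ITR_DYNAMIC flag OR'd in when adaptive
moderation is enabled. A hedged, self-contained sketch of that encode/decode
(the flag value and helper names are illustrative, not driver API):

    #include <stdbool.h>
    #include <stdint.h>

    #define ITR_DYNAMIC 0x8000  /* assumed flag bit, mirrors I40E_ITR_DYNAMIC */

    /* Encode an ITR setting: interval in usecs plus an "adaptive" flag bit. */
    static uint16_t itr_encode(uint16_t usecs, bool adaptive)
    {
            uint16_t setting = usecs;

            if (adaptive)
                    setting |= ITR_DYNAMIC;         /* mark as adaptive */
            return setting;
    }

    /* Decode the interval back out, stripping the adaptive flag. */
    static uint16_t itr_usecs(uint16_t setting)
    {
            return setting & ~ITR_DYNAMIC;
    }
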
@@ -1142,8 +1131,7 @@ static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
        int cnt = 0;
 
        /* report total rule count */
-       cmd->data = pf->hw.fdir_shared_filter_count +
-                   pf->fdir_pf_filter_count;
+       cmd->data = i40e_get_fd_cnt_all(pf);
 
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->fdir_filter_list, fdir_node) {
@@ -1177,10 +1165,6 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
        struct i40e_fdir_filter *rule = NULL;
        struct hlist_node *node2;
 
-       /* report total rule count */
-       cmd->data = pf->hw.fdir_shared_filter_count +
-                   pf->fdir_pf_filter_count;
-
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->fdir_filter_list, fdir_node) {
                if (fsp->location <= rule->fd_id)
@@ -1231,6 +1215,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
                break;
        case ETHTOOL_GRXCLSRLCNT:
                cmd->rule_cnt = pf->fdir_pf_active_filters;
+               /* report total rule count */
+               cmd->data = i40e_get_fd_cnt_all(pf);
                ret = 0;
                break;
        case ETHTOOL_GRXCLSRULE:
@@ -1299,16 +1285,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &=
-                       ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |=
-                       (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)  |
-                       ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                default:
                        return -EINVAL;
@@ -1317,16 +1299,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &=
-                       ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |=
-                       (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)  |
-                       ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                default:
                        return -EINVAL;
@@ -1700,5 +1678,5 @@ static const struct ethtool_ops i40e_ethtool_ops = {
 
 void i40e_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+       netdev->ethtool_ops = &i40e_ethtool_ops;
 }
index 0c69851eaecca9ac623715f2e158e822fdea368d..cef3db44301ea512ca68cc03d8c7a7fcac35d34d 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 43
+#define DRV_VERSION_BUILD 46
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -356,6 +356,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                                             struct rtnl_link_stats64 *stats)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_ring *tx_ring, *rx_ring;
        struct i40e_vsi *vsi = np->vsi;
        struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
        int i;
@@ -368,7 +369,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
 
        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
-               struct i40e_ring *tx_ring, *rx_ring;
                u64 bytes, packets;
                unsigned int start;
 
@@ -2415,6 +2415,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
  **/
 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
 {
+       struct i40e_ring *tx_ring, *rx_ring;
        u16 qoffset, qcount;
        int i, n;
 
@@ -2428,8 +2429,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
                qoffset = vsi->tc_config.tc_info[n].qoffset;
                qcount = vsi->tc_config.tc_info[n].qcount;
                for (i = qoffset; i < (qoffset + qcount); i++) {
-                       struct i40e_ring *rx_ring = vsi->rx_rings[i];
-                       struct i40e_ring *tx_ring = vsi->tx_rings[i];
+                       rx_ring = vsi->rx_rings[i];
+                       tx_ring = vsi->tx_rings[i];
                        rx_ring->dcb_tc = n;
                        tx_ring->dcb_tc = n;
                }
@@ -2567,7 +2568,6 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
              I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
              I40E_PFINT_ICR0_ENA_GPIO_MASK          |
              I40E_PFINT_ICR0_ENA_TIMESYNC_MASK      |
-             I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK  |
              I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
              I40E_PFINT_ICR0_ENA_VFLR_MASK          |
              I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -2899,12 +2899,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
                u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
 
                if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
-                       ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+                       icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
                        i40e_ptp_tx_hwtstamp(pf);
-                       prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
                }
-
-               wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
        }
 
        /* If a critical error is pending we have no choice but to reset the
@@ -4027,6 +4024,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
                                 pf->vsi[v]->seid);
                        /* Will try to configure as many components */
                } else {
+                       /* Re-configure VSI vectors based on updated TC map */
+                       i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
                        if (pf->vsi[v]->netdev)
                                i40e_dcbnl_set_all(pf->vsi[v]);
                }
@@ -4066,6 +4065,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                                       DCB_CAP_DCBX_VER_IEEE;
                        pf->flags |= I40E_FLAG_DCB_ENABLED;
                }
+       } else {
+               dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+                        pf->hw.aq.asq_last_status);
        }
 
 out:
@@ -4267,6 +4269,14 @@ static int i40e_open(struct net_device *netdev)
        if (err)
                return err;
 
+       /* configure global TSO hardware offload settings */
+       wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
+                                                      TCP_FLAG_FIN) >> 16);
+       wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
+                                                      TCP_FLAG_FIN |
+                                                      TCP_FLAG_CWR) >> 16);
+       wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
+
 #ifdef CONFIG_I40E_VXLAN
        vxlan_get_rx_port(netdev);
 #endif
@@ -4323,7 +4333,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
                         dev_driver_string(&pf->pdev->dev));
                err = i40e_vsi_request_irq(vsi, int_name);
        } else {
-               err = EINVAL;
+               err = -EINVAL;
                goto err_setup_rx;
        }
 
@@ -4701,8 +4711,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
            (pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return;
        fcnt_prog = i40e_get_current_fd_count(pf);
-       fcnt_avail = pf->hw.fdir_shared_filter_count +
-                                              pf->fdir_pf_filter_count;
+       fcnt_avail = i40e_get_fd_cnt_all(pf);
        if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
@@ -5358,8 +5367,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
         * because the reset will make them disappear.
         */
        ret = i40e_pf_reset(hw);
-       if (ret)
+       if (ret) {
                dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+               goto end_core_reset;
+       }
        pf->pfr_count++;
 
        if (test_bit(__I40E_DOWN, &pf->state))
@@ -5595,7 +5606,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
  **/
 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 {
-       const int vxlan_hdr_qwords = 4;
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
        u8 filter_index;
@@ -5613,7 +5623,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
                        port = pf->vxlan_ports[i];
                        ret = port ?
                              i40e_aq_add_udp_tunnel(hw, ntohs(port),
-                                                    vxlan_hdr_qwords,
                                                     I40E_AQC_TUNNEL_TYPE_VXLAN,
                                                     &filter_index, NULL)
                              : i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -5940,14 +5949,12 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
  **/
 static int i40e_alloc_rings(struct i40e_vsi *vsi)
 {
+       struct i40e_ring *tx_ring, *rx_ring;
        struct i40e_pf *pf = vsi->back;
        int i;
 
        /* Set basic values in the rings to be used later during open() */
        for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-               struct i40e_ring *tx_ring;
-               struct i40e_ring *rx_ring;
-
                /* allocate space for both Tx and Rx in one shot */
                tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
                if (!tx_ring)
@@ -6360,6 +6367,10 @@ static int i40e_sw_init(struct i40e_pf *pf)
                    I40E_FLAG_MSIX_ENABLED    |
                    I40E_FLAG_RX_1BUF_ENABLED;
 
+       /* Set default ITR */
+       pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
+       pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+
        /* Depending on PF configurations, it is possible that the RSS
         * maximum might end up larger than the available queues
         */
@@ -6712,7 +6723,7 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_set_features       = i40e_set_features,
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
-       .ndo_set_vf_tx_rate     = i40e_ndo_set_vf_bw,
+       .ndo_set_vf_rate        = i40e_ndo_set_vf_bw,
        .ndo_get_vf_config      = i40e_ndo_get_vf_config,
        .ndo_set_vf_link_state  = i40e_ndo_set_vf_link_state,
 #ifdef CONFIG_I40E_VXLAN
@@ -6767,12 +6778,15 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                           NETIF_F_HW_VLAN_CTAG_FILTER |
                           NETIF_F_IPV6_CSUM           |
                           NETIF_F_TSO                 |
+                          NETIF_F_TSO_ECN             |
                           NETIF_F_TSO6                |
                           NETIF_F_RXCSUM              |
-                          NETIF_F_NTUPLE              |
                           NETIF_F_RXHASH              |
                           0;
 
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               netdev->features |= NETIF_F_NTUPLE;
+
        /* copy netdev features into list of user selectable features */
        netdev->hw_features |= netdev->features;
 
@@ -6821,7 +6835,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
                return;
 
        i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
-       return;
 }
 
 /**
@@ -7561,8 +7574,6 @@ void i40e_veb_release(struct i40e_veb *veb)
 
        i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
        i40e_veb_clear(veb);
-
-       return;
 }
 
 /**
@@ -8043,7 +8054,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
        }
 
        pf->queues_left = queues_left;
-       return;
 }
 
 /**
@@ -8289,7 +8299,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err) {
                dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
                pf->flags &= ~I40E_FLAG_DCB_ENABLED;
-               goto err_init_dcb;
+               /* Continue without DCB enabled */
        }
 #endif /* CONFIG_I40E_DCB */
 
@@ -8353,6 +8363,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
+#ifdef CONFIG_PCI_IOV
        /* prep for VF support */
        if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
            (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
@@ -8375,6 +8386,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                         err);
                }
        }
+#endif /* CONFIG_PCI_IOV */
 
        pfs_found++;
 
@@ -8385,6 +8397,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        dv.minor_version = DRV_VERSION_MINOR;
        dv.build_version = DRV_VERSION_BUILD;
        dv.subbuild_version = 0;
+       strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
        i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
 
        /* since everything's happy, start the service_task timer */
@@ -8426,9 +8439,6 @@ err_vsis:
 err_switch_setup:
        i40e_reset_interrupt_capability(pf);
        del_timer_sync(&pf->service_timer);
-#ifdef CONFIG_I40E_DCB
-err_init_dcb:
-#endif /* CONFIG_I40E_DCB */
 err_mac_addr:
 err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
index 262bdf11d221e5a30be53a2f57a09e1865b32e79..81299189a47d3e58b61e75dba58869a85e813f2e 100644 (file)
@@ -160,7 +160,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
                udelay(5);
        }
        if (ret_code == I40E_ERR_TIMEOUT)
-               hw_dbg(hw, "Done bit in GLNVM_SRCTL not set");
+               hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
        return ret_code;
 }
 
index 10652f615aec3ed0cfe6399cd075d01e4cee78f7..57172f98f9f800b5b3d3e9f316baa61f494082d3 100644 (file)
@@ -74,6 +74,8 @@ i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
                                struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
@@ -157,8 +159,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
-                               u16 udp_port, u8 header_len,
-                               u8 protocol_index, u8 *filter_index,
+                               u16 udp_port, u8 protocol_index,
+                               u8 *filter_index,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
                                struct i40e_asq_cmd_details *cmd_details);
index e33ec6c842b7035acd8189520a4f410c495270f1..101f439acda6adfd7e57084865f3bd1b16d06362 100644 (file)
@@ -48,7 +48,6 @@
                                        I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \
                                        I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PTP_TX_TIMEOUT  (HZ * 15)
 
 /**
  * i40e_ptp_read - Read the PHC time from the device
@@ -216,40 +215,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
        return 0;
 }
 
-/**
- * i40e_ptp_tx_work
- * @work: pointer to work struct
- *
- * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
- * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
- * the stack in the skb.
- */
-static void i40e_ptp_tx_work(struct work_struct *work)
-{
-       struct i40e_pf *pf = container_of(work, struct i40e_pf,
-                                         ptp_tx_work);
-       struct i40e_hw *hw = &pf->hw;
-       u32 prttsyn_stat_0;
-
-       if (!pf->ptp_tx_skb)
-               return;
-
-       if (time_is_before_jiffies(pf->ptp_tx_start +
-                                  I40E_PTP_TX_TIMEOUT)) {
-               dev_kfree_skb_any(pf->ptp_tx_skb);
-               pf->ptp_tx_skb = NULL;
-               pf->tx_hwtstamp_timeouts++;
-               dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang");
-               return;
-       }
-
-       prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
-       if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
-               i40e_ptp_tx_hwtstamp(pf);
-       else
-               schedule_work(&pf->ptp_tx_work);
-}
-
 /**
  * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
  * @ptp: The PTP clock structure
@@ -321,7 +286,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
                pf->last_rx_ptp_check = jiffies;
                pf->rx_hwtstamp_cleared++;
                dev_warn(&vsi->back->pdev->dev,
-                        "%s: clearing Rx timestamp hang",
+                        "%s: clearing Rx timestamp hang\n",
                         __func__);
        }
 }
@@ -608,7 +573,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
                u32 regval;
 
                spin_lock_init(&pf->tmreg_lock);
-               INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
 
                dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
                         netdev->name);
@@ -647,7 +611,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
        pf->ptp_tx = false;
        pf->ptp_rx = false;
 
-       cancel_work_sync(&pf->ptp_tx_work);
        if (pf->ptp_tx_skb) {
                dev_kfree_skb_any(pf->ptp_tx_skb);
                pf->ptp_tx_skb = NULL;
index 1d40f425acf1833657902dce6dd7206b5493e7eb..25c928615f55f5af04bbe9545cb0f9c53aa17fda 100644 (file)
 #define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
 #define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
 #define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
index 0f5d96ad281d2400cbe4fbc47e886f9832758e4b..5a2218762db13bd90e5cafafdf8623049ec2ad79 100644 (file)
@@ -24,6 +24,7 @@
  *
  ******************************************************************************/
 
+#include <linux/prefetch.h>
 #include "i40e.h"
 #include "i40e_prototype.h"
 
@@ -183,7 +184,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
        struct iphdr *ip;
        bool err = false;
        int ret;
-       int i;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
                0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -199,21 +199,17 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
        ip->saddr = fd_data->src_ip[0];
        udp->source = fd_data->src_port;
 
-       for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
-            i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
-               fd_data->pctype = i;
-               ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
-               if (ret) {
-                       dev_info(&pf->pdev->dev,
-                                "Filter command send failed for PCTYPE %d (ret = %d)\n",
-                                fd_data->pctype, ret);
-                       err = true;
-               } else {
-                       dev_info(&pf->pdev->dev,
-                                "Filter OK for PCTYPE %d (ret = %d)\n",
-                                fd_data->pctype, ret);
-               }
+       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Filter command send failed for PCTYPE %d (ret = %d)\n",
+                        fd_data->pctype, ret);
+               err = true;
+       } else {
+               dev_info(&pf->pdev->dev,
+                        "Filter OK for PCTYPE %d (ret = %d)\n",
+                        fd_data->pctype, ret);
        }
 
        return err ? -EOPNOTSUPP : 0;
@@ -262,7 +258,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                }
        }
 
-       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
        ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 
        if (ret) {
@@ -418,7 +414,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                }
                break;
        default:
-               dev_info(&pf->pdev->dev, "Could not specify spec type %d",
+               dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
                         input->flow_type);
                ret = -EINVAL;
        }
@@ -455,9 +451,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 
                /* filter programming failed most likely due to table full */
                fcnt_prog = i40e_get_current_fd_count(pf);
-               fcnt_avail = pf->hw.fdir_shared_filter_count +
-                                                      pf->fdir_pf_filter_count;
-
+               fcnt_avail = i40e_get_fd_cnt_all(pf);
                /* If ATR is running fcnt_prog can quickly change,
                 * if we are very close to full, it makes sense to disable
                 * FD ATR/SB and then re-enable it when there is room.
@@ -478,7 +472,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                                pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
                        }
                } else {
-                       dev_info(&pdev->dev, "FD filter programming error");
+                       dev_info(&pdev->dev, "FD filter programming error\n");
                }
        } else if (error ==
                          (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
@@ -1713,9 +1707,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
                if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
                        struct vlan_ethhdr *vhdr;
-                       if (skb_header_cloned(skb) &&
-                           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-                               return -ENOMEM;
+                       int rc;
+
+                       rc = skb_cow_head(skb, 0);
+                       if (rc < 0)
+                               return rc;
                        vhdr = (struct vlan_ethhdr *)skb->data;
                        vhdr->h_vlan_TCI = htons(tx_flags >>
                                                 I40E_TX_FLAGS_VLAN_SHIFT);
@@ -1743,20 +1739,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
                    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
 {
        u32 cd_cmd, cd_tso_len, cd_mss;
+       struct ipv6hdr *ipv6h;
        struct tcphdr *tcph;
        struct iphdr *iph;
        u32 l4len;
        int err;
-       struct ipv6hdr *ipv6h;
 
        if (!skb_is_gso(skb))
                return 0;
 
-       if (skb_header_cloned(skb)) {
-               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-               if (err)
-                       return err;
-       }
+       err = skb_cow_head(skb, 0);
+       if (err < 0)
+               return err;
 
        if (protocol == htons(ETH_P_IP)) {
                iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
@@ -1825,9 +1819,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
        *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
                                I40E_TXD_CTX_QW1_CMD_SHIFT;
 
-       pf->ptp_tx_start = jiffies;
-       schedule_work(&pf->ptp_tx_work);
-
        return 1;
 }
 
index d5349698e513b511a8ac9e5e6c798d888cdce409..d5e3f5430284bd61bf27ab0bcec9d66cacc750b8 100644 (file)
@@ -27,7 +27,7 @@
 #ifndef _I40E_TXRX_H_
 #define _I40E_TXRX_H_
 
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+/* Interrupt Throttling and Rate Limiting Goodies */
 
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
 #define I40E_MIN_ITR               0x0004  /* reg uses 2 usec resolution */
@@ -69,16 +69,11 @@ enum i40e_dyn_idx_t {
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
index 71a968fe557f33e12f6feb81d416f4f5a55946fb..71f9718caf0a60d8821782f9c70ee842bb758e9c 100644 (file)
@@ -167,6 +167,9 @@ struct i40e_link_status {
        u8 loopback;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
+       u16 max_frame_size;
+       bool crc_enable;
+       u8 pacing;
 };
 
 struct i40e_phy_info {
@@ -409,6 +412,7 @@ struct i40e_driver_version {
        u8 minor_version;
        u8 build_version;
        u8 subbuild_version;
+       u8 driver_string[32];
 };
 
 /* RX Descriptors */
@@ -862,18 +866,14 @@ struct i40e_filter_program_desc {
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-       /* Note: Values 0-28 are reserved for future use */
-       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP        = 29,
-       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP      = 30,
+       /* Note: Values 0-30 are reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV4_UDP                = 31,
-       I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN            = 32,
+       /* Note: Value 32 is reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV4_TCP                = 33,
        I40E_FILTER_PCTYPE_NONF_IPV4_SCTP               = 34,
        I40E_FILTER_PCTYPE_NONF_IPV4_OTHER              = 35,
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
-       /* Note: Values 37-38 are reserved for future use */
-       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP        = 39,
-       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP      = 40,
+       /* Note: Values 37-40 are reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN            = 42,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
@@ -1016,8 +1016,8 @@ struct i40e_hw_port_stats {
        u64 mac_short_packet_dropped;   /* mspdc */
        u64 checksum_error;             /* xec */
        /* EEE LPI */
-       bool tx_lpi_status;
-       bool rx_lpi_status;
+       u32 tx_lpi_status;
+       u32 rx_lpi_status;
        u64 tx_lpi_count;               /* etlpic */
        u64 rx_lpi_count;               /* erlpic */
 };
index 5421714df32488e220cce4e6e4a1082693a554d8..4e7634c83685adc38f14ad4168657f6128ae849c 100644 (file)
@@ -354,6 +354,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
        rx_ctx.tphhead_ena = 1;
        rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = 1;
+       rx_ctx.prefena = 1;
 
        /* clear the context in the HMC */
        ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -842,6 +843,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
        kfree(pf->vf);
        pf->vf = NULL;
 
+       /* This check is for when the driver is unloaded while VFs are
+        * assigned. Setting the number of VFs to 0 through sysfs is caught
+        * before this function ever gets called.
+        */
        if (!i40e_vfs_are_assigned(pf)) {
                pci_disable_sriov(pf->pdev);
                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
@@ -978,7 +983,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
        if (num_vfs)
                return i40e_pci_sriov_enable(pdev, num_vfs);
 
-       i40e_free_vfs(pf);
+       if (!i40e_vfs_are_assigned(pf)) {
+               i40e_free_vfs(pf);
+       } else {
+               dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
+               return -EINVAL;
+       }
        return 0;
 }
 
@@ -2049,10 +2059,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        }
 
        /* delete the temporary mac address */
-       i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+       i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+                       true, false);
 
        /* add the new mac address */
-       f = i40e_add_filter(vsi, mac, 0, true, false);
+       f = i40e_add_filter(vsi, mac, vf->port_vlan_id, true, false);
        if (!f) {
                dev_err(&pf->pdev->dev,
                        "Unable to add VF ucast filter\n");
@@ -2128,11 +2139,15 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
 
        /* Check for condition where there was already a port VLAN ID
         * filter set and now it is being deleted by setting it to zero.
+        * Additionally check for the condition where there was a port
+        * VLAN but now there is a new and different port VLAN being set.
         * Before deleting all the old VLAN filters we must add new ones
         * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
         * MAC addresses deleted.
         */
-       if (!(vlan_id || qos) && vsi->info.pvid)
+       if ((!(vlan_id || qos) ||
+           (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+           vsi->info.pvid)
                ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
 
        if (vsi->info.pvid) {
@@ -2183,6 +2198,8 @@ error_pvid:
        return ret;
 }
 
+#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
+#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
 /**
  * i40e_ndo_set_vf_bw
  * @netdev: network interface device structure
@@ -2191,7 +2208,8 @@ error_pvid:
  *
  * configure vf tx rate
  **/
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+                      int max_tx_rate)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
@@ -2207,6 +2225,12 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
                goto error;
        }
 
+       if (min_tx_rate) {
+               dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
+                       min_tx_rate, vf_id);
+               return -EINVAL;
+       }
+
        vf = &(pf->vf[vf_id]);
        vsi = pf->vsi[vf->lan_vsi_index];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -2229,23 +2253,29 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
                break;
        }
 
-       if (tx_rate > speed) {
-               dev_err(&pf->pdev->dev, "Invalid tx rate %d specified for vf %d.",
-                       tx_rate, vf->vf_id);
+       if (max_tx_rate > speed) {
+               dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.",
+                       max_tx_rate, vf->vf_id);
                ret = -EINVAL;
                goto error;
        }
 
+       if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
+               dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
+               max_tx_rate = 50;
+       }
+
        /* Tx rate credits are in values of 50Mbps, 0 is disabled*/
-       ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, tx_rate / 50, 0,
-                                         NULL);
+       ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+                                         max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+                                         I40E_MAX_BW_INACTIVE_ACCUM, NULL);
        if (ret) {
-               dev_err(&pf->pdev->dev, "Unable to set tx rate, error code %d.\n",
+               dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
                        ret);
                ret = -EIO;
                goto error;
        }
-       vf->tx_rate = tx_rate;
+       vf->tx_rate = max_tx_rate;
 error:
        return ret;
 }
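The two new defines above make the 50 Mbps credit granularity explicit. As a standalone illustration of that arithmetic (the helper and the sample rates are invented; only the divisor and the clamp come from the hunk):

#include <stdio.h>

#define BW_CREDIT_DIVISOR 50    /* 50 Mbps per BW credit, as defined above */

/* Convert a requested max Tx rate in Mbps into BW credits, applying the
 * same clamp the driver now uses: rates below 50 Mbps are raised to the
 * 50 Mbps minimum, and 0 leaves rate limiting disabled.
 */
static int rate_to_credits(int max_tx_rate)
{
        if (max_tx_rate > 0 && max_tx_rate < 50)
                max_tx_rate = 50;
        return max_tx_rate / BW_CREDIT_DIVISOR;
}

int main(void)
{
        printf("%d\n", rate_to_credits(1000)); /* 20 credits */
        printf("%d\n", rate_to_credits(30));   /* clamped to 50 -> 1 credit */
        printf("%d\n", rate_to_credits(0));    /* 0 -> limiting disabled */
        return 0;
}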
@@ -2287,10 +2317,18 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 
        memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
 
-       ivi->tx_rate = vf->tx_rate;
+       ivi->max_tx_rate = vf->tx_rate;
+       ivi->min_tx_rate = 0;
        ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
        ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
                   I40E_VLAN_PRIORITY_SHIFT;
+       if (vf->link_forced == false)
+               ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+       else if (vf->link_up == true)
+               ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+       else
+               ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
        ret = 0;
 
 error_param:
index ba3d1f8414beabdd1b6a0e61a524978f77831c42..5a559be4ba2ca926b92fc65dbbb1fb5ca0f727cf 100644 (file)
@@ -116,7 +116,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                              int vf_id, u16 vlan_id, u8 qos);
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+                      int max_tx_rate);
 int i40e_ndo_get_vf_config(struct net_device *netdev,
                           int vf_id, struct ifla_vf_info *ivi);
 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
index e09be37a07a8384f41731cfd9e7a744c646e7216..3a423836a565294aa82dadbeb93e4d45619ccd86 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 #
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
 # The full GNU General Public License is included in this distribution in
 # the file called "COPYING".
 #
index c79df257f830d7791580b81ae68028d8ee18d65e..68b4aacd43f534b941cec1127f063eb5a2415996 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 7d24be528601c990e97307c7b0663cd7b66273cf..e3472c62e1554194740a9233aadedaa5f13077e3 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 6e617669c326256295e4c283c1781e6681ed2bdd..89d9209ff2bd20be69c3431f3ff6cb77308d8ca2 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -676,7 +679,6 @@ struct i40e_aqc_add_get_update_vsi {
 #define I40E_AQ_VSI_TYPE_PF             0x2
 #define I40E_AQ_VSI_TYPE_EMP_MNG        0x3
 #define I40E_AQ_VSI_FLAG_CASCADED_PV    0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI      0x8
        __le32 addr_high;
        __le32 addr_low;
 };
@@ -1038,7 +1040,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_VLAN        0x10
        __le16 seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK      0x3FF
-       u8     reserved[10];
+       __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID          0x8000
+       u8     reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1931,19 +1935,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
        __le16 udp_port;
-       u8     header_len; /* in DWords, 1 to 15 */
+       u8     reserved0[3];
        u8     protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_TEREDO    0x0
-#define I40E_AQC_TUNNEL_TYPE_VXLAN     0x2
-#define I40E_AQC_TUNNEL_TYPE_NGE       0x3
-       u8     variable_udp_length;
-#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH       0x0
-#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH    0x1
-       u8              udp_key_index;
-#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN                        0x0
-#define I40E_AQC_TUNNEL_KEY_INDEX_NGE                  0x1
-#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP      0x2
-       u8              reserved[10];
+#define I40E_AQC_TUNNEL_TYPE_VXLAN     0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE       0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO    0x10
+       u8     reserved1[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
index d8654fb9e525a82881c88cf882d91c716577ec0f..8e6a6dd9212bb0812f42378ad208ccd451e43ee4 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index ae084378faabff7669794cd0f47a9072f1698243..ea0f2001cc201fa5f6eeced0f72b585e3aa23132 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -130,7 +133,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
  **/
 bool i40evf_check_asq_alive(struct i40e_hw *hw)
 {
-       return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+       if (hw->aq.asq.len)
+               return !!(rd32(hw, hw->aq.asq.len) &
+                         I40E_PF_ATQLEN_ATQENABLE_MASK);
+       else
+               return false;
 }
 
 /**
index cb97b3eed440ff764c1a9dbf705e45bc9e308b2f..9d906514fc3d3474392102ea702cd40a10f027cc 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 775fcb2463d7f4e0ffbef124ed717958a12ffbec..d6f762241537804be777b97ad5f29616f990058f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 622f373b745d59092355131aa20f5f5c57d486c8..21a91b14bf819365bfea82276324b14da6ef403c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 97ab8c2b76f8f0e6cf1c6c485eaeafee0e42a5a1..849edcc2e398f7bfdaaea5eaf078ba714977ca5f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 30af953cf106a4afc6aa9e0939d24adce8d68418..7977205b1e049a9b0ed082ab9eedb556a9c9b2e3 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
 #define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
 #define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
 #define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
index 7c08cc2e339b89f05d79d37dc5e46ba9921cb17b..7fa7a41915c1acce92ce1b8d46cc22d7043f96c2 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index b9f50f40abe18b78cf0c3ff580ded29666491d92..82d6844245b59a6db0e03b0cccc188dffad3cbf8 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 10bf49e18d7f6c0ed8d1af47d5d30544c5a8951e..af639d8608a53b0282f46ada3d1fe6ed925c5e7b 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -24,7 +27,7 @@
 #ifndef _I40E_TXRX_H_
 #define _I40E_TXRX_H_
 
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+/* Interrupt Throttling and Rate Limiting Goodies */
 
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
 #define I40E_MIN_ITR               0x0004  /* reg uses 2 usec resolution */
@@ -66,16 +69,11 @@ enum i40e_dyn_idx_t {
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
index 4673b3381eddaa1672edca1f60b85e89d33ab247..67082f7bfcef35c71b8f7bb70400ca0a8772bddb 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -101,15 +104,6 @@ enum i40e_debug_mask {
        I40E_DEBUG_ALL                  = 0xFFFFFFFF
 };
 
-/* PCI Bus Info */
-#define I40E_PCI_LINK_WIDTH_1          0x10
-#define I40E_PCI_LINK_WIDTH_2          0x20
-#define I40E_PCI_LINK_WIDTH_4          0x40
-#define I40E_PCI_LINK_WIDTH_8          0x80
-#define I40E_PCI_LINK_SPEED_2500       0x1
-#define I40E_PCI_LINK_SPEED_5000       0x2
-#define I40E_PCI_LINK_SPEED_8000       0x3
-
 /* These are structs for managing the hardware information and the operations.
  * The structures of function pointers are filled out at init time when we
  * know for sure exactly which hardware we're working with.  This gives us the
@@ -173,6 +167,9 @@ struct i40e_link_status {
        u8 loopback;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
+       u16 max_frame_size;
+       bool crc_enable;
+       u8 pacing;
 };
 
 struct i40e_phy_info {
@@ -415,6 +412,7 @@ struct i40e_driver_version {
        u8 minor_version;
        u8 build_version;
        u8 subbuild_version;
+       u8 driver_string[32];
 };
 
 /* RX Descriptors */
@@ -868,18 +866,14 @@ struct i40e_filter_program_desc {
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-       /* Note: Values 0-28 are reserved for future use */
-       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP        = 29,
-       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP      = 30,
+       /* Note: Values 0-30 are reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV4_UDP                = 31,
-       I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN            = 32,
+       /* Note: Value 32 is reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV4_TCP                = 33,
        I40E_FILTER_PCTYPE_NONF_IPV4_SCTP               = 34,
        I40E_FILTER_PCTYPE_NONF_IPV4_OTHER              = 35,
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
-       /* Note: Values 37-38 are reserved for future use */
-       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP        = 39,
-       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP      = 40,
+       /* Note: Values 37-40 are reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN            = 42,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
@@ -1022,8 +1016,8 @@ struct i40e_hw_port_stats {
        u64 mac_short_packet_dropped;   /* mspdc */
        u64 checksum_error;             /* xec */
        /* EEE LPI */
-       bool tx_lpi_status;
-       bool rx_lpi_status;
+       u32 tx_lpi_status;
+       u32 rx_lpi_status;
        u64 tx_lpi_count;               /* etlpic */
        u64 rx_lpi_count;               /* erlpic */
 };
index ccf45d04b7ef88e74e99d5ad844ad10df8d9b4f8..1ef5b31ece909766dbc8ec3d5fee43aea5377893 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 807807d6238738c0e111739e9dc96d1f2200d77a..2913bc3332a1b1c23bf37d685b608ded39343c29 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 8b0db1ce179c5447ce83240098e6c076d6782ed6..df4dcfd364d868608d8782978e3b8f621a902e89 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -56,10 +59,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
 };
 
 #define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
-#define I40EVF_QUEUE_STATS_LEN \
+#define I40EVF_QUEUE_STATS_LEN(_dev) \
        (((struct i40evf_adapter *) \
-               netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
-#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
+               netdev_priv(_dev))->vsi_res->num_queue_pairs \
+                 * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
+#define I40EVF_STATS_LEN(_dev) \
+       (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
 
 /**
  * i40evf_get_settings - Get Link Speed and Duplex settings
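The reworked I40EVF_QUEUE_STATS_LEN above sizes the per-queue ethtool stats from the ring stats structure instead of a hard-coded 4. A standalone sketch of that count, assuming the usual two u64 counters (packets, bytes) per queue ring; the struct layout is an assumption, not shown in this diff:

#include <stdio.h>
#include <stdint.h>

/* Assumed layout of the per-ring stats; the real struct i40e_queue_stats
 * is not part of this hunk.
 */
struct queue_stats {
        uint64_t packets;
        uint64_t bytes;
};

int main(void)
{
        int num_queue_pairs = 4;        /* hypothetical VSI size */
        /* two rings (Tx + Rx) per pair, one ethtool entry per u64 field */
        int len = num_queue_pairs * 2 *
                  (int)(sizeof(struct queue_stats) / sizeof(uint64_t));

        printf("queue stat entries: %d\n", len);        /* 16 */
        return 0;
}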
@@ -75,7 +80,7 @@ static int i40evf_get_settings(struct net_device *netdev,
        /* In the future the VF will be able to query the PF for
         * some information - for now use a dummy value
         */
-       ecmd->supported = SUPPORTED_10000baseT_Full;
+       ecmd->supported = 0;
        ecmd->autoneg = AUTONEG_DISABLE;
        ecmd->transceiver = XCVR_DUMMY1;
        ecmd->port = PORT_NONE;
@@ -94,9 +99,9 @@ static int i40evf_get_settings(struct net_device *netdev,
 static int i40evf_get_sset_count(struct net_device *netdev, int sset)
 {
        if (sset == ETH_SS_STATS)
-               return I40EVF_STATS_LEN;
+               return I40EVF_STATS_LEN(netdev);
        else
-               return -ENOTSUPP;
+               return -EINVAL;
 }
 
 /**
@@ -290,14 +295,13 @@ static int i40evf_get_coalesce(struct net_device *netdev,
        ec->rx_max_coalesced_frames = vsi->work_limit;
 
        if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
-               ec->rx_coalesce_usecs = 1;
-       else
-               ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+               ec->use_adaptive_rx_coalesce = 1;
 
        if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-               ec->tx_coalesce_usecs = 1;
-       else
-               ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+               ec->use_adaptive_tx_coalesce = 1;
+
+       ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+       ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
 
        return 0;
 }
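The get_coalesce rework above reflects that a single ITR setting now carries both the interval in microseconds and an "adaptive" flag bit, reported through use_adaptive_*_coalesce instead of a magic rx/tx_coalesce_usecs value of 1. A small standalone sketch of that split (the 0x8000 flag value and the 62 usec sample are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

#define ITR_DYNAMIC 0x8000      /* assumed flag bit; only the masking matters */

int main(void)
{
        uint16_t rx_itr_setting = ITR_DYNAMIC | 62;     /* adaptive, 62 usec */

        int use_adaptive_rx = !!(rx_itr_setting & ITR_DYNAMIC);
        int rx_coalesce_usecs = rx_itr_setting & ~ITR_DYNAMIC;

        printf("adaptive=%d usecs=%d\n", use_adaptive_rx, rx_coalesce_usecs);
        return 0;
}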
@@ -318,54 +322,358 @@ static int i40evf_set_coalesce(struct net_device *netdev,
        struct i40e_q_vector *q_vector;
        int i;
 
-       if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
-               vsi->work_limit = ec->tx_max_coalesced_frames;
+       if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+               vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+       if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+               vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+
+       else
+               return -EINVAL;
+
+       if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+               vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+       else if (ec->use_adaptive_tx_coalesce)
+               vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+       else
+               return -EINVAL;
+
+       if (ec->use_adaptive_rx_coalesce)
+               vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+       if (ec->use_adaptive_tx_coalesce)
+               vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+       for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+               q_vector = adapter->q_vector[i];
+               q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+               wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+               q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+               wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+               i40e_flush(hw);
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
+                                   struct ethtool_rxnfc *cmd)
+{
+       struct i40e_hw *hw = &adapter->hw;
+       u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+                  ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+       /* We always hash on IP src and dest addresses */
+       cmd->data = RXH_IP_SRC | RXH_IP_DST;
 
-       switch (ec->rx_coalesce_usecs) {
-       case 0:
-               vsi->rx_itr_setting = 0;
+       switch (cmd->flow_type) {
+       case TCP_V4_FLOW:
+               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
-       case 1:
-               vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
-                                      | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+       case UDP_V4_FLOW:
+               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
-       default:
-               if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-                   (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
-                       return -EINVAL;
-               vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+
+       case SCTP_V4_FLOW:
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case IPV4_FLOW:
+               break;
+
+       case TCP_V6_FLOW:
+               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+       case UDP_V6_FLOW:
+               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+
+       case SCTP_V6_FLOW:
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case IPV6_FLOW:
                break;
+       default:
+               cmd->data = 0;
+               return -EINVAL;
        }
 
-       switch (ec->tx_coalesce_usecs) {
-       case 0:
-               vsi->tx_itr_setting = 0;
+       return 0;
+}
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+                           struct ethtool_rxnfc *cmd,
+                           u32 *rule_locs)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXRINGS:
+               cmd->data = adapter->vsi_res->num_queue_pairs;
+               ret = 0;
                break;
-       case 1:
-               vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
-                                      | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+       case ETHTOOL_GRXFH:
+               ret = i40evf_get_rss_hash_opts(adapter, cmd);
                break;
        default:
-               if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-                   (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
+                                  struct ethtool_rxnfc *nfc)
+{
+       struct i40e_hw *hw = &adapter->hw;
+
+       u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+                  ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+       /* RSS does not support anything other than hashing
+        * to queues on src and dst IPs and ports
+        */
+       if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+                         RXH_L4_B_0_1 | RXH_L4_B_2_3))
+               return -EINVAL;
+
+       /* We need at least the IP SRC and DEST fields for hashing */
+       if (!(nfc->data & RXH_IP_SRC) ||
+           !(nfc->data & RXH_IP_DST))
+               return -EINVAL;
+
+       switch (nfc->flow_type) {
+       case TCP_V4_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       break;
+               default:
                        return -EINVAL;
-               vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+               }
+               break;
+       case TCP_V6_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       break;
+               default:
+                       return -EINVAL;
+               }
                break;
+       case UDP_V4_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case UDP_V6_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case SCTP_V4_FLOW:
+               if ((nfc->data & RXH_L4_B_0_1) ||
+                   (nfc->data & RXH_L4_B_2_3))
+                       return -EINVAL;
+               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+               break;
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case SCTP_V6_FLOW:
+               if ((nfc->data & RXH_L4_B_0_1) ||
+                   (nfc->data & RXH_L4_B_2_3))
+                       return -EINVAL;
+               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+               break;
+       case IPV4_FLOW:
+               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+               break;
+       case IPV6_FLOW:
+               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+               break;
+       default:
+               return -EINVAL;
        }
 
-       for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
-               q_vector = adapter->q_vector[i];
-               q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
-               wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
-               q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
-               wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
-               i40e_flush(hw);
+       wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+       wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+       i40e_flush(hw);
+
+       return 0;
+}
+
+/**
+ * i40evf_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_set_rxnfc(struct net_device *netdev,
+                           struct ethtool_rxnfc *cmd)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_SRXFH:
+               ret = i40evf_set_rss_hash_opt(adapter, cmd);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * i40evf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+                               struct ethtool_channels *ch)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+       /* Report maximum channels */
+       ch->max_combined = adapter->vsi_res->num_queue_pairs;
+
+       ch->max_other = NONQ_VECS;
+       ch->other_count = NONQ_VECS;
+
+       ch->combined_count = adapter->vsi_res->num_queue_pairs;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+       return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+}
+
+/**
+ * i40evf_get_rxfh_indir - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       struct i40e_hw *hw = &adapter->hw;
+       u32 hlut_val;
+       int i, j;
+
+       for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++) {
+               hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+               indir[j++] = hlut_val & 0xff;
+               indir[j++] = (hlut_val >> 8) & 0xff;
+               indir[j++] = (hlut_val >> 16) & 0xff;
+               indir[j++] = (hlut_val >> 24) & 0xff;
+       }
+       return 0;
+}
+
+/**
+ * i40evf_set_rxfh_indir - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
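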
+ * returns 0 after programming the table.
+ **/
+static int i40evf_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       struct i40e_hw *hw = &adapter->hw;
+       u32 hlut_val;
+       int i, j;
+
+       for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+               hlut_val = indir[j++];
+               hlut_val |= indir[j++] << 8;
+               hlut_val |= indir[j++] << 16;
+               hlut_val |= indir[j++] << 24;
+               wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
        }
 
        return 0;
 }
 
-static struct ethtool_ops i40evf_ethtool_ops = {
+static const struct ethtool_ops i40evf_ethtool_ops = {
        .get_settings           = i40evf_get_settings,
        .get_drvinfo            = i40evf_get_drvinfo,
        .get_link               = ethtool_op_get_link,
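The new get/set_rxfh_indir helpers above treat each 32-bit VFQF_HLUT register as four packed 8-bit queue indices. A standalone sketch of just that packing (helper names and the sample table are invented; the register access itself is left out):

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_hlut(const uint32_t *indir)
{
        return (indir[0] & 0xff) |
               ((indir[1] & 0xff) << 8) |
               ((indir[2] & 0xff) << 16) |
               ((indir[3] & 0xff) << 24);
}

static void unpack_hlut(uint32_t hlut_val, uint32_t *indir)
{
        indir[0] = hlut_val & 0xff;
        indir[1] = (hlut_val >> 8) & 0xff;
        indir[2] = (hlut_val >> 16) & 0xff;
        indir[3] = (hlut_val >> 24) & 0xff;
}

int main(void)
{
        uint32_t in[4] = { 0, 1, 2, 3 }, out[4];
        uint32_t reg = pack_hlut(in);

        unpack_hlut(reg, out);
        printf("reg=0x%08x first=%u last=%u\n", reg, out[0], out[3]);
        return 0;
}

This packing is also why i40evf_get_rxfh_indir_size() above reports (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4 entries: four table slots per HLUT register.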
@@ -378,6 +686,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
        .set_msglevel           = i40evf_set_msglevel,
        .get_coalesce           = i40evf_get_coalesce,
        .set_coalesce           = i40evf_set_coalesce,
+       .get_rxnfc              = i40evf_get_rxnfc,
+       .set_rxnfc              = i40evf_set_rxnfc,
+       .get_rxfh_indir_size    = i40evf_get_rxfh_indir_size,
+       .get_rxfh_indir         = i40evf_get_rxfh_indir,
+       .set_rxfh_indir         = i40evf_set_rxfh_indir,
+       .get_channels           = i40evf_get_channels,
 };
 
 /**
@@ -389,5 +703,5 @@ static struct ethtool_ops i40evf_ethtool_ops = {
  **/
 void i40evf_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+       netdev->ethtool_ops = &i40evf_ethtool_ops;
 }
index da6054cbd9c01e7d63357e602e0460c401bab65e..632c2b32afa13211bc2f86befefa350658f4950b 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
 #include "i40e_prototype.h"
 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
 static int i40evf_close(struct net_device *netdev);
 
 char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710 X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "0.9.21"
+#define DRV_VERSION "0.9.23"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -688,7 +693,6 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
                f->remove = true;
                adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
        }
-       return;
 }
 
 /**
@@ -841,7 +845,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
                bool found = false;
 
-               if (f->macaddr[0] & 0x01) {
+               if (is_multicast_ether_addr(f->macaddr)) {
                        netdev_for_each_mc_addr(mca, netdev) {
                                if (ether_addr_equal(mca->addr, f->macaddr)) {
                                        found = true;
@@ -1027,30 +1031,21 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
-       while (vectors >= vector_threshold) {
-               err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-                                     vectors);
-               if (!err) /* Success in acquiring all requested vectors. */
-                       break;
-               else if (err < 0)
-                       vectors = 0; /* Nasty failure, quit now */
-               else /* err == number of vectors we should try again with */
-                       vectors = err;
-       }
-
-       if (vectors < vector_threshold) {
+       err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+                                   vector_threshold, vectors);
+       if (err < 0) {
                dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
-               err = -EIO;
-       } else {
-               /* Adjust for only the vectors we'll use, which is minimum
-                * of max_msix_q_vectors + NONQ_VECS, or the number of
-                * vectors we were allocated.
-                */
-               adapter->num_msix_vectors = vectors;
+               return err;
        }
-       return err;
+
+       /* Adjust for only the vectors we'll use, which is minimum
+        * of max_msix_q_vectors + NONQ_VECS, or the number of
+        * vectors we were allocated.
+        */
+       adapter->num_msix_vectors = err;
+       return 0;
 }
 
 /**
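The MSI-X hunk above replaces the manual retry loop with pci_enable_msix_range(), which asks for anywhere between a minimum and a maximum number of vectors and returns either the count actually granted or a negative errno. A hedged sketch of that calling pattern (a fragment, not a buildable driver; the function name is invented):

#include <linux/pci.h>

static int example_acquire_msix(struct pci_dev *pdev,
                                struct msix_entry *entries,
                                int min_vecs, int max_vecs)
{
        int granted = pci_enable_msix_range(pdev, entries,
                                            min_vecs, max_vecs);

        if (granted < 0)
                return granted; /* even min_vecs could not be met */

        /* size per-queue resources to 'granted'; it may be < max_vecs */
        return granted;
}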
@@ -1236,8 +1231,6 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
-
-       return;
 }
 
 /**
@@ -1309,7 +1302,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
                goto restart_watchdog;
 
        if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-               dev_info(&adapter->pdev->dev, "Checking for redemption\n");
                if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
                        /* A chance for redemption! */
                        dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1534,9 +1526,13 @@ static void i40evf_reset_task(struct work_struct *work)
                        rstat_val);
                adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
 
-               if (netif_running(adapter->netdev))
-                       i40evf_close(adapter->netdev);
-
+               if (netif_running(adapter->netdev)) {
+                       set_bit(__I40E_DOWN, &adapter->vsi.state);
+                       i40evf_down(adapter);
+                       i40evf_free_traffic_irqs(adapter);
+                       i40evf_free_all_tx_resources(adapter);
+                       i40evf_free_all_rx_resources(adapter);
+               }
                i40evf_free_misc_irq(adapter);
                i40evf_reset_interrupt_capability(adapter);
                i40evf_free_queues(adapter);
@@ -1848,8 +1844,6 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
 
        WARN_ON(in_interrupt());
 
-       adapter->state = __I40EVF_RESETTING;
-
        i40evf_down(adapter);
 
        /* allocate transmit descriptors */
@@ -2114,8 +2108,10 @@ static void i40evf_init_task(struct work_struct *work)
        adapter->vsi.back = adapter;
        adapter->vsi.base_vector = 1;
        adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
-       adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
-       adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+       adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+       adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
        adapter->vsi.netdev = adapter->netdev;
 
        if (!adapter->netdev_registered) {
@@ -2157,7 +2153,6 @@ err:
                return; /* do not reschedule */
        }
        schedule_delayed_work(&adapter->init_task, HZ * 3);
-       return;
 }
 
 /**
index e294f012647d801417af4ca1f68d0629cbaf08cc..7f80bb4177225af7a026a59cd52e3ebe488b6a41 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index fa36fe12e77502658cfe864780849d6f00e93c2e..2e36c670d8df48753d98f311d41b7c02eba2f66f 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* e1000_82575
  * e1000_82576
@@ -73,9 +70,8 @@ static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
 static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
-       { 36, 72, 144, 1, 2, 4, 8, 16,
-         35, 70, 140 };
+static const u16 e1000_82580_rxpbs_table[] = {
+       36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
 
 /**
  *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -526,7 +522,7 @@ out:
 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 {
        struct e1000_mac_info *mac = &hw->mac;
-       struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
        s32 ret_val;
        u32 ctrl_ext = 0;
        u32 link_mode = 0;
@@ -1180,8 +1176,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
 {
        u32 swfw_sync;
 
-       while (igb_get_hw_semaphore(hw) != 0);
-       /* Empty */
+       while (igb_get_hw_semaphore(hw) != 0)
+               ; /* Empty */
 
        swfw_sync = rd32(E1000_SW_FW_SYNC);
        swfw_sync &= ~mask;
@@ -1216,7 +1212,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
        while (timeout) {
                if (rd32(E1000_EEMNGCTL) & mask)
                        break;
-               msleep(1);
+               usleep_range(1000, 2000);
                timeout--;
        }
        if (!timeout)
@@ -1269,7 +1265,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
 
        if (hw->phy.media_type != e1000_media_type_copper) {
                ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
-                                                            &duplex);
+                                                            &duplex);
                /* Use this flag to determine if link needs to be checked or
                 * not.  If  we have link clear the flag so that we do not
                 * continue to check for link.
@@ -1316,7 +1312,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
 
        /* flush the write to verify completion */
        wrfl();
-       msleep(1);
+       usleep_range(1000, 2000);
 }
 
 /**
@@ -1411,7 +1407,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
 
                /* flush the write to verify completion */
                wrfl();
-               msleep(1);
+               usleep_range(1000, 2000);
        }
 }
 
@@ -1436,9 +1432,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
 
        /* set the completion timeout for interface */
        ret_val = igb_set_pcie_completion_timeout(hw);
-       if (ret_val) {
+       if (ret_val)
                hw_dbg("PCI-E Set completion timeout has failed.\n");
-       }
 
        hw_dbg("Masking off all interrupts\n");
        wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1442,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
        wr32(E1000_TCTL, E1000_TCTL_PSP);
        wrfl();
 
-       msleep(10);
+       usleep_range(10000, 20000);
 
        ctrl = rd32(E1000_CTRL);
 
@@ -1676,7 +1671,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
                    hw->mac.type == e1000_82576) {
                        ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
                        if (ret_val) {
-                               printk(KERN_DEBUG "NVM Read Error\n\n");
+                               hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
                                return ret_val;
                        }
 
@@ -1689,7 +1684,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
                 * link either autoneg or be forced to 1000/Full
                 */
                ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
-                           E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+                               E1000_CTRL_FD | E1000_CTRL_FRCDPX;
 
                /* set speed of 1000/Full if speed/duplex is forced */
                reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1920,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
        }
        /* Poll all queues to verify they have shut down */
        for (ms_wait = 0; ms_wait < 10; ms_wait++) {
-               msleep(1);
+               usleep_range(1000, 2000);
                rx_enabled = 0;
                for (i = 0; i < 4; i++)
                        rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1948,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
        wr32(E1000_RCTL, temp_rctl);
        wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
        wrfl();
-       msleep(2);
+       usleep_range(2000, 3000);
 
        /* Enable RX queues that were previously enabled and restore our
         * previous state
@@ -2005,14 +2000,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
         * 16ms to 55ms
         */
        ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
-                                       &pcie_devctl2);
+                                       &pcie_devctl2);
        if (ret_val)
                goto out;
 
        pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
 
        ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
-                                        &pcie_devctl2);
+                                        &pcie_devctl2);
 out:
        /* disable completion timeout resend */
        gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2236,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
        wr32(E1000_TCTL, E1000_TCTL_PSP);
        wrfl();
 
-       msleep(10);
+       usleep_range(10000, 11000);
 
        /* Determine whether or not a global dev reset is requested */
        if (global_device_reset &&
@@ -2259,7 +2254,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
 
        /* Add delay to insure DEV_RST has time to complete */
        if (global_device_reset)
-               msleep(5);
+               usleep_range(5000, 6000);
 
        ret_val = igb_get_auto_rd_done(hw);
        if (ret_val) {
@@ -2436,8 +2431,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
 
        ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
        if (ret_val) {
-               hw_dbg("NVM Read Error while updating checksum"
-                       " compatibility bit.\n");
+               hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
                goto out;
        }
 
@@ -2447,8 +2441,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
                ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
                                        &nvm_data);
                if (ret_val) {
-                       hw_dbg("NVM Write Error while updating checksum"
-                               " compatibility bit.\n");
+                       hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
                        goto out;
                }
        }
index 09d78be72416563beeda5e3cb72a7338b2aa7060..b407c55738fadf0a333ee7947fedd2c0c31638a1 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_82575_H_
 #define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
                       u8 data);
 
 #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
-                                     (ID_LED_DEF1_DEF2 <<  8) | \
-                                     (ID_LED_DEF1_DEF2 <<  4) | \
-                                     (ID_LED_OFF1_ON2))
+                                    (ID_LED_DEF1_DEF2 <<  8) | \
+                                    (ID_LED_DEF1_DEF2 <<  4) | \
+                                    (ID_LED_OFF1_ON2))
 
 #define E1000_RAR_ENTRIES_82575        16
 #define E1000_RAR_ENTRIES_82576        24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
 
 #define E1000_EICR_TX_QUEUE ( \
-    E1000_EICR_TX_QUEUE0 |    \
-    E1000_EICR_TX_QUEUE1 |    \
-    E1000_EICR_TX_QUEUE2 |    \
-    E1000_EICR_TX_QUEUE3)
+       E1000_EICR_TX_QUEUE0 |    \
+       E1000_EICR_TX_QUEUE1 |    \
+       E1000_EICR_TX_QUEUE2 |    \
+       E1000_EICR_TX_QUEUE3)
 
 #define E1000_EICR_RX_QUEUE ( \
-    E1000_EICR_RX_QUEUE0 |    \
-    E1000_EICR_RX_QUEUE1 |    \
-    E1000_EICR_RX_QUEUE2 |    \
-    E1000_EICR_RX_QUEUE3)
+       E1000_EICR_RX_QUEUE0 |    \
+       E1000_EICR_RX_QUEUE1 |    \
+       E1000_EICR_RX_QUEUE2 |    \
+       E1000_EICR_RX_QUEUE3)
 
 /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
 #define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
                struct {
                        struct {
                                __le16 pkt_info;   /* RSS type, Packet type */
-                               __le16 hdr_info;   /* Split Header,
-                                                   * header buffer length */
+                               __le16 hdr_info;   /* Split Head, buf len */
                        } lo_dword;
                        union {
                                __le32 rss;          /* RSS Hash */
index b05bf925ac721982d8ded6d3aa647d64236890f4..f85be6695e44857b7e149184405a1f06a9d2fab3 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_DEFINES_H_
 #define _E1000_DEFINES_H_
 
 /* Same mask, but for extended and packet split descriptors */
 #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
-    E1000_RXDEXT_STATERR_CE  |            \
-    E1000_RXDEXT_STATERR_SE  |            \
-    E1000_RXDEXT_STATERR_SEQ |            \
-    E1000_RXDEXT_STATERR_CXE |            \
-    E1000_RXDEXT_STATERR_RXE)
+       E1000_RXDEXT_STATERR_CE  |            \
+       E1000_RXDEXT_STATERR_SE  |            \
+       E1000_RXDEXT_STATERR_SEQ |            \
+       E1000_RXDEXT_STATERR_CXE |            \
+       E1000_RXDEXT_STATERR_RXE)
 
 #define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
 #define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
 #define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
 
 /* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coalescing
-                                                       * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coalescing Receive
-                                                       * Threshold */
+#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coal Rx Threshold */
 #define E1000_DMACR_DMACTHR_SHIFT       16
-#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe
-                                                       * transactions */
+#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe trans */
 #define E1000_DMACR_DMAC_LX_SHIFT       28
 #define E1000_DMACR_DMAC_EN             0x80000000 /* Enable DMA Coalescing */
 /* DMA Coalescing BMC-to-OS Watchdog Enable */
 #define E1000_DMACR_DC_BMC2OSW_EN      0x00008000
 
-#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coalescing Transmit
-                                                       * Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coal Tx Threshold */
 
 #define E1000_DMCTLX_TTLX_MASK          0x00000FFF /* Time to LX request */
 
-#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Receive Traffic Rate
-                                                       * Threshold */
-#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rcv packet rate in
-                                                       * current window */
+#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rx pkt rate curr window */
 
-#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rcv Traffic
-                                                       * Current Cnt */
+#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rx Current Cnt */
 
-#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* Flow ctrl Rcv Threshold
-                                                       * High val */
+#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* FC Rx Thresh High val */
 #define E1000_FCRTC_RTH_COAL_SHIFT      4
 #define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision */
 
  *   o LSC    = Link Status Change
  */
 #define IMS_ENABLE_MASK ( \
-    E1000_IMS_RXT0   |    \
-    E1000_IMS_TXDW   |    \
-    E1000_IMS_RXDMT0 |    \
-    E1000_IMS_RXSEQ  |    \
-    E1000_IMS_LSC    |    \
-    E1000_IMS_DOUTSYNC)
+       E1000_IMS_RXT0   |    \
+       E1000_IMS_TXDW   |    \
+       E1000_IMS_RXDMT0 |    \
+       E1000_IMS_RXSEQ  |    \
+       E1000_IMS_LSC    |    \
+       E1000_IMS_DOUTSYNC)
 
 /* Interrupt Mask Set */
 #define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
 
 /* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
-                                                      on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power on DMA coal */
 
 /* Tx Rate-Scheduler Config fields */
 #define E1000_RTTBCNRC_RS_ENA          0x80000000
index 10741d170f2ddad46b2fad14d362998bdc19d639..89925e4058498ea1c1ffda3195576d8abcda611e 100644 (file)
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_HW_H_
 #define _E1000_HW_H_
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
 #include "e1000_mbx.h"
 
 struct e1000_mac_operations {
-       s32  (*check_for_link)(struct e1000_hw *);
-       s32  (*reset_hw)(struct e1000_hw *);
-       s32  (*init_hw)(struct e1000_hw *);
+       s32 (*check_for_link)(struct e1000_hw *);
+       s32 (*reset_hw)(struct e1000_hw *);
+       s32 (*init_hw)(struct e1000_hw *);
        bool (*check_mng_mode)(struct e1000_hw *);
-       s32  (*setup_physical_interface)(struct e1000_hw *);
+       s32 (*setup_physical_interface)(struct e1000_hw *);
        void (*rar_set)(struct e1000_hw *, u8 *, u32);
-       s32  (*read_mac_addr)(struct e1000_hw *);
-       s32  (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
-       s32  (*acquire_swfw_sync)(struct e1000_hw *, u16);
+       s32 (*read_mac_addr)(struct e1000_hw *);
+       s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+       s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
        void (*release_swfw_sync)(struct e1000_hw *, u16);
 #ifdef CONFIG_IGB_HWMON
        s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
 };
 
 struct e1000_phy_operations {
-       s32  (*acquire)(struct e1000_hw *);
-       s32  (*check_polarity)(struct e1000_hw *);
-       s32  (*check_reset_block)(struct e1000_hw *);
-       s32  (*force_speed_duplex)(struct e1000_hw *);
-       s32  (*get_cfg_done)(struct e1000_hw *hw);
-       s32  (*get_cable_length)(struct e1000_hw *);
-       s32  (*get_phy_info)(struct e1000_hw *);
-       s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
+       s32 (*acquire)(struct e1000_hw *);
+       s32 (*check_polarity)(struct e1000_hw *);
+       s32 (*check_reset_block)(struct e1000_hw *);
+       s32 (*force_speed_duplex)(struct e1000_hw *);
+       s32 (*get_cfg_done)(struct e1000_hw *hw);
+       s32 (*get_cable_length)(struct e1000_hw *);
+       s32 (*get_phy_info)(struct e1000_hw *);
+       s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
        void (*release)(struct e1000_hw *);
-       s32  (*reset)(struct e1000_hw *);
-       s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
-       s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
-       s32  (*write_reg)(struct e1000_hw *, u32, u16);
+       s32 (*reset)(struct e1000_hw *);
+       s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+       s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+       s32 (*write_reg)(struct e1000_hw *, u32, u16);
        s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
        s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
 };
 
 struct e1000_nvm_operations {
-       s32  (*acquire)(struct e1000_hw *);
-       s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
+       s32 (*acquire)(struct e1000_hw *);
+       s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
        void (*release)(struct e1000_hw *);
-       s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
-       s32  (*update)(struct e1000_hw *);
-       s32  (*validate)(struct e1000_hw *);
-       s32  (*valid_led_default)(struct e1000_hw *, u16 *);
+       s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+       s32 (*update)(struct e1000_hw *);
+       s32 (*validate)(struct e1000_hw *);
+       s32 (*valid_led_default)(struct e1000_hw *, u16 *);
 };
 
 #define E1000_MAX_SENSORS              3
index db963397cc27f42fd15829ec6dc540e19af5f562..2231598fb42d12833c8c4a134e7e888080e02df4 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* e1000_i210
  * e1000_i211
@@ -365,7 +362,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
                        word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
                        if (word_address == address) {
                                *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
-                               hw_dbg("Read INVM Word 0x%02x = %x",
+                               hw_dbg("Read INVM Word 0x%02x = %x\n",
                                          address, *data);
                                status = E1000_SUCCESS;
                                break;
@@ -435,6 +432,7 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
                        *data = ID_LED_RESERVED_FFFF;
                        ret_val = E1000_SUCCESS;
                }
+               break;
        case NVM_SUB_DEV_ID:
                *data = hw->subsystem_device_id;
                break;
index 907fe99a9813130e45a3dddf0d5d48c6dfdc492d..9f34976687baedc7eb4d4844678cb2592c10e9d1 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
index 5910a932ea7c92cb67223a7c900f7c3b3e36a990..2a88595f956cf4e3089d20a986f0adb9db48681e 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
  *  The caller must have a packed mc_addr_list of multicast addresses.
  **/
 void igb_update_mc_addr_list(struct e1000_hw *hw,
-                             u8 *mc_addr_list, u32 mc_addr_count)
+                            u8 *mc_addr_list, u32 mc_addr_count)
 {
        u32 hash_value, hash_bit, hash_reg;
        int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                        goto out;
 
                if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
-                       hw_dbg("Copper PHY and Auto Neg "
-                                "has not completed.\n");
+                       hw_dbg("Copper PHY and Auto Neg has not completed.\n");
                        goto out;
                }
 
@@ -929,11 +925,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                         */
                        if (hw->fc.requested_mode == e1000_fc_full) {
                                hw->fc.current_mode = e1000_fc_full;
-                               hw_dbg("Flow Control = FULL.\r\n");
+                               hw_dbg("Flow Control = FULL.\n");
                        } else {
                                hw->fc.current_mode = e1000_fc_rx_pause;
-                               hw_dbg("Flow Control = "
-                                      "RX PAUSE frames only.\r\n");
+                               hw_dbg("Flow Control = RX PAUSE frames only.\n");
                        }
                }
                /* For receiving PAUSE frames ONLY.
@@ -948,7 +943,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                          (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                          (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                        hw->fc.current_mode = e1000_fc_tx_pause;
-                       hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
+                       hw_dbg("Flow Control = TX PAUSE frames only.\n");
                }
                /* For transmitting PAUSE frames ONLY.
                 *
@@ -962,7 +957,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                         !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                        hw->fc.current_mode = e1000_fc_rx_pause;
-                       hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+                       hw_dbg("Flow Control = RX PAUSE frames only.\n");
                }
                /* Per the IEEE spec, at this point flow control should be
                 * disabled.  However, we want to consider that we could
@@ -988,10 +983,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                         (hw->fc.requested_mode == e1000_fc_tx_pause) ||
                         (hw->fc.strict_ieee)) {
                        hw->fc.current_mode = e1000_fc_none;
-                       hw_dbg("Flow Control = NONE.\r\n");
+                       hw_dbg("Flow Control = NONE.\n");
                } else {
                        hw->fc.current_mode = e1000_fc_rx_pause;
-                       hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+                       hw_dbg("Flow Control = RX PAUSE frames only.\n");
                }
 
                /* Now we need to do one last check...  If we auto-
@@ -1266,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
        while (i < AUTO_READ_DONE_TIMEOUT) {
                if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
                        break;
-               msleep(1);
+               usleep_range(1000, 2000);
                i++;
        }
 
@@ -1299,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
        }
 
        if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
-               switch(hw->phy.media_type) {
+               switch (hw->phy.media_type) {
                case e1000_media_type_internal_serdes:
                        *data = ID_LED_DEFAULT_82575_SERDES;
                        break;
index 99299ba8ee3a2def53ab2e6fcc8c5c039aca0265..ea24961b0d705e557a6b9bd57772d984ab0927ae 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_MAC_H_
 #define _E1000_MAC_H_
index d5b121771c313716543b16f5a8464866afd4f8e2..162cc49345d09babbd7fab30ec0917215e1a9b64 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "e1000_mbx.h"
 
index f52f5515e5a8a8aedcc1568f90fdf1b986b2947e..d20af6b2f581698098a972d557a0a93fe19d1d48 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_MBX_H_
 #define _E1000_MBX_H_
index 9abf82919c65535d7b3fd7f7d7200f80fffd1f0a..e8280d0d7f022942c81d399945259cf4b0a5d1f9 100644 (file)
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
                /* Loop to allow for up to whole page write of eeprom */
                while (widx < words) {
                        u16 word_out = data[widx];
+
                        word_out = (word_out >> 8) | (word_out << 8);
                        igb_shift_out_eec_bits(hw, word_out, 16);
                        widx++;
@@ -801,5 +798,4 @@ etrack_id:
                fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
                        | eeprom_verl;
        }
-       return;
 }
index 5b101170b17e4bbc9af310c8aacd5e0b891344a0..febc9cdb739125174e143b0159feb0b529cb5ac6 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_NVM_H_
 #define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
 s32  igb_read_mac_addr(struct e1000_hw *hw);
 s32  igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
 s32  igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
-                          u32 part_num_size);
+                         u32 part_num_size);
 s32  igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32  igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32  igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
index 4009bbab7407d21945e7c1af20df5deedba564a3..c1bb64d8366fa5e7741905ea9fc22d4b241054ce 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
        if (phy->autoneg_wait_to_complete) {
                ret_val = igb_wait_autoneg(hw);
                if (ret_val) {
-                       hw_dbg("Error while waiting for "
-                              "autoneg to complete\n");
+                       hw_dbg("Error while waiting for autoneg to complete\n");
                        goto out;
                }
        }
@@ -2208,16 +2204,10 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
 void igb_power_up_phy_copper(struct e1000_hw *hw)
 {
        u16 mii_reg = 0;
-       u16 power_reg = 0;
 
        /* The PHY will retain its settings across a power down/up cycle */
        hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
        mii_reg &= ~MII_CR_POWER_DOWN;
-       if (hw->phy.type == e1000_phy_i210) {
-               hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
-               power_reg &= ~GS40G_CS_POWER_DOWN;
-               hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
-       }
        hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
 }
 
@@ -2231,20 +2221,12 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
 void igb_power_down_phy_copper(struct e1000_hw *hw)
 {
        u16 mii_reg = 0;
-       u16 power_reg = 0;
 
        /* The PHY will retain its settings across a power down/up cycle */
        hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
        mii_reg |= MII_CR_POWER_DOWN;
-
-       /* i210 Phy requires an additional bit for power up/down */
-       if (hw->phy.type == e1000_phy_i210) {
-               hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
-               power_reg |= GS40G_CS_POWER_DOWN;
-               hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
-       }
        hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
-       msleep(1);
+       usleep_range(1000, 2000);
 }
 
 /**
index 4c2c36c46a7398d217c1418b3966b4cde5813812..7af4ffab0285653c4c400992edcd163782f919ac 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_PHY_H_
 #define _E1000_PHY_H_
@@ -154,7 +151,6 @@ s32  igb_check_polarity_m88(struct e1000_hw *hw);
 #define GS40G_MAC_LB                   0x4140
 #define GS40G_MAC_SPEED_1G             0X0006
 #define GS40G_COPPER_SPEC              0x0010
-#define GS40G_CS_POWER_DOWN            0x0002
 #define GS40G_LINE_LB                  0x4000
 
 /* SFP modules ID memory locations */
index bdb246e848e13bb5e569f279336dbb5a2c5bfe86..833bbb948d970975cd3a42cfa759f7604f8f04a3 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_REGS_H_
 #define _E1000_REGS_H_
 #define E1000_RA2      0x054E0  /* 2nd half of Rx address array - RW Array */
 #define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
 #define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
-                                       (0x054E0 + ((_i - 16) * 8)))
+                                       (0x054E0 + ((_i - 16) * 8)))
 #define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
-                                       (0x054E4 + ((_i - 16) * 8)))
+                                       (0x054E4 + ((_i - 16) * 8)))
 #define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
 #define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
 #define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
 #define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
 #define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
 #define E1000_DVMOLR(_n)       (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
-                                                       * Filter - RW */
+#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
 #define E1000_VMVIR(_n)        (0x03700 + (4 * (_n)))
 
 struct e1000_hw;
index 27130065d92a70679292ef316aaadf848772944c..06102d1f7c0362208118ec99dcaba0e6db89eeb3 100644 (file)
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* Linux PRO/1000 Ethernet Driver main header file */
 
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
        unsigned int bytecount;
        u16 gso_segs;
        __be16 protocol;
+
        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
index e5570acbeea84509855a98383ab128876e7447c9..a84297c85fb1250342fa7b8d3e52a0a64be626a0 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* ethtool support for igb */
 
@@ -286,7 +283,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        }
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (ecmd->autoneg == AUTONEG_ENABLE) {
                hw->mac.autoneg = 1;
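
The msleep() to usleep_range() conversions in this and the following hunks follow the kernel's timer guidance: msleep(1) is backed by the coarse jiffies timer and can sleep far longer than requested (roughly a full timer tick or more), while usleep_range() uses hrtimers and gives the scheduler an explicit window for coalescing wakeups. A minimal sketch of the wait-for-reset pattern used here, with hypothetical names standing in for the adapter state:

#include <linux/bitops.h>	/* test_and_set_bit(), clear_bit() */
#include <linux/delay.h>	/* usleep_range() */

static unsigned long example_state;	/* stands in for adapter->state */
#define EXAMPLE_RESETTING_BIT	0

static void example_wait_for_reset_slot(void)
{
	/* Poll until the bit is won; sleep 1-2 ms per retry rather than
	 * msleep(1), which may overshoot badly at low HZ.
	 */
	while (test_and_set_bit(EXAMPLE_RESETTING_BIT, &example_state))
		usleep_range(1000, 2000);

	/* ... reconfigure the hardware here ... */

	clear_bit(EXAMPLE_RESETTING_BIT, &example_state);
}
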
@@ -399,7 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
        adapter->fc_autoneg = pause->autoneg;
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (adapter->fc_autoneg == AUTONEG_ENABLE) {
                hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +883,7 @@ static int igb_set_ringparam(struct net_device *netdev,
        }
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (!netif_running(adapter->netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1057,8 @@ static struct igb_reg_test reg_test_i350[] = {
        { E1000_TDT(0),    0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TDT(4),    0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
        { E1000_TCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RA,        0, 16, TABLE64_TEST_LO,
                                                0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1100,8 @@ static struct igb_reg_test reg_test_82580[] = {
        { E1000_TDT(0),    0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TDT(4),    0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
        { E1000_TCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RA,        0, 16, TABLE64_TEST_LO,
                                                0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1129,10 @@ static struct igb_reg_test reg_test_82576[] = {
        { E1000_RDBAH(4),  0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(4),  0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        /* Enable all RX queues before testing. */
-       { E1000_RXDCTL(0), 0x100, 4,  WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
-       { E1000_RXDCTL(4), 0x40, 12,  WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+       { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+         E1000_RXDCTL_QUEUE_ENABLE },
+       { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+         E1000_RXDCTL_QUEUE_ENABLE },
        /* RDH is read-only for 82576, only test RDT. */
        { E1000_RDT(0),    0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RDT(4),    0x40, 12,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1148,14 @@ static struct igb_reg_test reg_test_82576[] = {
        { E1000_TDBAH(4),  0x40, 12,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_TDLEN(4),  0x40, 12,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
        { E1000_TCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RA,        0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA,        0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
        { E1000_RA2,       0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA2,       0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
-       { E1000_MTA,       0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+       { E1000_MTA,       0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { 0, 0, 0, 0 }
 };
 
@@ -1170,7 +1169,8 @@ static struct igb_reg_test reg_test_82575[] = {
        { E1000_RDBAH(0),  0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(0),  0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        /* Enable all four RX queues before testing. */
-       { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+       { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+         E1000_RXDCTL_QUEUE_ENABLE },
        /* RDH is read-only for 82575, only test RDT. */
        { E1000_RDT(0),    0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1196,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 pat, val;
-       static const u32 _test[] =
-               {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+       static const u32 _test[] = {
+               0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
        for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
                wr32(reg, (_test[pat] & write));
                val = rd32(reg) & mask;
@@ -1206,11 +1206,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
                                "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
                                reg, val, (_test[pat] & write & mask));
                        *data = reg;
-                       return 1;
+                       return true;
                }
        }
 
-       return 0;
+       return false;
 }
 
 static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
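
Since reg_pattern_test() and reg_set_and_check() are declared bool, the numeric returns above become the true/false literals. A trivial sketch of the convention, using a hypothetical helper:

#include <linux/types.h>	/* bool, true, false, u32 */

/* Hypothetical check: does the masked readback match what was written? */
static bool example_reg_matches(u32 val, u32 written, u32 mask)
{
	return (val & mask) == (written & mask);
}
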
@@ -1218,17 +1218,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 val;
+
        wr32(reg, write & mask);
        val = rd32(reg);
        if ((write & mask) != (val & mask)) {
                dev_err(&adapter->pdev->dev,
-                       "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
-                       (val & mask), (write & mask));
+                       "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+                       reg, (val & mask), (write & mask));
                *data = reg;
-               return 1;
+               return true;
        }
 
-       return 0;
+       return false;
 }
 
 #define REG_PATTERN_TEST(reg, mask, write) \
@@ -1387,14 +1388,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
        /* Hook up test interrupt handler just for this test */
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                if (request_irq(adapter->msix_entries[0].vector,
-                               igb_test_intr, 0, netdev->name, adapter)) {
+                               igb_test_intr, 0, netdev->name, adapter)) {
                        *data = 1;
                        return -1;
                }
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                shared_int = false;
                if (request_irq(irq,
-                               igb_test_intr, 0, netdev->name, adapter)) {
+                               igb_test_intr, 0, netdev->name, adapter)) {
                        *data = 1;
                        return -1;
                }
@@ -1412,7 +1413,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
        /* Disable all the interrupts */
        wr32(E1000_IMC, ~0);
        wrfl();
-       msleep(10);
+       usleep_range(10000, 11000);
 
        /* Define all writable bits for ICS */
        switch (hw->mac.type) {
@@ -1459,7 +1460,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                        wr32(E1000_IMC, mask);
                        wr32(E1000_ICS, mask);
                        wrfl();
-                       msleep(10);
+                       usleep_range(10000, 11000);
 
                        if (adapter->test_icr & mask) {
                                *data = 3;
@@ -1481,7 +1482,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                wr32(E1000_IMS, mask);
                wr32(E1000_ICS, mask);
                wrfl();
-               msleep(10);
+               usleep_range(10000, 11000);
 
                if (!(adapter->test_icr & mask)) {
                        *data = 4;
@@ -1503,7 +1504,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                        wr32(E1000_IMC, ~mask);
                        wr32(E1000_ICS, ~mask);
                        wrfl();
-                       msleep(10);
+                       usleep_range(10000, 11000);
 
                        if (adapter->test_icr & mask) {
                                *data = 5;
@@ -1515,7 +1516,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
        /* Disable all the interrupts */
        wr32(E1000_IMC, ~0);
        wrfl();
-       msleep(10);
+       usleep_range(10000, 11000);
 
        /* Unhook test interrupt handler */
        if (adapter->flags & IGB_FLAG_HAS_MSIX)
@@ -1949,6 +1950,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
        *data = 0;
        if (hw->phy.media_type == e1000_media_type_internal_serdes) {
                int i = 0;
+
                hw->mac.serdes_has_link = false;
 
                /* On some blade server designs, link establishment
@@ -2413,9 +2415,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* Fall through */
        case UDP_V4_FLOW:
                if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* Fall through */
        case SCTP_V4_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
@@ -2425,9 +2429,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
                break;
        case TCP_V6_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* Fall through */
        case UDP_V6_FLOW:
                if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* Fall through */
        case SCTP_V6_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
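
The /* Fall through */ comments added in these two hunks document that the missing break statements are intentional: each case accumulates the hash fields of the cases below it, and the annotation keeps static checkers from flagging the cascade. A small illustrative switch, not taken from the driver:

#include <linux/types.h>

/* Hypothetical flow-type dispatch that relies on deliberate fall-through. */
static u32 example_hash_fields(int flow_type, bool udp_rss_enabled)
{
	u32 data = 0;

	switch (flow_type) {
	case 1:				/* say, a TCP flow */
		data |= 0x1;
		/* Fall through */
	case 2:				/* say, a UDP flow */
		if (udp_rss_enabled)
			data |= 0x2;
		/* Fall through */
	default:			/* remaining flow types */
		data |= 0x4;
		break;
	}

	return data;
}
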
@@ -3029,5 +3035,5 @@ static const struct ethtool_ops igb_ethtool_ops = {
 
 void igb_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+       netdev->ethtool_ops = &igb_ethtool_ops;
 }
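
SET_ETHTOOL_OPS() was only a wrapper around a pointer assignment and is being removed from the tree, so the driver now assigns netdev->ethtool_ops directly. A minimal sketch with a hypothetical ops table:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical ethtool ops for a toy driver. */
static const struct ethtool_ops example_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
};

static void example_set_ethtool_ops(struct net_device *netdev)
{
	/* Replaces SET_ETHTOOL_OPS(netdev, &example_ethtool_ops). */
	netdev->ethtool_ops = &example_ethtool_ops;
}
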
index 8333f67acf96b3a6e83746c1cedd1f0eff7636ec..44b6a68f1af727136271132014b1efa412ee7e32 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "igb.h"
 #include "e1000_82575.h"
index fb98d4602f9d4fd130b660de0265bee786fec7a6..ea2868b22c2d676eb57dfe8da63fff49bccfbcca 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
 };
 
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
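
DEFINE_PCI_DEVICE_TABLE() is likewise deprecated in favour of spelling out the const struct pci_device_id array, which is essentially what the macro expanded to. A sketch with made-up IDs:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical vendor/device pair, for illustration only. */
#define EXAMPLE_VENDOR_ID	0x8086
#define EXAMPLE_DEVICE_ID	0x10c9

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
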
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
 
-void igb_reset(struct igb_adapter *);
 static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
-                                                struct rtnl_link_stats64 *stats);
+                                         struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+                         netdev_features_t features);
 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
 static void igb_restore_vlan(struct igb_adapter *);
@@ -172,7 +169,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
                               int vf, u16 vlan, u8 qos);
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
                                   bool setting);
 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
 static void igb_netpoll(struct net_device *);
 #endif
 #ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
-                 "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
 #endif /* CONFIG_PCI_IOV */
 
 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
-               pr_info("Device Name     state            trans_start      "
-                       "last_rx\n");
+               pr_info("Device Name     state            trans_start      last_rx\n");
                pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
                        netdev->state, netdev->trans_start, netdev->last_rx);
        }
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
                pr_info("------------------------------------\n");
                pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
                pr_info("------------------------------------\n");
-               pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
-                       "[bi->dma       ] leng  ntw timestamp        "
-                       "bi->skb\n");
+               pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");
 
                for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
                        const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
                        else
                                next_desc = "";
 
-                       pr_info("T [0x%03X]    %016llX %016llX %016llX"
-                               " %04X  %p %016llX %p%s\n", i,
-                               le64_to_cpu(u0->a),
+                       pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
+                               i, le64_to_cpu(u0->a),
                                le64_to_cpu(u0->b),
                                (u64)dma_unmap_addr(buffer_info, dma),
                                dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
                pr_info("------------------------------------\n");
                pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
                pr_info("------------------------------------\n");
-               pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
-                       "[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
-               pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
-                       "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+               pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
+               pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
 
                for (i = 0; i < rx_ring->count; i++) {
                        const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
        struct e1000_hw *hw = &adapter->hw;
        s32 i2cctl = rd32(E1000_I2CPARAMS);
 
-       return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+       return !!(i2cctl & E1000_I2C_DATA_IN);
 }
 
 /**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
        struct e1000_hw *hw = &adapter->hw;
        s32 i2cctl = rd32(E1000_I2CPARAMS);
 
-       return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+       return !!(i2cctl & E1000_I2C_CLK_IN);
 }
 
 static const struct i2c_algo_bit_data igb_i2c_algo = {
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
 static int __init igb_init_module(void)
 {
        int ret;
+
        pr_info("%s - version %s\n",
               igb_driver_string, igb_driver_version);
-
        pr_info("%s\n", igb_copyright);
 
 #ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
                                adapter->rx_ring[i]->reg_idx = rbase_offset +
                                                               Q_IDX_82576(i);
                }
+               /* Fall through */
        case e1000_82575:
        case e1000_82580:
        case e1000_i350:
        case e1000_i354:
        case e1000_i210:
        case e1000_i211:
+               /* Fall through */
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
                if (adapter->hw.mac.type >= e1000_82576)
                        set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
 
-               /*
-                * On i350, i354, i210, and i211, loopback VLAN packets
+               /* On i350, i354, i210, and i211, loopback VLAN packets
                 * have the tag byte-swapped.
                 */
                if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
        for (; v_idx < q_vectors; v_idx++) {
                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
                err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
                                         tqpv, txr_idx, rqpv, rxr_idx);
 
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
         */
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                u32 regval = rd32(E1000_EIAM);
+
                wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
                wr32(E1000_EIMC, adapter->eims_enable_mask);
                regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
        wrfl();
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                int i;
+
                for (i = 0; i < adapter->num_q_vectors; i++)
                        synchronize_irq(adapter->msix_entries[i].vector);
        } else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
                u32 regval = rd32(E1000_EIAC);
+
                wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
                regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
+
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
-       msleep(10);
+       usleep_range(10000, 11000);
 
        igb_irq_disable(adapter);
 
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 {
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
        /* disable receive for all VFs and wait one second */
        if (adapter->vfs_allocated_count) {
                int i;
+
                for (i = 0 ; i < adapter->vfs_allocated_count; i++)
                        adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
 
@@ -2087,7 +2084,7 @@ static const struct net_device_ops igb_netdev_ops = {
        .ndo_vlan_rx_kill_vid   = igb_vlan_rx_kill_vid,
        .ndo_set_vf_mac         = igb_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = igb_ndo_set_vf_vlan,
-       .ndo_set_vf_tx_rate     = igb_ndo_set_vf_bw,
+       .ndo_set_vf_rate        = igb_ndo_set_vf_bw,
        .ndo_set_vf_spoofchk    = igb_ndo_set_vf_spoofchk,
        .ndo_get_vf_config      = igb_ndo_get_vf_config,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2142,7 +2139,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
                }
                break;
        }
-       return;
 }
 
 /**
@@ -2529,7 +2525,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* let the f/w know that the h/w is now under the control of the
-        * driver. */
+        * driver.
+        */
        igb_get_hw_control(adapter);
 
        strcpy(netdev->name, "eth%d");
@@ -3077,6 +3074,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
+
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }
@@ -3248,7 +3246,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
  *  Configure a transmit ring after a reset.
  **/
 void igb_configure_tx_ring(struct igb_adapter *adapter,
-                           struct igb_ring *ring)
+                          struct igb_ring *ring)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 txdctl = 0;
@@ -3389,7 +3387,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 
        if (adapter->rss_indir_tbl_init != num_rx_queues) {
                for (j = 0; j < IGB_RETA_SIZE; j++)
-                       adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+                       adapter->rss_indir_tbl[j] =
+                       (j * num_rx_queues) / IGB_RETA_SIZE;
                adapter->rss_indir_tbl_init = num_rx_queues;
        }
        igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3429,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
                if (hw->mac.type > e1000_82575) {
                        /* Set the default pool for the PF's first queue */
                        u32 vtctl = rd32(E1000_VT_CTL);
+
                        vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
                                   E1000_VT_CTL_DISABLE_DEF_POOL);
                        vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3511,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
 }
 
 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
-                                   int vfn)
+                                  int vfn)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 vmolr;
@@ -4058,7 +4058,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
        switch (hw->mac.type) {
        case e1000_82576:
        case e1000_i350:
-               if (!(wvbr = rd32(E1000_WVBR)))
+               wvbr = rd32(E1000_WVBR);
+               if (!wvbr)
                        return;
                break;
        default:
@@ -4077,7 +4078,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
        if (!adapter->wvbr)
                return;
 
-       for(j = 0; j < adapter->vfs_allocated_count; j++) {
+       for (j = 0; j < adapter->vfs_allocated_count; j++) {
                if (adapter->wvbr & (1 << j) ||
                    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
                        dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4210,15 @@ static void igb_watchdog_task(struct work_struct *work)
 
                if (!netif_carrier_ok(netdev)) {
                        u32 ctrl;
+
                        hw->mac.ops.get_speed_and_duplex(hw,
                                                         &adapter->link_speed,
                                                         &adapter->link_duplex);
 
                        ctrl = rd32(E1000_CTRL);
                        /* Links status message must follow this format */
-                       printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
-                              "Duplex, Flow Control: %s\n",
+                       netdev_info(netdev,
+                              "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
                               netdev->name,
                               adapter->link_speed,
                               adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4244,8 @@ static void igb_watchdog_task(struct work_struct *work)
 
                        /* check for thermal sensor event */
                        if (igb_thermal_sensor_event(hw,
-                           E1000_THSTAT_LINK_THROTTLE)) {
-                               netdev_info(netdev, "The network adapter link "
-                                           "speed was downshifted because it "
-                                           "overheated\n");
-                       }
+                           E1000_THSTAT_LINK_THROTTLE))
+                               netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
 
                        /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
@@ -4277,12 +4276,11 @@ static void igb_watchdog_task(struct work_struct *work)
                        /* check for thermal sensor event */
                        if (igb_thermal_sensor_event(hw,
                            E1000_THSTAT_PWR_DOWN)) {
-                               netdev_err(netdev, "The network adapter was "
-                                          "stopped because it overheated\n");
+                               netdev_err(netdev, "The network adapter was stopped because it overheated\n");
                        }
 
                        /* Links status message must follow this format */
-                       printk(KERN_INFO "igb: %s NIC Link is Down\n",
+                       netdev_info(netdev, "igb: %s NIC Link is Down\n",
                               netdev->name);
                        netif_carrier_off(netdev);
 
@@ -4344,6 +4342,7 @@ static void igb_watchdog_task(struct work_struct *work)
        /* Cause software interrupt to ensure Rx ring is cleaned */
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                u32 eics = 0;
+
                for (i = 0; i < adapter->num_q_vectors; i++)
                        eics |= adapter->q_vector[i]->eims_value;
                wr32(E1000_EICS, eics);
@@ -4483,13 +4482,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
        case low_latency:  /* 50 usec aka 20000 ints/s */
                if (bytes > 10000) {
                        /* this if handles the TSO accounting */
-                       if (bytes/packets > 8000) {
+                       if (bytes/packets > 8000)
                                itrval = bulk_latency;
-                       } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+                       else if ((packets < 10) || ((bytes/packets) > 1200))
                                itrval = bulk_latency;
-                       } else if ((packets > 35)) {
+                       else if ((packets > 35))
                                itrval = lowest_latency;
-                       }
                } else if (bytes/packets > 2000) {
                        itrval = bulk_latency;
                } else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4673,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
                        return;
        } else {
                u8 l4_hdr = 0;
+
                switch (first->protocol) {
                case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4961,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
         */
        if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
                unsigned short f;
+
                for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                        count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
        } else {
@@ -5140,7 +5140,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
                max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        /* igb_down has a dependency on max_frame_size */
        adapter->max_frame_size = max_frame;
@@ -5193,8 +5193,10 @@ void igb_update_stats(struct igb_adapter *adapter,
 
        rcu_read_lock();
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               u32 rqdpc = rd32(E1000_RQDPC(i));
                struct igb_ring *ring = adapter->rx_ring[i];
+               u32 rqdpc = rd32(E1000_RQDPC(i));
+               if (hw->mac.type >= e1000_i210)
+                       wr32(E1000_RQDPC(i), 0);
 
                if (rqdpc) {
                        ring->rx_stats.drops += rqdpc;
@@ -5619,6 +5621,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
                        vmolr |= E1000_VMOLR_MPME;
                } else if (vf_data->num_vf_mc_hashes) {
                        int j;
+
                        vmolr |= E1000_VMOLR_ROMPE;
                        for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
                                igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5670,6 +5673,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
 
        for (i = 0; i < adapter->vfs_allocated_count; i++) {
                u32 vmolr = rd32(E1000_VMOLR(i));
+
                vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
 
                vf_data = &adapter->vf_data[i];
@@ -5768,6 +5772,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 
                        if (!adapter->vf_data[vf].vlans_enabled) {
                                u32 size;
+
                                reg = rd32(E1000_VMOLR(vf));
                                size = reg & E1000_VMOLR_RLPML_MASK;
                                size += 4;
@@ -5796,6 +5801,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
                        adapter->vf_data[vf].vlans_enabled--;
                        if (!adapter->vf_data[vf].vlans_enabled) {
                                u32 size;
+
                                reg = rd32(E1000_VMOLR(vf));
                                size = reg & E1000_VMOLR_RLPML_MASK;
                                size -= 4;
@@ -5900,8 +5906,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
         */
        if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
                u32 vlvf, bits;
-
                int regndx = igb_find_vlvf_entry(adapter, vid);
+
                if (regndx < 0)
                        goto out;
                /* See if any other pools are set for this VLAN filter
@@ -6492,7 +6498,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
        /* transfer page from old buffer to new buffer */
-       memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+       *new_buff = *old_buff;
 
        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
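
Replacing the sizeof-based memcpy() with a plain structure assignment, as done above, is type-checked and lets the compiler pick the copy width. A minimal illustration with a hypothetical buffer type:

#include <linux/types.h>

/* Hypothetical receive-buffer bookkeeping structure. */
struct example_rx_buffer {
	dma_addr_t dma;
	unsigned int page_offset;
};

static void example_reuse_buffer(struct example_rx_buffer *new_buf,
				 const struct example_rx_buffer *old_buf)
{
	/* Same copy as memcpy(new_buf, old_buf, sizeof(*old_buf)),
	 * but the two types must match for this to compile.
	 */
	*new_buf = *old_buf;
}
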
@@ -6961,6 +6967,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
        if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
                u16 vid;
+
                if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
                    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
                        vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7049,7 +7056,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        if (cleaned_count)
                igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
-       return (total_packets < budget);
+       return total_packets < budget;
 }
 
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7170,7 +7177,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                break;
        case SIOCGMIIREG:
                if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
-                                    &data->val_out))
+                                    &data->val_out))
                        return -EIO;
                break;
        case SIOCSMIIREG:
@@ -7871,7 +7878,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
        }
 }
 
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
+                            int min_tx_rate, int max_tx_rate)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -7880,15 +7888,19 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
        if (hw->mac.type != e1000_82576)
                return -EOPNOTSUPP;
 
+       if (min_tx_rate)
+               return -EINVAL;
+
        actual_link_speed = igb_link_mbps(adapter->link_speed);
        if ((vf >= adapter->vfs_allocated_count) ||
            (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
-           (tx_rate < 0) || (tx_rate > actual_link_speed))
+           (max_tx_rate < 0) ||
+           (max_tx_rate > actual_link_speed))
                return -EINVAL;
 
        adapter->vf_rate_link_speed = actual_link_speed;
-       adapter->vf_data[vf].tx_rate = (u16)tx_rate;
-       igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+       adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
+       igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
 
        return 0;
 }
@@ -7928,7 +7940,8 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
-       ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+       ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+       ivi->min_tx_rate = 0;
        ivi->vlan = adapter->vf_data[vf].pf_vlan;
        ivi->qos = adapter->vf_data[vf].pf_qos;
        ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
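
These two hunks track the net core switch from .ndo_set_vf_tx_rate to .ndo_set_vf_rate, which passes separate minimum and maximum rates. The hardware only implements a cap, so a non-zero min_tx_rate is rejected with -EINVAL and the reported VF config advertises min_tx_rate as 0. A condensed sketch of that shape, with hypothetical names in place of the adapter structures:

#include <linux/errno.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/netdevice.h>
#include <linux/types.h>

/* Hypothetical per-VF state; stands in for adapter->vf_data[vf]. */
struct example_vf {
	u16 tx_rate;	/* Mbps, 0 = unlimited */
};

static struct example_vf example_vfs[8];

static int example_ndo_set_vf_rate(struct net_device *netdev, int vf,
				   int min_tx_rate, int max_tx_rate)
{
	/* No minimum-bandwidth guarantee in hardware, only a cap. */
	if (min_tx_rate)
		return -EINVAL;

	if (vf < 0 || vf >= (int)ARRAY_SIZE(example_vfs) || max_tx_rate < 0)
		return -EINVAL;

	example_vfs[vf].tx_rate = (u16)max_tx_rate;
	/* ... program the per-VF rate limiter here ... */
	return 0;
}
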
@@ -7953,11 +7966,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
                reg = rd32(E1000_DTXCTL);
                reg |= E1000_DTXCTL_VLAN_ADDED;
                wr32(E1000_DTXCTL, reg);
+               /* Fall through */
        case e1000_82580:
                /* enable replication vlan tag stripping */
                reg = rd32(E1000_RPLOLR);
                reg |= E1000_RPLOLR_STRVLAN;
                wr32(E1000_RPLOLR, reg);
+               /* Fall through */
        case e1000_i350:
                /* none of the above registers are supported by i350 */
                break;
@@ -8047,6 +8062,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                } /* endif adapter->dmac is not disabled */
        } else if (hw->mac.type == e1000_82580) {
                u32 reg = rd32(E1000_PCIEMISC);
+
                wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
                wr32(E1000_DMACR, 0);
        }
index 9209d652e1c96090c712d78ce925612871ebad64..ab25e49365f79e26cfcad7a7ad12b8c5192824c9 100644 (file)
@@ -389,7 +389,7 @@ static void igb_ptp_tx_work(struct work_struct *work)
                adapter->ptp_tx_skb = NULL;
                clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
                adapter->tx_hwtstamp_timeouts++;
-               dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+               dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
                return;
        }
 
@@ -451,7 +451,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
                rd32(E1000_RXSTMPH);
                adapter->last_rx_ptp_check = jiffies;
                adapter->rx_hwtstamp_cleared++;
-               dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+               dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
        }
 }
 
index 90eef07943f4d50bbd027646e170abca5abfb1a3..7d4e8559e2e9d93524ced2c9fc3881b234101f9a 100644 (file)
@@ -119,7 +119,6 @@ static int igbvf_set_settings(struct net_device *netdev,
 static void igbvf_get_pauseparam(struct net_device *netdev,
                                  struct ethtool_pauseparam *pause)
 {
-       return;
 }
 
 static int igbvf_set_pauseparam(struct net_device *netdev,
@@ -476,5 +475,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
 
 void igbvf_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
+       netdev->ethtool_ops = &igbvf_ethtool_ops;
 }
index dbb7dd2f8e360e4d6c1013182e9d667a19d7fa56..1da2d987d370b12c0dbb6b81490d83e958b4ba36 100644 (file)
@@ -656,5 +656,5 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
 
 void ixgb_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+       netdev->ethtool_ops = &ixgb_ethtool_ops;
 }
index 1a12c1dd7a279c8f9db97c61a61ff4847c74db97..ac9f2148cdc5ca6d20e81a43170a9550ff6ffce6 100644 (file)
@@ -155,7 +155,6 @@ struct vf_data_storage {
 struct vf_macvlans {
        struct list_head l;
        int vf;
-       int rar_entry;
        bool free;
        bool is_macvlan;
        u8 vf_macvlan[ETH_ALEN];
@@ -256,7 +255,6 @@ struct ixgbe_ring {
                struct ixgbe_tx_buffer *tx_buffer_info;
                struct ixgbe_rx_buffer *rx_buffer_info;
        };
-       unsigned long last_rx_timestamp;
        unsigned long state;
        u8 __iomem *tail;
        dma_addr_t dma;                 /* phys. address of descriptor ring */
@@ -364,7 +362,7 @@ struct ixgbe_ring_container {
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
-                              ? 8 : 1)
+                             ? 8 : 1)
 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
 
 /* MAX_Q_VECTORS of these are allocated,
@@ -614,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
 #define MAX_MSIX_VECTORS_82598 18
 #define MAX_Q_VECTORS_82598 16
 
+struct ixgbe_mac_addr {
+       u8 addr[ETH_ALEN];
+       u16 queue;
+       u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT                0x1
+#define IXGBE_MAC_STATE_MODIFIED       0x2
+#define IXGBE_MAC_STATE_IN_USE         0x4
+
 #define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
 
@@ -770,6 +777,7 @@ struct ixgbe_adapter {
        unsigned long ptp_tx_start;
        unsigned long last_overflow_check;
        unsigned long last_rx_ptp_check;
+       unsigned long last_rx_timestamp;
        spinlock_t tmreg_lock;
        struct cyclecounter cc;
        struct timecounter tc;
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
 
        u32 timer_event_accumulator;
        u32 vferr_refcount;
+       struct ixgbe_mac_addr *mac_table;
        struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
        struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                               u16 subdevice_id);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+                        u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+                        u8 *addr, u16 queue);
 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
                                  struct ixgbe_ring *);
@@ -941,27 +957,11 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
 }
 
 void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-                            struct sk_buff *skb);
-static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
-                                        union ixgbe_adv_rx_desc *rx_desc,
-                                        struct sk_buff *skb)
-{
-       if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
-               return;
-
-       __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
-
-       /*
-        * Update the last_rx_timestamp timer in order to enable watchdog check
-        * for error case of latched timestamp on a dropped packet.
-        */
-       rx_ring->last_rx_timestamp = jiffies;
-}
-
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb);
 int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
index 4c78ea8946c1b48838db6d7e9ce890089ff6ab59..15609331ec170ddc6bb9e62c4227107fc6b50cad 100644 (file)
 #define IXGBE_82598_RX_PB_SIZE  512
 
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete);
+                                        ixgbe_link_speed speed,
+                                        bool autoneg_wait_to_complete);
 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-                                       u8 *eeprom_data);
+                                      u8 *eeprom_data);
 
 /**
  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -140,7 +140,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
                phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
                phy->ops.check_link = &ixgbe_check_phy_link_tnx;
                phy->ops.get_firmware_version =
-                            &ixgbe_get_phy_firmware_version_tnx;
+                            &ixgbe_get_phy_firmware_version_tnx;
                break;
        case ixgbe_phy_nl:
                phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -156,8 +156,8 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 
                /* Check to see if SFP+ module is supported */
                ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
-                                                           &list_offset,
-                                                           &data_offset);
+                                                           &list_offset,
+                                                           &data_offset);
                if (ret_val != 0) {
                        ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
                        goto out;
@@ -219,8 +219,8 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
  *  Determines the link capabilities by reading the AUTOC register.
  **/
 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg)
+                                            ixgbe_link_speed *speed,
+                                            bool *autoneg)
 {
        s32 status = 0;
        u32 autoc = 0;
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
        int i;
        bool link_up;
 
-       /*
-        * Validate the water mark configuration for packet buffer 0.  Zero
-        * water marks indicate that the packet buffer was not configured
-        * and the watermarks for packet buffer 0 should always be configured.
-        */
-       if (!hw->fc.low_water ||
-           !hw->fc.high_water[0] ||
-           !hw->fc.pause_time) {
-               hw_dbg(hw, "Invalid water mark configuration\n");
+       /* Validate the water mark configuration */
+       if (!hw->fc.pause_time) {
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                goto out;
        }
 
+       /* Low water mark of zero causes XOFF floods */
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+                   hw->fc.high_water[i]) {
+                       if (!hw->fc.low_water[i] ||
+                           hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+                               hw_dbg(hw, "Invalid water mark configuration\n");
+                               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+                               goto out;
+                       }
+               }
+       }
+
        /*
         * On 82598 having Rx FC on causes resets while doing 1G
         * so if it's on turn it off once we know link_speed. For
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
        IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
 
-       fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
                    hw->fc.high_water[i]) {
+                       fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
@@ -468,7 +473,7 @@ out:
  *  Restarts the link.  Performs autonegotiation if needed.
  **/
 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
-                                      bool autoneg_wait_to_complete)
+                                     bool autoneg_wait_to_complete)
 {
        u32 autoc_reg;
        u32 links_reg;
@@ -550,8 +555,8 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
  *  Reads the links register to determine if link is up and the current speed
  **/
 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed, bool *link_up,
-                                      bool link_up_wait_to_complete)
+                                     ixgbe_link_speed *speed, bool *link_up,
+                                     bool link_up_wait_to_complete)
 {
        u32 links_reg;
        u32 i;
@@ -567,7 +572,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
                hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
                hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
-                                    &adapt_comp_reg);
+                                    &adapt_comp_reg);
                if (link_up_wait_to_complete) {
                        for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
                                if ((link_reg & 1) &&
@@ -579,11 +584,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                }
                                msleep(100);
                                hw->phy.ops.read_reg(hw, 0xC79F,
-                                                    MDIO_MMD_PMAPMD,
-                                                    &link_reg);
+                                                    MDIO_MMD_PMAPMD,
+                                                    &link_reg);
                                hw->phy.ops.read_reg(hw, 0xC00C,
-                                                    MDIO_MMD_PMAPMD,
-                                                    &adapt_comp_reg);
+                                                    MDIO_MMD_PMAPMD,
+                                                    &adapt_comp_reg);
                        }
                } else {
                        if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -656,7 +661,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
 
        /* Set KX4/KX support according to speed requested */
        else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
-                link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+                link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
                autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
                if (speed & IXGBE_LINK_SPEED_10GB_FULL)
                        autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -689,14 +694,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
  **/
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed speed,
-                                               bool autoneg_wait_to_complete)
+                                              ixgbe_link_speed speed,
+                                              bool autoneg_wait_to_complete)
 {
        s32 status;
 
        /* Setup the PHY according to input speed */
        status = hw->phy.ops.setup_link_speed(hw, speed,
-                                             autoneg_wait_to_complete);
+                                             autoneg_wait_to_complete);
        /* Set up MAC */
        ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 
@@ -735,28 +740,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
        if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
                /* Enable Tx Atlas so packets can be transmitted again */
                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-                                            &analog_val);
+                                            &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-                                             analog_val);
+                                             analog_val);
 
                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-                                            &analog_val);
+                                            &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-                                             analog_val);
+                                             analog_val);
 
                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-                                            &analog_val);
+                                            &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-                                             analog_val);
+                                             analog_val);
 
                hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-                                            &analog_val);
+                                            &analog_val);
                analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
                hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-                                             analog_val);
+                                             analog_val);
        }
 
        /* Reset PHY */
@@ -955,7 +960,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
        for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
                for (offset = 0; offset < hw->mac.vft_size; offset++)
                        IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
-                                       0);
+                                       0);
 
        return 0;
 }
@@ -973,7 +978,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
        u32  atlas_ctl;
 
        IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
-                       IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+                       IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
        IXGBE_WRITE_FLUSH(hw);
        udelay(10);
        atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1273,8 +1278,6 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
        /* Setup Tx packet buffer sizes */
        for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
                IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
-
-       return;
 }
 
 static struct ixgbe_mac_operations mac_ops_82598 = {
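
Annotation on the 82598 hunks above: the XON low-water threshold is now computed per traffic class (hw->fc.low_water[i]) inside the loop instead of once for the whole device. Below is a minimal, self-contained C sketch of that per-TC programming pattern; it is not driver code, and write_reg(), program_watermarks() and the constant values are hypothetical stand-ins for IXGBE_WRITE_REG() and the real register bits.

    /* Illustrative sketch only: per-TC flow-control threshold programming,
     * mirroring the shape of the loop in the hunk above. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TC      8
    #define FCRTL_XONE  0x80000000u  /* illustrative value for the XON enable bit */
    #define FCRTH_FCEN  0x80000000u  /* illustrative value for the threshold enable bit */

    static void write_reg(const char *name, int idx, uint32_t val)
    {
        printf("%s(%d) <- 0x%08x\n", name, idx, val);  /* stand-in for an MMIO write */
    }

    static void program_watermarks(const uint32_t *low, const uint32_t *high,
                                   int tx_pause_enabled)
    {
        int i;

        for (i = 0; i < MAX_TC; i++) {
            if (tx_pause_enabled && high[i]) {
                /* the << 10 matches the diff: marks are kept in KB units */
                write_reg("FCRTL", i, (low[i] << 10) | FCRTL_XONE);
                write_reg("FCRTH", i, (high[i] << 10) | FCRTH_FCEN);
            }
        }
    }

    int main(void)
    {
        uint32_t low[MAX_TC]  = { 4, 4, 4, 4, 4, 4, 4, 4 };
        uint32_t high[MAX_TC] = { 8, 8, 0, 0, 0, 0, 0, 0 };

        program_watermarks(low, high, 1);  /* only classes with a high mark are programmed */
        return 0;
    }
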
index f32b3dd1ba8e18911fd58876ed20e494d92ae389..bc7c924240a52490d50531060783e0f13bc97f20 100644 (file)
@@ -48,17 +48,17 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                                                 ixgbe_link_speed speed,
                                                 bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-                                           ixgbe_link_speed speed,
-                                           bool autoneg_wait_to_complete);
+                                          ixgbe_link_speed speed,
+                                          bool autoneg_wait_to_complete);
 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
-                               ixgbe_link_speed speed,
-                               bool autoneg_wait_to_complete);
+                              ixgbe_link_speed speed,
+                              bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete);
+                                        ixgbe_link_speed speed,
+                                        bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
                                     u8 dev_addr, u8 *data);
@@ -96,9 +96,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
        if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
            !ixgbe_mng_enabled(hw)) {
                mac->ops.disable_tx_laser =
-                                      &ixgbe_disable_tx_laser_multispeed_fiber;
+                                      &ixgbe_disable_tx_laser_multispeed_fiber;
                mac->ops.enable_tx_laser =
-                                       &ixgbe_enable_tx_laser_multispeed_fiber;
+                                       &ixgbe_enable_tx_laser_multispeed_fiber;
                mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
        } else {
                mac->ops.disable_tx_laser = NULL;
@@ -132,13 +132,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
                hw->phy.ops.reset = NULL;
 
                ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
-                                                             &data_offset);
+                                                             &data_offset);
                if (ret_val != 0)
                        goto setup_sfp_out;
 
                /* PHY config will finish before releasing the semaphore */
                ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-                                                       IXGBE_GSSR_MAC_CSR_SM);
+                                                       IXGBE_GSSR_MAC_CSR_SM);
                if (ret_val != 0) {
                        ret_val = IXGBE_ERR_SWFW_SYNC;
                        goto setup_sfp_out;
@@ -334,7 +334,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
                phy->ops.check_link = &ixgbe_check_phy_link_tnx;
                phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
                phy->ops.get_firmware_version =
-                            &ixgbe_get_phy_firmware_version_tnx;
+                            &ixgbe_get_phy_firmware_version_tnx;
                break;
        default:
                break;
@@ -352,7 +352,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
  *  Determines the link capabilities by reading the AUTOC register.
  **/
 static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
+                                            ixgbe_link_speed *speed,
                                             bool *autoneg)
 {
        s32 status = 0;
@@ -543,7 +543,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
  *  Restarts the link.  Performs autonegotiation if needed.
  **/
 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-                               bool autoneg_wait_to_complete)
+                              bool autoneg_wait_to_complete)
 {
        u32 autoc_reg;
        u32 links_reg;
@@ -672,8 +672,8 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-                                          ixgbe_link_speed speed,
-                                          bool autoneg_wait_to_complete)
+                                         ixgbe_link_speed speed,
+                                         bool autoneg_wait_to_complete)
 {
        s32 status = 0;
        ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -820,8 +820,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
         */
        if (speedcnt > 1)
                status = ixgbe_setup_mac_link_multispeed_fiber(hw,
-                                                              highest_link_speed,
-                                                              autoneg_wait_to_complete);
+                                                              highest_link_speed,
+                                                              autoneg_wait_to_complete);
 
 out:
        /* Set autoneg_advertised value based on input link speed */
@@ -1009,8 +1009,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                if (speed & IXGBE_LINK_SPEED_1GB_FULL)
                        autoc |= IXGBE_AUTOC_KX_SUPP;
        } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
-                  (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
-                   link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+                  (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+                   link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
                /* Switch from 1G SFI to 10G SFI if requested */
                if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
                    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -1018,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                        autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
                }
        } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
-                  (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+                  (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
                /* Switch from 10G SFI to 1G SFI if requested */
                if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
                    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -1051,7 +1051,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                                }
                                if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
                                        status =
-                                               IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+                                               IXGBE_ERR_AUTONEG_NOT_COMPLETE;
                                        hw_dbg(hw, "Autoneg did not complete.\n");
                                }
                        }
@@ -1074,14 +1074,14 @@ out:
  *  Restarts link on PHY and MAC based on settings passed in.
  **/
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed,
-                                         bool autoneg_wait_to_complete)
+                                        ixgbe_link_speed speed,
+                                        bool autoneg_wait_to_complete)
 {
        s32 status;
 
        /* Setup the PHY according to input speed */
        status = hw->phy.ops.setup_link_speed(hw, speed,
-                                             autoneg_wait_to_complete);
+                                             autoneg_wait_to_complete);
        /* Set up MAC */
        ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
 
@@ -1224,7 +1224,7 @@ mac_reset_top:
                    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
                        autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
                        autoc2 |= (hw->mac.orig_autoc2 &
-                                  IXGBE_AUTOC2_UPPER_MASK);
+                                  IXGBE_AUTOC2_UPPER_MASK);
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
                }
        }
@@ -1246,7 +1246,7 @@ mac_reset_top:
        /* Add the SAN MAC address to the RAR only if it's a valid address */
        if (is_valid_ether_addr(hw->mac.san_addr)) {
                hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
 
                /* Save the SAN MAC RAR index */
                hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -1257,7 +1257,7 @@ mac_reset_top:
 
        /* Store the alternative WWNN/WWPN prefix */
        hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
-                                      &hw->mac.wwpn_prefix);
+                                      &hw->mac.wwpn_prefix);
 
 reset_hw_out:
        return status;
@@ -1271,6 +1271,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 {
        int i;
        u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
        fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
 
        /*
@@ -1284,8 +1285,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
                udelay(10);
        }
        if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-               hw_dbg(hw, "Flow Director previous command isn't complete, "
-                      "aborting table re-initialization.\n");
+               hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
                return IXGBE_ERR_FDIR_REINIT_FAILED;
        }
 
@@ -1299,12 +1299,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
         * - write 0 to bit 8 of FDIRCMD register
         */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
-                        IXGBE_FDIRCMD_CLEARHT));
+                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+                        IXGBE_FDIRCMD_CLEARHT));
        IXGBE_WRITE_FLUSH(hw);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
-                        ~IXGBE_FDIRCMD_CLEARHT));
+                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+                        ~IXGBE_FDIRCMD_CLEARHT));
        IXGBE_WRITE_FLUSH(hw);
        /*
         * Clear FDIR Hash register to clear any leftover hashes
@@ -1319,7 +1319,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
        /* Poll init-done after we write FDIRCTRL register */
        for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                                  IXGBE_FDIRCTRL_INIT_DONE)
+                                  IXGBE_FDIRCTRL_INIT_DONE)
                        break;
                usleep_range(1000, 2000);
        }
@@ -1368,7 +1368,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
        IXGBE_WRITE_FLUSH(hw);
        for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                                  IXGBE_FDIRCTRL_INIT_DONE)
+                                  IXGBE_FDIRCTRL_INIT_DONE)
                        break;
                usleep_range(1000, 2000);
        }
@@ -1453,7 +1453,7 @@ do { \
                bucket_hash ^= hi_hash_dword >> n; \
        else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
                sig_hash ^= hi_hash_dword << (16 - n); \
-} while (0);
+} while (0)
 
 /**
  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
@@ -1529,9 +1529,9 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
  *  @queue: queue index to direct traffic to
  **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                          union ixgbe_atr_hash_dword input,
-                                          union ixgbe_atr_hash_dword common,
-                                          u8 queue)
+                                         union ixgbe_atr_hash_dword input,
+                                         union ixgbe_atr_hash_dword common,
+                                         u8 queue)
 {
        u64  fdirhashcmd;
        u32  fdircmd;
@@ -1555,7 +1555,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 
        /* configure FDIRCMD register */
        fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
        fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
        fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 
@@ -1579,7 +1579,7 @@ do { \
                bucket_hash ^= lo_hash_dword >> n; \
        if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
                bucket_hash ^= hi_hash_dword >> n; \
-} while (0);
+} while (0)
 
 /**
  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
@@ -1651,6 +1651,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
 {
        u32 mask = ntohs(input_mask->formatted.dst_port);
+
        mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
        mask |= ntohs(input_mask->formatted.src_port);
        mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
@@ -1885,7 +1886,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
        u32  core_ctl;
 
        IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
-                       (reg << 8));
+                       (reg << 8));
        IXGBE_WRITE_FLUSH(hw);
        udelay(10);
        core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
index 24fba39e194e682640391e42e5e618907ff230d9..4e5385a2a4658c8c5f0dc9ea4ee279d08693351d 100644 (file)
@@ -41,7 +41,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
-                                        u16 count);
+                                       u16 count);
 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
@@ -271,6 +271,7 @@ out:
  **/
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 {
+       s32 ret_val;
        u32 ctrl_ext;
 
        /* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
        IXGBE_WRITE_FLUSH(hw);
 
        /* Setup flow control */
-       ixgbe_setup_fc(hw);
+       ret_val = ixgbe_setup_fc(hw);
+       if (!ret_val)
+               goto out;
 
        /* Clear adapter stopped flag */
        hw->adapter_stopped = false;
 
-       return 0;
+out:
+       return ret_val;
 }
 
 /**
@@ -481,7 +485,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
  *  Reads the part number string from the EEPROM.
  **/
 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
-                                  u32 pba_num_size)
+                                 u32 pba_num_size)
 {
        s32 ret_val;
        u16 data;
@@ -814,9 +818,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
                        eeprom->address_bits = 16;
                else
                        eeprom->address_bits = 8;
-               hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
-                         "%d\n", eeprom->type, eeprom->word_size,
-                         eeprom->address_bits);
+               hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
+                      eeprom->type, eeprom->word_size, eeprom->address_bits);
        }
 
        return 0;
@@ -1195,7 +1198,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
         */
        hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
 
-       hw_dbg(hw, "Detected EEPROM page size = %d words.",
+       hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
               hw->eeprom.word_page_size);
 out:
        return status;
@@ -1388,8 +1391,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
        }
 
        if (i == timeout) {
-               hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
-                      "not granted.\n");
+               hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
                /*
                 * this release is particularly important because our attempts
                 * above to get the semaphore may have succeeded, and if there
@@ -1434,14 +1436,12 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                 * was not granted because we don't have access to the EEPROM
                 */
                if (i >= timeout) {
-                       hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
-                              "not granted.\n");
+                       hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
                        ixgbe_release_eeprom_semaphore(hw);
                        status = IXGBE_ERR_EEPROM;
                }
        } else {
-               hw_dbg(hw, "Software semaphore SMBI between device drivers "
-                      "not granted.\n");
+               hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
        }
 
        return status;
@@ -1483,7 +1483,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
         */
        for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
                ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
-                                           IXGBE_EEPROM_OPCODE_BITS);
+                                           IXGBE_EEPROM_OPCODE_BITS);
                spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
                if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
                        break;
@@ -1532,7 +1532,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
  *  @count: number of bits to shift out
  **/
 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
-                                        u16 count)
+                                       u16 count)
 {
        u32 eec;
        u32 mask;
@@ -1736,7 +1736,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
  *  caller does not need checksum_val, the value can be NULL.
  **/
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
-                                           u16 *checksum_val)
+                                          u16 *checksum_val)
 {
        s32 status;
        u16 checksum;
@@ -1809,7 +1809,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
  *  Puts an ethernet address into a receive address register.
  **/
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-                          u32 enable_addr)
+                         u32 enable_addr)
 {
        u32 rar_low, rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;
@@ -2053,7 +2053,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
 
        if (hw->addr_ctrl.mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
-                               IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+                               IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
 
        hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
        return 0;
@@ -2071,7 +2071,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
 
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
-                               hw->mac.mc_filter_type);
+                               hw->mac.mc_filter_type);
 
        return 0;
 }
@@ -2106,19 +2106,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
        u32 fcrtl, fcrth;
        int i;
 
-       /*
-        * Validate the water mark configuration for packet buffer 0.  Zero
-        * water marks indicate that the packet buffer was not configured
-        * and the watermarks for packet buffer 0 should always be configured.
-        */
-       if (!hw->fc.low_water ||
-           !hw->fc.high_water[0] ||
-           !hw->fc.pause_time) {
-               hw_dbg(hw, "Invalid water mark configuration\n");
+       /* Validate the water mark configuration. */
+       if (!hw->fc.pause_time) {
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                goto out;
        }
 
+       /* Low water mark of zero causes XOFF floods */
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+                   hw->fc.high_water[i]) {
+                       if (!hw->fc.low_water[i] ||
+                           hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+                               hw_dbg(hw, "Invalid water mark configuration\n");
+                               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+                               goto out;
+                       }
+               }
+       }
+
        /* Negotiate the fc mode to use */
        ixgbe_fc_autoneg(hw);
 
@@ -2181,12 +2187,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-       fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
                    hw->fc.high_water[i]) {
+                       fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
                        fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                } else {
@@ -2654,8 +2659,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
 
        /* For informational purposes only */
        if (i >= IXGBE_MAX_SECRX_POLL)
-               hw_dbg(hw, "Rx unit being enabled before security "
-                      "path fully disabled.  Continuing with init.\n");
+               hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
 
        return 0;
 
@@ -2782,7 +2786,7 @@ out:
  *  get and set mac_addr routines.
  **/
 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
-                                        u16 *san_mac_offset)
+                                       u16 *san_mac_offset)
 {
        s32 ret_val;
 
@@ -2828,7 +2832,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
        hw->mac.ops.set_lan_id(hw);
        /* apply the port offset to the address offset */
        (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
-                        (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+                        (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
        for (i = 0; i < 3; i++) {
                ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
                                              &san_mac_data);
@@ -3068,7 +3072,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
  *  Turn on/off specified VLAN in the VLAN filter table.
  **/
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-                           bool vlan_on)
+                          bool vlan_on)
 {
        s32 regindex;
        u32 bitindex;
@@ -3190,9 +3194,9 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                                 * Ignore it. */
                                vfta_changed = false;
                        }
-               }
-               else
+               } else {
                        IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+               }
        }
 
        if (vfta_changed)
@@ -3292,7 +3296,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
  *  block to check the support for the alternative WWNN/WWPN prefix support.
  **/
 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-                                        u16 *wwpn_prefix)
+                                       u16 *wwpn_prefix)
 {
        u16 offset, caps;
        u16 alt_san_mac_blk_offset;
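
Annotation on the ixgbe_fc_enable_generic() hunk above: the new validation requires, for every traffic class that uses Tx pause and has a high water mark configured, a non-zero low water mark strictly below the high mark; a zero or inverted low mark would cause XOFF floods. A minimal sketch of that rule follows; the function name and types are hypothetical, only the check itself mirrors the diff.

    /* Illustrative sketch only: the per-TC water-mark validation rule. */
    #include <stdbool.h>

    #define MAX_TC 8

    static bool watermarks_valid(const unsigned int *low, const unsigned int *high,
                                 bool tx_pause)
    {
        int i;

        for (i = 0; i < MAX_TC; i++) {
            if (!tx_pause || !high[i])
                continue;               /* class not using Tx pause: nothing to check */
            if (!low[i] || low[i] >= high[i])
                return false;           /* would flood XOFF frames */
        }
        return true;
    }
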
index f12c40fb5537a18604ff030f4adc4287946dbff4..2ae5d4b8fc93e318bba749c6042affb8eb1616a7 100644 (file)
@@ -39,7 +39,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
-                                  u32 pba_num_size);
+                                 u32 pba_num_size);
 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
 enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
 enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
@@ -61,16 +61,16 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
                                    u16 words, u16 *data);
 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
-                                       u16 *data);
+                                      u16 *data);
 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                              u16 words, u16 *data);
 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
-                                           u16 *checksum_val);
+                                          u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
 
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-                          u32 enable_addr);
+                         u32 enable_addr);
 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
@@ -92,13 +92,13 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
-                           u32 vind, bool vlan_on);
+                          u32 vind, bool vlan_on);
 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
-                                 ixgbe_link_speed *speed,
-                                 bool *link_up, bool link_up_wait_to_complete);
+                                ixgbe_link_speed *speed,
+                                bool *link_up, bool link_up_wait_to_complete);
 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-                                 u16 *wwpn_prefix);
+                                u16 *wwpn_prefix);
 
 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
        return unlikely(!addr);
 }
 
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
 static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
 {
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
 }
 #define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
 
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
-       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
-       u32 value;
-
-       if (ixgbe_removed(reg_addr))
-               return IXGBE_FAILED_READ_REG;
-       value = readl(reg_addr + reg);
-       if (unlikely(value == IXGBE_FAILED_READ_REG))
-               ixgbe_check_remove(hw, reg);
-       return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
 #define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
 
 #define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
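
Annotation on the header change above: ixgbe_read_reg() is moved out of line so every MMIO read can route a suspicious all-ones result through the surprise-removal check (ixgbe_check_remove(), now static in ixgbe_main.c further below). A minimal sketch of that read pattern follows; fake_hw, fake_readl() and check_remove() are hypothetical stand-ins for the hw struct, readl() and the driver hook, and only the all-ones convention (IXGBE_FAILED_READ_REG) comes from the diff.

    /* Illustrative sketch only: register read with removal detection. */
    #include <stdint.h>
    #include <stddef.h>

    #define FAILED_READ_REG 0xFFFFFFFFu   /* as IXGBE_FAILED_READ_REG */

    struct fake_hw {
        volatile uint32_t *hw_addr;       /* NULL once the device is gone */
    };

    static uint32_t fake_readl(volatile uint32_t *base, uint32_t reg)
    {
        return base[reg / 4];             /* stand-in for readl(base + reg) */
    }

    static void check_remove(struct fake_hw *hw, uint32_t reg)
    {
        /* a real driver would re-read a status register here and, if that
         * also reads all ones, tear the adapter down */
        (void)hw;
        (void)reg;
    }

    static uint32_t read_reg(struct fake_hw *hw, uint32_t reg)
    {
        volatile uint32_t *base = hw->hw_addr;
        uint32_t value;

        if (!base)                        /* adapter already removed */
            return FAILED_READ_REG;
        value = fake_readl(base, reg);
        if (value == FAILED_READ_REG)     /* all ones: device may be gone */
            check_remove(hw, reg);
        return value;
    }
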
index e055e000131bdcf0c32280270361c957d0a4d8a0..a689ee0d4bedc2d837d0765b62ab2dd0ffe1d120 100644 (file)
@@ -267,7 +267,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
  * Configure dcb settings and enable dcb mode.
  */
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
-                        struct ixgbe_dcb_config *dcb_config)
+                       struct ixgbe_dcb_config *dcb_config)
 {
        s32 ret = 0;
        u8 pfc_en;
@@ -389,7 +389,6 @@ static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
        for (i = 0; i < MAX_USER_PRIORITY; i++)
                map[i] = IXGBE_RTRUP2TC_UP_MASK &
                        (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
-       return;
 }
 
 void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
index 7a77f37a7cbcbd6b7b5b87dd78e9e50b677e778c..d3ba63f9ad3712fcf82d1bafc3408070112d1aa0 100644 (file)
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
 
-       fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
                        continue;
                }
 
+               fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
index bdb99b3b0f30f4ee2253e81bc72e84fb6a0d1e30..3b932fe64ab66c916f86f4184f45d626cc687cb1 100644 (file)
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
                        max_tc = prio_tc[i];
        }
 
-       fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 
        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 
                if (enabled) {
                        reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+                       fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
                } else {
                        reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
index d5a1e3db0774a5580b8191126e682c33191f7fa6..90c370230e2001250752725ee7d10c0ed71de341 100644 (file)
 
 /* DCB register definitions */
 #define IXGBE_RTTDCS_TDPAC      0x00000001 /* 0 Round Robin,
-                                            * 1 WSP - Weighted Strict Priority
-                                            */
+                                           * 1 WSP - Weighted Strict Priority
+                                           */
 #define IXGBE_RTTDCS_VMPAC      0x00000002 /* 0 Round Robin,
-                                            * 1 WRR - Weighted Round Robin
-                                            */
+                                           * 1 WRR - Weighted Round Robin
+                                           */
 #define IXGBE_RTTDCS_TDRM       0x00000010 /* Transmit Recycle Mode */
 #define IXGBE_RTTDCS_ARBDIS     0x00000040 /* DCB arbiter disable */
 #define IXGBE_RTTDCS_BDPM       0x00400000 /* Bypass Data Pipe - must clear! */
 #define IXGBE_RTTDCS_BPBFSM     0x00800000 /* Bypass PB Free Space - must
-                                             * clear!
-                                             */
+                                            * clear!
+                                            */
 #define IXGBE_RTTDCS_SPEED_CHG  0x80000000 /* Link speed change */
 
 /* Receive UP2TC mapping */
 #define IXGBE_RTRPT4C_LSP       0x80000000 /* LSP enable bit */
 
 #define IXGBE_RDRXCTL_MPBEN     0x00000010 /* DMA config for multiple packet
-                                            * buffers enable
-                                            */
+                                           * buffers enable
+                                           */
 #define IXGBE_RDRXCTL_MCEN      0x00000040 /* DMA config for multiple cores
-                                            * (RSS) enable
-                                            */
+                                           * (RSS) enable
+                                           */
 
 /* RTRPCS Bit Masks */
 #define IXGBE_RTRPCS_RRM        0x00000002 /* Receive Recycle Mode enable */
@@ -81,8 +81,8 @@
 
 /* RTTPCS Bit Masks */
 #define IXGBE_RTTPCS_TPPAC      0x00000020 /* 0 Round Robin,
-                                            * 1 SP - Strict Priority
-                                            */
+                                           * 1 SP - Strict Priority
+                                           */
 #define IXGBE_RTTPCS_ARBDIS     0x00000040 /* Arbiter disable */
 #define IXGBE_RTTPCS_TPRM       0x00000100 /* Transmit Recycle Mode enable */
 #define IXGBE_RTTPCS_ARBD_SHIFT 22
index edd89a1ef27f67f0f40d0cefba9ebed3e39f3185..5172b6b12c097679b9f9b532869b500ea6cb4124 100644 (file)
@@ -192,8 +192,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
-                                         u8 prio, u8 bwg_id, u8 bw_pct,
-                                         u8 up_map)
+                                        u8 prio, u8 bwg_id, u8 bw_pct,
+                                        u8 up_map)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
-                                          u8 bw_pct)
+                                         u8 bw_pct)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -218,8 +218,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
-                                         u8 prio, u8 bwg_id, u8 bw_pct,
-                                         u8 up_map)
+                                        u8 prio, u8 bwg_id, u8 bw_pct,
+                                        u8 up_map)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -236,7 +236,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
-                                          u8 bw_pct)
+                                         u8 bw_pct)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -244,8 +244,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
-                                         u8 *prio, u8 *bwg_id, u8 *bw_pct,
-                                         u8 *up_map)
+                                        u8 *prio, u8 *bwg_id, u8 *bw_pct,
+                                        u8 *up_map)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -256,7 +256,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
-                                          u8 *bw_pct)
+                                         u8 *bw_pct)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -264,8 +264,8 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
-                                         u8 *prio, u8 *bwg_id, u8 *bw_pct,
-                                         u8 *up_map)
+                                        u8 *prio, u8 *bwg_id, u8 *bw_pct,
+                                        u8 *up_map)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -276,7 +276,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 }
 
 static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
-                                          u8 *bw_pct)
+                                         u8 *bw_pct)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -284,7 +284,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
 }
 
 static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
-                                    u8 setting)
+                                   u8 setting)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 }
 
 static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
-                                    u8 *setting)
+                                   u8 *setting)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
index 472b0f450bf90d4dc23fe5995d28d0d80c2d940e..5e2c1e35e517f915d2e6f43a5e7c1368d2f81bfe 100644 (file)
@@ -253,8 +253,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
  **/
 void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
 {
-       if (adapter->ixgbe_dbg_adapter)
-               debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+       debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
        adapter->ixgbe_dbg_adapter = NULL;
 }
 
index 6c55c14d082aa6285f43941e5d96ae86acdbb21b..cc70de2598298691c5e7d375d2a89bdc6885871a 100644 (file)
@@ -141,8 +141,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
                         sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
                        / sizeof(u64))
 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
-                         IXGBE_PB_STATS_LEN + \
-                         IXGBE_QUEUE_STATS_LEN)
+                        IXGBE_PB_STATS_LEN + \
+                        IXGBE_QUEUE_STATS_LEN)
 
 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test  (offline)", "Eeprom test    (offline)",
@@ -152,7 +152,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
 
 static int ixgbe_get_settings(struct net_device *netdev,
-                              struct ethtool_cmd *ecmd)
+                             struct ethtool_cmd *ecmd)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -311,7 +311,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
 }
 
 static int ixgbe_set_settings(struct net_device *netdev,
-                              struct ethtool_cmd *ecmd)
+                             struct ethtool_cmd *ecmd)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -368,7 +368,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
 }
 
 static void ixgbe_get_pauseparam(struct net_device *netdev,
-                                 struct ethtool_pauseparam *pause)
+                                struct ethtool_pauseparam *pause)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -390,7 +390,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
 }
 
 static int ixgbe_set_pauseparam(struct net_device *netdev,
-                                struct ethtool_pauseparam *pause)
+                               struct ethtool_pauseparam *pause)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -450,7 +450,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
 
 static void ixgbe_get_regs(struct net_device *netdev,
-                           struct ethtool_regs *regs, void *p)
+                          struct ethtool_regs *regs, void *p)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -812,7 +812,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
 }
 
 static int ixgbe_get_eeprom(struct net_device *netdev,
-                            struct ethtool_eeprom *eeprom, u8 *bytes)
+                           struct ethtool_eeprom *eeprom, u8 *bytes)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -918,7 +918,7 @@ err:
 }
 
 static void ixgbe_get_drvinfo(struct net_device *netdev,
-                              struct ethtool_drvinfo *drvinfo)
+                             struct ethtool_drvinfo *drvinfo)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        u32 nvm_track_id;
@@ -940,7 +940,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 }
 
 static void ixgbe_get_ringparam(struct net_device *netdev,
-                                struct ethtool_ringparam *ring)
+                               struct ethtool_ringparam *ring)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -953,7 +953,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
 }
 
 static int ixgbe_set_ringparam(struct net_device *netdev,
-                               struct ethtool_ringparam *ring)
+                              struct ethtool_ringparam *ring)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *temp_ring;
@@ -1082,7 +1082,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
 }
 
 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
-                                    struct ethtool_stats *stats, u64 *data)
+                                   struct ethtool_stats *stats, u64 *data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct rtnl_link_stats64 temp;
@@ -1110,7 +1110,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                }
 
                data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
-                          sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+                          sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
        for (j = 0; j < netdev->num_tx_queues; j++) {
                ring = adapter->tx_ring[j];
@@ -1180,7 +1180,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 }
 
 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
-                              u8 *data)
+                             u8 *data)
 {
        char *p = (char *)data;
        int i;
@@ -1357,8 +1357,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
                ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
                val = ixgbe_read_reg(&adapter->hw, reg);
                if (val != (test_pattern[pat] & write & mask)) {
-                       e_err(drv, "pattern test reg %04X failed: got "
-                             "0x%08X expected 0x%08X\n",
+                       e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
                              reg, val, (test_pattern[pat] & write & mask));
                        *data = reg;
                        ixgbe_write_reg(&adapter->hw, reg, before);
@@ -1382,8 +1381,8 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
        ixgbe_write_reg(&adapter->hw, reg, write & mask);
        val = ixgbe_read_reg(&adapter->hw, reg);
        if ((write & mask) != (val & mask)) {
-               e_err(drv, "set/check reg %04X test failed: got 0x%08X "
-                     "expected 0x%08X\n", reg, (val & mask), (write & mask));
+               e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+                     reg, (val & mask), (write & mask));
                *data = reg;
                ixgbe_write_reg(&adapter->hw, reg, before);
                return true;
@@ -1430,8 +1429,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
        ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
        after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
        if (value != after) {
-               e_err(drv, "failed STATUS register test got: 0x%08X "
-                     "expected: 0x%08X\n", after, value);
+               e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+                     after, value);
                *data = 1;
                return 1;
        }
@@ -1533,10 +1532,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
                        return -1;
                }
        } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
-                               netdev->name, netdev)) {
+                               netdev->name, netdev)) {
                shared_int = false;
        } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
-                              netdev->name, netdev)) {
+                              netdev->name, netdev)) {
                *data = 1;
                return -1;
        }
@@ -1563,9 +1562,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
                         */
                        adapter->test_icr = 0;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
-                                       ~mask & 0x00007FFF);
+                                       ~mask & 0x00007FFF);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
-                                       ~mask & 0x00007FFF);
+                                       ~mask & 0x00007FFF);
                        IXGBE_WRITE_FLUSH(&adapter->hw);
                        usleep_range(10000, 20000);
 
@@ -1587,7 +1586,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
                IXGBE_WRITE_FLUSH(&adapter->hw);
                usleep_range(10000, 20000);
 
-               if (!(adapter->test_icr &mask)) {
+               if (!(adapter->test_icr & mask)) {
                        *data = 4;
                        break;
                }
@@ -1602,9 +1601,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
                         */
                        adapter->test_icr = 0;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
-                                       ~mask & 0x00007FFF);
+                                       ~mask & 0x00007FFF);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
-                                       ~mask & 0x00007FFF);
+                                       ~mask & 0x00007FFF);
                        IXGBE_WRITE_FLUSH(&adapter->hw);
                        usleep_range(10000, 20000);
 
@@ -1964,7 +1963,7 @@ out:
 }
 
 static void ixgbe_diag_test(struct net_device *netdev,
-                            struct ethtool_test *eth_test, u64 *data)
+                           struct ethtool_test *eth_test, u64 *data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        bool if_running = netif_running(netdev);
@@ -1987,10 +1986,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
                        int i;
                        for (i = 0; i < adapter->num_vfs; i++) {
                                if (adapter->vfinfo[i].clear_to_send) {
-                                       netdev_warn(netdev, "%s",
-                                                   "offline diagnostic is not "
-                                                   "supported when VFs are "
-                                                   "present\n");
+                                       netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
                                        data[0] = 1;
                                        data[1] = 1;
                                        data[2] = 1;
@@ -2037,8 +2033,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
                 * loopback diagnostic. */
                if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
                                      IXGBE_FLAG_VMDQ_ENABLED)) {
-                       e_info(hw, "Skip MAC loopback diagnostic in VT "
-                              "mode\n");
+                       e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
                        data[3] = 0;
                        goto skip_loopback;
                }
@@ -2078,7 +2073,7 @@ skip_ol_tests:
 }
 
 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
-                               struct ethtool_wolinfo *wol)
+                              struct ethtool_wolinfo *wol)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        int retval = 0;
@@ -2094,12 +2089,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_get_wol(struct net_device *netdev,
-                          struct ethtool_wolinfo *wol)
+                         struct ethtool_wolinfo *wol)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        wol->supported = WAKE_UCAST | WAKE_MCAST |
-                        WAKE_BCAST | WAKE_MAGIC;
+                        WAKE_BCAST | WAKE_MAGIC;
        wol->wolopts = 0;
 
        if (ixgbe_wol_exclusion(adapter, wol) ||
@@ -2181,7 +2176,7 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
 }
 
 static int ixgbe_get_coalesce(struct net_device *netdev,
-                              struct ethtool_coalesce *ec)
+                             struct ethtool_coalesce *ec)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -2222,8 +2217,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
            adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
                if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
                        adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
-                       e_info(probe, "rx-usecs value high enough "
-                                     "to re-enable RSC\n");
+                       e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
                        return true;
                }
        /* if interrupt rate is too high then disable RSC */
@@ -2236,7 +2230,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
 }
 
 static int ixgbe_set_coalesce(struct net_device *netdev,
-                              struct ethtool_coalesce *ec)
+                             struct ethtool_coalesce *ec)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_q_vector *q_vector;
@@ -2421,9 +2415,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* fallthrough */
        case UDP_V4_FLOW:
                if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* fallthrough */
        case SCTP_V4_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
@@ -2433,9 +2429,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
                break;
        case TCP_V6_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* fallthrough */
        case UDP_V6_FLOW:
                if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* fallthrough */
        case SCTP_V6_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
@@ -2787,8 +2785,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
 
                if ((flags2 & UDP_RSS_FLAGS) &&
                    !(adapter->flags2 & UDP_RSS_FLAGS))
-                       e_warn(drv, "enabling UDP RSS: fragmented packets"
-                              " may arrive out of order to the stack above\n");
+                       e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
 
                adapter->flags2 = flags2;
 
@@ -3099,5 +3096,5 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+       netdev->ethtool_ops = &ixgbe_ethtool_ops;
 }
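The /* fallthrough */ comments added to ixgbe_get_rss_hash_opts above mark intentional case fall-through: each flow type first ORs in its own hash fields and then drops into the more general cases beneath it. A minimal standalone C sketch of the same flag-accumulation pattern (the flow and flag names are hypothetical, not driver code):

/* fallthrough_flags.c - deliberate switch fall-through accumulating flags */
#include <stdio.h>

#define HASH_L3        0x1   /* hypothetical: hash on IP addresses */
#define HASH_L4_PORTS  0x2   /* hypothetical: hash on L4 port fields */

enum flow { FLOW_TCP4, FLOW_UDP4, FLOW_SCTP4 };

static unsigned int hash_fields(enum flow f, int udp_rss_enabled)
{
	unsigned int data = 0;

	switch (f) {
	case FLOW_TCP4:
		data |= HASH_L4_PORTS;
		/* fallthrough */
	case FLOW_UDP4:
		if (udp_rss_enabled)
			data |= HASH_L4_PORTS;
		/* fallthrough */
	case FLOW_SCTP4:
		data |= HASH_L3;        /* every case above also hashes on L3 */
		break;
	}
	return data;
}

int main(void)
{
	printf("tcp4:  0x%x\n", hash_fields(FLOW_TCP4, 0));     /* 0x3 */
	printf("udp4:  0x%x\n", hash_fields(FLOW_UDP4, 1));     /* 0x3 */
	printf("sctp4: 0x%x\n", hash_fields(FLOW_SCTP4, 0));    /* 0x1 */
	return 0;
}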
index b16cc786750dec8bb7bf29749db331a31f028ec9..0772b7730fce92de4e2ff54d44f6528397c2b3a9 100644 (file)
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
        void *extra_ddp_buffer;
        dma_addr_t extra_ddp_buffer_dma;
        unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
        u8 up;
-#endif
 };
 
 #endif /* _IXGBE_FCOE_H */
index 2067d392cc3d33850254e9a2d719a2db6491b6f5..2d9451e3968624db0b527dd00e9a106adfae16be 100644 (file)
@@ -1113,8 +1113,8 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
        err = pci_enable_msi(adapter->pdev);
        if (err) {
                netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
-                            "Unable to allocate MSI interrupt, "
-                            "falling back to legacy.  Error: %d\n", err);
+                            "Unable to allocate MSI interrupt, falling back to legacy.  Error: %d\n",
+                            err);
                return;
        }
        adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
index c4c526b7f99f48e2fe4eaccfe070273bd4078c98..ea11e2c8ee8cc5ce72792156013e1a0929b5b227 100644 (file)
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
                ixgbe_service_event_schedule(adapter);
 }
 
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
 {
        u32 value;
 
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
                ixgbe_remove_adapter(hw);
 }
 
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Returns: value read, or IXGBE_FAILED_READ_REG if the device was removed
+ *
+ * This function is used to read device registers. It checks for device
+ * removal by confirming that a read returning all ones is genuine: the
+ * status register, which should never read as all ones, is checked as well.
+ * It also avoids reading from the hardware if a removal was previously
+ * detected, in which case it returns IXGBE_FAILED_READ_REG (all ones).
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+       u32 value;
+
+       if (ixgbe_removed(reg_addr))
+               return IXGBE_FAILED_READ_REG;
+       value = readl(reg_addr + reg);
+       if (unlikely(value == IXGBE_FAILED_READ_REG))
+               ixgbe_check_remove(hw, reg);
+       return value;
+}
+
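The new ixgbe_read_reg above centralizes a common pattern for PCIe surprise removal: a read from a removed device returns all ones, so an all-ones value is only believed after re-reading a status register that should never legitimately be all ones, and once removal has been detected no further hardware reads are attempted. A self-contained sketch of that idiom with the hardware simulated by a plain array (names are made up; this is not the driver's MMIO path):

/* removal_check.c - trust an all-ones read only after confirming it */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define FAILED_READ_REG 0xFFFFFFFFu
#define REG_STATUS      0              /* hypothetical status register */

struct fake_hw {
	uint32_t regs[16];
	bool present;   /* simulated physical presence of the device */
	bool removed;   /* software flag: removal has been detected  */
};

static uint32_t hw_readl(const struct fake_hw *hw, unsigned int reg)
{
	/* a read from a removed PCIe device returns all ones */
	return hw->present ? hw->regs[reg] : FAILED_READ_REG;
}

static uint32_t read_reg(struct fake_hw *hw, unsigned int reg)
{
	uint32_t value;

	if (hw->removed)
		return FAILED_READ_REG;         /* skip I/O once removed */

	value = hw_readl(hw, reg);
	if (value == FAILED_READ_REG && reg != REG_STATUS &&
	    hw_readl(hw, REG_STATUS) == FAILED_READ_REG)
		hw->removed = true;             /* confirmed: device is gone */
	return value;
}

int main(void)
{
	struct fake_hw hw = { .regs = { 0x10, 0xFFFFFFFFu, 0x42 },
			      .present = true };
	uint32_t value;

	printf("reg1 = 0x%x (legitimate all-ones data)\n",
	       (unsigned)read_reg(&hw, 1));
	printf("reg2 = 0x%x\n", (unsigned)read_reg(&hw, 2));

	hw.present = false;                     /* simulate surprise removal */
	value = read_reg(&hw, 2);
	printf("after removal: reg2 = 0x%x, removed = %d\n",
	       (unsigned)value, hw.removed);
	return 0;
}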
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
 {
        u16 value;
@@ -1664,7 +1690,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 
        ixgbe_rx_checksum(rx_ring, rx_desc, skb);
 
-       ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+       if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+               ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
 
        if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -3741,35 +3768,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
        return 0;
 }
 
-/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 vlnctrl;
-
-       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
-       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 vlnctrl;
-
-       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlnctrl |= IXGBE_VLNCTRL_VFE;
-       vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
 /**
  * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
  * @adapter: driver data
@@ -3848,6 +3846,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
                ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
 
+/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *                0 on no addresses written
+ *                X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (!netif_running(netdev))
+               return 0;
+
+       if (hw->mac.ops.update_mc_addr_list)
+               hw->mac.ops.update_mc_addr_list(hw, netdev);
+       else
+               return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+       ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+       return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+                       hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+                                           adapter->mac_table[i].queue,
+                                           IXGBE_RAH_AV);
+               else
+                       hw->mac.ops.clear_rar(hw, i);
+
+               adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+       }
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+                       if (adapter->mac_table[i].state &
+                           IXGBE_MAC_STATE_IN_USE)
+                               hw->mac.ops.set_rar(hw, i,
+                                               adapter->mac_table[i].addr,
+                                               adapter->mac_table[i].queue,
+                                               IXGBE_RAH_AV);
+                       else
+                               hw->mac.ops.clear_rar(hw, i);
+
+                       adapter->mac_table[i].state &=
+                                               ~(IXGBE_MAC_STATE_MODIFIED);
+               }
+       }
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+       int i;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+               adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+               memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+               adapter->mac_table[i].queue = 0;
+       }
+       ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i, count = 0;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state == 0)
+                       count++;
+       }
+       return count;
+}
+
+/* this function overwrites the first RAR entry with the default MAC filter */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+                                        u8 *addr)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+       adapter->mac_table[0].queue = VMDQ_P(0);
+       adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+                                      IXGBE_MAC_STATE_IN_USE);
+       hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+                           adapter->mac_table[0].queue,
+                           IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+
+       if (is_zero_ether_addr(addr))
+               return -EINVAL;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+                       continue;
+               adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+                                               IXGBE_MAC_STATE_IN_USE);
+               ether_addr_copy(adapter->mac_table[i].addr, addr);
+               adapter->mac_table[i].queue = queue;
+               ixgbe_sync_mac_table(adapter);
+               return i;
+       }
+       return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+       /* search the table for addr; if found, clear the entry and sync */
+       int i;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (is_zero_ether_addr(addr))
+               return -EINVAL;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+                   adapter->mac_table[i].queue == queue) {
+                       adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+                       adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+                       memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+                       adapter->mac_table[i].queue = 0;
+                       ixgbe_sync_mac_table(adapter);
+                       return 0;
+               }
+       }
+       return -ENOMEM;
+}
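The mac_table helpers added above replace scattered direct RAR writes with a software table: add/del only update entries and mark them MODIFIED, and a sync pass pushes every modified entry (set or clear) to the hardware in one sweep. A compact standalone sketch of that bookkeeping, with the hardware writes replaced by printf (types and sizes are illustrative, not the driver's):

/* mac_table.c - software MAC filter table with IN_USE/MODIFIED state bits */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_ENTRIES    4
#define MAC_LEN        6
#define STATE_MODIFIED 0x1
#define STATE_IN_USE   0x2

struct mac_entry {
	uint8_t  addr[MAC_LEN];
	uint16_t queue;
	uint8_t  state;
};

static struct mac_entry table[NUM_ENTRIES];

static void sync_table(void)
{
	for (int i = 0; i < NUM_ENTRIES; i++) {
		if (!(table[i].state & STATE_MODIFIED))
			continue;
		if (table[i].state & STATE_IN_USE)
			printf("set RAR[%d]: %02x:...:%02x queue %u\n", i,
			       table[i].addr[0], table[i].addr[MAC_LEN - 1],
			       table[i].queue);
		else
			printf("clear RAR[%d]\n", i);
		table[i].state &= ~STATE_MODIFIED;
	}
}

static int add_filter(const uint8_t *addr, uint16_t queue)
{
	for (int i = 0; i < NUM_ENTRIES; i++) {
		if (table[i].state & STATE_IN_USE)
			continue;
		memcpy(table[i].addr, addr, MAC_LEN);
		table[i].queue = queue;
		table[i].state = STATE_IN_USE | STATE_MODIFIED;
		sync_table();
		return i;                       /* RAR index used */
	}
	return -1;                              /* table full */
}

static int del_filter(const uint8_t *addr, uint16_t queue)
{
	for (int i = 0; i < NUM_ENTRIES; i++) {
		if ((table[i].state & STATE_IN_USE) &&
		    !memcmp(table[i].addr, addr, MAC_LEN) &&
		    table[i].queue == queue) {
			memset(&table[i], 0, sizeof(table[i]));
			table[i].state = STATE_MODIFIED; /* IN_USE now clear */
			sync_table();
			return 0;
		}
	}
	return -1;                              /* not found */
}

int main(void)
{
	uint8_t mac[MAC_LEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	add_filter(mac, 0);
	del_filter(mac, 0);
	return 0;
}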
 /**
  * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
  * @netdev: network interface device structure
@@ -3857,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
  *                0 on no addresses written
  *                X on writing X addresses to the RAR table
  **/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       unsigned int rar_entries = hw->mac.num_rar_entries - 1;
        int count = 0;
 
-       /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
-       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-               rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
-
        /* return ENOMEM indicating insufficient memory for addresses */
-       if (netdev_uc_count(netdev) > rar_entries)
+       if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
                return -ENOMEM;
 
        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;
-               /* return error if we do not support writing to RAR table */
-               if (!hw->mac.ops.set_rar)
-                       return -ENOMEM;
-
                netdev_for_each_uc_addr(ha, netdev) {
-                       if (!rar_entries)
-                               break;
-                       hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
-                                           VMDQ_P(0), IXGBE_RAH_AV);
+                       ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+                       ixgbe_add_mac_filter(adapter, ha->addr, vfn);
                        count++;
                }
        }
-       /* write the addresses in reverse order to avoid write combining */
-       for (; rar_entries > 0 ; rar_entries--)
-               hw->mac.ops.clear_rar(hw, rar_entries);
-
        return count;
 }
 
@@ -3907,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+       u32 vlnctrl;
        int count;
 
        /* Check for Promiscuous and All Multicast modes */
-
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
        /* set all bits that we expect to always be set */
        fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3921,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 
        /* clear the bits we are changing the status of */
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
+       vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
        if (netdev->flags & IFF_PROMISC) {
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-               vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+               vmolr |= IXGBE_VMOLR_MPE;
                /* Only disable hardware filter vlans in promiscuous mode
                 * if SR-IOV and VMDQ are disabled - otherwise ensure
                 * that hardware VLAN filters remain enabled.
                 */
                if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
                                        IXGBE_FLAG_SRIOV_ENABLED)))
-                       ixgbe_vlan_filter_disable(adapter);
-               else
-                       ixgbe_vlan_filter_enable(adapter);
+                       vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
                }
-               ixgbe_vlan_filter_enable(adapter);
+               vlnctrl |= IXGBE_VLNCTRL_VFE;
                hw->addr_ctrl.user_set_promisc = false;
        }
 
@@ -3949,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
         * sufficient space to store all the addresses then enable
         * unicast promiscuous mode
         */
-       count = ixgbe_write_uc_addr_list(netdev);
+       count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
        if (count < 0) {
                fctrl |= IXGBE_FCTRL_UPE;
                vmolr |= IXGBE_VMOLR_ROPE;
@@ -3959,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
         * then we should just turn on promiscuous mode so
         * that we can at least receive multicast traffic
         */
-       hw->mac.ops.update_mc_addr_list(hw, netdev);
-       vmolr |= IXGBE_VMOLR_ROMPE;
-
-       if (adapter->num_vfs)
-               ixgbe_restore_vf_multicasts(adapter);
+       count = ixgbe_write_mc_addr_list(netdev);
+       if (count < 0) {
+               fctrl |= IXGBE_FCTRL_MPE;
+               vmolr |= IXGBE_VMOLR_MPE;
+       } else if (count) {
+               vmolr |= IXGBE_VMOLR_ROMPE;
+       }
 
        if (hw->mac.type != ixgbe_mac_82598EB) {
                vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3984,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                /* NOTE:  VLAN filtering is disabled by setting PROMISC */
        }
 
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -4100,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
            (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
            (pb == ixgbe_fcoe_get_tc(adapter)))
                tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-
 #endif
+
        /* Calculate delay value for device */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
@@ -4142,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
  * @adapter: board private structure to calculate for
  * @pb: packet buffer to calculate
  */
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *dev = adapter->netdev;
@@ -4152,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
        /* Calculate max LAN frame size */
        tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
+#ifdef IXGBE_FCOE
+       /* FCoE traffic class uses FCOE jumbo frames */
+       if ((dev->features & NETIF_F_FCOE_MTU) &&
+           (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+           (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+               tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif
+
        /* Calculate delay value for device */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
@@ -4178,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
        if (!num_tc)
                num_tc = 1;
 
-       hw->fc.low_water = ixgbe_lpbthresh(adapter);
-
        for (i = 0; i < num_tc; i++) {
                hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+               hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
 
                /* Low water marks must not be larger than high water marks */
-               if (hw->fc.low_water > hw->fc.high_water[i])
-                       hw->fc.low_water = 0;
+               if (hw->fc.low_water[i] > hw->fc.high_water[i])
+                       hw->fc.low_water[i] = 0;
        }
+
+       for (; i < MAX_TRAFFIC_CLASS; i++)
+               hw->fc.high_water[i] = 0;
 }
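The flow-control change above turns the low water mark into a per-traffic-class array computed alongside the high water marks, enforces per class that low may never exceed high (it is zeroed otherwise), and clears the high water marks of unused classes. A tiny standalone sketch of that invariant with made-up threshold values:

/* watermarks.c - per-traffic-class watermarks with the low<=high invariant */
#include <stdio.h>

#define MAX_TC 8

int main(void)
{
	unsigned int high[MAX_TC] = { 0 }, low[MAX_TC] = { 0 };
	unsigned int num_tc = 4, i;

	for (i = 0; i < num_tc; i++) {
		high[i] = 1000 + 100 * i;          /* stand-in for hpbthresh() */
		low[i]  = (i == 2) ? 2000 : 400;   /* stand-in for lpbthresh() */

		/* low water marks must not be larger than high water marks */
		if (low[i] > high[i])
			low[i] = 0;
	}
	for (; i < MAX_TC; i++)                    /* clear unused classes */
		high[i] = 0;

	for (i = 0; i < MAX_TC; i++)
		printf("tc%u: high=%u low=%u\n", i, high[i], low[i]);
	return 0;
}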
 
 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4248,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
                vmolr |= IXGBE_VMOLR_ROMPE;
                hw->mac.ops.update_mc_addr_list(hw, dev);
        }
-       ixgbe_write_uc_addr_list(adapter->netdev);
+       ixgbe_write_uc_addr_list(adapter->netdev, pool);
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
 }
 
-static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
-                                u8 *addr, u16 pool)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       unsigned int entry;
-
-       entry = hw->mac.num_rar_entries - pool;
-       hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
-}
-
 static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
 {
        struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4741,7 +4877,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       struct net_device *netdev = adapter->netdev;
        int err;
+       u8 old_addr[ETH_ALEN];
 
        if (ixgbe_removed(hw->hw_addr))
                return;
@@ -4777,9 +4915,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        }
 
        clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
-       /* reprogram the RAR[0] in case user changed it. */
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+       /* do not flush user set addresses */
+       memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+       ixgbe_flush_sw_mac_table(adapter);
+       ixgbe_mac_set_default_filter(adapter, old_addr);
 
        /* update SAN MAC vmdq pool selection */
        if (hw->mac.san_mac_rar_index)
@@ -5025,6 +5164,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #endif /* CONFIG_IXGBE_DCB */
 #endif /* IXGBE_FCOE */
 
+       adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+                                    hw->mac.num_rar_entries,
+                                    GFP_ATOMIC);
+
        /* Set MAC specific capability flags and exceptions */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
@@ -5516,6 +5659,17 @@ err_setup_tx:
        return err;
 }
 
+static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
+{
+       ixgbe_ptp_suspend(adapter);
+
+       ixgbe_down(adapter);
+       ixgbe_free_irq(adapter);
+
+       ixgbe_free_all_tx_resources(adapter);
+       ixgbe_free_all_rx_resources(adapter);
+}
+
 /**
  * ixgbe_close - Disables a network interface
  * @netdev: network interface device structure
@@ -5533,14 +5687,10 @@ static int ixgbe_close(struct net_device *netdev)
 
        ixgbe_ptp_stop(adapter);
 
-       ixgbe_down(adapter);
-       ixgbe_free_irq(adapter);
+       ixgbe_close_suspend(adapter);
 
        ixgbe_fdir_filter_exit(adapter);
 
-       ixgbe_free_all_tx_resources(adapter);
-       ixgbe_free_all_rx_resources(adapter);
-
        ixgbe_release_hw_control(adapter);
 
        return 0;
@@ -5607,12 +5757,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
        netif_device_detach(netdev);
 
        rtnl_lock();
-       if (netif_running(netdev)) {
-               ixgbe_down(adapter);
-               ixgbe_free_irq(adapter);
-               ixgbe_free_all_tx_resources(adapter);
-               ixgbe_free_all_rx_resources(adapter);
-       }
+       if (netif_running(netdev))
+               ixgbe_close_suspend(adapter);
        rtnl_unlock();
 
        ixgbe_clear_interrupt_scheme(adapter);
@@ -5944,7 +6090,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        set_bit(__IXGBE_TX_FDIR_INIT_DONE,
-                               &(adapter->tx_ring[i]->state));
+                               &(adapter->tx_ring[i]->state));
                /* re-enable flow director interrupts */
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
        } else {
@@ -7171,16 +7317,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;
+       int ret;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
+       ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
-
-       return 0;
+       ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
+       return ret > 0 ? 0 : ret;
 }
 
 static int
@@ -7782,7 +7929,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_do_ioctl           = ixgbe_ioctl,
        .ndo_set_vf_mac         = ixgbe_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = ixgbe_ndo_set_vf_vlan,
-       .ndo_set_vf_tx_rate     = ixgbe_ndo_set_vf_bw,
+       .ndo_set_vf_rate        = ixgbe_ndo_set_vf_bw,
        .ndo_set_vf_spoofchk    = ixgbe_ndo_set_vf_spoofchk,
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
        .ndo_get_stats64        = ixgbe_get_stats64,
@@ -8186,6 +8333,8 @@ skip_sriov:
                goto err_sw_init;
        }
 
+       ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
        setup_timer(&adapter->service_timer, &ixgbe_service_timer,
                    (unsigned long) adapter);
 
@@ -8241,7 +8390,7 @@ skip_sriov:
        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
                e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
                           hw->mac.type, hw->phy.type, hw->phy.sfp_type,
-                          part_str);
+                          part_str);
        else
                e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
                           hw->mac.type, hw->phy.type, part_str);
@@ -8303,8 +8452,8 @@ skip_sriov:
 
        ixgbe_dbg_adapter_init(adapter);
 
-       /* Need link setup for MNG FW, else wait for IXGBE_UP */
-       if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
+       /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
+       if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
                hw->mac.ops.setup_link(hw,
                        IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
                        true);
@@ -8318,6 +8467,7 @@ err_sw_init:
        ixgbe_disable_sriov(adapter);
        adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
        iounmap(adapter->io_addr);
+       kfree(adapter->mac_table);
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
@@ -8391,6 +8541,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
 
        e_dev_info("complete\n");
 
+       kfree(adapter->mac_table);
        free_netdev(netdev);
 
        pci_disable_pcie_error_reporting(pdev);
index f5c6af2b891bd3dc797e7c4f5640b006eac78810..1918e0abf734a0d0e85f9f9df5567cc24e8cfc21 100644 (file)
@@ -223,7 +223,7 @@ out:
  *  received an ack to that message within delay * timeout period
  **/
 static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
-                           u16 mbx_id)
+                          u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        s32 ret_val = IXGBE_ERR_MBX;
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
        u32 vf_bit = vf_number % 16;
 
        if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
-                                   index)) {
+                                   index)) {
                ret_val = 0;
                hw->mbx.stats.reqs++;
        }
@@ -291,7 +291,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
        u32 vf_bit = vf_number % 16;
 
        if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
-                                   index)) {
+                                   index)) {
                ret_val = 0;
                hw->mbx.stats.acks++;
        }
@@ -366,7 +366,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
  *  returns SUCCESS if it successfully copied message into the buffer
  **/
 static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
-                              u16 vf_number)
+                             u16 vf_number)
 {
        s32 ret_val;
        u16 i;
@@ -407,7 +407,7 @@ out_no_write:
  *  a message due to a VF request so no polling for message is needed.
  **/
 static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
-                             u16 vf_number)
+                            u16 vf_number)
 {
        s32 ret_val;
        u16 i;
index a9b9ad69ed0ec315d22d38c92eb1885c2900c9c3..a5cb755de3a994a34f68e0c9142414a8da52cc68 100644 (file)
  * Message ACKs are the value or'd with 0xF0000000
  */
 #define IXGBE_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
-                                               * this are the ACK */
+                                              * this are the ACK */
 #define IXGBE_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
-                                               * this are the NACK */
+                                              * this are the NACK */
 #define IXGBE_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
-                                                 clear to send requests */
+                                                clear to send requests */
 #define IXGBE_VT_MSGINFO_SHIFT    16
 /* bits 23:16 are used for extra info for certain messages */
 #define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
index 23f765263f12479822654a9af55263a8db233bd7..ff68b7a9deff15af054ec66238557fa47d3c1480 100644 (file)
@@ -67,7 +67,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
                        if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
                                ixgbe_get_phy_id(hw);
                                hw->phy.type =
-                                       ixgbe_get_phy_type_from_id(hw->phy.id);
+                                       ixgbe_get_phy_type_from_id(hw->phy.id);
 
                                if (hw->phy.type == ixgbe_phy_unknown) {
                                        hw->phy.ops.read_reg(hw,
@@ -136,12 +136,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
        u16 phy_id_low = 0;
 
        status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
-                                     &phy_id_high);
+                                     &phy_id_high);
 
        if (status == 0) {
                hw->phy.id = (u32)(phy_id_high << 16);
                status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
-                                             &phy_id_low);
+                                             &phy_id_low);
                hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
                hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
        }
@@ -318,7 +318,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
  *  @phy_data: Pointer to read data from PHY register
  **/
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-                               u32 device_type, u16 *phy_data)
+                              u32 device_type, u16 *phy_data)
 {
        s32 status;
        u16 gssr;
@@ -421,7 +421,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
  *  @phy_data: Data to write to the PHY register
  **/
 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-                                u32 device_type, u16 phy_data)
+                               u32 device_type, u16 phy_data)
 {
        s32 status;
        u16 gssr;
@@ -536,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 
        if (time_out == max_time_out) {
                status = IXGBE_ERR_LINK_SETUP;
-               hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+               hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
        }
 
        return status;
@@ -548,8 +548,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
  *  @speed: new link speed
  **/
 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
-                                       ixgbe_link_speed speed,
-                                       bool autoneg_wait_to_complete)
+                                      ixgbe_link_speed speed,
+                                      bool autoneg_wait_to_complete)
 {
 
        /*
@@ -582,8 +582,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
  * Determines the link capabilities by reading the AUTOC register.
  */
 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed *speed,
-                                               bool *autoneg)
+                                              ixgbe_link_speed *speed,
+                                              bool *autoneg)
 {
        s32 status = IXGBE_ERR_LINK_SETUP;
        u16 speed_ability;
@@ -592,7 +592,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
        *autoneg = true;
 
        status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-                                     &speed_ability);
+                                     &speed_ability);
 
        if (status == 0) {
                if (speed_ability & MDIO_SPEED_10G)
@@ -745,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
        if (time_out == max_time_out) {
                status = IXGBE_ERR_LINK_SETUP;
-               hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+               hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
        }
 
        return status;
@@ -806,11 +806,11 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
 
        /* reset the PHY and poll for completion */
        hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
-                             (phy_data | MDIO_CTRL1_RESET));
+                             (phy_data | MDIO_CTRL1_RESET));
 
        for (i = 0; i < 100; i++) {
                hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
-                                    &phy_data);
+                                    &phy_data);
                if ((phy_data & MDIO_CTRL1_RESET) == 0)
                        break;
                usleep_range(10000, 20000);
@@ -824,7 +824,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
 
        /* Get init offsets */
        ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
-                                                     &data_offset);
+                                                     &data_offset);
        if (ret_val != 0)
                goto out;
 
@@ -838,7 +838,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
                if (ret_val)
                        goto err_eeprom;
                control = (eword & IXGBE_CONTROL_MASK_NL) >>
-                          IXGBE_CONTROL_SHIFT_NL;
+                          IXGBE_CONTROL_SHIFT_NL;
                edata = eword & IXGBE_DATA_MASK_NL;
                switch (control) {
                case IXGBE_DELAY_NL:
@@ -859,7 +859,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
                                if (ret_val)
                                        goto err_eeprom;
                                hw->phy.ops.write_reg(hw, phy_offset,
-                                                     MDIO_MMD_PMAPMD, eword);
+                                                     MDIO_MMD_PMAPMD, eword);
                                hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
                                       phy_offset);
                                data_offset++;
@@ -1010,10 +1010,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
-                                                    ixgbe_sfp_type_da_cu_core0;
+                                                    ixgbe_sfp_type_da_cu_core0;
                                else
                                        hw->phy.sfp_type =
-                                                    ixgbe_sfp_type_da_cu_core1;
+                                                    ixgbe_sfp_type_da_cu_core1;
                        } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
                                hw->phy.ops.read_i2c_eeprom(
                                                hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -1035,10 +1035,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                    IXGBE_SFF_10GBASELR_CAPABLE)) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core0;
+                                                     ixgbe_sfp_type_srlr_core0;
                                else
                                        hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core1;
+                                                     ixgbe_sfp_type_srlr_core1;
                        } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
@@ -1087,15 +1087,15 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                goto err_read_i2c_eeprom;
 
                        status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                   IXGBE_SFF_VENDOR_OUI_BYTE1,
-                                                   &oui_bytes[1]);
+                                                   IXGBE_SFF_VENDOR_OUI_BYTE1,
+                                                   &oui_bytes[1]);
 
                        if (status != 0)
                                goto err_read_i2c_eeprom;
 
                        status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                   IXGBE_SFF_VENDOR_OUI_BYTE2,
-                                                   &oui_bytes[2]);
+                                                   IXGBE_SFF_VENDOR_OUI_BYTE2,
+                                                   &oui_bytes[2]);
 
                        if (status != 0)
                                goto err_read_i2c_eeprom;
@@ -1175,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                status = 0;
                        } else {
                                if (hw->allow_unsupported_sfp) {
-                                       e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics.  Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter.  Intel Corporation is not responsible for any harm caused by using untested modules.");
+                                       e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics.  Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter.  Intel Corporation is not responsible for any harm caused by using untested modules.\n");
                                        status = 0;
                                } else {
                                        hw_dbg(hw,
@@ -1403,8 +1403,8 @@ err_read_i2c_eeprom:
  *  so it returns the offsets to the phy init sequence block.
  **/
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
-                                        u16 *list_offset,
-                                        u16 *data_offset)
+                                       u16 *list_offset,
+                                       u16 *data_offset)
 {
        u16 sfp_id;
        u16 sfp_type = hw->phy.sfp_type;
@@ -1493,11 +1493,11 @@ err_phy:
  *  Performs byte read operation to SFP module's EEPROM over I2C interface.
  **/
 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                  u8 *eeprom_data)
+                                 u8 *eeprom_data)
 {
        return hw->phy.ops.read_i2c_byte(hw, byte_offset,
-                                        IXGBE_I2C_EEPROM_DEV_ADDR,
-                                        eeprom_data);
+                                        IXGBE_I2C_EEPROM_DEV_ADDR,
+                                        eeprom_data);
 }
 
 /**
@@ -1525,11 +1525,11 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
  *  Performs byte write operation to SFP module's EEPROM over I2C interface.
  **/
 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                   u8 eeprom_data)
+                                  u8 eeprom_data)
 {
        return hw->phy.ops.write_i2c_byte(hw, byte_offset,
-                                         IXGBE_I2C_EEPROM_DEV_ADDR,
-                                         eeprom_data);
+                                         IXGBE_I2C_EEPROM_DEV_ADDR,
+                                         eeprom_data);
 }
 
 /**
@@ -1542,7 +1542,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
  *  a specified device address.
  **/
 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                u8 dev_addr, u8 *data)
+                               u8 dev_addr, u8 *data)
 {
        s32 status = 0;
        u32 max_retry = 10;
@@ -1631,7 +1631,7 @@ read_byte_out:
  *  a specified device address.
  **/
 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                 u8 dev_addr, u8 data)
+                                u8 dev_addr, u8 data)
 {
        s32 status = 0;
        u32 max_retry = 1;
@@ -2046,7 +2046,7 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
 
        /* Check that the LASI temp alarm status was triggered */
        hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
-                            MDIO_MMD_PMAPMD, &phy_data);
+                            MDIO_MMD_PMAPMD, &phy_data);
 
        if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
                goto out;
index 0bb047f751c2a4ac6049f4500f65f035651ba830..54071ed17e3b3d589d8250f4536f622e796049dc 100644 (file)
@@ -114,47 +114,47 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-                               u32 device_type, u16 *phy_data);
+                              u32 device_type, u16 *phy_data);
 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-                                u32 device_type, u16 phy_data);
+                               u32 device_type, u16 phy_data);
 s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
                           u32 device_type, u16 *phy_data);
 s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
                            u32 device_type, u16 phy_data);
 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
-                                       ixgbe_link_speed speed,
-                                       bool autoneg_wait_to_complete);
+                                      ixgbe_link_speed speed,
+                                      bool autoneg_wait_to_complete);
 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed *speed,
-                                               bool *autoneg);
+                                              ixgbe_link_speed *speed,
+                                              bool *autoneg);
 bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
 
 /* PHY specific */
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
-                             ixgbe_link_speed *speed,
-                             bool *link_up);
+                            ixgbe_link_speed *speed,
+                            bool *link_up);
 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
-                                       u16 *firmware_version);
+                                      u16 *firmware_version);
 s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
-                                           u16 *firmware_version);
+                                          u16 *firmware_version);
 
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
-                                        u16 *list_offset,
-                                        u16 *data_offset);
+                                       u16 *list_offset,
+                                       u16 *data_offset);
 s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                u8 dev_addr, u8 *data);
+                               u8 dev_addr, u8 *data);
 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                 u8 dev_addr, u8 data);
+                                u8 dev_addr, u8 data);
 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                  u8 *eeprom_data);
+                                 u8 *eeprom_data);
 s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                   u8 *sff8472_data);
 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-                                   u8 eeprom_data);
+                                  u8 eeprom_data);
 #endif /* _IXGBE_PHY_H_ */
index 63515a6f67fae073b40bad8c58abc55a6c238517..b3266b7536d50a70d210d88b0769ce042a0f26c7 100644 (file)
@@ -334,7 +334,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
 }
 
 /**
- * ixgbe_ptp_enable
+ * ixgbe_ptp_feature_enable
  * @ptp: the ptp clock structure
  * @rq: the requested feature to change
  * @on: whether to enable or disable the feature
@@ -342,8 +342,8 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
  * enable (or disable) ancillary features of the phc subsystem.
  * our driver only supports the PPS feature on the X540
  */
-static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
-                           struct ptp_clock_request *rq, int on)
+static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
+                                   struct ptp_clock_request *rq, int on)
 {
        struct ixgbe_adapter *adapter =
                container_of(ptp, struct ixgbe_adapter, ptp_caps);
@@ -435,10 +435,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       struct ixgbe_ring *rx_ring;
        u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
        unsigned long rx_event;
-       int n;
 
        /* if we don't have a valid timestamp in the registers, just update the
         * timeout counter and exit
@@ -450,18 +448,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 
        /* determine the most recent watchdog or rx_timestamp event */
        rx_event = adapter->last_rx_ptp_check;
-       for (n = 0; n < adapter->num_rx_queues; n++) {
-               rx_ring = adapter->rx_ring[n];
-               if (time_after(rx_ring->last_rx_timestamp, rx_event))
-                       rx_event = rx_ring->last_rx_timestamp;
-       }
+       if (time_after(adapter->last_rx_timestamp, rx_event))
+               rx_event = adapter->last_rx_timestamp;
 
        /* only need to read the high RXSTMP register to clear the lock */
        if (time_is_before_jiffies(rx_event + 5*HZ)) {
                IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
                adapter->last_rx_ptp_check = jiffies;
 
-               e_warn(drv, "clearing RX Timestamp hang");
+               e_warn(drv, "clearing RX Timestamp hang\n");
        }
 }
 
@@ -517,7 +512,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
                dev_kfree_skb_any(adapter->ptp_tx_skb);
                adapter->ptp_tx_skb = NULL;
                clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
-               e_warn(drv, "clearing Tx Timestamp hang");
+               e_warn(drv, "clearing Tx Timestamp hang\n");
                return;
        }
 
@@ -530,35 +525,22 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
 }
 
 /**
- * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
- * @q_vector: structure containing interrupt and ring information
+ * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: pointer to adapter struct
  * @skb: particular skb to send timestamp with
  *
  * if the timestamp is valid, we convert it into the timecounter ns
  * value, then store that result into the shhwtstamps structure which
  * is passed up the network stack
  */
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-                            struct sk_buff *skb)
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 {
-       struct ixgbe_adapter *adapter;
-       struct ixgbe_hw *hw;
+       struct ixgbe_hw *hw = &adapter->hw;
        struct skb_shared_hwtstamps *shhwtstamps;
        u64 regval = 0, ns;
        u32 tsyncrxctl;
        unsigned long flags;
 
-       /* we cannot process timestamps on a ring without a q_vector */
-       if (!q_vector || !q_vector->adapter)
-               return;
-
-       adapter = q_vector->adapter;
-       hw = &adapter->hw;
-
-       /*
-        * Read the tsyncrxctl register afterwards in order to prevent taking an
-        * I/O hit on every packet.
-        */
        tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
        if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
                return;
@@ -566,13 +548,17 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
        regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
        regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
 
-
        spin_lock_irqsave(&adapter->tmreg_lock, flags);
        ns = timecounter_cyc2time(&adapter->tc, regval);
        spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
        shhwtstamps = skb_hwtstamps(skb);
        shhwtstamps->hwtstamp = ns_to_ktime(ns);
+
+       /* Update the last_rx_timestamp timer in order to enable watchdog check
+        * for error case of latched timestamp on a dropped packet.
+        */
+       adapter->last_rx_timestamp = jiffies;
 }
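On the receive path, the timestamp stored by ixgbe_ptp_rx_hwtstamp above reaches applications through the SO_TIMESTAMPING socket API: once hardware timestamping is enabled on the interface (see the SIOCSHWTSTAMP sketch further below), the raw hardware time is delivered as an SCM_TIMESTAMPING control message with each packet. A minimal userspace sketch, with error handling abbreviated and port 319 (the PTP event port) used as an example:

/* rx_hwtstamp.c - read the raw hardware RX timestamp of a received packet */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(319),                 /* PTP event port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char data[1500], ctrl[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	if (fd < 0 ||
	    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0 ||
	    recvmsg(fd, &msg, 0) < 0) {
		perror("rx_hwtstamp");
		return 1;
	}
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping ts;

			memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
			/* ts.ts[2] carries the raw hardware timestamp */
			printf("hw rx stamp: %lld.%09ld\n",
			       (long long)ts.ts[2].tv_sec, ts.ts[2].tv_nsec);
		}
	}
	close(fd);
	return 0;
}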
 
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
@@ -584,9 +570,9 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
 }
 
 /**
- * ixgbe_ptp_set_ts_config - control hardware time stamping
- * @adapter: pointer to adapter struct
- * @ifreq: ioctl data
+ * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
+ * @adapter: the private ixgbe adapter structure
+ * @config: the hwtstamp configuration requested
  *
  * Outgoing time stamping can be enabled and disabled. Play nice and
  * disable it when requested, although it shouldn't cause any overhead
@@ -604,25 +590,25 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
  * packets, regardless of the type specified in the register, only use V2
  * Event mode. This more accurately tells the user what the hardware is going
  * to do anyways.
+ *
+ * Note: this may modify the hwtstamp configuration towards a more general
+ * mode, if required to support the specifically requested mode.
  */
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+                                struct hwtstamp_config *config)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       struct hwtstamp_config config;
        u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
        u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
        u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
        bool is_l2 = false;
        u32 regval;
 
-       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
-               return -EFAULT;
-
        /* reserved for future extensions */
-       if (config.flags)
+       if (config->flags)
                return -EINVAL;
 
-       switch (config.tx_type) {
+       switch (config->tx_type) {
        case HWTSTAMP_TX_OFF:
                tsync_tx_ctl = 0;
        case HWTSTAMP_TX_ON:
@@ -631,7 +617,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
                return -ERANGE;
        }
 
-       switch (config.rx_filter) {
+       switch (config->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tsync_rx_ctl = 0;
                tsync_rx_mtrl = 0;
@@ -655,7 +641,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
                is_l2 = true;
-               config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_ALL:
@@ -666,7 +652,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
                 * Delay_Req messages and hardware does not support
                 * timestamping all packets => return error
                 */
-               config.rx_filter = HWTSTAMP_FILTER_NONE;
+               config->rx_filter = HWTSTAMP_FILTER_NONE;
                return -ERANGE;
        }
 
@@ -685,7 +671,6 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
        else
                IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
 
-
        /* enable/disable TX */
        regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
        regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
@@ -707,6 +692,29 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
        regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
        regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
 
+       return 0;
+}
+
+/**
+ * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
+ * @adapter: pointer to adapter struct
+ * @ifreq: ioctl data
+ *
+ * Set hardware to requested mode. If unsupported, return an error with no
+ * changes. Otherwise, store the mode for future reference.
+ */
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+       struct hwtstamp_config config;
+       int err;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
+       if (err)
+               return err;
+
        /* save these settings for future reference */
        memcpy(&adapter->tstamp_config, &config,
               sizeof(adapter->tstamp_config));
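ixgbe_ptp_set_ts_config above is reached from the SIOCSHWTSTAMP ioctl; per the comment on ixgbe_ptp_set_timestamp_mode, the driver may widen rx_filter to a more general mode, and the mode actually programmed is reported back in the same structure. A minimal userspace sketch of issuing that request (the interface name and filter choice are only examples, and error handling is abbreviated):

/* set_hwtstamp.c - request hardware timestamping via SIOCSHWTSTAMP */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* example interface */
	ifr.ifr_data = (char *)&cfg;

	if (fd < 0 || ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return 1;
	}
	/* ixgbe widens the V2 filters to HWTSTAMP_FILTER_PTP_V2_EVENT */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}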
@@ -804,9 +812,13 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
  * ixgbe_ptp_reset
  * @adapter: the ixgbe private board structure
  *
- * When the MAC resets, all timesync features are reset. This function should be
- * called to re-enable the PTP clock structure. It will re-init the timecounter
- * structure based on the kernel time as well as setup the cycle counter data.
+ * When the MAC resets, all the hardware bits for timesync are reset. This
+ * function is used to re-enable the device for PTP based on current settings.
+ * We do lose the current clock time, so just reset the cyclecounter to the
+ * system real clock time.
+ *
+ * This function maintains the hwtstamp_config settings and resets the SDP
+ * output if it was enabled.
  */
 void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
 {
@@ -818,8 +830,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
        IXGBE_WRITE_FLUSH(hw);
 
-       /* Reset the saved tstamp_config */
-       memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+       /* reset the hardware timestamping mode */
+       ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
 
        ixgbe_ptp_start_cyclecounter(adapter);
 
@@ -839,16 +851,23 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_ptp_init
+ * ixgbe_ptp_create_clock
  * @adapter: the ixgbe private adapter structure
  *
- * This function performs the required steps for enabling ptp
- * support. If ptp support has already been loaded it simply calls the
- * cyclecounter init routine and exits.
+ * This function performs setup of the user entry point function table and
+ * initializes the PTP clock device, which is used to access the clock-like
+ * features of the PTP core. It is called by ixgbe_ptp_init and only registers
+ * a new clock device if one does not already exist (the clock device is not
+ * destroyed across a suspend/resume cycle, for example).
  */
-void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       long err;
+
+       /* do nothing if we already have a clock device */
+       if (!IS_ERR_OR_NULL(adapter->ptp_clock))
+               return 0;
 
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_X540:
@@ -865,7 +884,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
                adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
                adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
                adapter->ptp_caps.settime = ixgbe_ptp_settime;
-               adapter->ptp_caps.enable = ixgbe_ptp_enable;
+               adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
                break;
        case ixgbe_mac_82599EB:
                snprintf(adapter->ptp_caps.name,
@@ -881,24 +900,57 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
                adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
                adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
                adapter->ptp_caps.settime = ixgbe_ptp_settime;
-               adapter->ptp_caps.enable = ixgbe_ptp_enable;
+               adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
                break;
        default:
                adapter->ptp_clock = NULL;
-               return;
+               return -EOPNOTSUPP;
        }
 
-       spin_lock_init(&adapter->tmreg_lock);
-       INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
-
        adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
                                                &adapter->pdev->dev);
        if (IS_ERR(adapter->ptp_clock)) {
+               err = PTR_ERR(adapter->ptp_clock);
                adapter->ptp_clock = NULL;
                e_dev_err("ptp_clock_register failed\n");
+               return err;
        } else
                e_dev_info("registered PHC device on %s\n", netdev->name);
 
+       /* set default timestamp mode to disabled here. We do this in
+        * create_clock instead of init because we don't want to override the
+        * previous settings during a resume cycle.
+        */
+       adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+       adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+       return 0;
+}
+
+/**
+ * ixgbe_ptp_init
+ * @adapter: the ixgbe private adapter structure
+ *
+ * This function performs the required steps for enabling PTP
+ * support. If PTP support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+{
+       /* initialize the spin lock first since we can't control when a user
+        * will call the entry functions once we have initialized the clock
+        * device
+        */
+       spin_lock_init(&adapter->tmreg_lock);
+
+       /* obtain a PTP device, or re-use an existing device */
+       if (ixgbe_ptp_create_clock(adapter))
+               return;
+
+       /* we have a clock so we can initialize work now */
+       INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
+
+       /* reset the PTP related hardware bits */
        ixgbe_ptp_reset(adapter);
 
        /* enter the IXGBE_PTP_RUNNING state */
@@ -908,28 +960,45 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_ptp_stop - disable ptp device and stop the overflow check
- * @adapter: pointer to adapter struct
+ * ixgbe_ptp_suspend - stop PTP work items
+ * @adapter: pointer to adapter struct
  *
- * this function stops the ptp support, and cancels the delayed work.
+ * This function suspends PTP activity and prevents more PTP work from being
+ * generated, but does not destroy the PTP clock device.
  */
-void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter)
 {
        /* Leave the IXGBE_PTP_RUNNING state. */
        if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
                return;
 
-       /* stop the PPS signal */
-       adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
-       ixgbe_ptp_setup_sdp(adapter);
+       /* since this might be called in suspend, we don't clear the state,
+        * but simply reset the auxiliary PPS signal control register
+        */
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0);
 
+       /* ensure that we cancel any pending PTP Tx work item in progress */
        cancel_work_sync(&adapter->ptp_tx_work);
        if (adapter->ptp_tx_skb) {
                dev_kfree_skb_any(adapter->ptp_tx_skb);
                adapter->ptp_tx_skb = NULL;
                clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
        }
+}
+
+/**
+ * ixgbe_ptp_stop - close the PTP device
+ * @adapter: pointer to adapter struct
+ *
+ * Completely destroy the PTP device; this should only be called when the
+ * device is being fully closed.
+ */
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+{
+       /* first, suspend PTP activity */
+       ixgbe_ptp_suspend(adapter);
 
+       /* disable the PTP clock device */
        if (adapter->ptp_clock) {
                ptp_clock_unregister(adapter->ptp_clock);
                adapter->ptp_clock = NULL;
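The suspend/stop split above keeps the PHC registered across a system suspend, so user space does not lose its clock device. A minimal caller sketch, assuming hypothetical PM/teardown wrappers (not the actual ixgbe handlers):

/* Hypothetical callers: suspend cancels outstanding PTP work but keeps
 * adapter->ptp_clock registered; stop is reserved for full teardown.
 */
static int example_suspend(struct ixgbe_adapter *adapter)
{
	ixgbe_ptp_suspend(adapter);	/* cancel Tx timestamp work, keep PHC */
	/* ... remainder of device suspend ... */
	return 0;
}

static void example_remove(struct ixgbe_adapter *adapter)
{
	ixgbe_ptp_stop(adapter);	/* suspend + ptp_clock_unregister() */
	/* ... remainder of device teardown ... */
}

On resume, ixgbe_ptp_init() then finds the surviving clock via the IS_ERR_OR_NULL() check in ixgbe_ptp_create_clock() and skips re-registration.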
index e6c68d396c992fffb329a1cca4daadb47169faab..16b3a1cd9db6c0c32bef5223b479cf9f4b4a4d41 100644 (file)
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
                for (i = 0; i < num_vf_macvlans; i++) {
                        mv_list->vf = -1;
                        mv_list->free = true;
-                       mv_list->rar_entry = hw->mac.num_rar_entries -
-                               (i + adapter->num_vfs + 1);
                        list_add(&mv_list->l, &adapter->vf_mvs.l);
                        mv_list++;
                }
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
        u32 vector_bit;
        u32 vector_reg;
        u32 mta_reg;
+       u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 
        /* only so many hash values supported */
        entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
                mta_reg |= (1 << vector_bit);
                IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
        }
+       vmolr |= IXGBE_VMOLR_ROMPE;
+       IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 
        return 0;
 }
 
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       struct list_head *pos;
-       struct vf_macvlans *entry;
-
-       list_for_each(pos, &adapter->vf_mvs.l) {
-               entry = list_entry(pos, struct vf_macvlans, l);
-               if (!entry->free)
-                       hw->mac.ops.set_rar(hw, entry->rar_entry,
-                                           entry->vf_macvlan,
-                                           entry->vf, IXGBE_RAH_AV);
-       }
-}
-
+#ifdef CONFIG_PCI_IOV
 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
        u32 mta_reg;
 
        for (i = 0; i < adapter->num_vfs; i++) {
+               u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
                vfinfo = &adapter->vfinfo[i];
                for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
                        hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
                        mta_reg |= (1 << vector_bit);
                        IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
                }
+
+               if (vfinfo->num_vf_mc_hashes)
+                       vmolr |= IXGBE_VMOLR_ROMPE;
+               else
+                       vmolr &= ~IXGBE_VMOLR_ROMPE;
+               IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
        }
 
        /* Restore any VF macvlans */
-       ixgbe_restore_vf_macvlans(adapter);
+       ixgbe_full_sync_mac_table(adapter);
 }
+#endif
 
 static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
                             u32 vf)
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 {
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
-       vmolr |= (IXGBE_VMOLR_ROMPE |
-                 IXGBE_VMOLR_BAM);
+       vmolr |= IXGBE_VMOLR_BAM;
        if (aupe)
                vmolr |= IXGBE_VMOLR_AUPE;
        else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
-       int rar_entry = hw->mac.num_rar_entries - (vf + 1);
        u8 num_tcs = netdev_get_num_tc(adapter->netdev);
 
        /* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
        /* Flush and reset the mta with the new values */
        ixgbe_set_rx_mode(adapter->netdev);
 
-       hw->mac.ops.clear_rar(hw, rar_entry);
+       ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
 
        /* reset VF api back to unknown */
        adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
                            int vf, unsigned char *mac_addr)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
-       int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
+       ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
        memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
-       hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+       ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
 
        return 0;
 }
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
 static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                int vf, int index, unsigned char *mac_addr)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
        struct list_head *pos;
        struct vf_macvlans *entry;
 
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                entry->vf = -1;
                                entry->free = true;
                                entry->is_macvlan = false;
-                               hw->mac.ops.clear_rar(hw, entry->rar_entry);
+                               ixgbe_del_mac_filter(adapter,
+                                                    entry->vf_macvlan, vf);
                        }
                }
        }
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
        entry->vf = vf;
        memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
 
-       hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+       ixgbe_add_mac_filter(adapter, mac_addr, vf);
 
        return 0;
 }
@@ -1138,9 +1129,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
                        adapter->vfinfo[vf].vlan_count--;
                adapter->vfinfo[vf].pf_vlan = 0;
                adapter->vfinfo[vf].pf_qos = 0;
-       }
+       }
 out:
-       return err;
+       return err;
 }
 
 static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
@@ -1231,7 +1222,8 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
        }
 }
 
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+                       int max_tx_rate)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int link_speed;
@@ -1249,13 +1241,16 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
        if (link_speed != 10000)
                return -EINVAL;
 
+       if (min_tx_rate)
+               return -EINVAL;
+
        /* rate limit cannot be less than 10Mbs or greater than link speed */
-       if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
+       if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
                return -EINVAL;
 
        /* store values */
        adapter->vf_rate_link_speed = link_speed;
-       adapter->vfinfo[vf].tx_rate = tx_rate;
+       adapter->vfinfo[vf].tx_rate = max_tx_rate;
 
        /* update hardware configuration */
        ixgbe_set_vf_rate_limit(adapter, vf);
@@ -1297,7 +1292,8 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
-       ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
+       ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
+       ivi->min_tx_rate = 0;
        ivi->vlan = adapter->vfinfo[vf].pf_vlan;
        ivi->qos = adapter->vfinfo[vf].pf_qos;
        ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
index 139eaddfb2ed5c8c14a3891137a313b4fcaa3a6a..32c26d586c01e1c5d561922774df4b250d3fdd73 100644 (file)
@@ -34,7 +34,9 @@
  */
 #define IXGBE_MAX_VFS_DRV_LIMIT  (IXGBE_MAX_VF_FUNCTIONS - 1)
 
+#ifdef CONFIG_PCI_IOV
 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+#endif
 void ixgbe_msg_task(struct ixgbe_adapter *adapter);
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
 void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
@@ -42,7 +44,8 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
                           u8 qos);
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+                       int max_tx_rate);
 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi);
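The ixgbe_ndo_set_vf_bw() prototype above now carries both a minimum and a maximum rate; as the .c hunk shows, ixgbe rejects any non-zero minimum, so only the upper bound is programmable. A minimal hedged sketch of a caller honouring that (the wrapper below is illustrative, not kernel API):

/* Hypothetical helper: cap VF `vf` on `netdev` at max_mbps; the minimum
 * is left at zero because ixgbe returns -EINVAL for a non-zero minimum.
 */
static int example_cap_vf_rate(struct net_device *netdev, int vf, int max_mbps)
{
	return ixgbe_ndo_set_vf_bw(netdev, vf, 0 /* min_tx_rate */, max_mbps);
}

In practice the limit is usually applied from user space through iproute2's VF rate options; the exact keyword (a single rate value versus separate min/max settings) depends on the iproute2 version.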
index 8a6ff2423f076974d1c3c408b97c497d00bdc277..9a89f98b35f0290391c1a4da654906ae2364002c 100644 (file)
@@ -160,7 +160,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_MAX_EITR     0x00000FF8
 #define IXGBE_MIN_EITR     8
 #define IXGBE_EITR(_i)  (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
-                         (0x012300 + (((_i) - 24) * 4)))
+                        (0x012300 + (((_i) - 24) * 4)))
 #define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
 #define IXGBE_EITR_LLI_MOD      0x00008000
 #define IXGBE_EITR_CNT_WDIS     0x80000000
@@ -213,7 +213,7 @@ struct ixgbe_thermal_sensor_data {
  * 64-127: 0x0D014 + (n-64)*0x40
  */
 #define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
-                          (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+                         (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
                          (0x0D014 + (((_i) - 64) * 0x40))))
 /*
  * Rx DCA Control Register:
@@ -222,11 +222,11 @@ struct ixgbe_thermal_sensor_data {
  * 64-127: 0x0D00C + (n-64)*0x40
  */
 #define IXGBE_DCA_RXCTRL(_i)    (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
-                                 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+                                (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
                                 (0x0D00C + (((_i) - 64) * 0x40))))
 #define IXGBE_RDRXCTL           0x02F00
 #define IXGBE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))
-                                             /* 8 of these 0x03C00 - 0x03C1C */
+                                            /* 8 of these 0x03C00 - 0x03C1C */
 #define IXGBE_RXCTRL    0x03000
 #define IXGBE_DROPEN    0x03D04
 #define IXGBE_RXPBSIZE_SHIFT 10
@@ -239,14 +239,14 @@ struct ixgbe_thermal_sensor_data {
 /* Multicast Table Array - 128 entries */
 #define IXGBE_MTA(_i)   (0x05200 + ((_i) * 4))
 #define IXGBE_RAL(_i)   (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
-                         (0x0A200 + ((_i) * 8)))
+                        (0x0A200 + ((_i) * 8)))
 #define IXGBE_RAH(_i)   (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
-                         (0x0A204 + ((_i) * 8)))
+                        (0x0A204 + ((_i) * 8)))
 #define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
 #define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
 /* Packet split receive type */
 #define IXGBE_PSRTYPE(_i)    (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
-                              (0x0EA00 + ((_i) * 4)))
+                             (0x0EA00 + ((_i) * 4)))
 /* array of 4096 1-bit vlan filters */
 #define IXGBE_VFTA(_i)  (0x0A000 + ((_i) * 4))
 /*array of 4096 4-bit vlan vmdq indices */
@@ -696,7 +696,7 @@ struct ixgbe_thermal_sensor_data {
 
 #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
 #define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
-                         (0x08600 + ((_i) * 4)))
+                        (0x08600 + ((_i) * 4)))
 #define IXGBE_TQSM(_i)  (0x08600 + ((_i) * 4))
 
 #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
@@ -820,7 +820,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_GCR_EXT_VT_MODE_32        0x00000002
 #define IXGBE_GCR_EXT_VT_MODE_64        0x00000003
 #define IXGBE_GCR_EXT_SRIOV             (IXGBE_GCR_EXT_MSIX_EN | \
-                                         IXGBE_GCR_EXT_VT_MODE_64)
+                                        IXGBE_GCR_EXT_VT_MODE_64)
 
 /* Time Sync Registers */
 #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
@@ -1396,10 +1396,10 @@ enum {
 #define IXGBE_EIMC_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
 
 #define IXGBE_EIMS_ENABLE_MASK ( \
-                                IXGBE_EIMS_RTX_QUEUE       | \
-                                IXGBE_EIMS_LSC             | \
-                                IXGBE_EIMS_TCP_TIMER       | \
-                                IXGBE_EIMS_OTHER)
+                               IXGBE_EIMS_RTX_QUEUE       | \
+                               IXGBE_EIMS_LSC             | \
+                               IXGBE_EIMS_TCP_TIMER       | \
+                               IXGBE_EIMS_OTHER)
 
 /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
 #define IXGBE_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
@@ -2161,18 +2161,18 @@ enum {
 
 /* Masks to determine if packets should be dropped due to frame errors */
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
-                                      IXGBE_RXD_ERR_CE | \
-                                      IXGBE_RXD_ERR_LE | \
-                                      IXGBE_RXD_ERR_PE | \
-                                      IXGBE_RXD_ERR_OSE | \
-                                      IXGBE_RXD_ERR_USE)
+                                     IXGBE_RXD_ERR_CE | \
+                                     IXGBE_RXD_ERR_LE | \
+                                     IXGBE_RXD_ERR_PE | \
+                                     IXGBE_RXD_ERR_OSE | \
+                                     IXGBE_RXD_ERR_USE)
 
 #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
-                                      IXGBE_RXDADV_ERR_CE | \
-                                      IXGBE_RXDADV_ERR_LE | \
-                                      IXGBE_RXDADV_ERR_PE | \
-                                      IXGBE_RXDADV_ERR_OSE | \
-                                      IXGBE_RXDADV_ERR_USE)
+                                     IXGBE_RXDADV_ERR_CE | \
+                                     IXGBE_RXDADV_ERR_LE | \
+                                     IXGBE_RXDADV_ERR_PE | \
+                                     IXGBE_RXDADV_ERR_OSE | \
+                                     IXGBE_RXDADV_ERR_USE)
 
 /* Multicast bit mask */
 #define IXGBE_MCSTCTRL_MFE      0x4
@@ -2393,9 +2393,9 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ADVTXD_CC         0x00000080 /* Check Context */
 #define IXGBE_ADVTXD_POPTS_SHIFT      8  /* Adv desc POPTS shift */
 #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
-                                 IXGBE_ADVTXD_POPTS_SHIFT)
+                                IXGBE_ADVTXD_POPTS_SHIFT)
 #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
-                                 IXGBE_ADVTXD_POPTS_SHIFT)
+                                IXGBE_ADVTXD_POPTS_SHIFT)
 #define IXGBE_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
 #define IXGBE_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
 #define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2435,10 +2435,10 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_LINK_SPEED_1GB_FULL  0x0020
 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080
 #define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
-                                        IXGBE_LINK_SPEED_10GB_FULL)
+                                       IXGBE_LINK_SPEED_10GB_FULL)
 #define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
-                                        IXGBE_LINK_SPEED_1GB_FULL | \
-                                        IXGBE_LINK_SPEED_10GB_FULL)
+                                       IXGBE_LINK_SPEED_1GB_FULL | \
+                                       IXGBE_LINK_SPEED_10GB_FULL)
 
 
 /* Physical layer type */
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
 /* Flow control parameters */
 struct ixgbe_fc_info {
        u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
-       u32 low_water; /* Flow Control Low-water */
+       u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
        u16 pause_time; /* Flow Control Pause timer */
        bool send_xon; /* Flow control send XON */
        bool strict_ieee; /* Strict IEEE mode */
@@ -2840,7 +2840,7 @@ struct ixgbe_hw;
 
 /* iterator type for walking multicast address lists */
 typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
-                                  u32 *vmdq);
+                                 u32 *vmdq);
 
 /* Function pointer table */
 struct ixgbe_eeprom_operations {
@@ -2887,7 +2887,7 @@ struct ixgbe_mac_operations {
        s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
        s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
        s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
-                                    bool *);
+                                    bool *);
 
        /* Packet Buffer Manipulation */
        void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
index 188a5974b85c41f7b8279b6128f72f749e845f76..40dd798e1290efb6b3d67928d86320b12cb767fe 100644 (file)
@@ -81,7 +81,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
                                     bool autoneg_wait_to_complete)
 {
        return hw->phy.ops.setup_link_speed(hw, speed,
-                                           autoneg_wait_to_complete);
+                                           autoneg_wait_to_complete);
 }
 
 /**
@@ -155,7 +155,7 @@ mac_reset_top:
        /* Add the SAN MAC address to the RAR only if it's a valid address */
        if (is_valid_ether_addr(hw->mac.san_addr)) {
                hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
 
                /* Save the SAN MAC RAR index */
                hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -166,7 +166,7 @@ mac_reset_top:
 
        /* Store the alternative WWNN/WWPN prefix */
        hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
-                                  &hw->mac.wwpn_prefix);
+                                  &hw->mac.wwpn_prefix);
 
 reset_hw_out:
        return status;
@@ -237,9 +237,9 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
 
                eec = IXGBE_READ_REG(hw, IXGBE_EEC);
                eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
-                                   IXGBE_EEC_SIZE_SHIFT);
+                                   IXGBE_EEC_SIZE_SHIFT);
                eeprom->word_size = 1 << (eeprom_size +
-                                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
+                                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
                hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
                       eeprom->type, eeprom->word_size);
@@ -712,8 +712,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
                        udelay(50);
                }
        } else {
-               hw_dbg(hw, "Software semaphore SMBI between device drivers "
-                          "not granted.\n");
+               hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
        }
 
        return status;
@@ -813,7 +812,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
        .clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
        .get_media_type         = &ixgbe_get_media_type_X540,
        .get_supported_physical_layer =
-                                  &ixgbe_get_supported_physical_layer_X540,
+                                 &ixgbe_get_supported_physical_layer_X540,
        .enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
        .get_mac_addr           = &ixgbe_get_mac_addr_generic,
        .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
index 1baecb60f0657e20a4500380ece36a72f908c3d9..a757f07347193efa8fcfd47705c614d1bc854805 100644 (file)
@@ -813,5 +813,5 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
 
 void ixgbevf_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+       netdev->ethtool_ops = &ixgbevf_ethtool_ops;
 }
index d0799e8e31e4ea08e5dc89ec64b58c76bd5840a3..eacce3a2e9eca0f2336a1a2ab2eec732558dbb59 100644 (file)
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
index b0c6050479eb460ae306cccaa93926f738e64c2e..b78378cea5e39b6e18627b9f39ac5249c40293fd 100644 (file)
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
        return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
                struct txdesc *txdesc,
                struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
                                len,
                                PCI_DMA_TODEVICE);
 
+       if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+               return -EINVAL;
+
        pci_dma_sync_single_for_device(pdev,
                                       dmaaddr,
                                       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
        txbi->mapping = dmaaddr;
        txbi->len = len;
+       return 0;
 }
 
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+       struct jme_ring *txring = &(jme->txring[0]);
+       struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+       int mask = jme->tx_ring_mask;
+       int j;
+
+       for (j = 0 ; j < count ; j++) {
+               ctxbi = txbi + ((startidx + j + 2) & (mask));
+               pci_unmap_page(jme->pdev,
+                               ctxbi->mapping,
+                               ctxbi->len,
+                               PCI_DMA_TODEVICE);
+
+               ctxbi->mapping = 0;
+               ctxbi->len = 0;
+       }
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
        struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        int mask = jme->tx_ring_mask;
        const struct skb_frag_struct *frag;
        u32 len;
+       int ret = 0;
 
        for (i = 0 ; i < nr_frags ; ++i) {
                frag = &skb_shinfo(skb)->frags[i];
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
-               jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+               ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
                                skb_frag_page(frag),
                                frag->page_offset, skb_frag_size(frag), hidma);
+               if (ret) {
+                       jme_drop_tx_map(jme, idx, i);
+                       goto out;
+               }
+
        }
 
        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
        ctxdesc = txdesc + ((idx + 1) & (mask));
        ctxbi = txbi + ((idx + 1) & (mask));
-       jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+       ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
                        offset_in_page(skb->data), len, hidma);
+       if (ret)
+               jme_drop_tx_map(jme, idx, i);
+
+out:
+       return ret;
 
 }
 
+
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        struct txdesc *txdesc;
        struct jme_buffer_info *txbi;
        u8 flags;
+       int ret = 0;
 
        txdesc = (struct txdesc *)txring->desc + idx;
        txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
                jme_tx_csum(jme, skb, &flags);
        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-       jme_map_tx_skb(jme, skb, idx);
+       ret = jme_map_tx_skb(jme, skb, idx);
+       if (ret)
+               return ret;
+
        txdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_BUSY;
        }
 
-       jme_fill_tx_desc(jme, skb, idx);
+       if (jme_fill_tx_desc(jme, skb, idx))
+               return NETDEV_TX_OK;
 
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
index b7b8d74c22d9c6f7e7f9aaa7c9211722ec5929fd..c68ff5deba8cfcc5333bfad40e9d657be69df797 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <net/tso.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/etherdevice.h>
@@ -179,9 +180,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
  * Misc definitions.
  */
 #define DEFAULT_RX_QUEUE_SIZE  128
-#define DEFAULT_TX_QUEUE_SIZE  256
+#define DEFAULT_TX_QUEUE_SIZE  512
 #define SKB_DMA_REALIGN                ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 
+#define TSO_HEADER_SIZE                128
 
 /*
  * RX/TX descriptors.
@@ -250,6 +252,7 @@ struct tx_desc {
 #define GEN_TCP_UDP_CHECKSUM           0x00020000
 #define UDP_FRAME                      0x00010000
 #define MAC_HDR_EXTRA_4_BYTES          0x00008000
+#define GEN_TCP_UDP_CHK_FULL           0x00000400
 #define MAC_HDR_EXTRA_8_BYTES          0x00000200
 
 #define TX_IHL_SHIFT                   11
@@ -345,6 +348,9 @@ struct tx_queue {
        int tx_curr_desc;
        int tx_used_desc;
 
+       char *tso_hdrs;
+       dma_addr_t tso_hdrs_dma;
+
        struct tx_desc *tx_desc_area;
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;
@@ -661,6 +667,198 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
        return 0;
 }
 
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+       return (__force __be16)sum;
+}
+
+static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
+                      u16 *l4i_chk, u32 *command, int length)
+{
+       int ret;
+       u32 cmd = 0;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               int hdr_len;
+               int tag_bytes;
+
+               BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+                      skb->protocol != htons(ETH_P_8021Q));
+
+               hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+               tag_bytes = hdr_len - ETH_HLEN;
+
+               if (length - hdr_len > mp->shared->tx_csum_limit ||
+                   unlikely(tag_bytes & ~12)) {
+                       ret = skb_checksum_help(skb);
+                       if (!ret)
+                               goto no_csum;
+                       return ret;
+               }
+
+               if (tag_bytes & 4)
+                       cmd |= MAC_HDR_EXTRA_4_BYTES;
+               if (tag_bytes & 8)
+                       cmd |= MAC_HDR_EXTRA_8_BYTES;
+
+               cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
+                          GEN_IP_V4_CHECKSUM   |
+                          ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+               /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
+                * it seems we don't need to pass the initial checksum. */
+               switch (ip_hdr(skb)->protocol) {
+               case IPPROTO_UDP:
+                       cmd |= UDP_FRAME;
+                       *l4i_chk = 0;
+                       break;
+               case IPPROTO_TCP:
+                       *l4i_chk = 0;
+                       break;
+               default:
+                       WARN(1, "protocol not supported");
+               }
+       } else {
+no_csum:
+               /* Errata BTS #50, IHL must be 5 if no HW checksum */
+               cmd |= 5 << TX_IHL_SHIFT;
+       }
+       *command = cmd;
+       return 0;
+}
+
+static inline int
+txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
+                struct sk_buff *skb, char *data, int length,
+                bool last_tcp, bool is_last)
+{
+       int tx_index;
+       u32 cmd_sts;
+       struct tx_desc *desc;
+
+       tx_index = txq->tx_curr_desc++;
+       if (txq->tx_curr_desc == txq->tx_ring_size)
+               txq->tx_curr_desc = 0;
+       desc = &txq->tx_desc_area[tx_index];
+
+       desc->l4i_chk = 0;
+       desc->byte_cnt = length;
+       desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+                                      length, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
+               WARN(1, "dma_map_single failed!\n");
+               return -ENOMEM;
+       }
+
+       cmd_sts = BUFFER_OWNED_BY_DMA;
+       if (last_tcp) {
+               /* last descriptor in the TCP packet */
+               cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
+               /* last descriptor in SKB */
+               if (is_last)
+                       cmd_sts |= TX_ENABLE_INTERRUPT;
+       }
+       desc->cmd_sts = cmd_sts;
+       return 0;
+}
+
+static inline void
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+{
+       struct mv643xx_eth_private *mp = txq_to_mp(txq);
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int tx_index;
+       struct tx_desc *desc;
+       int ret;
+       u32 cmd_csum = 0;
+       u16 l4i_chk = 0;
+
+       tx_index = txq->tx_curr_desc;
+       desc = &txq->tx_desc_area[tx_index];
+
+       ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
+       if (ret)
+               WARN(1, "failed to prepare checksum!");
+
+       /* Should we set this? Can't use the value from skb_tx_csum()
+        * as it's not the correct initial L4 checksum to use. */
+       desc->l4i_chk = 0;
+
+       desc->byte_cnt = hdr_len;
+       desc->buf_ptr = txq->tso_hdrs_dma +
+                       txq->tx_curr_desc * TSO_HEADER_SIZE;
+       desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
+                                  GEN_CRC;
+
+       txq->tx_curr_desc++;
+       if (txq->tx_curr_desc == txq->tx_ring_size)
+               txq->tx_curr_desc = 0;
+}
+
+static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
+                         struct net_device *dev)
+{
+       struct mv643xx_eth_private *mp = txq_to_mp(txq);
+       int total_len, data_left, ret;
+       int desc_count = 0;
+       struct tso_t tso;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+       /* Count needed descriptors */
+       if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
+               netdev_dbg(dev, "not enough descriptors for TSO!\n");
+               return -EBUSY;
+       }
+
+       /* Initialize the TSO handler, and prepare the first payload */
+       tso_start(skb, &tso);
+
+       total_len = skb->len - hdr_len;
+       while (total_len > 0) {
+               char *hdr;
+
+               data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+               total_len -= data_left;
+               desc_count++;
+
+               /* prepare packet headers: MAC + IP + TCP */
+               hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
+               tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+               txq_put_hdr_tso(skb, txq, data_left);
+
+               while (data_left > 0) {
+                       int size;
+                       desc_count++;
+
+                       size = min_t(int, tso.size, data_left);
+                       ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
+                                              size == data_left,
+                                              total_len == 0);
+                       if (ret)
+                               goto err_release;
+                       data_left -= size;
+                       tso_build_data(skb, &tso, size);
+               }
+       }
+
+       __skb_queue_tail(&txq->tx_skb, skb);
+       skb_tx_timestamp(skb);
+
+       /* clear TX_END status */
+       mp->work_tx_end &= ~(1 << txq->index);
+
+       /* ensure all descriptors are written before poking hardware */
+       wmb();
+       txq_enable(txq);
+       txq->tx_desc_count += desc_count;
+       return 0;
+err_release:
+       /* TODO: Release all used data descriptors; header descriptors must not
+        * be DMA-unmapped.
+        */
+       return ret;
+}
+
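The err_release path above is left as a TODO. The mvneta driver later in this patch implements the equivalent cleanup: walk back over the descriptors consumed so far, DMA-unmap only the data descriptors (header descriptors point into the coherent tso_hdrs area and must not be unmapped), and return the slots to the ring. A rough sketch of what that could look like here; rewinding tx_curr_desc by hand is an assumption about this driver's ring bookkeeping, and handling of the half-filled descriptor whose mapping failed is glossed over:

/* Hedged sketch only: undo `desc_count` descriptors filled before the
 * failure. Data descriptors were mapped with dma_map_single(); header
 * descriptors borrow txq->tso_hdrs_dma and are skipped.
 */
static void example_tso_unwind(struct net_device *dev, struct tx_queue *txq,
			       int desc_count)
{
	while (desc_count--) {
		struct tx_desc *desc;

		if (txq->tx_curr_desc == 0)
			txq->tx_curr_desc = txq->tx_ring_size;
		txq->tx_curr_desc--;
		desc = &txq->tx_desc_area[txq->tx_curr_desc];

		/* header descriptors carry TX_FIRST_DESC and use the
		 * coherent header buffer, so only unmap data descriptors
		 */
		if (!(desc->cmd_sts & TX_FIRST_DESC))
			dma_unmap_single(dev->dev.parent, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
	}
}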
 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -671,8 +869,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
                skb_frag_t *this_frag;
                int tx_index;
                struct tx_desc *desc;
+               void *addr;
 
                this_frag = &skb_shinfo(skb)->frags[frag];
+               addr = page_address(this_frag->page.p) + this_frag->page_offset;
                tx_index = txq->tx_curr_desc++;
                if (txq->tx_curr_desc == txq->tx_ring_size)
                        txq->tx_curr_desc = 0;
@@ -692,18 +892,11 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
                desc->l4i_chk = 0;
                desc->byte_cnt = skb_frag_size(this_frag);
-               desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
-                                                this_frag, 0,
-                                                skb_frag_size(this_frag),
-                                                DMA_TO_DEVICE);
+               desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
+                                              desc->byte_cnt, DMA_TO_DEVICE);
        }
 }
 
-static inline __be16 sum16_as_be(__sum16 sum)
-{
-       return (__force __be16)sum;
-}
-
 static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -712,53 +905,17 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
        struct tx_desc *desc;
        u32 cmd_sts;
        u16 l4i_chk;
-       int length;
+       int length, ret;
 
-       cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+       cmd_sts = 0;
        l4i_chk = 0;
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               int hdr_len;
-               int tag_bytes;
-
-               BUG_ON(skb->protocol != htons(ETH_P_IP) &&
-                      skb->protocol != htons(ETH_P_8021Q));
-
-               hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
-               tag_bytes = hdr_len - ETH_HLEN;
-               if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
-                   unlikely(tag_bytes & ~12)) {
-                       if (skb_checksum_help(skb) == 0)
-                               goto no_csum;
-                       dev_kfree_skb_any(skb);
-                       return 1;
-               }
-
-               if (tag_bytes & 4)
-                       cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
-               if (tag_bytes & 8)
-                       cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-
-               cmd_sts |= GEN_TCP_UDP_CHECKSUM |
-                          GEN_IP_V4_CHECKSUM   |
-                          ip_hdr(skb)->ihl << TX_IHL_SHIFT;
-
-               switch (ip_hdr(skb)->protocol) {
-               case IPPROTO_UDP:
-                       cmd_sts |= UDP_FRAME;
-                       l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
-                       break;
-               case IPPROTO_TCP:
-                       l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
-                       break;
-               default:
-                       BUG();
-               }
-       } else {
-no_csum:
-               /* Errata BTS #50, IHL must be 5 if no HW checksum */
-               cmd_sts |= 5 << TX_IHL_SHIFT;
+       ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
        }
+       cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
 
        tx_index = txq->tx_curr_desc++;
        if (txq->tx_curr_desc == txq->tx_ring_size)
@@ -801,7 +958,7 @@ no_csum:
 static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mv643xx_eth_private *mp = netdev_priv(dev);
-       int length, queue;
+       int length, queue, ret;
        struct tx_queue *txq;
        struct netdev_queue *nq;
 
@@ -825,7 +982,11 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        length = skb->len;
 
-       if (!txq_submit_skb(txq, skb)) {
+       if (skb_is_gso(skb))
+               ret = txq_submit_tso(txq, skb, dev);
+       else
+               ret = txq_submit_skb(txq, skb);
+       if (!ret) {
                int entries_left;
 
                txq->tx_bytes += length;
@@ -834,6 +995,8 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                entries_left = txq->tx_ring_size - txq->tx_desc_count;
                if (entries_left < MAX_SKB_FRAGS + 1)
                        netif_tx_stop_queue(nq);
+       } else if (ret == -EBUSY) {
+               return NETDEV_TX_BUSY;
        }
 
        return NETDEV_TX_OK;
@@ -907,14 +1070,8 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                        mp->dev->stats.tx_errors++;
                }
 
-               if (cmd_sts & TX_FIRST_DESC) {
-                       dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-                                        desc->byte_cnt, DMA_TO_DEVICE);
-               } else {
-                       dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
-                                      desc->byte_cnt, DMA_TO_DEVICE);
-               }
-
+               dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+                                desc->byte_cnt, DMA_TO_DEVICE);
                dev_kfree_skb(skb);
        }
 
@@ -1010,8 +1167,9 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
 
 
 /* mii management interface *************************************************/
-static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
+static void mv643xx_eth_adjust_link(struct net_device *dev)
 {
+       struct mv643xx_eth_private *mp = netdev_priv(dev);
        u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
        u32 autoneg_disable = FORCE_LINK_PASS |
                     DISABLE_AUTO_NEG_SPEED_GMII |
@@ -1387,7 +1545,7 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        ret = phy_ethtool_sset(mp->phy, cmd);
        if (!ret)
-               mv643xx_adjust_pscr(mp);
+               mv643xx_eth_adjust_link(dev);
        return ret;
 }
 
@@ -1871,6 +2029,15 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
                                        nexti * sizeof(struct tx_desc);
        }
 
+       /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+       txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
+                                          txq->tx_ring_size * TSO_HEADER_SIZE,
+                                          &txq->tso_hdrs_dma, GFP_KERNEL);
+       if (txq->tso_hdrs == NULL) {
+               dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+                                 txq->tx_desc_area, txq->tx_desc_dma);
+               return -ENOMEM;
+       }
        skb_queue_head_init(&txq->tx_skb);
 
        return 0;
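With the defaults introduced earlier in this patch, the coherent header area allocated above works out to

	DEFAULT_TX_QUEUE_SIZE * TSO_HEADER_SIZE = 512 * 128 bytes = 64 KiB

per TX queue (assuming the default ring size is not overridden by platform data), and it is freed again in the txq_deinit() hunk just below.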
@@ -1891,6 +2058,10 @@ static void txq_deinit(struct tx_queue *txq)
        else
                dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
                                  txq->tx_desc_area, txq->tx_desc_dma);
+       if (txq->tso_hdrs)
+               dma_free_coherent(mp->dev->dev.parent,
+                                 txq->tx_ring_size * TSO_HEADER_SIZE,
+                                 txq->tso_hdrs, txq->tso_hdrs_dma);
 }
 
 
@@ -2303,7 +2474,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
        ret = phy_mii_ioctl(mp->phy, ifr, cmd);
        if (!ret)
-               mv643xx_adjust_pscr(mp);
+               mv643xx_eth_adjust_link(dev);
        return ret;
 }
 
@@ -2701,13 +2872,6 @@ static void set_params(struct mv643xx_eth_private *mp,
        mp->txq_count = pd->tx_queue_count ? : 1;
 }
 
-static void mv643xx_eth_adjust_link(struct net_device *dev)
-{
-       struct mv643xx_eth_private *mp = netdev_priv(dev);
-
-       mv643xx_adjust_pscr(mp);
-}
-
 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
                                   int phy_addr)
 {
@@ -2889,7 +3053,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        if (err)
                goto out;
 
-       SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
+       dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
 
        init_pscr(mp, pd->speed, pd->duplex);
 
@@ -2921,9 +3085,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        dev->watchdog_timeo = 2 * HZ;
        dev->base_addr = 0;
 
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-       dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+       dev->vlan_features = dev->features;
+
+       dev->features |= NETIF_F_RXCSUM;
+       dev->hw_features = dev->features;
 
        dev->priv_flags |= IFF_UNICAST_FLT;
 
index b161a525fc5bd8accb44b002b64776f05d8d0319..fc2fb25343f417964070c058baea2e8162dab595 100644 (file)
@@ -195,11 +195,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
-       if (!bus) {
-               dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+       bus = devm_mdiobus_alloc_size(&pdev->dev,
+                                     sizeof(struct orion_mdio_dev));
+       if (!bus)
                return -ENOMEM;
-       }
 
        bus->name = "orion_mdio_bus";
        bus->read = orion_mdio_read;
@@ -208,11 +207,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
                 dev_name(&pdev->dev));
        bus->parent = &pdev->dev;
 
-       bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
-       if (!bus->irq) {
-               mdiobus_free(bus);
+       bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
+                                     GFP_KERNEL);
+       if (!bus->irq)
                return -ENOMEM;
-       }
 
        for (i = 0; i < PHY_MAX_ADDR; i++)
                bus->irq[i] = PHY_POLL;
@@ -232,7 +230,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
                clk_prepare_enable(dev->clk);
 
        dev->err_interrupt = platform_get_irq(pdev, 0);
-       if (dev->err_interrupt != -ENXIO) {
+       if (dev->err_interrupt > 0) {
                ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
                                        orion_mdio_err_irq,
                                        IRQF_SHARED, pdev->name, dev);
@@ -241,6 +239,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
 
                writel(MVMDIO_ERR_INT_SMI_DONE,
                        dev->regs + MVMDIO_ERR_INT_MASK);
+
+       } else if (dev->err_interrupt == -EPROBE_DEFER) {
+               return -EPROBE_DEFER;
        }
 
        mutex_init(&dev->lock);
@@ -261,8 +262,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
 out_mdio:
        if (!IS_ERR(dev->clk))
                clk_disable_unprepare(dev->clk);
-       kfree(bus->irq);
-       mdiobus_free(bus);
        return ret;
 }
 
@@ -273,8 +272,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
 
        writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
        mdiobus_unregister(bus);
-       kfree(bus->irq);
-       mdiobus_free(bus);
        if (!IS_ERR(dev->clk))
                clk_disable_unprepare(dev->clk);
 
index 14786c8bf99efcddbbbdff7bc0f9ee9e20933864..b8919fa6ed27c601423286ed53183beacfd57a13 100644 (file)
@@ -23,6 +23,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <linux/io.h>
+#include <net/tso.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 
 #define MVNETA_TX_MTU_MAX              0x3ffff
 
+/* TSO header size */
+#define TSO_HEADER_SIZE 128
+
 /* Max number of Rx descriptors */
 #define MVNETA_MAX_RXD 128
 
@@ -279,9 +283,6 @@ struct mvneta_port {
        u32 cause_rx_tx;
        struct napi_struct napi;
 
-       /* Napi weight */
-       int weight;
-
        /* Core clock */
        struct clk *clk;
        u8 mcast_count[256];
@@ -413,6 +414,12 @@ struct mvneta_tx_queue {
 
        /* Index of the next TX DMA descriptor to process */
        int next_desc_to_proc;
+
+       /* DMA buffers for TSO headers */
+       char *tso_hdrs;
+
+       /* DMA address of TSO headers */
+       dma_addr_t tso_hdrs_phys;
 };
 
 struct mvneta_rx_queue {
@@ -441,7 +448,10 @@ struct mvneta_rx_queue {
        int next_desc_to_proc;
 };
 
-static int rxq_number = 8;
+/* The hardware supports eight (8) rx queues, but we are only allowing
+ * the first one to be used. Therefore, let's just allocate one queue.
+ */
+static int rxq_number = 1;
 static int txq_number = 8;
 
 static int rxq_def;
@@ -1519,14 +1529,134 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
        return rx_done;
 }
 
+static inline void
+mvneta_tso_put_hdr(struct sk_buff *skb,
+                  struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+{
+       struct mvneta_tx_desc *tx_desc;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+       txq->tx_skb[txq->txq_put_index] = NULL;
+       tx_desc = mvneta_txq_next_desc_get(txq);
+       tx_desc->data_size = hdr_len;
+       tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+       tx_desc->command |= MVNETA_TXD_F_DESC;
+       tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+                                txq->txq_put_index * TSO_HEADER_SIZE;
+       mvneta_txq_inc_put(txq);
+}
+
+static inline int
+mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
+                   struct sk_buff *skb, char *data, int size,
+                   bool last_tcp, bool is_last)
+{
+       struct mvneta_tx_desc *tx_desc;
+
+       tx_desc = mvneta_txq_next_desc_get(txq);
+       tx_desc->data_size = size;
+       tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
+                                               size, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent,
+                    tx_desc->buf_phys_addr))) {
+               mvneta_txq_desc_put(txq);
+               return -ENOMEM;
+       }
+
+       tx_desc->command = 0;
+       txq->tx_skb[txq->txq_put_index] = NULL;
+
+       if (last_tcp) {
+               /* last descriptor in the TCP packet */
+               tx_desc->command = MVNETA_TXD_L_DESC;
+
+               /* last descriptor in SKB */
+               if (is_last)
+                       txq->tx_skb[txq->txq_put_index] = skb;
+       }
+       mvneta_txq_inc_put(txq);
+       return 0;
+}
+
+static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+                        struct mvneta_tx_queue *txq)
+{
+       int total_len, data_left;
+       int desc_count = 0;
+       struct mvneta_port *pp = netdev_priv(dev);
+       struct tso_t tso;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int i;
+
+       /* Count needed descriptors */
+       if ((txq->count + tso_count_descs(skb)) >= txq->size)
+               return 0;
+
+       if (skb_headlen(skb) < hdr_len) {
+               pr_info("mvneta: skb head is shorter than the TSO headers\n");
+               return 0;
+       }
+
+       /* Initialize the TSO handler, and prepare the first payload */
+       tso_start(skb, &tso);
+
+       total_len = skb->len - hdr_len;
+       while (total_len > 0) {
+               char *hdr;
+
+               data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+               total_len -= data_left;
+               desc_count++;
+
+               /* prepare packet headers: MAC + IP + TCP */
+               hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+               tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+
+               mvneta_tso_put_hdr(skb, pp, txq);
+
+               while (data_left > 0) {
+                       int size;
+                       desc_count++;
+
+                       size = min_t(int, tso.size, data_left);
+
+                       if (mvneta_tso_put_data(dev, txq, skb,
+                                                tso.data, size,
+                                                size == data_left,
+                                                total_len == 0))
+                               goto err_release;
+                       data_left -= size;
+
+                       tso_build_data(skb, &tso, size);
+               }
+       }
+
+       return desc_count;
+
+err_release:
+       /* Release all used data descriptors; header descriptors must not
+        * be DMA-unmapped.
+        */
+       for (i = desc_count - 1; i >= 0; i--) {
+               struct mvneta_tx_desc *tx_desc = txq->descs + i;
+               if (!(tx_desc->command & MVNETA_TXD_F_DESC))
+                       dma_unmap_single(pp->dev->dev.parent,
+                                        tx_desc->buf_phys_addr,
+                                        tx_desc->data_size,
+                                        DMA_TO_DEVICE);
+               mvneta_txq_desc_put(txq);
+       }
+       return 0;
+}
+
 /* Handle tx fragmentation processing */
 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
                                  struct mvneta_tx_queue *txq)
 {
        struct mvneta_tx_desc *tx_desc;
-       int i;
+       int i, nr_frags = skb_shinfo(skb)->nr_frags;
 
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+       for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                void *addr = page_address(frag->page.p) + frag->page_offset;
 
@@ -1543,20 +1673,16 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
                        goto error;
                }
 
-               if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+               if (i == nr_frags - 1) {
                        /* Last descriptor */
                        tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-
                        txq->tx_skb[txq->txq_put_index] = skb;
-
-                       mvneta_txq_inc_put(txq);
                } else {
                        /* Descriptor in the middle: Not First, Not Last */
                        tx_desc->command = 0;
-
                        txq->tx_skb[txq->txq_put_index] = NULL;
-                       mvneta_txq_inc_put(txq);
                }
+               mvneta_txq_inc_put(txq);
        }
 
        return 0;
@@ -1584,15 +1710,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
        u16 txq_id = skb_get_queue_mapping(skb);
        struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
        struct mvneta_tx_desc *tx_desc;
-       struct netdev_queue *nq;
        int frags = 0;
        u32 tx_cmd;
 
        if (!netif_running(dev))
                goto out;
 
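+       /* GSO skbs are segmented in software and queued by the TSO path */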
+       if (skb_is_gso(skb)) {
+               frags = mvneta_tx_tso(skb, dev, txq);
+               goto out;
+       }
+
        frags = skb_shinfo(skb)->nr_frags + 1;
-       nq    = netdev_get_tx_queue(dev, txq_id);
 
        /* Get a descriptor for the first part of the packet */
        tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1764,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-       txq->count += frags;
-       mvneta_txq_pend_desc_add(pp, txq, frags);
-
-       if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
-               netif_tx_stop_queue(nq);
-
 out:
        if (frags > 0) {
                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+               struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+               txq->count += frags;
+               mvneta_txq_pend_desc_add(pp, txq, frags);
+
+               if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
+                       netif_tx_stop_queue(nq);
 
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
@@ -2003,7 +2133,7 @@ static void mvneta_tx_reset(struct mvneta_port *pp)
 {
        int queue;
 
-       /* free the skb's in the hal tx ring */
+       /* free the skbs in the tx ring */
        for (queue = 0; queue < txq_number; queue++)
                mvneta_txq_done_force(pp, &pp->txqs[queue]);
 
@@ -2109,6 +2239,18 @@ static int mvneta_txq_init(struct mvneta_port *pp,
                                  txq->descs, txq->descs_phys);
                return -ENOMEM;
        }
+
+       /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+       txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
+                                          txq->size * TSO_HEADER_SIZE,
+                                          &txq->tso_hdrs_phys, GFP_KERNEL);
+       if (txq->tso_hdrs == NULL) {
+               kfree(txq->tx_skb);
+               dma_free_coherent(pp->dev->dev.parent,
+                                 txq->size * MVNETA_DESC_ALIGNED_SIZE,
+                                 txq->descs, txq->descs_phys);
+               return -ENOMEM;
+       }
        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
        return 0;
@@ -2120,6 +2262,10 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 {
        kfree(txq->tx_skb);
 
+       if (txq->tso_hdrs)
+               dma_free_coherent(pp->dev->dev.parent,
+                                 txq->size * TSO_HEADER_SIZE,
+                                 txq->tso_hdrs, txq->tso_hdrs_phys);
        if (txq->descs)
                dma_free_coherent(pp->dev->dev.parent,
                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2279,24 +2425,28 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
                return 0;
 
        /* The interface is running, so we have to force a
-        * reallocation of the RXQs
+        * reallocation of the queues
         */
        mvneta_stop_dev(pp);
 
        mvneta_cleanup_txqs(pp);
        mvneta_cleanup_rxqs(pp);
 
-       pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+       pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
        ret = mvneta_setup_rxqs(pp);
        if (ret) {
-               netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
+               netdev_err(dev, "unable to setup rxqs after MTU change\n");
                return ret;
        }
 
-       mvneta_setup_txqs(pp);
+       ret = mvneta_setup_txqs(pp);
+       if (ret) {
+               netdev_err(dev, "unable to setup txqs after MTU change\n");
+               return ret;
+       }
 
        mvneta_start_dev(pp);
        mvneta_port_up(pp);
@@ -2323,22 +2473,19 @@ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
 {
        struct mvneta_port *pp = netdev_priv(dev);
-       u8 *mac = addr + 2;
-       int i;
-
-       if (netif_running(dev))
-               return -EBUSY;
+       struct sockaddr *sockaddr = addr;
+       int ret;
 
+       ret = eth_prepare_mac_addr_change(dev, addr);
+       if (ret < 0)
+               return ret;
        /* Remove previous address table entry */
        mvneta_mac_addr_set(pp, dev->dev_addr, -1);
 
        /* Set new addr in hw */
-       mvneta_mac_addr_set(pp, mac, rxq_def);
-
-       /* Set addr in the device */
-       for (i = 0; i < ETH_ALEN; i++)
-               dev->dev_addr[i] = mac[i];
+       mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
 
+       eth_commit_mac_addr_change(dev, addr);
        return 0;
 }
 
@@ -2433,8 +2580,6 @@ static int mvneta_open(struct net_device *dev)
        struct mvneta_port *pp = netdev_priv(dev);
        int ret;
 
-       mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
-
        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2638,7 +2783,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
 };
 
 /* Initialize hw */
-static int mvneta_init(struct mvneta_port *pp, int phy_addr)
+static int mvneta_init(struct device *dev, struct mvneta_port *pp)
 {
        int queue;
 
@@ -2648,8 +2793,8 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
        /* Set port default values */
        mvneta_defaults_set(pp);
 
-       pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
-                          GFP_KERNEL);
+       pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
+                               GFP_KERNEL);
        if (!pp->txqs)
                return -ENOMEM;
 
@@ -2661,12 +2806,10 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
                txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
        }
 
-       pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
-                          GFP_KERNEL);
-       if (!pp->rxqs) {
-               kfree(pp->txqs);
+       pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
+                               GFP_KERNEL);
+       if (!pp->rxqs)
                return -ENOMEM;
-       }
 
        /* Create Rx descriptor rings */
        for (queue = 0; queue < rxq_number; queue++) {
@@ -2680,12 +2823,6 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
        return 0;
 }
 
-static void mvneta_deinit(struct mvneta_port *pp)
-{
-       kfree(pp->txqs);
-       kfree(pp->rxqs);
-}
-
 /* platform glue : initialize decoding windows */
 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
                                     const struct mbus_dram_target_info *dram)
@@ -2768,7 +2905,6 @@ static int mvneta_probe(struct platform_device *pdev)
        struct resource *res;
        struct device_node *dn = pdev->dev.of_node;
        struct device_node *phy_node;
-       u32 phy_addr;
        struct mvneta_port *pp;
        struct net_device *dev;
        const char *dt_mac_addr;
@@ -2797,9 +2933,22 @@ static int mvneta_probe(struct platform_device *pdev)
 
        phy_node = of_parse_phandle(dn, "phy", 0);
        if (!phy_node) {
-               dev_err(&pdev->dev, "no associated PHY\n");
-               err = -ENODEV;
-               goto err_free_irq;
+               if (!of_phy_is_fixed_link(dn)) {
+                       dev_err(&pdev->dev, "no PHY specified\n");
+                       err = -ENODEV;
+                       goto err_free_irq;
+               }
+
+               err = of_phy_register_fixed_link(dn);
+               if (err < 0) {
+                       dev_err(&pdev->dev, "cannot register fixed PHY\n");
+                       goto err_free_irq;
+               }
+
+               /* In the case of a fixed PHY, the DT node associated
+                * to the PHY is the Ethernet MAC DT node.
+                */
+               phy_node = dn;
        }
 
        phy_mode = of_get_phy_mode(dn);
@@ -2813,11 +2962,9 @@ static int mvneta_probe(struct platform_device *pdev)
        dev->watchdog_timeo = 5 * HZ;
        dev->netdev_ops = &mvneta_netdev_ops;
 
-       SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+       dev->ethtool_ops = &mvneta_eth_tool_ops;
 
        pp = netdev_priv(dev);
-
-       pp->weight = MVNETA_RX_POLL_WEIGHT;
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;
 
@@ -2864,33 +3011,31 @@ static int mvneta_probe(struct platform_device *pdev)
        pp->dev = dev;
        SET_NETDEV_DEV(dev, &pdev->dev);
 
-       err = mvneta_init(pp, phy_addr);
-       if (err < 0) {
-               dev_err(&pdev->dev, "can't init eth hal\n");
+       err = mvneta_init(&pdev->dev, pp);
+       if (err < 0)
                goto err_free_stats;
-       }
 
        err = mvneta_port_power_up(pp, phy_mode);
        if (err < 0) {
                dev_err(&pdev->dev, "can't power up port\n");
-               goto err_deinit;
+               goto err_free_stats;
        }
 
        dram_target_info = mv_mbus_dram_info();
        if (dram_target_info)
                mvneta_conf_mbus_windows(pp, dram_target_info);
 
-       netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+       netif_napi_add(dev, &pp->napi, mvneta_poll, MVNETA_RX_POLL_WEIGHT);
 
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+       dev->hw_features |= dev->features;
+       dev->vlan_features |= dev->features;
        dev->priv_flags |= IFF_UNICAST_FLT;
 
        err = register_netdev(dev);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register\n");
-               goto err_deinit;
+               goto err_free_stats;
        }
 
        netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -2900,8 +3045,6 @@ static int mvneta_probe(struct platform_device *pdev)
 
        return 0;
 
-err_deinit:
-       mvneta_deinit(pp);
 err_free_stats:
        free_percpu(pp->stats);
 err_clk:
@@ -2920,7 +3063,6 @@ static int mvneta_remove(struct platform_device *pdev)
        struct mvneta_port *pp = netdev_priv(dev);
 
        unregister_netdev(dev);
-       mvneta_deinit(pp);
        clk_disable_unprepare(pp->clk);
        free_percpu(pp->stats);
        irq_dispose_mapping(dev->irq);
index b358c2f6f4bdc3817f98ab53c36bf8f8c7753ffd..8f5aa7c62b18f41f8eaa1b31e209687059921a47 100644 (file)
@@ -1488,7 +1488,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
        dev->netdev_ops = &pxa168_eth_netdev_ops;
        dev->watchdog_timeo = 2 * HZ;
        dev->base_addr = 0;
-       SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+       dev->ethtool_ops = &pxa168_ethtool_ops;
 
        INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
 
index b81106451a0a4d2d46d831d9e629d3a24212aada..69693384b58ccfefd2bd9918112b8b08431f0368 100644 (file)
@@ -4760,7 +4760,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
 
        SET_NETDEV_DEV(dev, &hw->pdev->dev);
        dev->irq = hw->pdev->irq;
-       SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
+       dev->ethtool_ops = &sky2_ethtool_ops;
        dev->watchdog_timeo = TX_WATCHDOG;
        dev->netdev_ops = &sky2_netdev_ops[port];
 
index 78099eab767374319c7e258bfa1f0d6df4c64fa3..a89e46430c7415a9054457f2b7c5f2ba403eda8b 100644 (file)
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
 
        /* First, verify that the master reports correct status */
        if (comm_pending(dev)) {
-               mlx4_warn(dev, "Communication channel is not idle."
-                         "my toggle is %d (cmd:0x%x)\n",
+               mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
                          priv->cmd.comm_toggle, cmd);
                return -EAGAIN;
        }
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                        *out_param =
                                                be64_to_cpu(vhcr->out_param);
                                else {
-                                       mlx4_err(dev, "response expected while"
-                                                "output mailbox is NULL for "
-                                                "command 0x%x\n", op);
+                                       mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+                                                op);
                                        vhcr->status = CMD_STAT_BAD_PARAM;
                                }
                        }
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                        *out_param =
                                                be64_to_cpu(vhcr->out_param);
                                else {
-                                       mlx4_err(dev, "response expected while"
-                                                "output mailbox is NULL for "
-                                                "command 0x%x\n", op);
+                                       mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+                                                op);
                                        vhcr->status = CMD_STAT_BAD_PARAM;
                                }
                        }
                        ret = mlx4_status_to_errno(vhcr->status);
                } else
-                       mlx4_err(dev, "failed execution of VHCR_POST command"
-                                "opcode 0x%x\n", op);
+                       mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
+                                op);
        }
 
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -476,6 +473,13 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                goto out;
        }
 
+       if (out_is_imm && !out_param) {
+               mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+                        op);
+               err = -EINVAL;
+               goto out;
+       }
+
        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
        if (err)
@@ -554,6 +558,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);
 
+       if (out_is_imm && !out_param) {
+               mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+                        op);
+               err = -EINVAL;
+               goto out;
+       }
+
        init_completion(&context->done);
 
        mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
@@ -625,9 +636,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
 
        if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
            (slave & ~0x7f) | (size & 0xff)) {
-               mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
-                             "master_addr:0x%llx slave_id:%d size:%d\n",
-                             slave_addr, master_addr, slave, size);
+               mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
+                        slave_addr, master_addr, slave, size);
                return -EINVAL;
        }
 
@@ -788,8 +798,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
            ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
             (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
              smp->method == IB_MGMT_METHOD_SET))) {
-               mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
-                        "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
+               mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x for attr 0x%x - Rejecting\n",
                         slave, smp->method, smp->mgmt_class,
                         be16_to_cpu(smp->attr_id));
                return -EPERM;
@@ -1253,12 +1262,12 @@ static struct mlx4_cmd_info cmd_info[] = {
        },
        {
                .opcode = MLX4_CMD_UPDATE_QP,
-               .has_inbox = false,
+               .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_CMD_EPERM_wrapper
+               .wrapper = mlx4_UPDATE_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_GET_OP_REQ,
@@ -1409,8 +1418,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
                                      ALIGN(sizeof(struct mlx4_vhcr_cmd),
                                            MLX4_ACCESS_MEM_ALIGN), 1);
                if (ret) {
-                       mlx4_err(dev, "%s:Failed reading vhcr"
-                                "ret: 0x%x\n", __func__, ret);
+                       mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+                                __func__, ret);
                        kfree(vhcr);
                        return ret;
                }
@@ -1461,9 +1470,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 
        /* Apply permission and bound checks if applicable */
        if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
-               mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
-                         "checks for resource_id:%d\n", vhcr->op, slave,
-                         vhcr->in_modifier);
+               mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
+                         vhcr->op, slave, vhcr->in_modifier);
                vhcr_cmd->status = CMD_STAT_BAD_OP;
                goto out_status;
        }
@@ -1502,8 +1510,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
        }
 
        if (err) {
-               mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
-                         " error:%d, status %d\n",
+               mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
                          vhcr->op, slave, vhcr->errno, err);
                vhcr_cmd->status = mlx4_errno_to_status(err);
                goto out_status;
@@ -1537,8 +1544,8 @@ out_status:
                                 __func__);
                else if (vhcr->e_bit &&
                         mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
-                               mlx4_warn(dev, "Failed to generate command completion "
-                                         "eqe for slave %d\n", slave);
+                               mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
+                                         slave);
        }
 
 out:
@@ -1577,8 +1584,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 
        mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
                 slave, port);
-       mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
-                vp_admin->default_qos, vp_admin->link_state);
+       mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
+                vp_admin->default_vlan, vp_admin->default_qos,
+                vp_admin->link_state);
 
        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (!work)
@@ -1591,7 +1599,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
                                                   &admin_vlan_ix);
                        if (err) {
                                kfree(work);
-                               mlx4_warn((&priv->dev),
+                               mlx4_warn(&priv->dev,
                                          "No vlan resources slave %d, port %d\n",
                                          slave, port);
                                return err;
@@ -1600,7 +1608,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
                        admin_vlan_ix = NO_INDX;
                }
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
-               mlx4_dbg((&(priv->dev)),
+               mlx4_dbg(&priv->dev,
                         "alloc vlan %d idx  %d slave %d port %d\n",
                         (int)(vp_admin->default_vlan),
                         admin_vlan_ix, slave, port);
@@ -1661,12 +1669,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
                                                   vp_admin->default_vlan, &(vp_oper->vlan_idx));
                        if (err) {
                                vp_oper->vlan_idx = NO_INDX;
-                               mlx4_warn((&priv->dev),
+                               mlx4_warn(&priv->dev,
                                          "No vlan resources slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
-                       mlx4_dbg((&(priv->dev)), "alloc vlan %d idx  %d slave %d port %d\n",
+                       mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
                                 (int)(vp_oper->state.default_vlan),
                                 vp_oper->vlan_idx, slave, port);
                }
@@ -1677,12 +1685,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
                        if (0 > vp_oper->mac_idx) {
                                err = vp_oper->mac_idx;
                                vp_oper->mac_idx = NO_INDX;
-                               mlx4_warn((&priv->dev),
+                               mlx4_warn(&priv->dev,
                                          "No mac resources slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
-                       mlx4_dbg((&(priv->dev)), "alloc mac %llx idx  %d slave %d port %d\n",
+                       mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
                                 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
                }
        }
@@ -1731,8 +1739,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
        slave_state[slave].comm_toggle ^= 1;
        reply = (u32) slave_state[slave].comm_toggle << 31;
        if (toggle != slave_state[slave].comm_toggle) {
-               mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
-                         "STATE COMPROMISIED ***\n", toggle, slave);
+               mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
+                         toggle, slave);
                goto reset_slave;
        }
        if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1759,8 +1767,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
        /*command from slave in the middle of FLR*/
        if (cmd != MLX4_COMM_CMD_RESET &&
            MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
-               mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
-                         "in the middle of FLR\n", slave, cmd);
+               mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
+                         slave, cmd);
                return;
        }
 
@@ -1798,8 +1806,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 
                mutex_lock(&priv->cmd.slave_cmd_mutex);
                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
-                       mlx4_err(dev, "Failed processing vhcr for slave:%d,"
-                                " resetting slave.\n", slave);
+                       mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
+                                slave);
                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
                        goto reset_slave;
                }
@@ -1816,8 +1824,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                is_going_down = 1;
        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
        if (is_going_down) {
-               mlx4_warn(dev, "Slave is going down aborting command(%d)"
-                         " executing from slave:%d\n",
+               mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
                          cmd, slave);
                return;
        }
@@ -1880,10 +1887,9 @@ void mlx4_master_comm_channel(struct work_struct *work)
                        if (toggle != slt) {
                                if (master->slave_state[slave].comm_toggle
                                    != slt) {
-                                       printk(KERN_INFO "slave %d out of sync."
-                                              " read toggle %d, state toggle %d. "
-                                              "Resynching.\n", slave, slt,
-                                              master->slave_state[slave].comm_toggle);
+                                       pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+                                               slave, slt,
+                                               master->slave_state[slave].comm_toggle);
                                        master->slave_state[slave].comm_toggle =
                                                slt;
                                }
@@ -1896,8 +1902,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
        }
 
        if (reported && reported != served)
-               mlx4_warn(dev, "Got command event with bitmask from %d slaves"
-                         " but %d were served\n",
+               mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
                          reported, served);
 
        if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1953,7 +1958,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                ioremap(pci_resource_start(dev->pdev, 2) +
                        MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
        if (!priv->mfunc.comm) {
-               mlx4_err(dev, "Couldn't map communication vector.\n");
+               mlx4_err(dev, "Couldn't map communication vector\n");
                goto err_vhcr;
        }
 
@@ -2080,7 +2085,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
                priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
                                        MLX4_HCR_BASE, MLX4_HCR_SIZE);
                if (!priv->cmd.hcr) {
-                       mlx4_err(dev, "Couldn't map command register.\n");
+                       mlx4_err(dev, "Couldn't map command register\n");
                        return -ENOMEM;
                }
        }
@@ -2481,11 +2486,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
        ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
        ivf->mac[5] = ((s_info->mac)  & 0xff);
 
-       ivf->vlan       = s_info->default_vlan;
-       ivf->qos        = s_info->default_qos;
-       ivf->tx_rate    = s_info->tx_rate;
-       ivf->spoofchk   = s_info->spoofchk;
-       ivf->linkstate  = s_info->link_state;
+       ivf->vlan               = s_info->default_vlan;
+       ivf->qos                = s_info->default_qos;
+       ivf->max_tx_rate        = s_info->tx_rate;
+       ivf->min_tx_rate        = 0;
+       ivf->spoofchk           = s_info->spoofchk;
+       ivf->linkstate          = s_info->link_state;
 
        return 0;
 }
index 0487121e4a0fe495d4252f01b24d16bdb2fefb06..8542030b89cf5b5d0d0c60eaa9ecc96076d72341 100644 (file)
@@ -293,6 +293,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        atomic_set(&cq->refcount, 1);
        init_completion(&cq->free);
 
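+       /* Record the servicing IRQ so the NAPI pollers can detect affinity
+        * changes and re-arm the CQ on the new CPU.
+        */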
+       cq->irq = priv->eq_table.eq[cq->vector].irq;
+       cq->irq_affinity_change = false;
+
        return 0;
 
 err_radix:
index c2cd8d31bcad5612395783e4d29e5141ac37ab6c..636963db598ae0025f52aa806f323c61261b7d97 100644 (file)
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                                                   &cq->vector)) {
                                        cq->vector = (cq->ring + 1 + priv->port)
                                            % mdev->dev->caps.num_comp_vectors;
-                                       mlx4_warn(mdev, "Failed Assigning an EQ to "
-                                                 "%s ,Falling back to legacy EQ's\n",
+                                       mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQs\n",
                                                  name);
                                }
                        }
index 3e8d33605fe7b17d7cde81092c92f132163abdc2..7ba3df3cb312dfdfe2cfb08074c662743c49f67a 100644 (file)
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
                qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
        } else {
                if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
-                       en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+                       en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
                                cmd->fs.ring_cookie);
                        return -EINVAL;
                }
                qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
                if (!qpn) {
-                       en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+                       en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
                                cmd->fs.ring_cookie);
                        return -EINVAL;
                }
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
        }
        err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
        if (err) {
-               en_err(priv, "Fail to attach network rule at location %d.\n",
+               en_err(priv, "Failed to attach network rule at location %d\n",
                       cmd->fs.location);
                goto out_free_list;
        }
@@ -1121,7 +1121,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int port_up;
+       int port_up = 0;
        int err = 0;
 
        if (channel->other_count || channel->combined_count ||
@@ -1151,7 +1151,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
-       mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+       if (dev->num_tc)
+               mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
 
        en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
        en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
index 0c59d4fe7e3aae56afee09b7e279601192c9e26e..f953c1d7eae6a700a4fb7aacfacbaaa822c141d1 100644 (file)
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
                        MLX4_EN_MAX_TX_RING_P_UP);
        if (params->udp_rss && !(mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
-               mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+               mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
                params->udp_rss = 0;
        }
        for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 
        mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
        if (!mdev->LSO_support)
-               mlx4_warn(mdev, "LSO not supported, please upgrade to later "
-                               "FW version to enable LSO\n");
+               mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
 
        if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
                         MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
        /* Build device profile according to supplied module parameters */
        err = mlx4_en_get_profile(mdev);
        if (err) {
-               mlx4_err(mdev, "Bad module parameters, aborting.\n");
+               mlx4_err(mdev, "Bad module parameters, aborting\n");
                goto err_mr;
        }
 
index 7e4b1720c3d1bec183957beeba7d395ce38c6e34..58209bd0c94c6ced62a5984dd72911669eaa7ef9 100644 (file)
@@ -130,7 +130,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
        case IPPROTO_TCP:
                return MLX4_NET_TRANS_RULE_ID_TCP;
        default:
-               return -EPROTONOSUPPORT;
+               return MLX4_NET_TRANS_RULE_NUM;
        }
 };
 
@@ -177,7 +177,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
        int rc;
        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
-       if (spec_tcp_udp.id < 0) {
+       if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
                en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
                        filter->ip_proto);
                goto ignore;
@@ -770,11 +770,12 @@ static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
                                          priv->dev->dev_addr, priv->prev_mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
-               memcpy(priv->prev_mac, priv->dev->dev_addr,
-                      sizeof(priv->prev_mac));
        } else
                en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
 
+       memcpy(priv->prev_mac, priv->dev->dev_addr,
+              sizeof(priv->prev_mac));
+
        return err;
 }
 
@@ -788,9 +789,8 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;
 
-       memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-
        mutex_lock(&mdev->state_lock);
+       memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
        err = mlx4_en_do_set_mac(priv);
        mutex_unlock(&mdev->state_lock);
 
@@ -1576,7 +1576,7 @@ int mlx4_en_start_port(struct net_device *dev)
                        cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
-                       en_err(priv, "Failed setting cq moderation parameters");
+                       en_err(priv, "Failed setting cq moderation parameters\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto cq_err;
                }
@@ -1615,7 +1615,7 @@ int mlx4_en_start_port(struct net_device *dev)
                }
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
-                       en_err(priv, "Failed setting cq moderation parameters");
+                       en_err(priv, "Failed setting cq moderation parameters\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
@@ -2539,7 +2539,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
-       SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+       dev->ethtool_ops = &mlx4_en_ethtool_ops;
 
        /*
         * Set driver features
@@ -2594,8 +2594,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                                    prof->tx_pause, prof->tx_ppp,
                                    prof->rx_pause, prof->rx_ppp);
        if (err) {
-               en_err(priv, "Failed setting port general configurations "
-                      "for port %d, with error %d\n", priv->port, err);
+               en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+                      priv->port, err);
                goto out;
        }
 
index ba049ae88749dac986a0712d281bbd649152acdd..e8c0d2b832b79f4f46b82cf52bca646e9b13008b 100644 (file)
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
                                                    ring->actual_size,
                                                    GFP_KERNEL)) {
                                if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
-                                       en_err(priv, "Failed to allocate "
-                                                    "enough rx buffers\n");
+                                       en_err(priv, "Failed to allocate enough rx buffers\n");
                                        return -ENOMEM;
                                } else {
                                        new_size = rounddown_pow_of_two(ring->actual_size);
-                                       en_warn(priv, "Only %d buffers allocated "
-                                                     "reducing ring size to %d",
+                                       en_warn(priv, "Only %d buffers allocated, reducing ring size to %d\n",
                                                ring->actual_size, new_size);
                                        goto reduce_rings;
                                }
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                /* Drop packet on bad receive or bad checksum */
                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                                                MLX4_CQE_OPCODE_ERROR)) {
-                       en_err(priv, "CQE completed in error - vendor "
-                                 "syndrom:%d syndrom:%d\n",
-                                 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
-                                 ((struct mlx4_err_cqe *) cqe)->syndrome);
+                       en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
+                              ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
+                              ((struct mlx4_err_cqe *)cqe)->syndrome);
                        goto next;
                }
                if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -898,10 +895,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
        mlx4_en_cq_unlock_napi(cq);
 
        /* If we used up all the quota - we're probably not done yet... */
-       if (done == budget)
+       if (done == budget) {
                INC_PERF_COUNTER(priv->pstats.napi_quota);
-       else {
+               if (unlikely(cq->mcq.irq_affinity_change)) {
+                       cq->mcq.irq_affinity_change = false;
+                       napi_complete(napi);
+                       mlx4_en_arm_cq(priv, cq);
+                       return 0;
+               }
+       } else {
                /* Done for now */
+               cq->mcq.irq_affinity_change = false;
                napi_complete(napi);
                mlx4_en_arm_cq(priv, cq);
        }
@@ -944,8 +948,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
        priv->rx_skb_size = eff_mtu;
        priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
 
-       en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
-                 "num_frags:%d):\n", eff_mtu, priv->num_frags);
+       en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
+              eff_mtu, priv->num_frags);
        for (i = 0; i < priv->num_frags; i++) {
                en_err(priv,
                       "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
index dd1f6d346459808dfe95690ce5fcf0af31e99231..cb964056d71023a40ff5f68b7ceb3a367607494a 100644 (file)
@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
        ring->buf = ring->wqres.buf.direct.buf;
 
-       en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
-              "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
-              ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+       en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
+              ring, ring->buf, ring->size, ring->buf_size,
+              (unsigned long long) ring->wqres.buf.direct.map);
 
        ring->qpn = qpn;
        err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
        err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
        if (err) {
-               en_dbg(DRV, priv, "working without blueflame (%d)", err);
+               en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
                ring->bf.uar = &mdev->priv_uar;
                ring->bf.uar->map = mdev->uar_map;
                ring->bf_enabled = false;
@@ -474,9 +474,15 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
        /* If we used up all the quota - we're probably not done yet... */
        if (done < budget) {
                /* Done for now */
+               cq->mcq.irq_affinity_change = false;
                napi_complete(napi);
                mlx4_en_arm_cq(priv, cq);
                return done;
+       } else if (unlikely(cq->mcq.irq_affinity_change)) {
+               cq->mcq.irq_affinity_change = false;
+               napi_complete(napi);
+               mlx4_en_arm_cq(priv, cq);
+               return 0;
        }
        return budget;
 }
index d501a2b0fb79f18e560fd0cd067aa4c19b83b447..d954ec1eac173752e23e57653ccd4d2cae2de944 100644 (file)
@@ -53,6 +53,11 @@ enum {
        MLX4_EQ_ENTRY_SIZE      = 0x20
 };
 
+struct mlx4_irq_notify {
+       void *arg;
+       struct irq_affinity_notify notify;
+};
+
 #define MLX4_EQ_STATUS_OK         ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW          ( 0 << 24)
@@ -152,14 +157,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
                                if (i != dev->caps.function &&
                                    master->slave_state[i].active)
                                        if (mlx4_GEN_EQE(dev, i, eqe))
-                                               mlx4_warn(dev, "Failed to "
-                                                         " generate event "
-                                                         "for slave %d\n", i);
+                                               mlx4_warn(dev, "Failed to generate event for slave %d\n",
+                                                         i);
                        }
                } else {
                        if (mlx4_GEN_EQE(dev, slave, eqe))
-                               mlx4_warn(dev, "Failed to generate event "
-                                              "for slave %d\n", slave);
+                               mlx4_warn(dev, "Failed to generate event for slave %d\n",
+                                         slave);
                }
                ++slave_eq->cons;
        }
@@ -177,8 +181,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
        s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
        if ((!!(s_eqe->owner & 0x80)) ^
            (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
-               mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
-                         "No free EQE on slave events queue\n", slave);
+               mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
+                         slave);
                spin_unlock_irqrestore(&slave_eq->event_lock, flags);
                return;
        }
@@ -375,9 +379,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
                }
                break;
        default:
-               pr_err("%s: BUG!!! UNKNOWN state: "
-                      "slave:%d, port:%d\n", __func__, slave, port);
-                       goto out;
+               pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
+                      __func__, slave, port);
+               goto out;
        }
        ret = mlx4_get_slave_port_state(dev, slave, port);
 
@@ -425,8 +429,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
        for (i = 0 ; i < dev->num_slaves; i++) {
 
                if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
-                       mlx4_dbg(dev, "mlx4_handle_slave_flr: "
-                                "clean slave: %d\n", i);
+                       mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
+                                i);
 
                        mlx4_delete_all_resources_for_slave(dev, i);
                        /*return the slave to running mode*/
@@ -438,8 +442,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
                        err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
                                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                        if (err)
-                               mlx4_warn(dev, "Failed to notify FW on "
-                                         "FLR done (slave:%d)\n", i);
+                               mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
+                                         i);
                }
        }
 }
@@ -490,9 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                be32_to_cpu(eqe->event.qp.qpn)
                                                & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
-                                       mlx4_dbg(dev, "QP event %02x(%02x) on "
-                                                "EQ %d at index %u: could "
-                                                "not get slave id (%d)\n",
+                                       mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                 eqe->type, eqe->subtype,
                                                 eq->eqn, eq->cons_index, ret);
                                        break;
@@ -520,23 +522,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                & 0xffffff,
                                                &slave);
                                if (ret && ret != -ENOENT) {
-                                       mlx4_warn(dev, "SRQ event %02x(%02x) "
-                                                 "on EQ %d at index %u: could"
-                                                 " not get slave id (%d)\n",
+                                       mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                  eqe->type, eqe->subtype,
                                                  eq->eqn, eq->cons_index, ret);
                                        break;
                                }
-                               mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
-                                         " event: %02x(%02x)\n", __func__,
-                                         slave,
+                               mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+                                         __func__, slave,
                                          be32_to_cpu(eqe->event.srq.srqn),
                                          eqe->type, eqe->subtype);
 
                                if (!ret && slave != dev->caps.function) {
-                                       mlx4_warn(dev, "%s: sending event "
-                                                 "%02x(%02x) to slave:%d\n",
-                                                  __func__, eqe->type,
+                                       mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+                                                 __func__, eqe->type,
                                                  eqe->subtype, slave);
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
@@ -569,8 +567,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                        if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
                                                if (i == mlx4_master_func_num(dev))
                                                        continue;
-                                               mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
-                                                        " to slave: %d, port:%d\n",
+                                               mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
                                                         __func__, i, port);
                                                s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
                                                if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +631,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                        be32_to_cpu(eqe->event.cq_err.cqn)
                                        & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
-                                       mlx4_dbg(dev, "CQ event %02x(%02x) on "
-                                                "EQ %d at index %u: could "
-                                                 "not get slave id (%d)\n",
-                                                 eqe->type, eqe->subtype,
-                                                 eq->eqn, eq->cons_index, ret);
+                                       mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
+                                                eqe->type, eqe->subtype,
+                                                eq->eqn, eq->cons_index, ret);
                                        break;
                                }
 
@@ -667,8 +662,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
                case MLX4_EVENT_TYPE_COMM_CHANNEL:
                        if (!mlx4_is_master(dev)) {
-                               mlx4_warn(dev, "Received comm channel event "
-                                              "for non master device\n");
+                               mlx4_warn(dev, "Received comm channel event for non master device\n");
                                break;
                        }
                        memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +675,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_FLR_EVENT:
                        flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
                        if (!mlx4_is_master(dev)) {
-                               mlx4_warn(dev, "Non-master function received"
-                                              "FLR event\n");
+                               mlx4_warn(dev, "Non-master function received FLR event\n");
                                break;
                        }
 
@@ -711,22 +704,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                        if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
                                if (mlx4_is_master(dev))
                                        for (i = 0; i < dev->num_slaves; i++) {
-                                               mlx4_dbg(dev, "%s: Sending "
-                                                       "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
-                                                       " to slave: %d\n", __func__, i);
+                                               mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
+                                                        __func__, i);
                                                if (i == dev->caps.function)
                                                        continue;
                                                mlx4_slave_event(dev, i, eqe);
                                        }
-                               mlx4_err(dev, "Temperature Threshold was reached! "
-                                       "Threshold: %d celsius degrees; "
-                                       "Current Temperature: %d\n",
-                                       be16_to_cpu(eqe->event.warming.warning_threshold),
-                                       be16_to_cpu(eqe->event.warming.current_temperature));
+                               mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
+                                        be16_to_cpu(eqe->event.warming.warning_threshold),
+                                        be16_to_cpu(eqe->event.warming.current_temperature));
                        } else
-                               mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
-                                         "subtype %02x on EQ %d at index %u. owner=%x, "
-                                         "nent=0x%x, slave=%x, ownership=%s\n",
+                               mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
                                          eqe->type, eqe->subtype, eq->eqn,
                                          eq->cons_index, eqe->owner, eq->nent,
                                          eqe->slave_id,
@@ -743,9 +731,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
-                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
-                                 "index %u. owner=%x, nent=0x%x, slave=%x, "
-                                 "ownership=%s\n",
+                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
                                  eqe->type, eqe->subtype, eq->eqn,
                                  eq->cons_index, eqe->owner, eq->nent,
                                  eqe->slave_id,
@@ -1088,7 +1074,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
-               mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+               mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
                return -ENOMEM;
        }
 
@@ -1102,6 +1088,57 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
        iounmap(priv->clr_base);
 }
 
+static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
+                                    const cpumask_t *mask)
+{
+       struct mlx4_irq_notify *n = container_of(notify,
+                                                struct mlx4_irq_notify,
+                                                notify);
+       struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
+       struct radix_tree_iter iter;
+       void **slot;
+
+       radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
+               struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
+
+               if (cq->irq == notify->irq)
+                       cq->irq_affinity_change = true;
+       }
+}
+
+static void mlx4_release_irq_notifier(struct kref *ref)
+{
+       struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
+                                                notify.kref);
+       kfree(n);
+}
+
+static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
+                                    struct mlx4_dev *dev, int irq)
+{
+       struct mlx4_irq_notify *irq_notifier = NULL;
+       int err = 0;
+
+       irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
+       if (!irq_notifier) {
+               mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
+                         irq);
+               return;
+       }
+
+       irq_notifier->notify.irq = irq;
+       irq_notifier->notify.notify = mlx4_irq_notifier_notify;
+       irq_notifier->notify.release = mlx4_release_irq_notifier;
+       irq_notifier->arg = priv;
+       err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
+       if (err) {
+               kfree(irq_notifier);
+               irq_notifier = NULL;
+               mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
+       }
+}
+
+
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1372,6 +1409,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
                                continue;
                                /*we dont want to break here*/
                        }
+                       mlx4_assign_irq_notifier(priv, dev,
+                                                priv->eq_table.eq[vec].irq);
+
                        eq_set_ci(&priv->eq_table.eq[vec], 1);
                }
        }
@@ -1398,6 +1438,9 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
                  Belonging to a legacy EQ*/
                mutex_lock(&priv->msix_ctl.pool_lock);
                if (priv->msix_ctl.pool_bm & 1ULL << i) {
+                       irq_set_affinity_notifier(
+                               priv->eq_table.eq[vec].irq,
+                               NULL);
                        free_irq(priv->eq_table.eq[vec].irq,
                                 &priv->eq_table.eq[vec]);
                        priv->msix_ctl.pool_bm &= ~(1ULL << i);
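
The eq.c hunks above register an IRQ affinity notifier for each completion vector and unregister it (by passing NULL) before free_irq() in mlx4_release_eq(). A rough, self-contained sketch of that genirq pattern follows; the names my_ctx, my_affinity_notify and my_register_affinity_notifier are illustrative only and not part of mlx4, while struct irq_affinity_notify and irq_set_affinity_notifier() are the <linux/interrupt.h> API actually used in the diff.

/* Sketch only: a driver-private context embedding the notifier so the
 * callbacks can recover it with container_of(), as the mlx4 code does.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct my_ctx {
	struct irq_affinity_notify notify;
	bool affinity_changed;
};

/* Runs from a workqueue whenever the IRQ's CPU affinity mask is rewritten. */
static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct my_ctx *ctx = container_of(notify, struct my_ctx, notify);

	ctx->affinity_changed = true;
}

/* Invoked via kref_put() once the notifier has been unregistered. */
static void my_affinity_release(struct kref *ref)
{
	struct my_ctx *ctx = container_of(ref, struct my_ctx, notify.kref);

	kfree(ctx);
}

static int my_register_affinity_notifier(unsigned int irq)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	int err;

	if (!ctx)
		return -ENOMEM;

	ctx->notify.irq = irq;
	ctx->notify.notify = my_affinity_notify;
	ctx->notify.release = my_affinity_release;

	err = irq_set_affinity_notifier(irq, &ctx->notify);
	if (err)
		kfree(ctx);
	return err;
}

static void my_unregister_affinity_notifier(unsigned int irq)
{
	/* Passing NULL detaches the notifier; the release callback frees ctx. */
	irq_set_affinity_notifier(irq, NULL);
}

As in mlx4_assign_irq_notifier() above, registration failure is only worth a warning here: missing the affinity notification is not fatal to interrupt handling.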
index d16a4d11890342167a2f2c8605e3b5e4e9d25198..c52e048913177371f6db13f4e3f5e98100585b52 100644 (file)
@@ -428,8 +428,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
        } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
                MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
                if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
-                       mlx4_err(dev, "phy_wqe_gid is "
-                                "enforced on this ib port\n");
+                       mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
                        err = -EPROTONOSUPPORT;
                        goto out;
                }
@@ -1054,10 +1053,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
                 */
                lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
                if (lg < MLX4_ICM_PAGE_SHIFT) {
-                       mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
-                                  MLX4_ICM_PAGE_SIZE,
-                                  (unsigned long long) mlx4_icm_addr(&iter),
-                                  mlx4_icm_size(&iter));
+                       mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
+                                 MLX4_ICM_PAGE_SIZE,
+                                 (unsigned long long) mlx4_icm_addr(&iter),
+                                 mlx4_icm_size(&iter));
                        err = -EINVAL;
                        goto out;
                }
@@ -1093,14 +1092,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 
        switch (op) {
        case MLX4_CMD_MAP_FA:
-               mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+               mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
                break;
        case MLX4_CMD_MAP_ICM_AUX:
-               mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+               mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
                break;
        case MLX4_CMD_MAP_ICM:
-               mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
-                         tc, ts, (unsigned long long) virt - (ts << 10));
+               mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
+                        tc, ts, (unsigned long long) virt - (ts << 10));
                break;
        }
 
@@ -1186,14 +1185,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
        MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
        if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
            cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
-               mlx4_err(dev, "Installed FW has unsupported "
-                        "command interface revision %d.\n",
+               mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
                         cmd_if_rev);
                mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
                         (int) (dev->caps.fw_ver >> 32),
                         (int) (dev->caps.fw_ver >> 16) & 0xffff,
                         (int) dev->caps.fw_ver & 0xffff);
-               mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+               mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
                         MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
                err = -ENODEV;
                goto out;
index cef267e24f9c9c680613ec4ed2817c040b21c964..38e9a4c9099c25f26277855176b76e77aabed8d8 100644 (file)
@@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
 MODULE_PARM_DESC(enable_64b_cqe_eqe,
                 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
 
-#define HCA_GLOBAL_CAP_MASK            0
-
 #define PF_CONTEXT_BEHAVIOUR_MASK      MLX4_FUNC_CAP_64B_EQE_CQE
 
 static char mlx4_version[] =
@@ -134,8 +132,7 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
 
 static bool use_prio;
 module_param_named(use_prio, use_prio, bool, 0444);
-MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
-                 "(0/1, default 0)");
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
 
 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
@@ -163,8 +160,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
        for (i = 0; i < dev->caps.num_ports - 1; i++) {
                if (port_type[i] != port_type[i + 1]) {
                        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
-                               mlx4_err(dev, "Only same port types supported "
-                                        "on this HCA, aborting.\n");
+                               mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
                                return -EINVAL;
                        }
                }
@@ -172,8 +168,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
 
        for (i = 0; i < dev->caps.num_ports; i++) {
                if (!(port_type[i] & dev->caps.supported_type[i+1])) {
-                       mlx4_err(dev, "Requested port type for port %d is not "
-                                     "supported on this HCA\n", i + 1);
+                       mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
+                                i + 1);
                        return -EINVAL;
                }
        }
@@ -195,26 +191,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
        err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
        if (err) {
-               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
                return err;
        }
 
        if (dev_cap->min_page_sz > PAGE_SIZE) {
-               mlx4_err(dev, "HCA minimum page size of %d bigger than "
-                        "kernel PAGE_SIZE of %ld, aborting.\n",
+               mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
                         dev_cap->min_page_sz, PAGE_SIZE);
                return -ENODEV;
        }
        if (dev_cap->num_ports > MLX4_MAX_PORTS) {
-               mlx4_err(dev, "HCA has %d ports, but we only support %d, "
-                        "aborting.\n",
+               mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
                         dev_cap->num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }
 
        if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
-               mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
-                        "PCI resource 2 size of 0x%llx, aborting.\n",
+               mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
                         dev_cap->uar_size,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
                return -ENODEV;
@@ -296,7 +289,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
        dev->caps.log_num_macs  = log_num_mac;
        dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
-       dev->caps.log_num_prios = use_prio ? 3 : 0;
 
        for (i = 1; i <= dev->caps.num_ports; ++i) {
                dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
@@ -347,14 +339,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
                if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
                        dev->caps.log_num_macs = dev_cap->log_max_macs[i];
-                       mlx4_warn(dev, "Requested number of MACs is too much "
-                                 "for port %d, reducing to %d.\n",
+                       mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
                                  i, 1 << dev->caps.log_num_macs);
                }
                if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
                        dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
-                       mlx4_warn(dev, "Requested number of VLANs is too much "
-                                 "for port %d, reducing to %d.\n",
+                       mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
                                  i, 1 << dev->caps.log_num_vlans);
                }
        }
@@ -366,7 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
                (1 << dev->caps.log_num_macs) *
                (1 << dev->caps.log_num_vlans) *
-               (1 << dev->caps.log_num_prios) *
                dev->caps.num_ports;
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
 
@@ -584,13 +573,14 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        memset(&hca_param, 0, sizeof(hca_param));
        err = mlx4_QUERY_HCA(dev, &hca_param);
        if (err) {
-               mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+               mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
                return err;
        }
 
-       /*fail if the hca has an unknown capability */
-       if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
-           HCA_GLOBAL_CAP_MASK) {
+       /* fail if the hca has an unknown global capability
+        * at this time global_caps should be always zeroed
+        */
+       if (hca_param.global_caps) {
                mlx4_err(dev, "Unknown hca global capabilities\n");
                return -ENOSYS;
        }
@@ -603,19 +593,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
        err = mlx4_dev_cap(dev, &dev_cap);
        if (err) {
-               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
                return err;
        }
 
        err = mlx4_QUERY_FW(dev);
        if (err)
-               mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+               mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
 
        page_size = ~dev->caps.page_size_cap + 1;
        mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
        if (page_size > PAGE_SIZE) {
-               mlx4_err(dev, "HCA minimum page size of %d bigger than "
-                        "kernel PAGE_SIZE of %ld, aborting.\n",
+               mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
                         page_size, PAGE_SIZE);
                return -ENODEV;
        }
@@ -633,8 +622,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        memset(&func_cap, 0, sizeof(func_cap));
        err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
        if (err) {
-               mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
-                         err);
+               mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
+                        err);
                return err;
        }
 
@@ -661,8 +650,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        dev->caps.num_amgms             = 0;
 
        if (dev->caps.num_ports > MLX4_MAX_PORTS) {
-               mlx4_err(dev, "HCA has %d ports, but we only support %d, "
-                        "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+               mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
+                        dev->caps.num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }
 
@@ -680,8 +669,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        for (i = 1; i <= dev->caps.num_ports; ++i) {
                err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
                if (err) {
-                       mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
-                                " port %d, aborting (%d).\n", i, err);
+                       mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
+                                i, err);
                        goto err_mem;
                }
                dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
@@ -699,8 +688,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        if (dev->caps.uar_page_size * (dev->caps.num_uars -
                                       dev->caps.reserved_uars) >
                                       pci_resource_len(dev->pdev, 2)) {
-               mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
-                        "PCI resource 2 size of 0x%llx, aborting.\n",
+               mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
                         dev->caps.uar_page_size * dev->caps.num_uars,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
                goto err_mem;
@@ -722,7 +710,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        }
 
        dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
-       mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+       mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
 
        slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
 
@@ -754,10 +742,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
                        has_eth_port = true;
        }
 
-       if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
-               request_module_nowait(IB_DRV_NAME);
        if (has_eth_port)
                request_module_nowait(EN_DRV_NAME);
+       if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+               request_module_nowait(IB_DRV_NAME);
 }
 
 /*
@@ -784,8 +772,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
                        dev->caps.port_type[port] = port_types[port - 1];
                        err = mlx4_SET_PORT(dev, port, -1);
                        if (err) {
-                               mlx4_err(dev, "Failed to set port %d, "
-                                             "aborting\n", port);
+                               mlx4_err(dev, "Failed to set port %d, aborting\n",
+                                        port);
                                goto out;
                        }
                }
@@ -868,9 +856,7 @@ static ssize_t set_port_type(struct device *dev,
                }
        }
        if (err) {
-               mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
-                              "Set only 'eth' or 'ib' for both ports "
-                              "(should be the same)\n");
+               mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
                goto out;
        }
 
@@ -975,8 +961,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
                mlx4_CLOSE_PORT(mdev, port);
                err = mlx4_SET_PORT(mdev, port, -1);
                if (err) {
-                       mlx4_err(mdev, "Failed to set port %d, "
-                                     "aborting\n", port);
+                       mlx4_err(mdev, "Failed to set port %d, aborting\n",
+                                port);
                        goto err_set_port;
                }
        }
@@ -995,19 +981,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
        priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
                                         GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.fw_icm) {
-               mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+               mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
                return -ENOMEM;
        }
 
        err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
        if (err) {
-               mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+               mlx4_err(dev, "MAP_FA command failed, aborting\n");
                goto err_free;
        }
 
        err = mlx4_RUN_FW(dev);
        if (err) {
-               mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+               mlx4_err(dev, "RUN_FW command failed, aborting\n");
                goto err_unmap_fa;
        }
 
@@ -1091,30 +1077,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 
        err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
        if (err) {
-               mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+               mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
                return err;
        }
 
-       mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+       mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
                 (unsigned long long) icm_size >> 10,
                 (unsigned long long) aux_pages << 2);
 
        priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
                                          GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.aux_icm) {
-               mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+               mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
                return -ENOMEM;
        }
 
        err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
        if (err) {
-               mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+               mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
                goto err_free_aux;
        }
 
        err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
        if (err) {
-               mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
                goto err_unmap_aux;
        }
 
@@ -1125,7 +1111,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
                                  num_eqs, num_eqs, 0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
                goto err_unmap_cmpt;
        }
 
@@ -1146,7 +1132,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_mtts,
                                  dev->caps.reserved_mtts, 1, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
                goto err_unmap_eq;
        }
 
@@ -1156,7 +1142,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_mpts,
                                  dev->caps.reserved_mrws, 1, 1);
        if (err) {
-               mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
                goto err_unmap_mtt;
        }
 
@@ -1167,7 +1153,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map QP context memory, aborting\n");
                goto err_unmap_dmpt;
        }
 
@@ -1178,7 +1164,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
                goto err_unmap_qp;
        }
 
@@ -1189,7 +1175,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
                goto err_unmap_auxc;
        }
 
@@ -1210,7 +1196,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_cqs,
                                  dev->caps.reserved_cqs, 0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
                goto err_unmap_rdmarc;
        }
 
@@ -1220,7 +1206,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_srqs,
                                  dev->caps.reserved_srqs, 0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
                goto err_unmap_cq;
        }
 
@@ -1238,7 +1224,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
                goto err_unmap_srq;
        }
 
@@ -1315,7 +1301,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
 
        mutex_lock(&priv->cmd.slave_cmd_mutex);
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
-               mlx4_warn(dev, "Failed to close slave function.\n");
+               mlx4_warn(dev, "Failed to close slave function\n");
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
 }
 
@@ -1413,7 +1399,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
        u32 cmd_channel_ver;
 
        if (atomic_read(&pf_loading)) {
-               mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+               mlx4_warn(dev, "PF is not ready - Deferring probe\n");
                return -EPROBE_DEFER;
        }
 
@@ -1426,8 +1412,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
         * NUM_OF_RESET_RETRIES times before leaving.*/
        if (ret_from_reset) {
                if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
-                       mlx4_warn(dev, "slave is currently in the "
-                                 "middle of FLR. Deferring probe.\n");
+                       mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
                        return -EPROBE_DEFER;
                } else
@@ -1441,8 +1426,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
 
        if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
                MLX4_COMM_GET_IF_REV(slave_read)) {
-               mlx4_err(dev, "slave driver version is not supported"
-                        " by the master\n");
+               mlx4_err(dev, "slave driver version is not supported by the master\n");
                goto err;
        }
 
@@ -1520,8 +1504,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
 
                        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
                            dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
-                               mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
-                                         "set to use B0 steering. Falling back to A0 steering mode.\n");
+                               mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
                }
                dev->oper_log_mgm_entry_size =
                        mlx4_log_num_mgm_entry_size > 0 ?
@@ -1529,8 +1512,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
                        MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
                dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
        }
-       mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
-                "modparam log_num_mgm_entry_size = %d\n",
+       mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
                 mlx4_steering_mode_str(dev->caps.steering_mode),
                 dev->oper_log_mgm_entry_size,
                 mlx4_log_num_mgm_entry_size);
@@ -1564,15 +1546,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                err = mlx4_QUERY_FW(dev);
                if (err) {
                        if (err == -EACCES)
-                               mlx4_info(dev, "non-primary physical function, skipping.\n");
+                               mlx4_info(dev, "non-primary physical function, skipping\n");
                        else
-                               mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+                               mlx4_err(dev, "QUERY_FW command failed, aborting\n");
                        return err;
                }
 
                err = mlx4_load_fw(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to start FW, aborting.\n");
+                       mlx4_err(dev, "Failed to start FW, aborting\n");
                        return err;
                }
 
@@ -1584,7 +1566,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
                err = mlx4_dev_cap(dev, &dev_cap);
                if (err) {
-                       mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+                       mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
                        goto err_stop_fw;
                }
 
@@ -1625,7 +1607,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
                err = mlx4_INIT_HCA(dev, &init_hca);
                if (err) {
-                       mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+                       mlx4_err(dev, "INIT_HCA command failed, aborting\n");
                        goto err_free_icm;
                }
                /*
@@ -1636,7 +1618,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                        memset(&init_hca, 0, sizeof(init_hca));
                        err = mlx4_QUERY_HCA(dev, &init_hca);
                        if (err) {
-                               mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n");
+                               mlx4_err(dev, "QUERY_HCA command failed, disabling timestamp\n");
                                dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
                        } else {
                                dev->caps.hca_core_clock =
@@ -1649,14 +1631,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                        if (!dev->caps.hca_core_clock) {
                                dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
                                mlx4_err(dev,
-                                        "HCA frequency is 0. Timestamping is not supported.");
+                                        "HCA frequency is 0 - timestamping is not supported\n");
                        } else if (map_internal_clock(dev)) {
                                /*
                                 * Map internal clock,
                                 * in case of failure disable timestamping
                                 */
                                dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
-                               mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
+                               mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
                        }
                }
        } else {
@@ -1683,7 +1665,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
        err = mlx4_QUERY_ADAPTER(dev, &adapter);
        if (err) {
-               mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+               mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
                goto unmap_bf;
        }
 
@@ -1793,79 +1775,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 
        err = mlx4_init_uar_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "user access region table, aborting.\n");
-               return err;
+               mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
+               return err;
        }
 
        err = mlx4_uar_alloc(dev, &priv->driver_uar);
        if (err) {
-               mlx4_err(dev, "Failed to allocate driver access region, "
-                        "aborting.\n");
+               mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
                goto err_uar_table_free;
        }
 
        priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!priv->kar) {
-               mlx4_err(dev, "Couldn't map kernel access region, "
-                        "aborting.\n");
+               mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
                err = -ENOMEM;
                goto err_uar_free;
        }
 
        err = mlx4_init_pd_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "protection domain table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
                goto err_kar_unmap;
        }
 
        err = mlx4_init_xrcd_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "reliable connection domain table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
                goto err_pd_table_free;
        }
 
        err = mlx4_init_mr_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "memory region table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
                goto err_xrcd_table_free;
        }
 
        if (!mlx4_is_slave(dev)) {
                err = mlx4_init_mcg_table(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+                       mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
                        goto err_mr_table_free;
                }
        }
 
        err = mlx4_init_eq_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "event queue table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
                goto err_mcg_table_free;
        }
 
        err = mlx4_cmd_use_events(dev);
        if (err) {
-               mlx4_err(dev, "Failed to switch to event-driven "
-                        "firmware commands, aborting.\n");
+               mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
                goto err_eq_table_free;
        }
 
        err = mlx4_NOP(dev);
        if (err) {
                if (dev->flags & MLX4_FLAG_MSI_X) {
-                       mlx4_warn(dev, "NOP command failed to generate MSI-X "
-                                 "interrupt IRQ %d).\n",
+                       mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
                                  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
-                       mlx4_warn(dev, "Trying again without MSI-X.\n");
+                       mlx4_warn(dev, "Trying again without MSI-X\n");
                } else {
-                       mlx4_err(dev, "NOP command failed to generate interrupt "
-                                "(IRQ %d), aborting.\n",
+                       mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
                        mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
                }
@@ -1877,28 +1849,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 
        err = mlx4_init_cq_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "completion queue table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
                goto err_cmd_poll;
        }
 
        err = mlx4_init_srq_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "shared receive queue table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
                goto err_cq_table_free;
        }
 
        err = mlx4_init_qp_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "queue pair table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
                goto err_srq_table_free;
        }
 
        err = mlx4_init_counters_table(dev);
        if (err && err != -ENOENT) {
-               mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize counters table, aborting\n");
                goto err_qp_table_free;
        }
 
@@ -1908,9 +1877,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                        err = mlx4_get_port_ib_caps(dev, port,
                                                    &ib_port_default_caps);
                        if (err)
-                               mlx4_warn(dev, "failed to get port %d default "
-                                         "ib capabilities (%d). Continuing "
-                                         "with caps = 0\n", port, err);
+                               mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
+                                         port, err);
                        dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
 
                        /* initialize per-slave default ib port capabilities */
@@ -1920,7 +1888,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                                        if (i == mlx4_master_func_num(dev))
                                                continue;
                                        priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
-                                                       ib_port_default_caps;
+                                               ib_port_default_caps;
                                }
                        }
 
@@ -1933,7 +1901,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                                            dev->caps.pkey_table_len[port] : -1);
                        if (err) {
                                mlx4_err(dev, "Failed to set port %d, aborting\n",
-                                       port);
+                                        port);
                                goto err_counters_table_free;
                        }
                }
@@ -2009,7 +1977,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                        kfree(entries);
                        goto no_msi;
                } else if (nreq < MSIX_LEGACY_SZ +
-                                 dev->caps.num_ports * MIN_MSIX_P_PORT) {
+                          dev->caps.num_ports * MIN_MSIX_P_PORT) {
                        /*Working in legacy mode , all EQ's shared*/
                        dev->caps.comp_pool           = 0;
                        dev->caps.num_comp_vectors = nreq - 1;
@@ -2209,8 +2177,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 
        err = pci_enable_device(pdev);
        if (err) {
-               dev_err(&pdev->dev, "Cannot enable PCI device, "
-                       "aborting.\n");
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }
 
@@ -2257,14 +2224,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
         */
        if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
            !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-               dev_err(&pdev->dev, "Missing DCS, aborting."
-                       "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+               dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
                        pci_dev_data, pci_resource_flags(pdev, 0));
                err = -ENODEV;
                goto err_disable_pdev;
        }
        if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-               dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+               dev_err(&pdev->dev, "Missing UAR, aborting\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }
@@ -2279,21 +2245,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
-               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
-                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        goto err_release_regions;
                }
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
-               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
-                        "consistent PCI DMA mask.\n");
+               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
-                       dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
-                               "aborting.\n");
+                       dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
                        goto err_release_regions;
                }
        }
@@ -2324,7 +2288,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                if (total_vfs) {
                        unsigned vfs_offset = 0;
                        for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
-                            vfs_offset + nvfs[i] < extended_func_num(pdev);
+                                    vfs_offset + nvfs[i] < extended_func_num(pdev);
                             vfs_offset += nvfs[i], i++)
                                ;
                        if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2350,8 +2314,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                        if (err < 0)
                                goto err_free_dev;
                        else {
-                               mlx4_warn(dev, "Multiple PFs not yet supported."
-                                         " Skipping PF.\n");
+                               mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
                                err = -EINVAL;
                                goto err_free_dev;
                        }
@@ -2361,8 +2324,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                        mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
                                  total_vfs);
                        dev->dev_vfs = kzalloc(
-                                       total_vfs * sizeof(*dev->dev_vfs),
-                                       GFP_KERNEL);
+                               total_vfs * sizeof(*dev->dev_vfs),
+                               GFP_KERNEL);
                        if (NULL == dev->dev_vfs) {
                                mlx4_err(dev, "Failed to allocate memory for VFs\n");
                                err = 0;
@@ -2370,14 +2333,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                                atomic_inc(&pf_loading);
                                err = pci_enable_sriov(pdev, total_vfs);
                                if (err) {
-                                       mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+                                       mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
                                                 err);
                                        atomic_dec(&pf_loading);
                                        err = 0;
                                } else {
                                        mlx4_warn(dev, "Running in master mode\n");
                                        dev->flags |= MLX4_FLAG_SRIOV |
-                                                     MLX4_FLAG_MASTER;
+                                               MLX4_FLAG_MASTER;
                                        dev->num_vfs = total_vfs;
                                        sriov_initialized = 1;
                                }
@@ -2394,7 +2357,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                 */
                err = mlx4_reset(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+                       mlx4_err(dev, "Failed to reset HCA, aborting\n");
                        goto err_rel_own;
                }
        }
@@ -2402,7 +2365,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 slave_start:
        err = mlx4_cmd_init(dev);
        if (err) {
-               mlx4_err(dev, "Failed to init command interface, aborting.\n");
+               mlx4_err(dev, "Failed to init command interface, aborting\n");
                goto err_sriov;
        }
 
@@ -2416,8 +2379,7 @@ slave_start:
                        dev->num_slaves = 0;
                        err = mlx4_multi_func_init(dev);
                        if (err) {
-                               mlx4_err(dev, "Failed to init slave mfunc"
-                                        " interface, aborting.\n");
+                               mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
                                goto err_cmd;
                        }
                }
@@ -2440,7 +2402,8 @@ slave_start:
         * No return code for this call, just warn the user in case of PCI
         * express device capabilities are under-satisfied by the bus.
         */
-       mlx4_check_pcie_caps(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_check_pcie_caps(dev);
 
        /* In master functions, the communication channel must be initialized
         * after obtaining its address from fw */
@@ -2448,8 +2411,7 @@ slave_start:
                unsigned sum = 0;
                err = mlx4_multi_func_init(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to init master mfunc"
-                                "interface, aborting.\n");
+                       mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
                        goto err_close;
                }
                if (sriov_initialized) {
@@ -2460,10 +2422,7 @@ slave_start:
                        if (ib_ports &&
                            (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
                                mlx4_err(dev,
-                                        "Invalid syntax of num_vfs/probe_vfs "
-                                        "with IB port. Single port VFs syntax"
-                                        " is only supported when all ports "
-                                        "are configured as ethernet\n");
+                                        "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
                                goto err_close;
                        }
                        for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
@@ -2489,8 +2448,7 @@ slave_start:
        if ((mlx4_is_mfunc(dev)) &&
            !(dev->flags & MLX4_FLAG_MSI_X)) {
                err = -ENOSYS;
-               mlx4_err(dev, "INTx is not supported in multi-function mode."
-                        " aborting.\n");
+               mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
                goto err_free_eq;
        }
 
@@ -2635,7 +2593,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
        /* in SRIOV it is not allowed to unload the pf's
         * driver while there are alive vf's */
        if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
-               printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+               pr_warn("Removing PF when there are assigned VF's !!!\n");
        mlx4_stop_sense(dev);
        mlx4_unregister_device(dev);
 
@@ -2806,33 +2764,36 @@ static struct pci_driver mlx4_driver = {
 static int __init mlx4_verify_params(void)
 {
        if ((log_num_mac < 0) || (log_num_mac > 7)) {
-               pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
+               pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
                return -1;
        }
 
        if (log_num_vlan != 0)
-               pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
-                          MLX4_LOG_NUM_VLANS);
+               pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
+                       MLX4_LOG_NUM_VLANS);
+
+       if (use_prio != 0)
+               pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
 
        if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
-               pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+               pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
+                       log_mtts_per_seg);
                return -1;
        }
 
        /* Check if module param for ports type has legal combination */
        if (port_type_array[0] == false && port_type_array[1] == true) {
-               printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+               pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
                port_type_array[0] = true;
        }
 
        if (mlx4_log_num_mgm_entry_size != -1 &&
            (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
             mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
-               pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
-                          "in legal range (-1 or %d..%d)\n",
-                          mlx4_log_num_mgm_entry_size,
-                          MLX4_MIN_MGM_LOG_ENTRY_SIZE,
-                          MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+               pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
+                       mlx4_log_num_mgm_entry_size,
+                       MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+                       MLX4_MAX_MGM_LOG_ENTRY_SIZE);
                return -1;
        }
 
index 80ccb4edf825f8888c6487626f2f380c7b27479b..4c36def8e10f9b518a1ba8a3f05340eb63c4dc0a 100644 (file)
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
 
                if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
                        if (*index != hash) {
-                               mlx4_err(dev, "Found zero MGID in AMGM.\n");
+                               mlx4_err(dev, "Found zero MGID in AMGM\n");
                                err = -EINVAL;
                        }
                        return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
        mlx4_err(dev, "%s", buf);
 
        if (len >= BUF_SIZE)
-               mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+               mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
 }
 
 int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -897,7 +897,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
                ret = parse_trans_rule(dev, cur, mailbox->buf + size);
                if (ret < 0) {
                        mlx4_free_cmd_mailbox(dev, mailbox);
-                       return -EINVAL;
+                       return ret;
                }
                size += ret;
        }
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
        ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
        if (ret == -ENOMEM)
                mlx4_err_rule(dev,
-                             "mcg table is full. Fail to register network rule.\n",
+                             "mcg table is full. Fail to register network rule\n",
                              rule);
        else if (ret)
-               mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+               mlx4_err_rule(dev, "Fail to register network rule\n", rule);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        if (members_count == dev->caps.num_qp_per_mgm) {
-               mlx4_err(dev, "MGM at index %x is full.\n", index);
+               mlx4_err(dev, "MGM at index %x is full\n", index);
                err = -ENOMEM;
                goto out;
        }
@@ -1042,7 +1042,7 @@ out:
        }
        if (err && link && index != -1) {
                if (index < dev->caps.num_mgms)
-                       mlx4_warn(dev, "Got AMGM index %d < %d",
+                       mlx4_warn(dev, "Got AMGM index %d < %d\n",
                                  index, dev->caps.num_mgms);
                else
                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
                if (amgm_index) {
                        if (amgm_index < dev->caps.num_mgms)
-                               mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+                               mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
                                          index, amgm_index, dev->caps.num_mgms);
                        else
                                mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                        goto out;
 
                if (index < dev->caps.num_mgms)
-                       mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+                       mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
                                  prev, index, dev->caps.num_mgms);
                else
                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
index f9c46510196341a6089b0a23d7b53455dad69ae5..9dd1b30ea757f590cc0ec4c757b0d1f5ff9d86bb 100644 (file)
@@ -216,18 +216,19 @@ extern int mlx4_debug_level;
 #define mlx4_debug_level       (0)
 #endif /* CONFIG_MLX4_DEBUG */
 
-#define mlx4_dbg(mdev, format, arg...)                                 \
+#define mlx4_dbg(mdev, format, ...)                                    \
 do {                                                                   \
        if (mlx4_debug_level)                                           \
-               dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+               dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format,      \
+                          ##__VA_ARGS__);                              \
 } while (0)
 
-#define mlx4_err(mdev, format, arg...) \
-       dev_err(&mdev->pdev->dev, format, ##arg)
-#define mlx4_info(mdev, format, arg...) \
-       dev_info(&mdev->pdev->dev, format, ##arg)
-#define mlx4_warn(mdev, format, arg...) \
-       dev_warn(&mdev->pdev->dev, format, ##arg)
+#define mlx4_err(mdev, format, ...)                                    \
+       dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...)                                   \
+       dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...)                                   \
+       dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
 
 extern int mlx4_log_num_mgm_entry_size;
 extern int log_mtts_per_seg;
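
The macro rework above moves from GNU named varargs ("arg...") to C99 "..."/__VA_ARGS__ and
parenthesizes the mdev parameter. A standalone illustration (not driver code) of why the ## is
needed: it swallows the trailing comma when the macro is invoked with no variadic arguments, a
GNU/clang extension the kernel relies on.

#include <stdio.h>

/* Old GNU-style form. */
#define old_dbg(dev, fmt, arg...)	printf("%s: " fmt, (dev), ##arg)
/* New C99 form; ##__VA_ARGS__ still drops the comma for zero extra args. */
#define new_dbg(dev, fmt, ...)		printf("%s: " fmt, (dev), ##__VA_ARGS__)

int main(void)
{
	new_dbg("mlx4_core", "device up\n");		/* no extra args: still compiles */
	new_dbg("mlx4_core", "port %d up\n", 1);
	return 0;
}
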
@@ -1195,6 +1196,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd);
 
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+
 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
index 04d9b6fe3e8000fdb14714b09770b11aa46079e8..b5db1bf361dc6adac67dbade22f04ab03601956c 100644 (file)
@@ -830,26 +830,26 @@ __printf(3, 4)
 int en_print(const char *level, const struct mlx4_en_priv *priv,
             const char *format, ...);
 
-#define en_dbg(mlevel, priv, format, arg...)                   \
-do {                                                           \
-       if (NETIF_MSG_##mlevel & priv->msg_enable)              \
-               en_print(KERN_DEBUG, priv, format, ##arg);      \
+#define en_dbg(mlevel, priv, format, ...)                              \
+do {                                                                   \
+       if (NETIF_MSG_##mlevel & (priv)->msg_enable)                    \
+               en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__);      \
 } while (0)
-#define en_warn(priv, format, arg...)                  \
-       en_print(KERN_WARNING, priv, format, ##arg)
-#define en_err(priv, format, arg...)                   \
-       en_print(KERN_ERR, priv, format, ##arg)
-#define en_info(priv, format, arg...)                  \
-       en_print(KERN_INFO, priv, format, ## arg)
-
-#define mlx4_err(mdev, format, arg...)                 \
-       pr_err("%s %s: " format, DRV_NAME,              \
-              dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_info(mdev, format, arg...)                        \
-       pr_info("%s %s: " format, DRV_NAME,             \
-               dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_warn(mdev, format, arg...)                        \
-       pr_warning("%s %s: " format, DRV_NAME,          \
-                  dev_name(&mdev->pdev->dev), ##arg)
+#define en_warn(priv, format, ...)                                     \
+       en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
+#define en_err(priv, format, ...)                                      \
+       en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
+#define en_info(priv, format, ...)                                     \
+       en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
+
+#define mlx4_err(mdev, format, ...)                                    \
+       pr_err(DRV_NAME " %s: " format,                                 \
+              dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...)                                   \
+       pr_info(DRV_NAME " %s: " format,                                \
+               dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...)                                   \
+       pr_warn(DRV_NAME " %s: " format,                                \
+               dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
 
 #endif
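
The mlx4_en variants additionally fold DRV_NAME into the format via adjacent string-literal
concatenation (resolved at compile time, one fewer runtime argument) and replace the deprecated
pr_warning() with pr_warn(). A toy example of the concatenation, not the driver's code:

#include <stdio.h>

#define DRV_NAME "mlx4_en"

/* DRV_NAME " %s: " fmt is joined into a single literal by the compiler. */
#define drv_warn(dev, fmt, ...) \
	printf(DRV_NAME " %s: " fmt, (dev), ##__VA_ARGS__)

int main(void)
{
	drv_warn("0000:04:00.0", "link down on port %d\n", 2);
	return 0;
}
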
index 24835853b7533ec7bf9f73c05e8a7cd713414956..3e04ea13d85dde4e8cd354e78f9099580be32109 100644 (file)
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_WRAPPED);
                if (err)
-                       mlx4_warn(dev, "Failed to free mtt range at:"
-                                 "%d order:%d\n", offset, order);
+                       mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
+                                 offset, order);
                return;
        }
         __mlx4_free_mtt_range(dev, offset, order);
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
                                     key_to_hw_index(mr->key) &
                                     (dev->caps.num_mpts - 1));
                if (err) {
-                       mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
-                       mlx4_warn(dev, "MR has MWs bound to it.\n");
+                       mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
+                                 err);
                        return err;
                }
 
@@ -773,7 +773,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
                        mlx4_alloc_mtt_range(dev,
                                             fls(dev->caps.reserved_mtts - 1));
                if (priv->reserved_mtts < 0) {
-                       mlx4_warn(dev, "MTT table of order %u is too small.\n",
+                       mlx4_warn(dev, "MTT table of order %u is too small\n",
                                  mr_table->mtt_buddy.max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
@@ -954,8 +954,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
-               printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
-                      " failed (%d)\n", err);
+               pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
                return;
        }
 
@@ -964,8 +963,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                             (dev->caps.num_mpts - 1));
        mlx4_free_cmd_mailbox(dev, mailbox);
        if (err) {
-               printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
-                      err);
+               pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
                return;
        }
        fmr->mr.enabled = MLX4_MPT_EN_SW;
index cfcad26ed40f60b0e5b992195339d8c12c0e68d7..376f2f1d445ea3828ba3dee3f2eb214182d3a2ca 100644 (file)
@@ -244,8 +244,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
        if (validate_index(dev, table, index))
                goto out;
        if (--table->refs[index]) {
-               mlx4_dbg(dev, "Have more references for index %d,"
-                        "no need to modify mac table\n", index);
+               mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
+                        index);
                goto out;
        }
 
@@ -443,9 +443,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
        }
 
        if (--table->refs[index]) {
-               mlx4_dbg(dev, "Have %d more references for index %d,"
-                        "no need to modify vlan table\n", table->refs[index],
-                        index);
+               mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
+                        table->refs[index], index);
                goto out;
        }
        table->entries[index] = 0;
@@ -706,8 +705,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                                        if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
                                                    sizeof(gid_entry_tbl->raw))) {
                                                /* found duplicate */
-                                               mlx4_warn(dev, "requested gid entry for slave:%d "
-                                                         "is a duplicate of gid at index %d\n",
+                                               mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
                                                          slave, i);
                                                return -EINVAL;
                                        }
@@ -1106,6 +1104,9 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
        }
 
        if (found_ix >= 0) {
+               /* Calculate a slave_gid which is the slave number in the gid
+                * table and not a globally unique slave number.
+                */
                if (found_ix < MLX4_ROCE_PF_GIDS)
                        slave_gid = 0;
                else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
@@ -1118,41 +1119,43 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
                          ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
                         (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
 
+               /* Calculate the globally unique slave id */
                if (slave_gid) {
                        struct mlx4_active_ports exclusive_ports;
                        struct mlx4_active_ports actv_ports;
                        struct mlx4_slaves_pport slaves_pport_actv;
                        unsigned max_port_p_one;
-                       int num_slaves_before = 1;
+                       int num_vfs_before = 0;
+                       int candidate_slave_gid;
 
+                       /* Calculate how many VFs are on the previous port, if any */
                        for (i = 1; i < port; i++) {
                                bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
-                               set_bit(i, exclusive_ports.ports);
+                               set_bit(i - 1, exclusive_ports.ports);
                                slaves_pport_actv =
                                        mlx4_phys_to_slaves_pport_actv(
                                                        dev, &exclusive_ports);
-                               num_slaves_before += bitmap_weight(
+                               num_vfs_before += bitmap_weight(
                                                slaves_pport_actv.slaves,
                                                dev->num_vfs + 1);
                        }
 
-                       if (slave_gid < num_slaves_before) {
-                               bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
-                               set_bit(port - 1, exclusive_ports.ports);
-                               slaves_pport_actv =
-                                       mlx4_phys_to_slaves_pport_actv(
-                                                       dev, &exclusive_ports);
-                               slave_gid += bitmap_weight(
-                                               slaves_pport_actv.slaves,
-                                               dev->num_vfs + 1) -
-                                               num_slaves_before;
-                       }
-                       actv_ports = mlx4_get_active_ports(dev, slave_gid);
+                       /* candidate_slave_gid isn't necessarily the correct slave, but
+                        * it has the same number of ports and is assigned to the same
+                        * ports as the real slave we're looking for. On dual port VF,
+                        * slave_gid = [single port VFs on port <port>] +
+                        * [offset of the current slave from the first dual port VF] +
+                        * 1 (for the PF).
+                        */
+                       candidate_slave_gid = slave_gid + num_vfs_before;
+
+                       actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
                        max_port_p_one = find_first_bit(
                                actv_ports.ports, dev->caps.num_ports) +
                                bitmap_weight(actv_ports.ports,
                                              dev->caps.num_ports) + 1;
 
+                       /* Calculate the real slave number */
                        for (i = 1; i < max_port_p_one; i++) {
                                if (i == port)
                                        continue;
index 8e0c3cc2a1ec786739de7298fc450c483d8fe37a..14089d9e1667fcc4287fe08ca34590e279f66561 100644 (file)
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                }
 
                if (total_size > dev_cap->max_icm_sz) {
-                       mlx4_err(dev, "Profile requires 0x%llx bytes; "
-                                 "won't fit in 0x%llx bytes of context memory.\n",
-                                 (unsigned long long) total_size,
-                                 (unsigned long long) dev_cap->max_icm_sz);
+                       mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
+                                (unsigned long long) total_size,
+                                (unsigned long long) dev_cap->max_icm_sz);
                        kfree(profile);
                        return -ENOMEM;
                }
 
                if (profile[i].size)
-                       mlx4_dbg(dev, "  profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
-                                 "size 0x%10llx\n",
-                                i, res_name[profile[i].type], profile[i].log_num,
+                       mlx4_dbg(dev, "  profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
+                                i, res_name[profile[i].type],
+                                profile[i].log_num,
                                 (unsigned long long) profile[i].start,
                                 (unsigned long long) profile[i].size);
        }
index 61d64ebffd56e64b0fa8bf2d0fb69308e3d02c49..1d3234a6744d40e01e11cfb024a1ec6ce8540731 100644 (file)
@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
                               MLX4_CMD_FREE_RES,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (err) {
-                       mlx4_warn(dev, "Failed to release qp range"
-                                 " base:%d cnt:%d\n", base_qpn, cnt);
+                       mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
+                                 base_qpn, cnt);
                }
        } else
                 __mlx4_qp_release_range(dev, base_qpn, cnt);
@@ -389,6 +389,41 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_update_qp_context *cmd;
+       u64 pri_addr_path_mask = 0;
+       int err = 0;
+
+       if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+               return -EINVAL;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+       if (attr & MLX4_UPDATE_QP_SMAC) {
+               pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+               cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+       }
+
+       cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+       err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
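
mlx4_update_qp() above is exported for use elsewhere in the driver; a hedged usage sketch (the
wrapper function and variable names here are hypothetical) that asks the firmware to repoint an
existing QP at a different source-MAC table entry:

/* Sketch only: switch the SMAC index of an already-created QP. */
static int example_set_qp_smac(struct mlx4_dev *dev, u32 qpn, u8 new_smac_index)
{
	struct mlx4_update_qp_params params = { .smac_index = new_smac_index };
	struct mlx4_qp qp = { .qpn = qpn };

	return mlx4_update_qp(dev, &qp, MLX4_UPDATE_QP_SMAC, &params);
}
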
@@ -577,8 +612,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
                                     context, 0, 0, qp);
                if (err) {
-                       mlx4_err(dev, "Failed to bring QP to state: "
-                                "%d with error: %d\n",
+                       mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
                                 states[i + 1], err);
                        return err;
                }
index dd1b5093d8b170812451fb3f67158e1bb8e2b908..ea1c6d092145a5d7e150e8549577edc87f3b0472 100644 (file)
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
        hca_header = kmalloc(256, GFP_KERNEL);
        if (!hca_header) {
                err = -ENOMEM;
-               mlx4_err(dev, "Couldn't allocate memory to save HCA "
-                         "PCI header, aborting.\n");
+               mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
                goto out;
        }
 
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
                        continue;
                if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
                        err = -ENODEV;
-                       mlx4_err(dev, "Couldn't save HCA "
-                                 "PCI header, aborting.\n");
+                       mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
                        goto out;
                }
        }
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
                        MLX4_RESET_SIZE);
        if (!reset) {
                err = -ENOMEM;
-               mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+               mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
                goto out;
        }
 
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
 
        if (vendor == 0xffff) {
                err = -ENODEV;
-               mlx4_err(dev, "PCI device did not come back after reset, "
-                         "aborting.\n");
+               mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
                goto out;
        }
 
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
                if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
                                               devctl)) {
                        err = -ENODEV;
-                       mlx4_err(dev, "Couldn't restore HCA PCI Express "
-                                "Device Control register, aborting.\n");
+                       mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
                        goto out;
                }
                linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
                if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
                                               linkctl)) {
                        err = -ENODEV;
-                       mlx4_err(dev, "Couldn't restore HCA PCI Express "
-                                "Link control register, aborting.\n");
+                       mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
                        goto out;
                }
        }
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
 
                if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
                        err = -ENODEV;
-                       mlx4_err(dev, "Couldn't restore HCA reg %x, "
-                                 "aborting.\n", i);
+                       mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
+                                i);
                        goto out;
                }
        }
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
        if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
                                   hca_header[PCI_COMMAND / 4])) {
                err = -ENODEV;
-               mlx4_err(dev, "Couldn't restore HCA COMMAND, "
-                         "aborting.\n");
+               mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
                goto out;
        }
 
index 3b5f53ef29b292d6edcb027f6b64b9c108a3a03b..dd821b363686384435ab7793959c27fb432b468b 100644 (file)
@@ -962,7 +962,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
-               printk(KERN_ERR "implementation missing\n");
+               pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
@@ -1056,10 +1056,10 @@ static int remove_mtt_ok(struct res_mtt *res, int order)
 {
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
-               printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
-                      __func__, __LINE__,
-                      mtt_states_str(res->com.state),
-                      atomic_read(&res->ref_count));
+               pr_devel("%s-%d: state %s, ref_count %d\n",
+                        __func__, __LINE__,
+                        mtt_states_str(res->com.state),
+                        atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
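
The printk() conversions in this file move to the pr_*() helpers. One behavioural detail worth
keeping in mind, sketched below (kernel context assumed, not the driver's exact code): pr_devel()
compiles away entirely unless the file defines DEBUG, whereas the old printk(KERN_DEBUG ...) was
always emitted at debug loglevel.

/* #define DEBUG	-- or build with -DDEBUG to turn pr_devel()/pr_debug() on */
#include <linux/printk.h>

static void example_log(int err, int refs)
{
	pr_err("implementation missing\n");			/* always logged */
	pr_warn("mlx4_ib: HW2SW_MPT failed (%d)\n", err);	/* always logged */
	pr_devel("state busy, ref_count %d\n", refs);		/* only with DEBUG */
}
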
@@ -3733,6 +3733,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
        }
 }
 
+static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
+                           u8 *gid, enum mlx4_protocol prot)
+{
+       int real_port;
+
+       if (prot != MLX4_PROT_ETH)
+               return 0;
+
+       if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
+           dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+               real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
+               if (real_port < 0)
+                       return -EINVAL;
+               gid[5] = real_port;
+       }
+
+       return 0;
+}
+
 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                               struct mlx4_vhcr *vhcr,
                               struct mlx4_cmd_mailbox *inbox,
@@ -3768,6 +3787,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                if (err)
                        goto ex_detach;
        } else {
+               err = mlx4_adjust_port(dev, slave, gid, prot);
+               if (err)
+                       goto ex_put;
+
                err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
                if (err)
                        goto ex_put;
@@ -3857,7 +3880,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
                }
        }
        if (!be_mac) {
-               pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
+               pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
                       port);
                return -EINVAL;
        }
@@ -3872,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd_info)
+{
+       int err;
+       u32 qpn = vhcr->in_modifier & 0xffffff;
+       struct res_qp *rqp;
+       u64 mac;
+       unsigned port;
+       u64 pri_addr_path_mask;
+       struct mlx4_update_qp_context *cmd;
+       int smac_index;
+
+       cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+       pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+       if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+           (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+               return -EPERM;
+
+       /* Just change the smac for the QP */
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
+       if (err) {
+               mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+               return err;
+       }
+
+       port = (rqp->sched_queue >> 6 & 1) + 1;
+       smac_index = cmd->qp_context.pri_path.grh_mylmc;
+       err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                       smac_index, &mac);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                        qpn, smac_index);
+               goto err_mac;
+       }
+
+       err = mlx4_cmd(dev, inbox->dma,
+                      vhcr->in_modifier, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+               goto err_mac;
+       }
+
+err_mac:
+       put_res(dev, slave, qpn, RES_QP);
+       return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
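
Before forwarding UPDATE_QP on behalf of a VF, the wrapper above whitelists exactly one field: any
other bit in the primary path mask, or any use of the other masks, is rejected with -EPERM. The
check in isolation (a sketch, assuming the mlx4 QP definitions are in scope for the path-mask bit
used above):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mlx4/qp.h>

#define EXAMPLE_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)

/* Allow a guest-supplied update only if every requested bit is whitelisted. */
static int example_check_guest_masks(u64 qp_mask, u64 sec_mask, u64 pri_mask)
{
	if (qp_mask || sec_mask ||
	    (pri_mask & ~EXAMPLE_UPD_QP_PATH_MASK_SUPPORTED))
		return -EPERM;
	return 0;
}
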
@@ -3900,7 +3977,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
        err = get_res(dev, slave, qpn, RES_QP, &rqp);
        if (err) {
-               pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+               pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
                return err;
        }
        rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -3918,7 +3995,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        case MLX4_NET_TRANS_RULE_ID_IPV4:
        case MLX4_NET_TRANS_RULE_ID_TCP:
        case MLX4_NET_TRANS_RULE_ID_UDP:
-               pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+               pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
                        err = -EINVAL;
                        goto err_put;
@@ -3927,7 +4004,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
                break;
        default:
-               pr_err("Corrupted mailbox.\n");
+               pr_err("Corrupted mailbox\n");
                err = -EINVAL;
                goto err_put;
        }
@@ -3941,7 +4018,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 
        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
        if (err) {
-               mlx4_err(dev, "Fail to add flow steering resources.\n ");
+               mlx4_err(dev, "Fail to add flow steering resources\n");
                /* detach rule*/
                mlx4_cmd(dev, vhcr->out_param, 0, 0,
                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -3979,7 +4056,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 
        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
        if (err) {
-               mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+               mlx4_err(dev, "Fail to remove flow steering resources\n");
                goto out;
        }
 
@@ -4108,8 +4185,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_QP);
        if (err)
-               mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
-                         "for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4147,10 +4224,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_qps: failed"
-                                                        " to move slave %d qpn %d to"
-                                                        " reset\n", slave,
-                                                        qp->local_qpn);
+                                               mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
+                                                        slave, qp->local_qpn);
                                        atomic_dec(&qp->rcq->ref_count);
                                        atomic_dec(&qp->scq->ref_count);
                                        atomic_dec(&qp->mtt->ref_count);
@@ -4184,8 +4259,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_SRQ);
        if (err)
-               mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4215,9 +4290,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_srqs: failed"
-                                                        " to move slave %d srq %d to"
-                                                        " SW ownership\n",
+                                               mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
                                                         slave, srqn);
 
                                        atomic_dec(&srq->mtt->ref_count);
@@ -4252,8 +4325,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_CQ);
        if (err)
-               mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4283,9 +4356,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_cqs: failed"
-                                                        " to move slave %d cq %d to"
-                                                        " SW ownership\n",
+                                               mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
                                                         slave, cqn);
                                        atomic_dec(&cq->mtt->ref_count);
                                        state = RES_CQ_ALLOCATED;
@@ -4317,8 +4388,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_MPT);
        if (err)
-               mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4353,9 +4424,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
                                                     MLX4_CMD_TIME_CLASS_A,
                                                     MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_mrs: failed"
-                                                        " to move slave %d mpt %d to"
-                                                        " SW ownership\n",
+                                               mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
                                                         slave, mptn);
                                        if (mpt->mtt)
                                                atomic_dec(&mpt->mtt->ref_count);
@@ -4387,8 +4456,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_MTT);
        if (err)
-               mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4490,8 +4559,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_EQ);
        if (err)
-               mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4523,9 +4592,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
                                                           MLX4_CMD_TIME_CLASS_A,
                                                           MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_eqs: failed"
-                                                        " to move slave %d eqs %d to"
-                                                        " SW ownership\n", slave, eqn);
+                                               mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
+                                                        slave, eqn);
                                        mlx4_free_cmd_mailbox(dev, mailbox);
                                        atomic_dec(&eq->mtt->ref_count);
                                        state = RES_EQ_RESERVED;
@@ -4554,8 +4622,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_COUNTER);
        if (err)
-               mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4585,8 +4653,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_XRCD);
        if (err)
-               mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4731,10 +4799,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                                       0, MLX4_CMD_UPDATE_QP,
                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                        if (err) {
-                               mlx4_info(dev, "UPDATE_QP failed for slave %d, "
-                                         "port %d, qpn %d (%d)\n",
-                                         work->slave, port, qp->local_qpn,
-                                         err);
+                               mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
+                                         work->slave, port, qp->local_qpn, err);
                                errors++;
                        }
                }
index 405c4fbcd0ad1cb56453938012ecb9035de20f23..87d1b018a9c394309a6ee78310640690146bf245 100644 (file)
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
                               mlx5_command_str(msg_to_opcode(ent->in)),
                               msg_to_opcode(ent->in));
        }
-       mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
-                     deliv_status_to_str(ent->status), ent->status);
+       mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+                     err, deliv_status_to_str(ent->status), ent->status);
 
        return err;
 }
index 64a61b286b2c959fbb67c72dcc098199d74ff68d..7f39ebcd6ad01b3dc175ffd57b3239f9f7154a8a 100644 (file)
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                 */
                rmb();
 
-               mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
+               mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
+                             eq->eqn, eqe_type_str(eqe->type));
                switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
-                               mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
+                               mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+                                             func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;
 
 
                default:
-                       mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
+                       mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+                                      eqe->type, eq->eqn);
                        break;
                }
 
index c3eee5f70051e0855abf623ad0b1b10614d38b82..ee24f132e319988daaae5f870466bfc3599e3783 100644 (file)
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
-               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
-                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        return err;
                }
        }
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev,
-                        "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
+                        "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
-                               "Can't set consistent PCI DMA mask, aborting.\n");
+                               "Can't set consistent PCI DMA mask, aborting\n");
                        return err;
                }
        }
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
        int err = 0;
 
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-               dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
+               dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
                return -ENODEV;
        }
 
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        err = pci_enable_device(pdev);
        if (err) {
-               dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                goto err_dbg;
        }
 
        err = request_bar(pdev);
        if (err) {
-               dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
+               dev_err(&pdev->dev, "error requesting BARs, aborting\n");
                goto err_disable;
        }
 
index 68b74e1ae1b016c9b7b9ce5866a466dd131dccac..f0c9f9a7a36142f1a7fded7a88120e1cff213aaa 100644 (file)
 
 extern int mlx5_core_debug_mask;
 
-#define mlx5_core_dbg(dev, format, arg...)                                    \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,   \
-        current->pid, ##arg)
+#define mlx5_core_dbg(dev, format, ...)                                        \
+       pr_debug("%s:%s:%d:(pid %d): " format,                          \
+                (dev)->priv.name, __func__, __LINE__, current->pid,    \
+                ##__VA_ARGS__)
 
-#define mlx5_core_dbg_mask(dev, mask, format, arg...)                         \
-do {                                                                          \
-       if ((mask) & mlx5_core_debug_mask)                                     \
-               pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name,       \
-                        __func__, __LINE__, current->pid, ##arg);             \
+#define mlx5_core_dbg_mask(dev, mask, format, ...)                     \
+do {                                                                   \
+       if ((mask) & mlx5_core_debug_mask)                              \
+               mlx5_core_dbg(dev, format, ##__VA_ARGS__);              \
 } while (0)
 
-#define mlx5_core_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,     \
-       current->pid, ##arg)
+#define mlx5_core_err(dev, format, ...)                                        \
+       pr_err("%s:%s:%d:(pid %d): " format,                            \
+              (dev)->priv.name, __func__, __LINE__, current->pid,      \
+              ##__VA_ARGS__)
 
-#define mlx5_core_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,    \
-       current->pid, ##arg)
+#define mlx5_core_warn(dev, format, ...)                               \
+       pr_warn("%s:%s:%d:(pid %d): " format,                           \
+               (dev)->priv.name, __func__, __LINE__, current->pid,     \
+               ##__VA_ARGS__)
 
 enum {
        MLX5_CMD_DATA, /* print command payload only */
index 4cc92764940477c4f9622fc0cbc4238a08d81283..0a11b3fe9c193a7923cb4dd1f2292460690bf337 100644 (file)
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
        }
 
        if (err) {
-               mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
+               mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
                return err;
        }
 
@@ -191,7 +191,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
        }
 
        if (out.hdr.status) {
-               mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status);
+               mlx5_core_err(dev, "create_psv bad status %d\n",
+                             out.hdr.status);
                return mlx5_cmd_status_to_err(&out.hdr);
        }
 
@@ -220,7 +221,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
        }
 
        if (out.hdr.status) {
-               mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status);
+               mlx5_core_err(dev, "destroy_psv bad status %d\n",
+                             out.hdr.status);
                err = mlx5_cmd_status_to_err(&out.hdr);
                goto out;
        }
index d59790a82bc3d5c9f3def1e4a664a6531b93ad7d..c2a953ef0e675801827ac9bec57e1aaf399dbcca 100644 (file)
@@ -311,7 +311,8 @@ retry:
        in->num_entries = cpu_to_be32(npages);
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
-               mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
+               mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+                              func_id, npages, err);
                goto out_alloc;
        }
        dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
        if (out.hdr.status) {
                err = mlx5_cmd_status_to_err(&out.hdr);
                if (err) {
-                       mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
+                       mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+                                      func_id, npages, out.hdr.status);
                        goto out_alloc;
                }
        }
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err) {
-               mlx5_core_err(dev, "failed recliaming pages\n");
+               mlx5_core_err(dev, "failed reclaiming pages\n");
                goto out_free;
        }
        dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
                err = give_pages(dev, req->func_id, req->npages, 1);
 
        if (err)
-               mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
-                              "reclaim" : "give", err);
+               mlx5_core_warn(dev, "%s fail %d\n",
+                              req->npages < 0 ? "reclaim" : "give", err);
 
        kfree(req);
 }
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
                                            optimal_reclaimed_pages(),
                                            &nclaimed);
                        if (err) {
-                               mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
+                               mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+                                              err);
                                return err;
                        }
                        if (nclaimed)
index 510576213dd0c8e823a6feecb87d427f275ca90e..8145b4668229d6a483e9b36fdc457387a6c6ab41 100644 (file)
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
-               mlx5_core_warn(dev, "ret %d", err);
+               mlx5_core_warn(dev, "ret %d\n", err);
                return err;
        }
 
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
        err = radix_tree_insert(&table->tree, qp->qpn, qp);
        spin_unlock_irq(&table->lock);
        if (err) {
-               mlx5_core_warn(dev, "err %d", err);
+               mlx5_core_warn(dev, "err %d\n", err);
                goto err_cmd;
        }
 
index 16435b3cfa9f133a3fe937dc96670853873e7485..6c7c78baedcaf590f6f6c78f5155a1df2fa594e9 100644 (file)
@@ -1504,15 +1504,15 @@ ks8695_probe(struct platform_device *pdev)
        if (ksp->phyiface_regs && ksp->link_irq == -1) {
                ks8695_init_switch(ksp);
                ksp->dtype = KS8695_DTYPE_LAN;
-               SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+               ndev->ethtool_ops = &ks8695_ethtool_ops;
        } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
                ks8695_init_wan_phy(ksp);
                ksp->dtype = KS8695_DTYPE_WAN;
-               SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
+               ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
        } else {
                /* No initialisation since HPNA does not have a PHY */
                ksp->dtype = KS8695_DTYPE_HPNA;
-               SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+               ndev->ethtool_ops = &ks8695_ethtool_ops;
        }
 
        /* And bring up the net_device with the net core */
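
This driver (and several below) replaces SET_ETHTOOL_OPS() with a direct assignment; the macro was
only a thin wrapper around that assignment and was being removed from the tree. Roughly (the macro
body shown is a best-recollection sketch, not quoted from netdevice.h):

#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* The old helper, approximately: */
/* #define SET_ETHTOOL_OPS(netdev, ops)	((netdev)->ethtool_ops = (ops)) */

/* The open-coded replacement used at each call site: */
static void example_attach_ethtool_ops(struct net_device *ndev,
				       const struct ethtool_ops *ops)
{
	ndev->ethtool_ops = ops;
}
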
index e0c92e0e5e1d463f0242088d184394608b243cb6..e72918970a5892c377aeca8edf3aa877710f665b 100644 (file)
@@ -26,6 +26,8 @@
 #include <linux/regulator/consumer.h>
 
 #include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
 
 #include "ks8851.h"
 
@@ -85,6 +87,8 @@ union ks8851_tx_hdr {
  * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
  * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
  * @vdd_reg:   Optional regulator supplying the chip
+ * @vdd_io: Optional digital power supply for IO
+ * @gpio: Optional reset_n gpio
  *
  * The @lock ensures that the chip is protected when certain operations are
  * in progress. When the read or write packet transfer is in progress, most
@@ -133,6 +137,8 @@ struct ks8851_net {
 
        struct eeprom_93cx6     eeprom;
        struct regulator        *vdd_reg;
+       struct regulator        *vdd_io;
+       int                     gpio;
 };
 
 static int msg_enable;
@@ -1404,6 +1410,7 @@ static int ks8851_probe(struct spi_device *spi)
        struct ks8851_net *ks;
        int ret;
        unsigned cider;
+       int gpio;
 
        ndev = alloc_etherdev(sizeof(struct ks8851_net));
        if (!ndev)
@@ -1417,7 +1424,38 @@ static int ks8851_probe(struct spi_device *spi)
        ks->spidev = spi;
        ks->tx_space = 6144;
 
-       ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
+       gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
+                                      0, NULL);
+       if (gpio == -EPROBE_DEFER) {
+               ret = gpio;
+               goto err_gpio;
+       }
+
+       ks->gpio = gpio;
+       if (gpio_is_valid(gpio)) {
+               ret = devm_gpio_request_one(&spi->dev, gpio,
+                                           GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
+               if (ret) {
+                       dev_err(&spi->dev, "reset gpio request failed\n");
+                       goto err_gpio;
+               }
+       }
+
+       ks->vdd_io = devm_regulator_get_optional(&spi->dev, "vdd-io");
+       if (IS_ERR(ks->vdd_io)) {
+               ret = PTR_ERR(ks->vdd_io);
+               if (ret == -EPROBE_DEFER)
+                       goto err_reg_io;
+       } else {
+               ret = regulator_enable(ks->vdd_io);
+               if (ret) {
+                       dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n",
+                               ret);
+                       goto err_reg_io;
+               }
+       }
+
+       ks->vdd_reg = devm_regulator_get_optional(&spi->dev, "vdd");
        if (IS_ERR(ks->vdd_reg)) {
                ret = PTR_ERR(ks->vdd_reg);
                if (ret == -EPROBE_DEFER)
@@ -1425,12 +1463,16 @@ static int ks8851_probe(struct spi_device *spi)
        } else {
                ret = regulator_enable(ks->vdd_reg);
                if (ret) {
-                       dev_err(&spi->dev, "regulator enable fail: %d\n",
+                       dev_err(&spi->dev, "regulator vdd enable fail: %d\n",
                                ret);
-                       goto err_reg_en;
+                       goto err_reg;
                }
        }
 
+       if (gpio_is_valid(gpio)) {
+               usleep_range(10000, 11000);
+               gpio_set_value(gpio, 1);
+       }
 
        mutex_init(&ks->lock);
        spin_lock_init(&ks->statelock);
@@ -1471,7 +1513,7 @@ static int ks8851_probe(struct spi_device *spi)
 
        skb_queue_head_init(&ks->txq);
 
-       SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
+       ndev->ethtool_ops = &ks8851_ethtool_ops;
        SET_NETDEV_DEV(ndev, &spi->dev);
 
        spi_set_drvdata(spi, ks);
@@ -1527,13 +1569,16 @@ err_netdev:
        free_irq(ndev->irq, ks);
 
 err_irq:
+       if (gpio_is_valid(gpio))
+               gpio_set_value(gpio, 0);
 err_id:
        if (!IS_ERR(ks->vdd_reg))
                regulator_disable(ks->vdd_reg);
-err_reg_en:
-       if (!IS_ERR(ks->vdd_reg))
-               regulator_put(ks->vdd_reg);
 err_reg:
+       if (!IS_ERR(ks->vdd_io))
+               regulator_disable(ks->vdd_io);
+err_reg_io:
+err_gpio:
        free_netdev(ndev);
        return ret;
 }
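
The ks8851 probe changes follow a common shape for optional board resources: look up an optional
reset GPIO and an optional vdd-io supply, tolerate their absence, but propagate -EPROBE_DEFER when
the provider is not ready yet. A condensed, hedged sketch of that shape (names and error handling
simplified, not the driver's exact code):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

/* Sketch: acquire an optional reset GPIO and an optional supply. */
static int example_get_optional_resources(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct regulator *vdd_io;
	int gpio, ret;

	gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, NULL);
	if (gpio == -EPROBE_DEFER)
		return gpio;				/* provider not ready: retry later */
	if (gpio_is_valid(gpio)) {
		ret = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_LOW,
					    "example_rst_n");
		if (ret)
			return ret;
	}

	vdd_io = devm_regulator_get_optional(dev, "vdd-io");
	if (IS_ERR(vdd_io)) {
		if (PTR_ERR(vdd_io) == -EPROBE_DEFER)
			return -EPROBE_DEFER;		/* supply exists but isn't up yet */
		/* any other error: treat the supply as simply not present */
	} else {
		ret = regulator_enable(vdd_io);
		if (ret)
			return ret;
	}

	return 0;
}
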
@@ -1547,18 +1592,26 @@ static int ks8851_remove(struct spi_device *spi)
 
        unregister_netdev(priv->netdev);
        free_irq(spi->irq, priv);
-       if (!IS_ERR(priv->vdd_reg)) {
+       if (gpio_is_valid(priv->gpio))
+               gpio_set_value(priv->gpio, 0);
+       if (!IS_ERR(priv->vdd_reg))
                regulator_disable(priv->vdd_reg);
-               regulator_put(priv->vdd_reg);
-       }
+       if (!IS_ERR(priv->vdd_io))
+               regulator_disable(priv->vdd_io);
        free_netdev(priv->netdev);
 
        return 0;
 }
 
+static const struct of_device_id ks8851_match_table[] = {
+       { .compatible = "micrel,ks8851" },
+       { }
+};
+
 static struct spi_driver ks8851_driver = {
        .driver = {
                .name = "ks8851",
+               .of_match_table = ks8851_match_table,
                .owner = THIS_MODULE,
                .pm = &ks8851_pm_ops,
        },
index 14ac0e2bc09fcbc50f65ceead949ecd7d15d6130..064a48d0c368a267826e2f77bacb2da9fa366e1e 100644 (file)
@@ -4930,7 +4930,7 @@ static void netdev_tx_timeout(struct net_device *dev)
                 * Only reset the hardware if time between calls is long
                 * enough.
                 */
-               if (jiffies - last_reset <= dev->watchdog_timeo)
+               if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
                        hw_priv = NULL;
        }
 
@@ -7072,6 +7072,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
                dev = alloc_etherdev(sizeof(struct dev_priv));
                if (!dev)
                        goto pcidev_init_reg_err;
+               SET_NETDEV_DEV(dev, &pdev->dev);
                info->netdev[i] = dev;
 
                priv = netdev_priv(dev);
@@ -7106,7 +7107,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
                }
 
                dev->netdev_ops = &netdev_ops;
-               SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+               dev->ethtool_ops = &netdev_ethtool_ops;
                if (register_netdev(dev))
                        goto pcidev_init_reg_err;
                port_set_power_saving(port, true);
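Editor's note: the hunk above, and several later ones in the vxge and qlcnic code, replace open-coded jiffies comparisons with time_before()/time_before_eq(). Those helpers stay correct when the jiffies counter wraps because, as conventionally defined in linux/jiffies.h, they compare through a signed difference rather than with a plain < or >. The userspace sketch below assumes that signed-difference form; sketch_time_before() is a made-up name, not the kernel macro.

    /* Userspace sketch of a wraparound-safe time comparison, assuming the
     * conventional signed-difference form of the kernel's time_before()
     * helper (the real macros also add type checking).
     */
    #include <stdio.h>

    /* true if a is earlier than b, even if the counter has wrapped */
    static int sketch_time_before(unsigned long a, unsigned long b)
    {
            return (long)(a - b) < 0;
    }

    int main(void)
    {
            unsigned long last = (unsigned long)-5; /* just before the wrap */
            unsigned long now  = 3;                 /* counter has wrapped  */

            printf("naive now > last: %d\n", now > last);                /* 0 */
            printf("wrap-safe before: %d\n", sketch_time_before(last, now)); /* 1 */
            return 0;
    }

The naive comparison concludes that no time has passed once the counter wraps, which is the bug the watchdog and interrupt-coalescing hunks in this series are avoiding.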
index c7b40aa21f22fe2a909754fe4e2a9a7b8bad5898..b1b5f66b8b6910ad2dc38c2157d8b12aba1668c1 100644 (file)
@@ -1593,7 +1593,7 @@ static int enc28j60_probe(struct spi_device *spi)
        dev->irq = spi->irq;
        dev->netdev_ops = &enc28j60_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops);
+       dev->ethtool_ops = &enc28j60_ethtool_ops;
 
        enc28j60_lowpower(priv, true);
 
index 130f6b204efa29cb9c97c98b4e3b0f52b569cd35..f3d5d79f1cd15de8dff66fa4aeab6fccaa25ab8e 100644 (file)
@@ -4112,7 +4112,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
                    (unsigned long)mgp);
 
-       SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
+       netdev->ethtool_ops = &myri10ge_ethtool_ops;
        INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
        status = register_netdev(netdev);
        if (status != 0) {
index 64ec2a437f46a3280e9377c55d8226a19f82d089..291fba8b9f07351effff8ebbc9b229fc40c0e885 100644 (file)
@@ -927,7 +927,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->netdev_ops = &natsemi_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
 
        if (mtu)
                dev->mtu = mtu;
index dbccf1de49ecbf7011a167585caefdeb082fa4de..19bb8244b9e3e1056a2835bf2c2434f1e6ae65a3 100644 (file)
@@ -2030,7 +2030,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
                pci_dev->subsystem_vendor, pci_dev->subsystem_device);
 
        ndev->netdev_ops = &netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &ops);
+       ndev->ethtool_ops = &ops;
        ndev->watchdog_timeo = 5 * HZ;
        pci_set_drvdata(pci_dev, ndev);
 
index e900c1abdef789d234de4ad4eb1a41e0b090a358..e3cf38e6ce3c11deb08676d0ee58ec4bf62412c7 100644 (file)
@@ -7910,7 +7910,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 
        /*  Driver entry points */
        dev->netdev_ops = &s2io_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+       dev->ethtool_ops = &netdev_ethtool_ops;
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_RXCSUM | NETIF_F_LRO;
index f8f073880f84bccd5f0daedb8c1f08233c43e6c8..ddcc81ad1ae1f3c2167f03859a436a8fc3938dc9 100644 (file)
@@ -1128,5 +1128,5 @@ static const struct ethtool_ops vxge_ethtool_ops = {
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev)
 {
-       SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
+       ndev->ethtool_ops = &vxge_ethtool_ops;
 }
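Editor's note: a large share of this section is the mechanical removal of SET_ETHTOOL_OPS() in favour of assigning ndev->ethtool_ops directly. To the best of this editor's recollection the old helper in include/linux/ethtool.h was nothing more than a one-line assignment macro, which is why the conversion is purely textual. The sketch below uses stand-in struct types, not the real kernel definitions.

    /* Stand-in types; the real struct net_device / ethtool_ops live in the
     * kernel headers. The macro body reflects this editor's recollection of
     * the old include/linux/ethtool.h helper.
     */
    struct ethtool_ops_stub { int dummy; };
    struct net_device_stub  { const struct ethtool_ops_stub *ethtool_ops; };

    #define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))

    static const struct ethtool_ops_stub example_ops = { 0 };

    static void old_style(struct net_device_stub *ndev)
    {
            SET_ETHTOOL_OPS(ndev, &example_ops);   /* before this series */
    }

    static void new_style(struct net_device_stub *ndev)
    {
            ndev->ethtool_ops = &example_ops;      /* after this series  */
    }

    int main(void)
    {
            struct net_device_stub ndev = { 0 };

            old_style(&ndev);
            new_style(&ndev);
            return ndev.ethtool_ops == &example_ops ? 0 : 1;
    }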
index d107bcbb8543035110a98a82a21a7e72c8ec1303..7a0deadd53bf14743e4c530b895c8658e59c8be2 100644 (file)
@@ -2122,7 +2122,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
 static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
 {
        fifo->interrupt_count++;
-       if (jiffies > fifo->jiffies + HZ / 100) {
+       if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
                struct __vxge_hw_fifo *hw_fifo = fifo->handle;
 
                fifo->jiffies = jiffies;
@@ -2150,7 +2150,7 @@ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
 static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
 {
        ring->interrupt_count++;
-       if (jiffies > ring->jiffies + HZ / 100) {
+       if (time_before(ring->jiffies + HZ / 100, jiffies)) {
                struct __vxge_hw_ring *hw_ring = ring->handle;
 
                ring->jiffies = jiffies;
index fddb464aeab3a517c362d12ad4891eb3e2529cae..e8235c5c5e696cc17226a0878e376753967e837e 100644 (file)
@@ -5766,7 +5766,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
                dev->netdev_ops = &nv_netdev_ops_optimized;
 
        netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-       SET_ETHTOOL_OPS(dev, &ops);
+       dev->ethtool_ops = &ops;
        dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
 
        pci_set_drvdata(pci_dev, dev);
index a588ffde970041def37cae92b215011d88b6eea6..44c8be1c68051ec9a9b79f4ba481060022613e79 100644 (file)
@@ -4,7 +4,7 @@
 
 config PCH_GBE
        tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
-       depends on PCI && (X86 || COMPILE_TEST)
+       depends on PCI && (X86_32 || COMPILE_TEST)
        select MII
        select PTP_1588_CLOCK_PCH
        ---help---
index 826f0ccdc23c818139d3951c953b56a3b885d6e8..114d2fe52cc2d900bc469825d28b3e3d380ceeed 100644 (file)
@@ -508,5 +508,5 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
 
 void pch_gbe_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops);
+       netdev->ethtool_ops = &pch_gbe_ethtool_ops;
 }
index b6bdeb3c19711ac960646dfdbeef2207b86f366d..9a997e4c3e084a16368ebcae6ed53d42a0c0524c 100644 (file)
@@ -724,10 +724,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
 
        /* The Hamachi-specific entries in the device structure. */
        dev->netdev_ops = &hamachi_netdev_ops;
-       if (chip_tbl[hmp->chip_id].flags & CanHaveMII)
-               SET_ETHTOOL_OPS(dev, &ethtool_ops);
-       else
-               SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
+       dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
+               &ethtool_ops : &ethtool_ops_no_mii;
        dev->watchdog_timeo = TX_TIMEOUT;
        if (mtu)
                dev->mtu = mtu;
index 9a6cb482dcd0b8bf5e26e3e2c784f57b032f5236..69a8dc0950720b7f57a83483d1cf86f0f4f9f35d 100644 (file)
@@ -472,7 +472,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
 
        /* The Yellowfin-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        if (mtu)
index c14bd3116e454edad88d27f7ab8923cb999ec885..d49cba1290814ecc175d925fca6ae2dd9227cded 100644 (file)
@@ -66,6 +66,17 @@ config QLCNIC_VXLAN
          Say Y here if you want to enable hardware offload support for
          Virtual eXtensible Local Area Network (VXLAN) in the driver.
 
+config QLCNIC_HWMON
+       bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+       depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+       default y
+       ---help---
+         This configuration parameter can be used to read the
+         board temperature in Converged Ethernet devices
+         supported by qlcnic.
+
+         This data is available via the hwmon sysfs interface.
+
 config QLGE
        tristate "QLogic QLGE 10Gb Ethernet Driver Support"
        depends on PCI
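Editor's note: the new QLCNIC_HWMON help text above says the board temperature becomes readable through the hwmon sysfs interface. A minimal userspace reader is sketched below; the hwmon0 index and the temp1_input attribute name are assumptions based on common hwmon conventions rather than anything taken from the qlcnic driver, so the path will need adjusting on a real system.

    /* Hedged sketch: read a board temperature exposed via hwmon sysfs.
     * hwmon conventionally reports temperatures in millidegrees Celsius;
     * the exact hwmonN/tempN_input path depends on the system.
     */
    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/class/hwmon/hwmon0/temp1_input";
            FILE *f = fopen(path, "r");
            long millicelsius;

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fscanf(f, "%ld", &millicelsius) == 1)
                    printf("board temperature: %.1f C\n", millicelsius / 1000.0);
            fclose(f);
            return 0;
    }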
index f09c35d669b3ec7d8898f96f0ffd9ce7362ddb13..5bf05818a12cfa707d138c420908546daaa502a5 100644 (file)
@@ -1373,7 +1373,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
 
        netxen_nic_change_mtu(netdev, netdev->mtu);
 
-       SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
+       netdev->ethtool_ops = &netxen_nic_ethtool_ops;
 
        netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                              NETIF_F_RXCSUM;
index 2eabd44f8914de91ac4eaebbed17d4a08c66c96f..b5d6bc1a8b0024770c919e6ccf1f49a0cc6da1a8 100644 (file)
@@ -3838,7 +3838,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
 
        /* Set driver entry points */
        ndev->netdev_ops = &ql3xxx_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+       ndev->ethtool_ops = &ql3xxx_ethtool_ops;
        ndev->watchdog_timeo = 5 * HZ;
 
        netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
index 7b52a88923ef2e53fadf9aca0185e2af492aa7be..41abe6070466602245ece9966c1bafa87a529160 100644 (file)
@@ -39,8 +39,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 57
-#define QLCNIC_LINUX_VERSIONID  "5.3.57"
+#define _QLCNIC_LINUX_SUBVERSION 59
+#define QLCNIC_LINUX_VERSIONID  "5.3.59"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -537,6 +537,7 @@ struct qlcnic_hardware_context {
        u8 phys_port_id[ETH_ALEN];
        u8 lb_mode;
        u16 vxlan_port;
+       struct device *hwmon_dev;
 };
 
 struct qlcnic_adapter_stats {
@@ -1018,6 +1019,8 @@ struct qlcnic_ipaddr {
 #define QLCNIC_DEL_VXLAN_PORT          0x200000
 #endif
 
+#define QLCNIC_VLAN_FILTERING          0x800000
+
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
        ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
 #define QLCNIC_IS_TSO_CAPABLE(adapter)  \
@@ -1316,6 +1319,7 @@ struct qlcnic_eswitch {
 #define QL_STATUS_INVALID_PARAM        -1
 
 #define MAX_BW                 100     /* % of link speed */
+#define MIN_BW                 1       /* % of link speed */
 #define MAX_VLAN_ID            4095
 #define MIN_VLAN_ID            2
 #define DEFAULT_MAC_LEARN      1
@@ -1692,7 +1696,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *);
 int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
 void qlcnic_set_netdev_features(struct qlcnic_adapter *,
                                struct qlcnic_esw_func_cfg *);
-void qlcnic_sriov_vf_schedule_multi(struct net_device *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
 int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
 int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
                             u16 *);
@@ -1719,22 +1723,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
                                tx_ring->producer;
 }
 
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-                                            struct net_device *netdev)
-{
-       int err;
-
-       netdev->num_tx_queues = adapter->drv_tx_rings;
-       netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-       err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-       if (err)
-               netdev_err(netdev, "failed to set %d Tx queues\n",
-                          adapter->drv_tx_rings);
-
-       return err;
-}
-
 struct qlcnic_nic_template {
        int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
        int (*config_led) (struct qlcnic_adapter *, u32, u32);
@@ -2354,6 +2342,16 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
        return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
 }
 
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+       bool status;
+
+       status = (qlcnic_sriov_pf_check(adapter) ||
+                 qlcnic_sriov_vf_check(adapter)) ? true : false;
+
+       return status;
+}
+
 static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
 {
        if (qlcnic_84xx_check(adapter))
@@ -2361,4 +2359,18 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
        else
                return QLC_DEFAULT_VNIC_COUNT;
 }
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       return;
+}
+#endif
 #endif                         /* __QLCNIC_H_ */
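Editor's note: the block above adds compiled-out stubs for the new hwmon hooks so that callers need no #ifdef CONFIG_QLCNIC_HWMON of their own. A generic, self-contained sketch of that pattern follows; CONFIG_DEMO_FEATURE and the demo_* names are made up for the example, not qlcnic symbols.

    /* When the config option is disabled, empty static inline stubs keep
     * the call sites identical and free of preprocessor conditionals.
     */
    #include <stdio.h>

    struct demo_adapter { int id; };

    #ifdef CONFIG_DEMO_FEATURE
    void demo_register_hwmon(struct demo_adapter *a)
    {
            printf("registering hwmon for adapter %d\n", a->id);
    }
    #else
    static inline void demo_register_hwmon(struct demo_adapter *a) { }
    #endif

    int main(void)
    {
            struct demo_adapter a = { .id = 0 };

            /* caller stays the same whether the feature is built in or not */
            demo_register_hwmon(&a);
            return 0;
    }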
index b7cffb46a75dbd8f215752218a6f1f4239c1cc98..a4a4ec0b68f8d5e9d7b0c6f3ed5050b5787a37c4 100644 (file)
@@ -33,6 +33,7 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
 #define RSS_HASHTYPE_IP_TCP            0x3
 #define QLC_83XX_FW_MBX_CMD            0
 #define QLC_SKIP_INACTIVE_PCI_REGS     7
+#define QLC_MAX_LEGACY_FUNC_SUPP       8
 
 static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
        {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -357,8 +358,15 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
        if (!ahw->intr_tbl)
                return -ENOMEM;
 
-       if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+       if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+               if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+                       dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
+                               ahw->pci_func);
+                       return -EOPNOTSUPP;
+               }
+
                qlcnic_83xx_enable_legacy(adapter);
+       }
 
        for (i = 0; i < num_msix; i++) {
                if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -879,6 +887,9 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
                        return 0;
                }
        }
+
+       dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+               __func__, type);
        return -EINVAL;
 }
 
@@ -3026,19 +3037,18 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
        QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
 }
 
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
                                u32 *data, u32 count)
 {
        int i, j, ret = 0;
        u32 temp;
-       int err = 0;
 
        /* Check alignment */
        if (addr & 0xF)
                return -EIO;
 
        mutex_lock(&adapter->ahw->mem_lock);
-       qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0);
+       qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
 
        for (i = 0; i < count; i++, addr += 16) {
                if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
@@ -3049,26 +3059,16 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
                        return -EIO;
                }
 
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
-                                            *data++);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
-                                            *data++);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
-                                            *data++);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
-                                            *data++);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
-                                            QLCNIC_TA_WRITE_ENABLE);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
-                                            QLCNIC_TA_WRITE_START);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
 
                for (j = 0; j < MAX_CTL_CHECK; j++) {
-                       temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
-                       if (err == -EIO) {
-                               mutex_unlock(&adapter->ahw->mem_lock);
-                               return err;
-                       }
+                       temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
 
                        if ((temp & TA_CTL_BUSY) == 0)
                                break;
index 88d809c356334675026fb1a71e37107ded60c709..97784d09933f017f20a92510f3db969c0cfd746a 100644 (file)
@@ -560,7 +560,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
 void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
 void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
 int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
-void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
 int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
 int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
@@ -617,7 +617,6 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
 void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
 int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
 int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
@@ -659,4 +658,5 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
 u32 qlcnic_83xx_get_cap_size(void *, int);
 void qlcnic_83xx_set_sys_info(void *, int, u32);
 void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
 #endif
index ba20c721ee97f59d05f18a126471cb4a4a277f0b..f33559b725283cf69b08e80a8179fb488b89acb2 100644 (file)
@@ -1363,8 +1363,8 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
                return ret;
        }
        /* 16 byte write to MS memory */
-       ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
-                                         size / 16);
+       ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+                                    size / 16);
        if (ret) {
                vfree(p_cache);
                return ret;
@@ -1389,8 +1389,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
        p_cache = (u32 *)fw->data;
        addr = (u64)dest;
 
-       ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
-                                         p_cache, size / 16);
+       ret = qlcnic_ms_mem_write128(adapter, addr,
+                                    p_cache, size / 16);
        if (ret) {
                dev_err(&adapter->pdev->dev, "MS memory write failed\n");
                release_firmware(fw);
@@ -1405,8 +1405,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
                        data[i] = fw->data[size + i];
                for (; i < 16; i++)
                        data[i] = 0;
-               ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
-                                                 (u32 *)data, 1);
+               ret = qlcnic_ms_mem_write128(adapter, addr,
+                                            (u32 *)data, 1);
                if (ret) {
                        dev_err(&adapter->pdev->dev,
                                "MS memory write failed\n");
@@ -2181,6 +2181,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
                max_sds_rings = QLCNIC_MAX_SDS_RINGS;
                max_tx_rings = QLCNIC_MAX_TX_RINGS;
        } else {
+               dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+                       __func__, ret);
                return -EIO;
        }
 
index c1e11f5715b056c0e90ba096de8f397eb50fce97..304e247bdf339c59b30c816839da1bb0ccb9a6ac 100644 (file)
@@ -1027,8 +1027,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
        u32 arg1;
 
        if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
-           !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+           !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+               dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+                       __func__);
                return err;
+       }
 
        arg1 = id | (enable_mirroring ? BIT_4 : 0);
        arg1 |= pci_func << 8;
@@ -1318,8 +1321,12 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
        u32 arg1, arg2 = 0;
        u8 pci_func;
 
-       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+               dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+                       __func__);
                return err;
+       }
+
        pci_func = esw_cfg->pci_func;
        index = qlcnic_is_valid_nic_func(adapter, pci_func);
        if (index < 0)
@@ -1363,6 +1370,8 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
                        arg1 &= ~(0x0ffff << 16);
                        break;
        default:
+               dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+                       __func__, esw_cfg->op_mode);
                return err;
        }
 
index 9f3adf4e70b5f31a2d143c8d946ce6d88201a096..851cb4a80d50a6d4b2733ac2693fd799cca37885 100644 (file)
@@ -373,12 +373,16 @@ int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
        return data;
 }
 
-void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
 {
+       int ret = 0;
+
        if (qlcnic_82xx_check(adapter))
                qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
        else
-               qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+               ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+       return ret;
 }
 
 static int
@@ -567,28 +571,14 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 void qlcnic_set_multi(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_mac_vlan_list *cur;
-       struct netdev_hw_addr *ha;
-       size_t temp;
 
        if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
                return;
-       if (qlcnic_sriov_vf_check(adapter)) {
-               if (!netdev_mc_empty(netdev)) {
-                       netdev_for_each_mc_addr(ha, netdev) {
-                               temp = sizeof(struct qlcnic_mac_vlan_list);
-                               cur = kzalloc(temp, GFP_ATOMIC);
-                               if (cur == NULL)
-                                       break;
-                               memcpy(cur->mac_addr,
-                                      ha->addr, ETH_ALEN);
-                               list_add_tail(&cur->list, &adapter->vf_mc_list);
-                       }
-               }
-               qlcnic_sriov_vf_schedule_multi(adapter->netdev);
-               return;
-       }
-       __qlcnic_set_multi(netdev, 0);
+
+       if (qlcnic_sriov_vf_check(adapter))
+               qlcnic_sriov_vf_set_multi(netdev);
+       else
+               __qlcnic_set_multi(netdev, 0);
 }
 
 int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
@@ -630,7 +620,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
        struct hlist_node *n;
        struct hlist_head *head;
        int i;
-       unsigned long time;
+       unsigned long expires;
        u8 cmd;
 
        for (i = 0; i < adapter->fhash.fbucket_size; i++) {
@@ -638,8 +628,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
                hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                        cmd =  tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
                                                  QLCNIC_MAC_DEL;
-                       time = tmp_fil->ftime;
-                       if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+                       expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+                       if (time_before(expires, jiffies)) {
                                qlcnic_sre_macaddr_change(adapter,
                                                          tmp_fil->faddr,
                                                          tmp_fil->vlan_id,
@@ -657,8 +647,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 
                hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
                {
-                       time = tmp_fil->ftime;
-                       if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+                       expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+                       if (time_before(expires, jiffies)) {
                                spin_lock_bh(&adapter->rx_mac_learn_lock);
                                adapter->rx_fhash.fnum--;
                                hlist_del(&tmp_fil->fnode);
index 173b3d12991f55a62751d5e6a213d20ee02c3174..e45bf09af0c9fe4dbe9cdc629370af792ab88c01 100644 (file)
@@ -305,7 +305,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 {
        struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
-       struct net_device *netdev = adapter->netdev;
        u16 protocol = ntohs(skb->protocol);
        struct qlcnic_filter *fil, *tmp_fil;
        struct hlist_head *head;
@@ -314,27 +313,16 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
        u16 vlan_id = 0;
        u8 hindex, hval;
 
-       if (!qlcnic_sriov_pf_check(adapter)) {
-               if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
-                       return;
-       } else {
+       if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+               return;
+
+       if (adapter->flags & QLCNIC_VLAN_FILTERING) {
                if (protocol == ETH_P_8021Q) {
                        vh = (struct vlan_ethhdr *)skb->data;
                        vlan_id = ntohs(vh->h_vlan_TCI);
                } else if (vlan_tx_tag_present(skb)) {
                        vlan_id = vlan_tx_tag_get(skb);
                }
-
-               if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
-                   !vlan_id)
-                       return;
-       }
-
-       if (adapter->fhash.fnum >= adapter->fhash.fmax) {
-               adapter->stats.mac_filter_limit_overrun++;
-               netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
-                           adapter->fhash.fmax, adapter->fhash.fnum);
-               return;
        }
 
        memcpy(&src_addr, phdr->h_source, ETH_ALEN);
@@ -353,6 +341,11 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                }
        }
 
+       if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+               adapter->stats.mac_filter_limit_overrun++;
+               return;
+       }
+
        fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
        if (!fil)
                return;
@@ -1216,8 +1209,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
        if (!skb)
                return buffer;
 
-       if (adapter->drv_mac_learn &&
-           (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+       if (adapter->rx_mac_learn) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
@@ -1293,8 +1285,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
        if (!skb)
                return buffer;
 
-       if (adapter->drv_mac_learn &&
-           (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+       if (adapter->rx_mac_learn) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
index dbf75393f758a153ccecfe2a8e49edb13dc8ff9a..f06ba90b428233586bb846f6962ec210e00501d8 100644 (file)
@@ -378,7 +378,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
        if (!adapter->fdb_mac_learn)
                return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
 
-       if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+       if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+           qlcnic_sriov_check(adapter)) {
                if (is_unicast_ether_addr(addr)) {
                        err = dev_uc_del(netdev, addr);
                        if (!err)
@@ -402,7 +403,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        if (!adapter->fdb_mac_learn)
                return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
 
-       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+           !qlcnic_sriov_check(adapter)) {
                pr_info("%s: FDB e-switch is not enabled\n", __func__);
                return -EOPNOTSUPP;
        }
@@ -432,7 +434,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
        if (!adapter->fdb_mac_learn)
                return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
 
-       if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+       if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+           qlcnic_sriov_check(adapter))
                idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
 
        return idx;
@@ -522,7 +525,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 #endif
 #ifdef CONFIG_QLCNIC_SRIOV
        .ndo_set_vf_mac         = qlcnic_sriov_set_vf_mac,
-       .ndo_set_vf_tx_rate     = qlcnic_sriov_set_vf_tx_rate,
+       .ndo_set_vf_rate        = qlcnic_sriov_set_vf_tx_rate,
        .ndo_get_vf_config      = qlcnic_sriov_get_vf_config,
        .ndo_set_vf_vlan        = qlcnic_sriov_set_vf_vlan,
        .ndo_set_vf_spoofchk    = qlcnic_sriov_set_vf_spoofchk,
@@ -690,10 +693,10 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
                adapter->msix_entries[vector].entry = vector;
 
 restore:
-       err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
-       if (err > 0) {
+       err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+       if (err == -ENOSPC) {
                if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
-                       return -ENOSPC;
+                       return err;
 
                netdev_info(adapter->netdev,
                            "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
@@ -1014,6 +1017,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 
                if (pfn >= ahw->max_vnic_func) {
                        ret = QL_STATUS_INVALID_PARAM;
+                       dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+                               __func__, pfn, ahw->max_vnic_func);
                        goto err_eswitch;
                }
 
@@ -1915,8 +1920,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
        if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
                return;
 
-       if (qlcnic_sriov_vf_check(adapter))
-               qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
        smp_mb();
        netif_carrier_off(netdev);
        adapter->ahw->linkup = 0;
@@ -1928,6 +1931,8 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
                qlcnic_delete_lb_filters(adapter);
 
        qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+       if (qlcnic_sriov_vf_check(adapter))
+               qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
 
        qlcnic_napi_disable(adapter);
 
@@ -2052,6 +2057,7 @@ out:
 
 static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
        int err = 0;
 
        adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -2061,6 +2067,18 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
                goto err_out;
        }
 
+       if (qlcnic_83xx_check(adapter)) {
+               ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+               ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+               ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+               ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+               ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+       } else {
+               ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+               ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+               ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+       }
+
        /* clear stats */
        memset(&adapter->stats, 0, sizeof(adapter->stats));
 err_out:
@@ -2206,6 +2224,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
        ahw->max_uc_count = count;
 }
 
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+                                     u8 tx_queues, u8 rx_queues)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err = 0;
+
+       if (tx_queues) {
+               err = netif_set_real_num_tx_queues(netdev, tx_queues);
+               if (err) {
+                       netdev_err(netdev, "failed to set %d Tx queues\n",
+                                  tx_queues);
+                       return err;
+               }
+       }
+
+       if (rx_queues) {
+               err = netif_set_real_num_rx_queues(netdev, rx_queues);
+               if (err)
+                       netdev_err(netdev, "failed to set %d Rx queues\n",
+                                  rx_queues);
+       }
+
+       return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                    int pci_using_dac)
@@ -2222,10 +2265,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 
        qlcnic_change_mtu(netdev, netdev->mtu);
 
-       if (qlcnic_sriov_vf_check(adapter))
-               SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops);
-       else
-               SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+       netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+               &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
 
        netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                             NETIF_F_IPV6_CSUM | NETIF_F_GRO |
@@ -2269,7 +2310,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
 
-       err = qlcnic_set_real_num_queues(adapter, netdev);
+       err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+                                        adapter->drv_sds_rings);
        if (err)
                return err;
 
@@ -2374,6 +2416,14 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
                qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
 }
 
+/* Reset firmware API lock */
+static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
+{
+       qlcnic_api_lock(adapter);
+       qlcnic_api_unlock(adapter);
+}
+
+
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -2383,9 +2433,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int err, pci_using_dac = -1;
        char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
 
-       if (pdev->is_virtfn)
-               return -ENODEV;
-
        err = pci_enable_device(pdev);
        if (err)
                return err;
@@ -2476,6 +2523,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (qlcnic_82xx_check(adapter)) {
                qlcnic_check_vf(adapter, ent);
                adapter->portnum = adapter->ahw->pci_func;
+               qlcnic_reset_api_lock(adapter);
                err = qlcnic_start_firmware(adapter);
                if (err) {
                        dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
@@ -2517,9 +2565,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        case -ENOMEM:
                                dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
                                goto err_out_free_hw;
+                       case -EOPNOTSUPP:
+                               dev_err(&pdev->dev, "Adapter initialization failed\n");
+                               goto err_out_free_hw;
                        default:
-                               dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
-                               dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+                               dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
                                goto err_out_maintenance_mode;
                        }
                }
@@ -2593,7 +2643,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                qlcnic_alloc_lb_filters_mem(adapter);
 
        qlcnic_add_sysfs(adapter);
-
+       qlcnic_register_hwmon_dev(adapter);
        return 0;
 
 err_out_disable_mbx_intr:
@@ -2630,7 +2680,7 @@ err_out_disable_pdev:
 err_out_maintenance_mode:
        set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
        netdev->netdev_ops = &qlcnic_netdev_failed_ops;
-       SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+       netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
        ahw->port_type = QLCNIC_XGBE;
 
        if (qlcnic_83xx_check(adapter))
@@ -2663,9 +2713,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
                return;
 
        netdev = adapter->netdev;
-       qlcnic_sriov_pf_disable(adapter);
 
        qlcnic_cancel_idc_work(adapter);
+       qlcnic_sriov_pf_disable(adapter);
        ahw = adapter->ahw;
 
        unregister_netdev(netdev);
@@ -2700,6 +2750,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
 
        qlcnic_remove_sysfs(adapter);
 
+       qlcnic_unregister_hwmon_dev(adapter);
+
        qlcnic_cleanup_pci_map(adapter->ahw);
 
        qlcnic_release_firmware(adapter);
@@ -2793,6 +2845,8 @@ static int qlcnic_close(struct net_device *netdev)
        return 0;
 }
 
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
 void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
 {
        void *head;
@@ -2808,7 +2862,10 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
        spin_lock_init(&adapter->mac_learn_lock);
        spin_lock_init(&adapter->rx_mac_learn_lock);
 
-       if (qlcnic_82xx_check(adapter)) {
+       if (qlcnic_sriov_vf_check(adapter)) {
+               filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+               adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+       } else if (qlcnic_82xx_check(adapter)) {
                filter_size = QLCNIC_LB_MAX_FILTERS;
                adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
        } else {
@@ -2934,9 +2991,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
                            tx_ring->tx_stats.xmit_called,
                            tx_ring->tx_stats.xmit_on,
                            tx_ring->tx_stats.xmit_off);
+
+               if (tx_ring->crb_intr_mask)
+                       netdev_info(netdev, "crb_intr_mask=%d\n",
+                                   readl(tx_ring->crb_intr_mask));
+
                netdev_info(netdev,
-                           "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-                           readl(tx_ring->crb_intr_mask),
+                           "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
                            readl(tx_ring->crb_cmd_producer),
                            tx_ring->producer, tx_ring->sw_consumer,
                            le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3969,12 +4030,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       u8 tx_rings, rx_rings;
        int err;
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EBUSY;
 
+       tx_rings = adapter->drv_tss_rings;
+       rx_rings = adapter->drv_rss_rings;
+
        netif_device_detach(netdev);
+
+       err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+       if (err)
+               goto done;
+
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -3994,7 +4064,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
                return err;
        }
 
-       netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+       /* Check if we need to update real_num_{tx|rx}_queues because
+        * qlcnic_setup_intr() may change Tx/Rx rings size
+        */
+       if ((tx_rings != adapter->drv_tx_rings) ||
+           (rx_rings != adapter->drv_sds_rings)) {
+               err = qlcnic_set_real_num_queues(adapter,
+                                                adapter->drv_tx_rings,
+                                                adapter->drv_sds_rings);
+               if (err)
+                       goto done;
+       }
 
        if (qlcnic_83xx_check(adapter)) {
                qlcnic_83xx_initialize_nic(adapter, 1);
@@ -4064,7 +4144,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
 
        rcu_read_lock();
        for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
-               dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
+               dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
                if (!dev)
                        continue;
                qlcnic_config_indev_addr(adapter, dev, event);
index 37b979b1266bc2e0aa552f84dd8a14a149c82665..f7694da8ed5dcbbc5ed0124427f7b303a91c23f3 100644 (file)
@@ -238,6 +238,8 @@ void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
 
        hdr->drv_cap_mask = hdr->cap_mask;
        fw_dump->cap_mask = hdr->cap_mask;
+
+       fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
 }
 
 inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
@@ -276,6 +278,8 @@ inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
        hdr->saved_state[index] = value;
 }
 
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
 void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
 {
        struct qlcnic_83xx_dump_template_hdr *hdr;
@@ -288,6 +292,9 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
 
        hdr->drv_cap_mask = hdr->cap_mask;
        fw_dump->cap_mask = hdr->cap_mask;
+
+       fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+                              QLCNIC_TEMPLATE_VERSION;
 }
 
 inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
@@ -658,29 +665,28 @@ out:
 static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
                                struct __mem *mem)
 {
-       struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
        struct device *dev = &adapter->pdev->dev;
        u32 dma_no, dma_base_addr, temp_addr;
        int i, ret, dma_sts;
+       void *tmpl_hdr;
 
        tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
-       dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+       dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+                                       QLC_83XX_DMA_ENGINE_INDEX);
        dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
 
        temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
-       ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
-                                          mem->desc_card_addr);
+       ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
        if (ret)
                return ret;
 
        temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
-       ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
+       ret = qlcnic_ind_wr(adapter, temp_addr, 0);
        if (ret)
                return ret;
 
        temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
-       ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
-                                          mem->start_dma_cmd);
+       ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
        if (ret)
                return ret;
 
@@ -710,15 +716,16 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
        struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
        u32 temp, dma_base_addr, size = 0, read_size = 0;
        struct qlcnic_pex_dma_descriptor *dma_descr;
-       struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
        struct device *dev = &adapter->pdev->dev;
        dma_addr_t dma_phys_addr;
        void *dma_buffer;
+       void *tmpl_hdr;
 
        tmpl_hdr = fw_dump->tmpl_hdr;
 
        /* Check if DMA engine is available */
-       temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+       temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+                                     QLC_83XX_DMA_ENGINE_INDEX);
        dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
        temp = qlcnic_ind_rd(adapter,
                             dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
@@ -764,8 +771,8 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
 
                /* Write DMA descriptor to MS memory*/
                temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
-               *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
-                                                  (u32 *)dma_descr, temp);
+               *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+                                             (u32 *)dma_descr, temp);
                if (*ret) {
                        dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
                                 mem->desc_card_addr);
@@ -1141,8 +1148,6 @@ free_mem:
        return err;
 }
 
-#define QLCNIC_TEMPLATE_VERSION (0x20001)
-
 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_hardware_context *ahw;
@@ -1203,12 +1208,6 @@ flash_temp:
                 "Default minidump capture mask 0x%x\n",
                 fw_dump->cap_mask);
 
-       if (qlcnic_83xx_check(adapter) &&
-           (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
-               fw_dump->use_pex_dma = true;
-       else
-               fw_dump->use_pex_dma = false;
-
        qlcnic_enable_fw_dump_state(adapter);
 
        return 0;
index 396bd1fd1d277deb56d70e185280ed3172902a49..4677b2edccca79fa1072e8a67315add790271c93 100644 (file)
@@ -52,6 +52,7 @@ enum qlcnic_bc_commands {
        QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
 };
 
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
 #define QLC_BC_CMD 1
 
 struct qlcnic_trans_list {
@@ -151,13 +152,14 @@ struct qlcnic_vf_info {
        struct qlcnic_trans_list        rcv_pend;
        struct qlcnic_adapter           *adapter;
        struct qlcnic_vport             *vp;
-       struct mutex                    vlan_list_lock; /* Lock for VLAN list */
+       spinlock_t                      vlan_list_lock; /* Lock for VLAN list */
 };
 
 struct qlcnic_async_work_list {
        struct list_head        list;
        struct work_struct      work;
        void                    *ptr;
+       struct qlcnic_cmd_args  *cmd;
 };
 
 struct qlcnic_back_channel {
@@ -231,7 +233,7 @@ bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
 void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
 int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
 int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
-int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
 int qlcnic_sriov_get_vf_config(struct net_device *, int ,
                               struct ifla_vf_info *);
 int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
index 0638c1810d54547df9eafb961439085dff4364a2..2bdd9deffb38d5eca8997f6567368bd5b0456e55 100644 (file)
@@ -39,6 +39,8 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
 static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
 static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+                                       struct qlcnic_cmd_args *);
 
 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
        .read_crb                       = qlcnic_83xx_read_crb,
@@ -181,7 +183,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
                vf->adapter = adapter;
                vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
                mutex_init(&vf->send_cmd_lock);
-               mutex_init(&vf->vlan_list_lock);
+               spin_lock_init(&vf->vlan_list_lock);
                INIT_LIST_HEAD(&vf->rcv_act.wait_list);
                INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
                spin_lock_init(&vf->rcv_act.lock);
@@ -197,8 +199,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
                                goto qlcnic_destroy_async_wq;
                        }
                        sriov->vf_info[i].vp = vp;
+                       vp->vlan_mode = QLC_GUEST_VLAN_MODE;
                        vp->max_tx_bw = MAX_BW;
-                       vp->spoofchk = true;
+                       vp->min_tx_bw = MIN_BW;
+                       vp->spoofchk = false;
                        random_ether_addr(vp->mac);
                        dev_info(&adapter->pdev->dev,
                                 "MAC Address %pM is configured for VF %d\n",
@@ -515,6 +519,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 {
        int err;
 
+       adapter->flags |= QLCNIC_VLAN_FILTERING;
+       adapter->ahw->total_nic_func = 1;
        INIT_LIST_HEAD(&adapter->vf_mc_list);
        if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
                dev_warn(&adapter->pdev->dev,
@@ -770,6 +776,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
                cmd->req.arg = (u32 *)trans->req_pay;
                cmd->rsp.arg = (u32 *)trans->rsp_pay;
                cmd_op = cmd->req.arg[0] & 0xff;
+               cmd->cmd_op = cmd_op;
                remainder = (trans->rsp_pay_size) % (bc_pay_sz);
                num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
                if (remainder)
@@ -1356,7 +1363,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
        return -EIO;
 }
 
-static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
                                  struct qlcnic_cmd_args *cmd)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1370,7 +1377,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
 
        rsp = qlcnic_sriov_alloc_bc_trans(&trans);
        if (rsp)
-               return rsp;
+               goto free_cmd;
 
        rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
        if (rsp)
@@ -1408,12 +1415,17 @@ retry:
            (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
                rsp = QLCNIC_RCODE_SUCCESS;
        } else {
-               rsp = mbx_err_code;
-               if (!rsp)
-                       rsp = 1;
-               dev_err(dev,
-                       "MBX command 0x%x failed with err:0x%x for VF %d\n",
-                       opcode, mbx_err_code, func);
+               if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+                       rsp = QLCNIC_RCODE_SUCCESS;
+               } else {
+                       rsp = mbx_err_code;
+                       if (!rsp)
+                               rsp = 1;
+
+                       dev_err(dev,
+                               "MBX command 0x%x failed with err:0x%x for VF %d\n",
+                               opcode, mbx_err_code, func);
+               }
        }
 
 err_out:
@@ -1425,9 +1437,26 @@ err_out:
 
 cleanup_transaction:
        qlcnic_sriov_cleanup_transaction(trans);
+
+free_cmd:
+       if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+               qlcnic_free_mbx_args(cmd);
+               kfree(cmd);
+       }
+
        return rsp;
 }
 
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+                                 struct qlcnic_cmd_args *cmd)
+{
+       if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+               return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+       else
+               return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
 {
        struct qlcnic_cmd_args cmd;
@@ -1458,58 +1487,28 @@ out:
        return ret;
 }
 
-static void qlcnic_vf_add_mc_list(struct net_device *netdev)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
-       struct qlcnic_mac_vlan_list *cur;
-       struct list_head *head, tmp_list;
        struct qlcnic_vf_info *vf;
        u16 vlan_id;
        int i;
 
-       static const u8 bcast_addr[ETH_ALEN] = {
-               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-       };
-
        vf = &adapter->ahw->sriov->vf_info[0];
-       INIT_LIST_HEAD(&tmp_list);
-       head = &adapter->vf_mc_list;
-       netif_addr_lock_bh(netdev);
-
-       while (!list_empty(head)) {
-               cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
-               list_move(&cur->list, &tmp_list);
-       }
-
-       netif_addr_unlock_bh(netdev);
 
-       while (!list_empty(&tmp_list)) {
-               cur = list_entry((&tmp_list)->next,
-                                struct qlcnic_mac_vlan_list, list);
-               if (!qlcnic_sriov_check_any_vlan(vf)) {
-                       qlcnic_nic_add_mac(adapter, bcast_addr, 0);
-                       qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
-               } else {
-                       mutex_lock(&vf->vlan_list_lock);
-                       for (i = 0; i < sriov->num_allowed_vlans; i++) {
-                               vlan_id = vf->sriov_vlans[i];
-                               if (vlan_id) {
-                                       qlcnic_nic_add_mac(adapter, bcast_addr,
-                                                          vlan_id);
-                                       qlcnic_nic_add_mac(adapter,
-                                                          cur->mac_addr,
-                                                          vlan_id);
-                               }
-                       }
-                       mutex_unlock(&vf->vlan_list_lock);
-                       if (qlcnic_84xx_check(adapter)) {
-                               qlcnic_nic_add_mac(adapter, bcast_addr, 0);
-                               qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
-                       }
+       if (!qlcnic_sriov_check_any_vlan(vf)) {
+               qlcnic_nic_add_mac(adapter, mac, 0);
+       } else {
+               spin_lock(&vf->vlan_list_lock);
+               for (i = 0; i < sriov->num_allowed_vlans; i++) {
+                       vlan_id = vf->sriov_vlans[i];
+                       if (vlan_id)
+                               qlcnic_nic_add_mac(adapter, mac, vlan_id);
                }
-               list_del(&cur->list);
-               kfree(cur);
+               spin_unlock(&vf->vlan_list_lock);
+               if (qlcnic_84xx_check(adapter))
+                       qlcnic_nic_add_mac(adapter, mac, 0);
        }
 }
 
@@ -1518,6 +1517,7 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
        struct list_head *head = &bc->async_list;
        struct qlcnic_async_work_list *entry;
 
+       flush_workqueue(bc->bc_async_wq);
        while (!list_empty(head)) {
                entry = list_entry(head->next, struct qlcnic_async_work_list,
                                   list);
@@ -1527,10 +1527,14 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
        }
 }
 
-static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       static const u8 bcast_addr[ETH_ALEN] = {
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+       };
+       struct netdev_hw_addr *ha;
        u32 mode = VPORT_MISS_MODE_DROP;
 
        if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -1542,23 +1546,49 @@ static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
        } else if ((netdev->flags & IFF_ALLMULTI) ||
                   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
                mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+       } else {
+               qlcnic_vf_add_mc_list(netdev, bcast_addr);
+               if (!netdev_mc_empty(netdev)) {
+                       netdev_for_each_mc_addr(ha, netdev)
+                               qlcnic_vf_add_mc_list(netdev, ha->addr);
+               }
        }
 
-       if (qlcnic_sriov_vf_check(adapter))
-               qlcnic_vf_add_mc_list(netdev);
+       /* Configure unicast MAC addresses; if there is not enough space
+        * to store all of them, then enable promiscuous mode.
+        */
+       if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+               mode = VPORT_MISS_MODE_ACCEPT_ALL;
+       } else if (!netdev_uc_empty(netdev)) {
+               netdev_for_each_uc_addr(ha, netdev)
+                       qlcnic_vf_add_mc_list(netdev, ha->addr);
+       }
+
+       if (adapter->pdev->is_virtfn) {
+               if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+                   !adapter->fdb_mac_learn) {
+                       qlcnic_alloc_lb_filters_mem(adapter);
+                       adapter->drv_mac_learn = 1;
+                       adapter->rx_mac_learn = true;
+               } else {
+                       adapter->drv_mac_learn = 0;
+                       adapter->rx_mac_learn = false;
+               }
+       }
 
        qlcnic_nic_set_promisc(adapter, mode);
 }
 
-static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
 {
        struct qlcnic_async_work_list *entry;
-       struct net_device *netdev;
+       struct qlcnic_adapter *adapter;
+       struct qlcnic_cmd_args *cmd;
 
        entry = container_of(work, struct qlcnic_async_work_list, work);
-       netdev = (struct net_device *)entry->ptr;
-
-       qlcnic_sriov_vf_set_multi(netdev);
+       adapter = entry->ptr;
+       cmd = entry->cmd;
+       __qlcnic_sriov_issue_cmd(adapter, cmd);
        return;
 }
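The set_multi rework above stops draining a deferred MAC list and instead programs the broadcast, multicast and unicast addresses directly, falling back to a wider miss mode whenever the filter table cannot hold them all. A minimal user-space sketch of that fallback decision, with made-up capacity limits and a stand-in add_filter() in place of qlcnic_nic_add_mac():

#include <stdio.h>
#include <stddef.h>

#define MAX_UC_FILTERS 4            /* hypothetical hardware limits */
#define MAX_MC_FILTERS 8

enum miss_mode { MISS_DROP, MISS_ACCEPT_MULTI, MISS_ACCEPT_ALL };

/* Stand-in for programming one MAC filter into the NIC. */
static void add_filter(const char *mac)
{
        printf("filter: %s\n", mac);
}

static enum miss_mode program_filters(const char *uc[], size_t n_uc,
                                      const char *mc[], size_t n_mc)
{
        enum miss_mode mode = MISS_DROP;
        size_t i;

        if (n_mc > MAX_MC_FILTERS) {
                mode = MISS_ACCEPT_MULTI;       /* too many multicast entries */
        } else {
                add_filter("ff:ff:ff:ff:ff:ff"); /* broadcast is always programmed */
                for (i = 0; i < n_mc; i++)
                        add_filter(mc[i]);
        }

        if (n_uc > MAX_UC_FILTERS) {
                mode = MISS_ACCEPT_ALL;         /* no room: accept everything */
        } else {
                for (i = 0; i < n_uc; i++)
                        add_filter(uc[i]);
        }

        return mode;
}

int main(void)
{
        const char *uc[] = { "02:00:00:00:00:01" };
        const char *mc[] = { "01:00:5e:00:00:01", "01:00:5e:00:00:fb" };

        printf("miss mode = %d\n", program_filters(uc, 1, mc, 2));
        return 0;
}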
 
@@ -1588,8 +1618,9 @@ qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
        return entry;
 }
 
-static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
-                                               work_func_t func, void *data)
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+                                           work_func_t func, void *data,
+                                           struct qlcnic_cmd_args *cmd)
 {
        struct qlcnic_async_work_list *entry = NULL;
 
@@ -1598,21 +1629,23 @@ static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
                return;
 
        entry->ptr = data;
+       entry->cmd = cmd;
        INIT_WORK(&entry->work, func);
        queue_work(bc->bc_async_wq, &entry->work);
 }
 
-void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+                                       struct qlcnic_cmd_args *cmd)
 {
 
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
 
        if (adapter->need_fw_reset)
-               return;
+               return -EIO;
 
-       qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
-                                           netdev);
+       qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
+                                       adapter, cmd);
+       return 0;
 }
 
 static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
@@ -1836,6 +1869,12 @@ static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
        return 0;
 }
 
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+       if (adapter->fhash.fnum)
+               qlcnic_prune_lb_filters(adapter);
+}
+
 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
 {
        struct qlcnic_adapter *adapter;
@@ -1867,6 +1906,8 @@ static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
        }
 
        idc->prev_state = idc->curr_state;
+       qlcnic_sriov_vf_periodic_tasks(adapter);
+
        if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
                qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
                                     idc->delay);
@@ -1890,7 +1931,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
        if (!vf->sriov_vlans)
                return err;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
 
        for (i = 0; i < sriov->num_allowed_vlans; i++) {
                if (vf->sriov_vlans[i] == vlan_id) {
@@ -1899,7 +1940,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
                }
        }
 
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
        return err;
 }
 
@@ -1908,12 +1949,12 @@ static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
 {
        int err = 0;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
 
        if (vf->num_vlan >= sriov->num_allowed_vlans)
                err = -EINVAL;
 
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
        return err;
 }
 
@@ -1966,7 +2007,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
        if (!vf->sriov_vlans)
                return;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
 
        switch (opcode) {
        case QLC_VLAN_ADD:
@@ -1979,7 +2020,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
                netdev_err(adapter->netdev, "Invalid VLAN operation\n");
        }
 
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
        return;
 }
 
@@ -1987,6 +2028,7 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
                                   u16 vid, u8 enable)
 {
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct net_device *netdev = adapter->netdev;
        struct qlcnic_vf_info *vf;
        struct qlcnic_cmd_args cmd;
        int ret;
@@ -2012,14 +2054,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
                dev_err(&adapter->pdev->dev,
                        "Failed to configure guest VLAN, err=%d\n", ret);
        } else {
+               netif_addr_lock_bh(netdev);
                qlcnic_free_mac_list(adapter);
+               netif_addr_unlock_bh(netdev);
 
                if (enable)
                        qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
                else
                        qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
 
-               qlcnic_set_multi(adapter->netdev);
+               netif_addr_lock_bh(netdev);
+               qlcnic_set_multi(netdev);
+               netif_addr_unlock_bh(netdev);
        }
 
        qlcnic_free_mbx_args(&cmd);
@@ -2150,11 +2196,11 @@ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
 {
        bool err = false;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
 
        if (vf->num_vlan)
                err = true;
 
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
        return err;
 }
index 2801379915447dc54683719c40058d9d89f8387f..a29538b86edfcac99010cedbea1fff0a29db799b 100644 (file)
@@ -16,6 +16,7 @@
 #define QLC_VF_FLOOD_BIT       BIT_16
 #define QLC_FLOOD_MODE         0x5
 #define QLC_SRIOV_ALLOW_VLAN0  BIT_19
+#define QLC_INTR_COAL_TYPE_MASK        0x7
 
 static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
 
@@ -83,7 +84,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
        info->max_tx_ques = res->num_tx_queues / max;
 
        if (qlcnic_83xx_pf_check(adapter))
-               num_macs = 1;
+               num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
 
        info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
 
@@ -337,9 +338,12 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
 
        cmd.req.arg[1] = 0x4;
        if (enable) {
+               adapter->flags |= QLCNIC_VLAN_FILTERING;
                cmd.req.arg[1] |= BIT_16;
                if (qlcnic_84xx_check(adapter))
                        cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+       } else {
+               adapter->flags &= ~QLCNIC_VLAN_FILTERING;
        }
 
        err = qlcnic_issue_cmd(adapter, &cmd);
@@ -471,12 +475,12 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
                return -EPERM;
        }
 
+       qlcnic_sriov_pf_disable(adapter);
+
        rtnl_lock();
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
-       qlcnic_sriov_pf_disable(adapter);
-
        qlcnic_sriov_free_vlans(adapter);
 
        qlcnic_sriov_pf_cleanup(adapter);
@@ -595,7 +599,6 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
 
        qlcnic_sriov_alloc_vlans(adapter);
 
-       err = qlcnic_sriov_pf_enable(adapter, num_vfs);
        return err;
 
 del_flr_queue:
@@ -626,25 +629,36 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
                __qlcnic_down(adapter, netdev);
 
        err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
-       if (err) {
-               netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
-                           adapter->portnum);
+       if (err)
+               goto error;
 
-               err = -EIO;
-               if (qlcnic_83xx_configure_opmode(adapter))
-                       goto error;
-       } else {
+       if (netif_running(netdev))
+               __qlcnic_up(adapter, netdev);
+
+       rtnl_unlock();
+       err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+       if (!err) {
                netdev_info(netdev,
                            "SR-IOV is enabled successfully on port %d\n",
                            adapter->portnum);
                /* Return number of vfs enabled */
-               err = num_vfs;
+               return num_vfs;
        }
+
+       rtnl_lock();
        if (netif_running(netdev))
-               __qlcnic_up(adapter, netdev);
+               __qlcnic_down(adapter, netdev);
 
 error:
+       if (!qlcnic_83xx_configure_opmode(adapter)) {
+               if (netif_running(netdev))
+                       __qlcnic_up(adapter, netdev);
+       }
+
        rtnl_unlock();
+       netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+                   adapter->portnum);
+
        return err;
 }
 
@@ -773,7 +787,7 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
                                       struct qlcnic_vf_info *vf,
                                       u16 vlan, u8 op)
 {
-       struct qlcnic_cmd_args cmd;
+       struct qlcnic_cmd_args *cmd;
        struct qlcnic_macvlan_mbx mv;
        struct qlcnic_vport *vp;
        u8 *addr;
@@ -783,21 +797,27 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
 
        vp = vf->vp;
 
-       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
+       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (!cmd)
                return -ENOMEM;
 
+       err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+       if (err)
+               goto free_cmd;
+
+       cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
        vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
        if (vpid < 0) {
                err = -EINVAL;
-               goto out;
+               goto free_args;
        }
 
        if (vlan)
                op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
                      QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
 
-       cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
-       cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+       cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+       cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
 
        addr = vp->mac;
        mv.vlan = vlan;
@@ -807,18 +827,18 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
        mv.mac_addr3 = addr[3];
        mv.mac_addr4 = addr[4];
        mv.mac_addr5 = addr[5];
-       buf = &cmd.req.arg[2];
+       buf = &cmd->req.arg[2];
        memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
 
-       err = qlcnic_issue_cmd(adapter, &cmd);
+       err = qlcnic_issue_cmd(adapter, cmd);
 
-       if (err)
-               dev_err(&adapter->pdev->dev,
-                       "MAC-VLAN %s to CAM failed, err=%d.\n",
-                       ((op == 1) ? "add " : "delete "), err);
+       if (!err)
+               return err;
 
-out:
-       qlcnic_free_mbx_args(&cmd);
+free_args:
+       qlcnic_free_mbx_args(cmd);
+free_cmd:
+       kfree(cmd);
        return err;
 }
 
@@ -840,7 +860,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
 
        sriov = adapter->ahw->sriov;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
        if (vf->num_vlan) {
                for (i = 0; i < sriov->num_allowed_vlans; i++) {
                        vlan = vf->sriov_vlans[i];
@@ -849,7 +869,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
                                                            opcode);
                }
        }
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
 
        if (vf->vp->vlan_mode != QLC_PVID_MODE) {
                if (qlcnic_83xx_pf_check(adapter) &&
@@ -1178,19 +1198,41 @@ static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
 {
        struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
        u16 ctx_id, pkts, time;
+       int err = -EINVAL;
+       u8 type;
 
+       type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
        ctx_id = cmd->req.arg[1] >> 16;
        pkts = cmd->req.arg[2] & 0xffff;
        time = cmd->req.arg[2] >> 16;
 
-       if (ctx_id != vf->rx_ctx_id)
-               return -EINVAL;
-       if (pkts > coal->rx_packets)
-               return -EINVAL;
-       if (time < coal->rx_time_us)
-               return -EINVAL;
+       switch (type) {
+       case QLCNIC_INTR_COAL_TYPE_RX:
+               if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+                   time < coal->rx_time_us)
+                       goto err_label;
+               break;
+       case QLCNIC_INTR_COAL_TYPE_TX:
+               if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+                   time < coal->tx_time_us)
+                       goto err_label;
+               break;
+       default:
+               netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+                          type);
+               return err;
+       }
 
        return 0;
+
+err_label:
+       netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+                  vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+                  vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+       netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+                  ctx_id, pkts, time, type);
+
+       return err;
 }
 
 static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
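The expanded validation above decodes the coalescing type from the low bits of the first mailbox argument and then checks the context id, packet count and timer against the RX or TX limits separately, logging both expected and received values on failure. A rough sketch of the unpack-and-range-check pattern (field positions follow the hunk; the type codes and limit values here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define COAL_TYPE_MASK 0x7

enum coal_type { COAL_RX = 1, COAL_TX = 2 };    /* illustrative values */

struct coal_limits { uint16_t max_pkts, min_time_us; };

/* Return 0 if the packed request fits the per-ring limits, -1 otherwise. */
static int validate_coal(uint32_t arg1, uint32_t arg2,
                         uint16_t expect_ctx, const struct coal_limits *lim)
{
        uint8_t  type    = arg1 & COAL_TYPE_MASK;
        uint16_t ctx_id  = arg1 >> 16;
        uint16_t pkts    = arg2 & 0xffff;
        uint16_t time_us = arg2 >> 16;

        if (type != COAL_RX && type != COAL_TX)
                return -1;                      /* unknown coalescing type */
        if (ctx_id != expect_ctx)
                return -1;                      /* wrong ring context */
        if (pkts > lim->max_pkts || time_us < lim->min_time_us)
                return -1;                      /* outside the allowed range */
        return 0;
}

int main(void)
{
        struct coal_limits rx = { .max_pkts = 128, .min_time_us = 3 };
        uint32_t arg1 = (5u << 16) | COAL_RX;   /* ctx 5, RX type */
        uint32_t arg2 = (10u << 16) | 64;       /* 10 us, 64 packets */

        printf("valid = %d\n", validate_coal(arg1, arg2, 5, &rx) == 0);
        return 0;
}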
@@ -1214,7 +1256,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
                                             struct qlcnic_vf_info *vf,
                                             struct qlcnic_cmd_args *cmd)
 {
-       struct qlcnic_macvlan_mbx *macvlan;
        struct qlcnic_vport *vp = vf->vp;
        u8 op, new_op;
 
@@ -1224,14 +1265,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
        cmd->req.arg[1] |= (vf->vp->handle << 16);
        cmd->req.arg[1] |= BIT_31;
 
-       macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
-       if (!(macvlan->mac_addr0 & BIT_0)) {
-               dev_err(&adapter->pdev->dev,
-                       "MAC address change is not allowed from VF %d",
-                       vf->pci_func);
-               return -EINVAL;
-       }
-
        if (vp->vlan_mode == QLC_PVID_MODE) {
                op = cmd->req.arg[1] & 0x7;
                cmd->req.arg[1] &= ~0x7;
@@ -1815,7 +1848,8 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        return 0;
 }
 
-int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
+                               int min_tx_rate, int max_tx_rate)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1830,35 +1864,52 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
        if (vf >= sriov->num_vfs)
                return -EINVAL;
 
-       if (tx_rate >= 10000 || tx_rate < 100) {
+       vf_info = &sriov->vf_info[vf];
+       vp = vf_info->vp;
+       vpid = vp->handle;
+
+       if (!min_tx_rate)
+               min_tx_rate = QLC_VF_MIN_TX_RATE;
+
+       if (max_tx_rate &&
+           (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
                netdev_err(netdev,
-                          "Invalid Tx rate, allowed range is [%d - %d]",
-                          QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE);
+                          "Invalid max Tx rate, allowed range is [%d - %d]",
+                          min_tx_rate, QLC_VF_MAX_TX_RATE);
                return -EINVAL;
        }
 
-       if (tx_rate == 0)
-               tx_rate = 10000;
+       if (!max_tx_rate)
+               max_tx_rate = 10000;
 
-       vf_info = &sriov->vf_info[vf];
-       vp = vf_info->vp;
-       vpid = vp->handle;
+       if (min_tx_rate &&
+           (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+               netdev_err(netdev,
+                          "Invalid min Tx rate, allowed range is [%d - %d]",
+                          QLC_VF_MIN_TX_RATE, max_tx_rate);
+               return -EINVAL;
+       }
 
        if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
                if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
                        return -EIO;
 
-               nic_info.max_tx_bw = tx_rate / 100;
+               nic_info.max_tx_bw = max_tx_rate / 100;
+               nic_info.min_tx_bw = min_tx_rate / 100;
                nic_info.bit_offsets = BIT_0;
 
                if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
                        return -EIO;
        }
 
-       vp->max_tx_bw = tx_rate / 100;
+       vp->max_tx_bw = max_tx_rate / 100;
        netdev_info(netdev,
-                   "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
-                   tx_rate, vp->max_tx_bw, vf);
+                   "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+                   max_tx_rate, vp->max_tx_bw, vf);
+       vp->min_tx_bw = min_tx_rate / 100;
+       netdev_info(netdev,
+                   "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+                   min_tx_rate, vp->min_tx_bw, vf);
        return 0;
 }
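The ndo_set_vf_rate conversion above takes a min/max pair instead of a single rate: zero means "use the 100 Mbps floor" for the minimum and "no cap" (the full 10 Gb/s link) for the maximum, and accepted values are stored as a percentage of PF bandwidth. A compressed sketch of that validation and the Mbps-to-percent conversion (bounds mirror the hunk; error reporting is reduced to a return code):

#include <stdio.h>

#define VF_MIN_TX_RATE  100     /* Mbps, as in the hunk */
#define LINK_SPEED_MBPS 10000   /* 10 Gb/s link */

/* Validate a min/max rate pair and convert to percent of link speed.
 * Returns 0 on success, -1 on invalid input. */
static int set_vf_rate(int min_rate, int max_rate,
                       int *min_bw_pct, int *max_bw_pct)
{
        if (!min_rate)
                min_rate = VF_MIN_TX_RATE;      /* default floor */

        if (max_rate && (max_rate >= LINK_SPEED_MBPS || max_rate < min_rate))
                return -1;                      /* max out of range */

        if (!max_rate)
                max_rate = LINK_SPEED_MBPS;     /* 0 means "no cap" */

        if (min_rate > max_rate || min_rate < VF_MIN_TX_RATE)
                return -1;                      /* min out of range */

        *max_bw_pct = max_rate / 100;           /* Mbps -> % of 10G */
        *min_bw_pct = min_rate / 100;
        return 0;
}

int main(void)
{
        int min_pct, max_pct;

        if (!set_vf_rate(200, 4000, &min_pct, &max_pct))
                printf("min %d%%, max %d%% of PF bandwidth\n",
                       min_pct, max_pct);
        return 0;
}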
 
@@ -1957,9 +2008,13 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
        ivi->qos = vp->qos;
        ivi->spoofchk = vp->spoofchk;
        if (vp->max_tx_bw == MAX_BW)
-               ivi->tx_rate = 0;
+               ivi->max_tx_rate = 0;
+       else
+               ivi->max_tx_rate = vp->max_tx_bw * 100;
+       if (vp->min_tx_bw == MIN_BW)
+               ivi->min_tx_rate = 0;
        else
-               ivi->tx_rate = vp->max_tx_bw * 100;
+               ivi->min_tx_rate = vp->min_tx_bw * 100;
 
        ivi->vf = vf;
        return 0;
index cd346e27f2e1270078a7580c5e563bc178d5ef37..f5786d5792df06fe16db6f7ffd2276f9bdabe96f 100644 (file)
 #include <linux/sysfs.h>
 #include <linux/aer.h>
 #include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
 
 #define QLC_STATUS_UNSUPPORTED_CMD     -2
 
@@ -358,6 +362,8 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
                if (adapter->npars[i].pci_func == pci_func)
                        return i;
        }
+
+       dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
        return -EINVAL;
 }
 
@@ -1243,6 +1249,68 @@ static struct bin_attribute bin_attr_flash = {
        .write = qlcnic_83xx_sysfs_flash_write_handler,
 };
 
+#ifdef CONFIG_QLCNIC_HWMON
+
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+                                     struct device_attribute *dev_attr,
+                                     char *buf)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       unsigned int temperature = 0, value = 0;
+
+       if (qlcnic_83xx_check(adapter))
+               value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+       else if (qlcnic_82xx_check(adapter))
+               value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+       temperature = qlcnic_get_temp_val(value);
+       /* display millidegrees Celsius */
+       temperature *= 1000;
+       return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+                         qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct device *hwmon_dev;
+
+       /* Skip hwmon registration for a VF device */
+       if (qlcnic_sriov_vf_check(adapter)) {
+               adapter->ahw->hwmon_dev = NULL;
+               return;
+       }
+       hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+                                                     adapter,
+                                                     qlcnic_hwmon_groups);
+       if (IS_ERR(hwmon_dev)) {
+               dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+                       PTR_ERR(hwmon_dev));
+               hwmon_dev = NULL;
+       }
+       adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+       if (hwmon_dev) {
+               hwmon_device_unregister(hwmon_dev);
+               adapter->ahw->hwmon_dev = NULL;
+       }
+}
+#endif
+
 void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
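The new CONFIG_QLCNIC_HWMON block registers a single temp1_input sensor; by hwmon convention that attribute reports millidegrees Celsius, which is why the decoded register value is multiplied by 1000. A small user-space sketch of that scaling, with a hypothetical low-16-bit field decode standing in for the driver's qlcnic_get_temp_val():

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout: temperature in degrees C in the low 16 bits of
 * the ASIC temperature register; the real decode is qlcnic_get_temp_val(). */
static unsigned int decode_temp(uint32_t reg)
{
        return reg & 0xffff;
}

int main(void)
{
        uint32_t reg = 0x2f;                            /* pretend readout: 47 C */
        unsigned int mdeg = decode_temp(reg) * 1000;    /* hwmon wants millidegrees C */

        printf("temp1_input: %u\n", mdeg);
        return 0;
}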
index 0a1d76acab8171929e3c6f9ad6139b48484ebbcc..b40050e03a56f7524e19dc7c215165d0a0c75585 100644 (file)
@@ -3595,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
        }
        return status;
 err_irq:
-       netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
+       netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
        ql_free_irq(qdev);
        return status;
 }
@@ -4770,7 +4770,7 @@ static int qlge_probe(struct pci_dev *pdev,
        ndev->irq = pdev->irq;
 
        ndev->netdev_ops = &qlge_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+       ndev->ethtool_ops = &qlge_ethtool_ops;
        ndev->watchdog_timeo = 10 * HZ;
 
        err = register_netdev(ndev);
index aa1c079f231dc6f2cfc017cd4950f9ebb8e1e9da..be425ad5e82487e94a91b7371466f7f93ab558eb 100644 (file)
@@ -7125,7 +7125,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = RTL_R8(MAC0 + i);
 
-       SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+       dev->ethtool_ops = &rtl8169_ethtool_ops;
        dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
 
        netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
index 6a9509ccd33b29dd84ddcd0b48269f1c24b8da63..6a94ede699b4d016ea00dd31077cbeb719a18c29 100644 (file)
@@ -2627,8 +2627,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
                 pdev->name, pdev->id);
 
        /* PHY IRQ */
-       mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
-                                        GFP_KERNEL);
+       mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
+                                              GFP_KERNEL);
        if (!mdp->mii_bus->irq) {
                ret = -ENOMEM;
                goto out_free_bus;
@@ -2843,7 +2843,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
                ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
        else
                ndev->netdev_ops = &sh_eth_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
+       ndev->ethtool_ops = &sh_eth_ethtool_ops;
        ndev->watchdog_timeo = TX_TIMEOUT;
 
        /* debug message level */
index 6203c7d8550fda4a530ec91e0a089870e507b4cb..45019649bbbd73227840d866ed92709e22a0d49c 100644 (file)
@@ -358,6 +358,8 @@ struct sxgbe_core_ops {
        /* Enable disable checksum offload operations */
        void (*enable_rx_csum)(void __iomem *ioaddr);
        void (*disable_rx_csum)(void __iomem *ioaddr);
+       void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num);
+       void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num);
 };
 
 const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
index c4da7a2b002a16fa432f0bbe6fbce25be9085acc..58c35692560e599f0977c6460edcd0a616889e5f 100644 (file)
@@ -165,6 +165,26 @@ static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
        writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
 }
 
+static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+       reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+       reg_val |= SXGBE_CORE_RXQ_ENABLE;
+       writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
+static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+       reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+       reg_val |= SXGBE_CORE_RXQ_DISABLE;
+       writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
 static void  sxgbe_set_eee_mode(void __iomem *ioaddr)
 {
        u32 ctrl;
@@ -254,6 +274,8 @@ static const struct sxgbe_core_ops core_ops = {
        .set_eee_pls            = sxgbe_set_eee_pls,
        .enable_rx_csum         = sxgbe_enable_rx_csum,
        .disable_rx_csum        = sxgbe_disable_rx_csum,
+       .enable_rxqueue         = sxgbe_core_enable_rxqueue,
+       .disable_rxqueue        = sxgbe_core_disable_rxqueue,
 };
 
 const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
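The new enable_rxqueue/disable_rxqueue callbacks above update a per-queue field in SXGBE_CORE_RX_CTL0_REG with the usual read-modify-write: clear the queue's bits, then OR in the new state. The same pattern on an ordinary variable, assuming a two-bit field per queue and shifting the value together with the mask (the real helpers operate on the memory-mapped register via readl()/writel()):

#include <stdio.h>

#define RXQ_FIELD_MASK 0x3u     /* assumed: two bits of state per queue */
#define RXQ_ENABLE     0x2u
#define RXQ_DISABLE    0x0u

/* Read-modify-write one queue's field inside a shared control word;
 * 'shift' selects where that queue's bits live. */
static unsigned int set_rxq_state(unsigned int reg, unsigned int shift,
                                  unsigned int state)
{
        reg &= ~(RXQ_FIELD_MASK << shift);      /* clear the old field */
        reg |= state << shift;                  /* install the new value */
        return reg;
}

int main(void)
{
        unsigned int ctl = 0;

        ctl = set_rxq_state(ctl, 0, RXQ_ENABLE);        /* enable queue 0 */
        ctl = set_rxq_state(ctl, 2, RXQ_ENABLE);        /* enable queue 1 */
        printf("RX_CTL0 = 0x%08x\n", ctl);
        ctl = set_rxq_state(ctl, 2, RXQ_DISABLE);       /* disable queue 1 */
        printf("RX_CTL0 = 0x%08x\n", ctl);
        return 0;
}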
index e896dbbd2e156514eaf1d83ed8e132fbc3d88e37..2686bb5b6765680a8e18eeabd89146384da1c6f5 100644 (file)
@@ -45,10 +45,10 @@ static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
        p->tdes23.tx_rd_des23.first_desc = is_fd;
        p->tdes23.tx_rd_des23.buf1_size = buf1_len;
 
-       p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
+       p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len;
 
        if (cksum)
-               p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
+               p->tdes23.tx_rd_des23.cksum_ctl = cic_full;
 }
 
 /* Set VLAN control information */
@@ -233,6 +233,12 @@ static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
        p->rdes23.rx_rd_des23.own_bit = 1;
 }
 
+/* Set Interrupt on completion bit */
+static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p)
+{
+       p->rdes23.rx_rd_des23.int_on_com = 1;
+}
+
 /* Get the receive frame size */
 static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
 {
@@ -498,6 +504,7 @@ static const struct sxgbe_desc_ops desc_ops = {
        .init_rx_desc                   = sxgbe_init_rx_desc,
        .get_rx_owner                   = sxgbe_get_rx_owner,
        .set_rx_owner                   = sxgbe_set_rx_owner,
+       .set_rx_int_on_com              = sxgbe_set_rx_int_on_com,
        .get_rx_frame_len               = sxgbe_get_rx_frame_len,
        .get_rx_fd_status               = sxgbe_get_rx_fd_status,
        .get_rx_ld_status               = sxgbe_get_rx_ld_status,
index 838cb9fb0ea979514bafc7b068cd4c09f3ee5d49..18609324db723dc2fbf5c50880d876ba83deb3d3 100644 (file)
@@ -39,22 +39,22 @@ struct sxgbe_tx_norm_desc {
                        u32 int_on_com:1;
                        /* TDES3 */
                        union {
-                               u32 tcp_payload_len:18;
+                               u16 tcp_payload_len;
                                struct {
                                        u32 total_pkt_len:15;
                                        u32 reserved1:1;
-                                       u32 cksum_ctl:2;
-                               } cksum_pktlen;
+                               } pkt_len;
                        } tx_pkt_len;
 
-                       u32 tse_bit:1;
-                       u32 tcp_hdr_len:4;
-                       u32 sa_insert_ctl:3;
-                       u32 crc_pad_ctl:2;
-                       u32 last_desc:1;
-                       u32 first_desc:1;
-                       u32 ctxt_bit:1;
-                       u32 own_bit:1;
+                       u16 cksum_ctl:2;
+                       u16 tse_bit:1;
+                       u16 tcp_hdr_len:4;
+                       u16 sa_insert_ctl:3;
+                       u16 crc_pad_ctl:2;
+                       u16 last_desc:1;
+                       u16 first_desc:1;
+                       u16 ctxt_bit:1;
+                       u16 own_bit:1;
                } tx_rd_des23;
 
                /* tx write back Desc 2,3 */
@@ -70,25 +70,20 @@ struct sxgbe_tx_norm_desc {
 
 struct sxgbe_rx_norm_desc {
        union {
-               u32 rdes0; /* buf1 address */
-               struct {
+               u64 rdes01; /* buf1 address */
+               union {
                        u32 out_vlan_tag:16;
                        u32 in_vlan_tag:16;
-               } wb_rx_des0;
-       } rd_wb_des0;
-
-       union {
-               u32 rdes1;      /* buf2 address or buf1[63:32] */
-               u32 rss_hash;   /* Write-back RX */
-       } rd_wb_des1;
+                       u32 rss_hash;
+               } rx_wb_des01;
+       } rdes01;
 
        union {
                /* RX Read format Desc 2,3 */
                struct{
                        /* RDES2 */
-                       u32 buf2_addr;
+                       u64 buf2_addr:62;
                        /* RDES3 */
-                       u32 buf2_hi_addr:30;
                        u32 int_on_com:1;
                        u32 own_bit:1;
                } rx_rd_des23;
@@ -263,6 +258,9 @@ struct sxgbe_desc_ops {
        /* Set own bit */
        void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
 
+       /* Set Interrupt on completion bit */
+       void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p);
+
        /* Get the receive frame size */
        int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
 
index 4d989ff6c978a8ad67d36afbb7d2df5ef6632929..bb9b5b8afc5f4417bae05c4ef0e1ea02f84b7421 100644 (file)
 /* DMA core initialization */
 static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
 {
-       int retry_count = 10;
        u32 reg_val;
 
-       /* reset the DMA */
-       writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
-       while (retry_count--) {
-               if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
-                     SXGBE_DMA_SOFT_RESET))
-                       break;
-               mdelay(10);
-       }
-
-       if (retry_count < 0)
-               return -EBUSY;
-
        reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
 
        /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
index 0415fa50eeb77b82376214708419f6e4186e876f..c0981ae45874acc5c247a2b37d84a90296e1196b 100644 (file)
@@ -520,5 +520,5 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
 
 void sxgbe_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
+       netdev->ethtool_ops = &sxgbe_ethtool_ops;
 }
index 137f366ec7e4bc9b1775ee6b4854b5ef09a37e2d..698494481d18072ca00ddecb825ea92bd4e86c56 100644 (file)
@@ -1076,6 +1076,9 @@ static int sxgbe_open(struct net_device *dev)
 
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->ioaddr);
+       SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+               priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
+       }
 
        /* Request the IRQ lines */
        ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
@@ -1452,6 +1455,7 @@ static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
                /* Added memory barrier for RX descriptor modification */
                wmb();
                priv->hw->desc->set_rx_owner(p);
+               priv->hw->desc->set_rx_int_on_com(p);
                /* Added memory barrier for RX descriptor modification */
                wmb();
        }
@@ -2034,6 +2038,24 @@ static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
        return 0;
 }
 
+static int sxgbe_sw_reset(void __iomem *addr)
+{
+       int retry_count = 10;
+
+       writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
+       while (retry_count--) {
+               if (!(readl(addr + SXGBE_DMA_MODE_REG) &
+                     SXGBE_DMA_SOFT_RESET))
+                       break;
+               mdelay(10);
+       }
+
+       if (retry_count < 0)
+               return -EBUSY;
+
+       return 0;
+}
+
 /**
  * sxgbe_drv_probe
  * @device: device pointer
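The sxgbe_sw_reset() helper added above keeps the poll loop that used to live in the DMA init path: assert the soft-reset bit, then poll until the hardware clears it or a bounded number of retries runs out. The shape of that loop, with a simulated register standing in for the memory-mapped SXGBE_DMA_MODE_REG:

#include <stdio.h>
#include <stdint.h>

#define DMA_SOFT_RESET 0x1u

/* Simulated device register: a real driver uses readl()/writel() on the
 * mapped register. Here the "hardware" clears the bit after a few polls. */
static uint32_t fake_reg;
static int polls_left = 3;

static void reg_write(uint32_t v) { fake_reg = v; }

static uint32_t reg_read(void)
{
        if (polls_left-- == 0)
                fake_reg &= ~DMA_SOFT_RESET;    /* reset completed */
        return fake_reg;
}

static int sw_reset(void)
{
        int retry_count = 10;

        reg_write(DMA_SOFT_RESET);
        while (retry_count--) {
                if (!(reg_read() & DMA_SOFT_RESET))
                        return 0;               /* hardware cleared the bit */
                /* a driver would mdelay(10) between polls */
        }
        return -1;                              /* -EBUSY in the driver */
}

int main(void)
{
        printf("sw_reset: %d\n", sw_reset());
        return 0;
}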
@@ -2066,6 +2088,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
        priv->plat = plat_dat;
        priv->ioaddr = addr;
 
+       ret = sxgbe_sw_reset(priv->ioaddr);
+       if (ret)
+               goto error_free_netdev;
+
        /* Verify driver arguments */
        sxgbe_verify_args();
 
@@ -2182,9 +2208,14 @@ error_free_netdev:
 int sxgbe_drv_remove(struct net_device *ndev)
 {
        struct sxgbe_priv_data *priv = netdev_priv(ndev);
+       u8 queue_num;
 
        netdev_info(ndev, "%s: removing driver\n", __func__);
 
+       SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+               priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
+       }
+
        priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
        priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
 
index 01af2cbb479d10a96c1038bfc3f2ff167a20beb3..43ccb4a6de15a3fe2dedc9198a3866fd89143f47 100644 (file)
@@ -27,7 +27,7 @@
 #define SXGBE_SMA_PREAD_CMD    0x02 /* post read increment address */
 #define SXGBE_SMA_READ_CMD     0x03 /* read command */
 #define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
-#define SXGBE_MII_BUSY         0x00800000 /* mii busy */
+#define SXGBE_MII_BUSY         0x00400000 /* mii busy */
 
 static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
 {
@@ -147,6 +147,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
        struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
        int err, phy_addr;
        int *irqlist;
+       bool phy_found = false;
        bool act;
 
        /* allocate the new mdio bus */
@@ -162,7 +163,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
                irqlist = priv->mii_irq;
 
        /* assign mii bus fields */
-       mdio_bus->name = "samsxgbe";
+       mdio_bus->name = "sxgbe";
        mdio_bus->read = &sxgbe_mdio_read;
        mdio_bus->write = &sxgbe_mdio_write;
        snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -216,13 +217,22 @@ int sxgbe_mdio_register(struct net_device *ndev)
                        netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
                                    phy->phy_id, phy_addr, irq_str,
                                    dev_name(&phy->dev), act ? " active" : "");
+                       phy_found = true;
                }
        }
 
+       if (!phy_found) {
+               netdev_err(ndev, "PHY not found\n");
+               goto phyfound_err;
+       }
+
        priv->mii = mdio_bus;
 
        return 0;
 
+phyfound_err:
+       err = -ENODEV;
+       mdiobus_unregister(mdio_bus);
 mdiobus_err:
        mdiobus_free(mdio_bus);
        return err;
index 5a89acb4c505fc83f3847a10c437c961db079dec..56f8bf5a3f1b99564a2b055830810fe3d156e6c4 100644 (file)
 #define SXGBE_CORE_RX_CTL2_REG         0x00A8
 #define SXGBE_CORE_RX_CTL3_REG         0x00AC
 
+#define SXGBE_CORE_RXQ_ENABLE_MASK     0x0003
+#define SXGBE_CORE_RXQ_ENABLE          0x0002
+#define SXGBE_CORE_RXQ_DISABLE         0x0000
+
 /* Interrupt Registers */
 #define SXGBE_CORE_INT_STATUS_REG      0x00B0
 #define SXGBE_CORE_INT_ENABLE_REG      0x00B4
index 63d595fd3cc5f5a9df298dfdd2583abcec9a3a03..1e274045970fa011c6dacb6baeb77db95178b164 100644 (file)
@@ -2248,7 +2248,7 @@ static int efx_register_netdev(struct efx_nic *efx)
        } else {
                net_dev->netdev_ops = &efx_farch_netdev_ops;
        }
-       SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+       net_dev->ethtool_ops = &efx_ethtool_ops;
        net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
 
        rtnl_lock();
index 32d969e857f7befc79bf4a6f18cb153c350b374b..89b83e59e1dc601898ddd60bd0fa704fdd7b6d43 100644 (file)
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
        efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-       /* Disable MSI/MSI-X interrupts */
-       efx_for_each_channel(channel, efx)
-               free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-       /* Disable legacy interrupt */
-       if (efx->legacy_irq)
+       if (EFX_INT_MODE_USE_MSI(efx)) {
+               /* Disable MSI/MSI-X interrupts */
+               efx_for_each_channel(channel, efx)
+                       free_irq(channel->irq,
+                                &efx->msi_context[channel->channel]);
+       } else {
+               /* Disable legacy interrupt */
                free_irq(efx->legacy_irq, efx);
+       }
 }
 
 /* Register dump */
index 9a9205e778964186d09094128a933179926f8006..43d2e64546ed1d924c91d26900b37d1b100dd994 100644 (file)
@@ -1633,7 +1633,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
 
        ivi->vf = vf_i;
        ether_addr_copy(ivi->mac, vf->addr.mac_addr);
-       ivi->tx_rate = 0;
+       ivi->max_tx_rate = 0;
+       ivi->min_tx_rate = 0;
        tci = ntohs(vf->addr.tci);
        ivi->vlan = tci & VLAN_VID_MASK;
        ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
index acbbe48a519c0c673ff8de55c31b1690c3976cb5..a86339903b9b0662a835df431f2b79d0677aa93f 100644 (file)
@@ -1877,7 +1877,7 @@ static int sis190_init_one(struct pci_dev *pdev,
 
        dev->netdev_ops = &sis190_netdev_ops;
 
-       SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
+       dev->ethtool_ops = &sis190_ethtool_ops;
        dev->watchdog_timeo = SIS190_TX_TIMEOUT;
 
        spin_lock_init(&tp->lock);
index c7a4868571f9f81f1607dc50225dec8bdaf6f017..6b33127ab352a43ed6a787af7eedde554241e1b3 100644 (file)
@@ -318,7 +318,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
 
     /* The SMC91c92-specific entries in the device structure. */
     dev->netdev_ops = &smc_netdev_ops;
-    SET_ETHTOOL_OPS(dev, &ethtool_ops);
+    dev->ethtool_ops = &ethtool_ops;
     dev->watchdog_timeo = TX_TIMEOUT;
 
     smc->mii_if.dev = dev;
index d1b4dca53a9d10be97f05e2e09dd08418598bf05..bcaa41af1e628e9f8d2e84fccc7a38ea716e428c 100644 (file)
@@ -147,18 +147,19 @@ MODULE_ALIAS("platform:smc91x");
  */
 #define MII_DELAY              1
 
-#if SMC_DEBUG > 0
-#define DBG(n, dev, args...)                           \
-       do {                                            \
-               if (SMC_DEBUG >= (n))                   \
-                       netdev_dbg(dev, args);          \
+#define DBG(n, dev, fmt, ...)                                  \
+       do {                                                    \
+               if (SMC_DEBUG >= (n))                           \
+                       netdev_dbg(dev, fmt, ##__VA_ARGS__);    \
        } while (0)
 
-#define PRINTK(dev, args...)   netdev_info(dev, args)
-#else
-#define DBG(n, dev, args...)   do { } while (0)
-#define PRINTK(dev, args...)   netdev_dbg(dev, args)
-#endif
+#define PRINTK(dev, fmt, ...)                                  \
+       do {                                                    \
+               if (SMC_DEBUG > 0)                              \
+                       netdev_info(dev, fmt, ##__VA_ARGS__);   \
+               else                                            \
+                       netdev_dbg(dev, fmt, ##__VA_ARGS__);    \
+       } while (0)
 
 #if SMC_DEBUG > 3
 static void PRINT_PKT(u_char *buf, int length)
@@ -191,7 +192,7 @@ static void PRINT_PKT(u_char *buf, int length)
        pr_cont("\n");
 }
 #else
-#define PRINT_PKT(x...)  do { } while (0)
+static inline void PRINT_PKT(u_char *buf, int length) { }
 #endif
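The DBG()/PRINTK() rework above replaces conditionally compiled variants with always-expanding variadic macros, so the format string and arguments are type-checked by the compiler even when SMC_DEBUG compiles the message away; PRINT_PKT() likewise becomes an empty static inline rather than a vanishing macro. A user-space sketch of the same idiom, using the GNU-style ##__VA_ARGS__ the driver uses:

#include <stdio.h>

#define SMC_DEBUG 1     /* compile-time verbosity, mirroring the driver */

/* Always expands, so the format and arguments are checked even when the
 * message is compiled out by the constant condition. */
#define DBG(n, fmt, ...)                                        \
        do {                                                    \
                if (SMC_DEBUG >= (n))                           \
                        fprintf(stderr, fmt, ##__VA_ARGS__);    \
        } while (0)

int main(void)
{
        DBG(1, "level %d message: %s\n", 1, "printed");
        DBG(3, "level %d message: %s\n", 3, "suppressed");
        return 0;
}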
 
 
@@ -1781,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
        int timeout = 20;
        unsigned long cookie;
 
-       DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
+       DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__);
 
        cookie = probe_irq_on();
 
index c5f9cb85c8ef9c31c2ce05894faffa2a484a8093..78926662d58cb7e86dc84c9962f12ffc323c212b 100644 (file)
@@ -322,9 +322,7 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
                return -EBUSY;
        }
        cmd->transceiver = XCVR_INTERNAL;
-       spin_lock_irq(&priv->lock);
        rc = phy_ethtool_gset(phy, cmd);
-       spin_unlock_irq(&priv->lock);
        return rc;
 }
 
@@ -442,7 +440,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
        if (priv->flow_ctrl & FLOW_TX)
                pause->tx_pause = 1;
 
-       spin_unlock(&priv->lock);
 }
 
 static int
@@ -457,8 +454,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
        if (priv->pcs)  /* FIXME */
                return -EOPNOTSUPP;
 
-       spin_lock(&priv->lock);
-
        if (pause->rx_pause)
                new_pause |= FLOW_RX;
        if (pause->tx_pause)
@@ -473,7 +468,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
        } else
                priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
                                         priv->flow_ctrl, priv->pause);
-       spin_unlock(&priv->lock);
        return ret;
 }
 
@@ -784,5 +778,5 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
 
 void stmmac_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+       netdev->ethtool_ops = &stmmac_ethtool_ops;
 }
index 93cf4f63f42646c60c7c4e29eb147ec07db8fd1e..110ca1c766d641cbd78b60ef5e93b1497298eb09 100644 (file)
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
                if (ret) {
                        pr_err("%s: Cannot attach to PHY (error: %d)\n",
                               __func__, ret);
-                       goto phy_error;
+                       return ret;
                }
        }
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
        if (priv->phydev)
                phy_disconnect(priv->phydev);
-phy_error:
-       clk_disable_unprepare(priv->stmmac_clk);
 
        return ret;
 }
index a468eb10782361e31fd0b0af5ce6be33d68a2578..a5b1e1b776fe3313c5c9852062ed66ea1547c285 100644 (file)
@@ -205,10 +205,13 @@ int stmmac_mdio_register(struct net_device *ndev)
        if (new_bus == NULL)
                return -ENOMEM;
 
-       if (mdio_bus_data->irqs)
+       if (mdio_bus_data->irqs) {
                irqlist = mdio_bus_data->irqs;
-       else
+       } else {
+               for (addr = 0; addr < PHY_MAX_ADDR; addr++)
+                       priv->mii_irq[addr] = PHY_POLL;
                irqlist = priv->mii_irq;
+       }
 
 #ifdef CONFIG_OF
        if (priv->device->of_node)
index df8d383acf48ed0da087bb19d144499484ac0376..b9ac20f42651bd90e33e5fcb3da38db5401117a2 100644 (file)
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
        int i;
 
        for (i = 0; i < N_TX_RINGS; i++)
-               spin_lock(&cp->tx_lock[i]);
+               spin_lock_nested(&cp->tx_lock[i], i);
 }
 
 static inline void cas_lock_all(struct cas *cp)
index 2ead87759ab411819be6cb03fa8de3cef8ea2a1f..38da73a2a886b52b3753d3b9dd31f214d345e4fd 100644 (file)
@@ -2413,7 +2413,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
                .get_ethtool_stats = bdx_get_ethtool_stats,
        };
 
-       SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
+       netdev->ethtool_ops = &bdx_ethtool_ops;
 }
 
 /**
index 148da9ae83666ce7cd2284c1cc75f18e9957f15f..aa8bf45e53dc9b0ddab4265b2e527067339a00fc 100644 (file)
@@ -29,6 +29,8 @@
 #define AM33XX_GMII_SEL_RMII2_IO_CLK_EN        BIT(7)
 #define AM33XX_GMII_SEL_RMII1_IO_CLK_EN        BIT(6)
 
+#define GMII_SEL_MODE_MASK             0x3
+
 struct cpsw_phy_sel_priv {
        struct device   *dev;
        u32 __iomem     *gmii_sel;
@@ -65,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
                break;
        };
 
-       mask = 0x3 << (slave * 2) | BIT(slave + 6);
+       mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
        mode <<= slave * 2;
 
        if (priv->rmii_clock_external) {
@@ -81,6 +83,55 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
        writel(reg, priv->gmii_sel);
 }
 
+static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
+                                phy_interface_t phy_mode, int slave)
+{
+       u32 reg;
+       u32 mask;
+       u32 mode = 0;
+
+       reg = readl(priv->gmii_sel);
+
+       switch (phy_mode) {
+       case PHY_INTERFACE_MODE_RMII:
+               mode = AM33XX_GMII_SEL_MODE_RMII;
+               break;
+
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               mode = AM33XX_GMII_SEL_MODE_RGMII;
+               break;
+
+       case PHY_INTERFACE_MODE_MII:
+       default:
+               mode = AM33XX_GMII_SEL_MODE_MII;
+               break;
+       };
+
+       switch (slave) {
+       case 0:
+               mask = GMII_SEL_MODE_MASK;
+               break;
+       case 1:
+               mask = GMII_SEL_MODE_MASK << 4;
+               mode <<= 4;
+               break;
+       default:
+               dev_err(priv->dev, "invalid slave number...\n");
+               return;
+       }
+
+       if (priv->rmii_clock_external)
+               dev_err(priv->dev, "RMII External clock is not supported\n");
+
+       reg &= ~mask;
+       reg |= mode;
+
+       writel(reg, priv->gmii_sel);
+}
+
 static struct platform_driver cpsw_phy_sel_driver;
 static int match(struct device *dev, void *data)
 {
@@ -112,6 +163,14 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
                .compatible     = "ti,am3352-cpsw-phy-sel",
                .data           = &cpsw_gmii_sel_am3352,
        },
+       {
+               .compatible     = "ti,dra7xx-cpsw-phy-sel",
+               .data           = &cpsw_gmii_sel_dra7xx,
+       },
+       {
+               .compatible     = "ti,am43xx-cpsw-phy-sel",
+               .data           = &cpsw_gmii_sel_am3352,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
@@ -132,6 +191,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       priv->dev = &pdev->dev;
        priv->cpsw_phy_sel = of_id->data;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
index 36aa109416c4c3a387a440795a8d3611cf3d4ded..ff380dac6629d16f4d3642f9ed6789ec2da7f2ab 100644 (file)
@@ -143,13 +143,13 @@ do {                                                              \
                u32 i;          \
                for (i = 0; i < priv->num_irqs; i++) \
                        enable_irq(priv->irqs_table[i]); \
-       } while (0);
+       } while (0)
 #define cpsw_disable_irq(priv) \
        do {                    \
                u32 i;          \
                for (i = 0; i < priv->num_irqs; i++) \
                        disable_irq_nosync(priv->irqs_table[i]); \
-       } while (0);
+       } while (0)
 
 #define cpsw_slave_index(priv)                         \
                ((priv->data.dual_emac) ? priv->emac_port :     \
@@ -248,20 +248,31 @@ struct cpsw_ss_regs {
 #define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
 #define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
 #define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
-#define TS_BIT8             (1<<8)  /* ts_ttl_nonzero? */
+#define TS_TTL_NONZERO      (1<<8)  /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN       (1<<6)  /* Time Sync Annex F enable */
 #define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
 #define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
 #define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
 #define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
 #define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */
 
-#define CTRL_TS_BITS \
-       (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
-        TS_ANNEX_D_EN | TS_LTYPE1_EN)
+#define CTRL_V2_TS_BITS \
+       (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+        TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)
+
+
+#define CTRL_V3_TS_BITS \
+       (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+        TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+        TS_LTYPE1_EN)
 
-#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_TX_TS_BITS  (CTRL_TS_BITS | TS_TX_EN)
-#define CTRL_RX_TS_BITS  (CTRL_TS_BITS | TS_RX_EN)
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)
 
 /* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
 #define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
@@ -1376,13 +1387,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
                slave = &priv->slaves[priv->data.active_slave];
 
        ctrl = slave_read(slave, CPSW2_CONTROL);
-       ctrl &= ~CTRL_ALL_TS_MASK;
+       switch (priv->version) {
+       case CPSW_VERSION_2:
+               ctrl &= ~CTRL_V2_ALL_TS_MASK;
 
-       if (priv->cpts->tx_enable)
-               ctrl |= CTRL_TX_TS_BITS;
+               if (priv->cpts->tx_enable)
+                       ctrl |= CTRL_V2_TX_TS_BITS;
 
-       if (priv->cpts->rx_enable)
-               ctrl |= CTRL_RX_TS_BITS;
+               if (priv->cpts->rx_enable)
+                       ctrl |= CTRL_V2_RX_TS_BITS;
+       break;
+       case CPSW_VERSION_3:
+       default:
+               ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+               if (priv->cpts->tx_enable)
+                       ctrl |= CTRL_V3_TX_TS_BITS;
+
+               if (priv->cpts->rx_enable)
+                       ctrl |= CTRL_V3_RX_TS_BITS;
+       break;
+       }
 
        mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
 
@@ -1398,7 +1423,8 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
        struct hwtstamp_config cfg;
 
        if (priv->version != CPSW_VERSION_1 &&
-           priv->version != CPSW_VERSION_2)
+           priv->version != CPSW_VERSION_2 &&
+           priv->version != CPSW_VERSION_3)
                return -EOPNOTSUPP;
 
        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1443,6 +1469,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
                cpsw_hwtstamp_v1(priv);
                break;
        case CPSW_VERSION_2:
+       case CPSW_VERSION_3:
                cpsw_hwtstamp_v2(priv);
                break;
        default:
@@ -1459,7 +1486,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
        struct hwtstamp_config cfg;
 
        if (priv->version != CPSW_VERSION_1 &&
-           priv->version != CPSW_VERSION_2)
+           priv->version != CPSW_VERSION_2 &&
+           priv->version != CPSW_VERSION_3)
                return -EOPNOTSUPP;
 
        cfg.flags = 0;
@@ -1780,25 +1808,25 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                return -EINVAL;
 
        if (of_property_read_u32(node, "slaves", &prop)) {
-               pr_err("Missing slaves property in the DT.\n");
+               dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
                return -EINVAL;
        }
        data->slaves = prop;
 
        if (of_property_read_u32(node, "active_slave", &prop)) {
-               pr_err("Missing active_slave property in the DT.\n");
+               dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
                return -EINVAL;
        }
        data->active_slave = prop;
 
        if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
-               pr_err("Missing cpts_clock_mult property in the DT.\n");
+               dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
                return -EINVAL;
        }
        data->cpts_clock_mult = prop;
 
        if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
-               pr_err("Missing cpts_clock_shift property in the DT.\n");
+               dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
                return -EINVAL;
        }
        data->cpts_clock_shift = prop;
@@ -1810,31 +1838,31 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                return -ENOMEM;
 
        if (of_property_read_u32(node, "cpdma_channels", &prop)) {
-               pr_err("Missing cpdma_channels property in the DT.\n");
+               dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
                return -EINVAL;
        }
        data->channels = prop;
 
        if (of_property_read_u32(node, "ale_entries", &prop)) {
-               pr_err("Missing ale_entries property in the DT.\n");
+               dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
                return -EINVAL;
        }
        data->ale_entries = prop;
 
        if (of_property_read_u32(node, "bd_ram_size", &prop)) {
-               pr_err("Missing bd_ram_size property in the DT.\n");
+               dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
                return -EINVAL;
        }
        data->bd_ram_size = prop;
 
        if (of_property_read_u32(node, "rx_descs", &prop)) {
-               pr_err("Missing rx_descs property in the DT.\n");
+               dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
                return -EINVAL;
        }
        data->rx_descs = prop;
 
        if (of_property_read_u32(node, "mac_control", &prop)) {
-               pr_err("Missing mac_control property in the DT.\n");
+               dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
                return -EINVAL;
        }
        data->mac_control = prop;
@@ -1848,7 +1876,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
        /* We do not want to force this, as in some cases may not have child */
        if (ret)
-               pr_warn("Doesn't have any child node\n");
+               dev_warn(&pdev->dev, "Doesn't have any child node\n");
 
        for_each_child_of_node(node, slave_node) {
                struct cpsw_slave_data *slave_data = data->slave_data + i;
@@ -1865,24 +1893,19 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 
                parp = of_get_property(slave_node, "phy_id", &lenp);
                if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
-                       pr_err("Missing slave[%d] phy_id property\n", i);
+                       dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
                        return -EINVAL;
                }
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
                mdio = of_find_device_by_node(mdio_node);
-
-               if (strncmp(mdio->name, "gpio", 4) == 0) {
-                       /* GPIO bitbang MDIO driver attached */
-                       struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, bus->id, phyid);
-               } else {
-                       /* davinci MDIO driver attached */
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, mdio->name, phyid);
+               of_node_put(mdio_node);
+               if (!mdio) {
+                       pr_err("Missing mdio platform device\n");
+                       return -EINVAL;
                }
+               snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+                        PHY_ID_FMT, mdio->name, phyid);
 
                mac_addr = of_get_mac_address(slave_node);
                if (mac_addr)
@@ -1890,18 +1913,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 
                slave_data->phy_if = of_get_phy_mode(slave_node);
                if (slave_data->phy_if < 0) {
-                       pr_err("Missing or malformed slave[%d] phy-mode property\n",
-                              i);
+                       dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+                               i);
                        return slave_data->phy_if;
                }
 
                if (data->dual_emac) {
                        if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
                                                 &prop)) {
-                               pr_err("Missing dual_emac_res_vlan in DT.\n");
+                               dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
                                slave_data->dual_emac_res_vlan = i+1;
-                               pr_err("Using %d as Reserved VLAN for %d slave\n",
-                                      slave_data->dual_emac_res_vlan, i);
+                               dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
+                                       slave_data->dual_emac_res_vlan, i);
                        } else {
                                slave_data->dual_emac_res_vlan = prop;
                        }
@@ -1925,7 +1948,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
 
        ndev = alloc_etherdev(sizeof(struct cpsw_priv));
        if (!ndev) {
-               pr_err("cpsw: error allocating net_device\n");
+               dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
                return -ENOMEM;
        }
 
@@ -1941,10 +1964,10 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
        if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
                memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
                        ETH_ALEN);
-               pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+               dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
        } else {
                random_ether_addr(priv_sl2->mac_addr);
-               pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+               dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
        }
        memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
 
@@ -1975,14 +1998,14 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
        ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
        ndev->netdev_ops = &cpsw_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+       ndev->ethtool_ops = &cpsw_ethtool_ops;
        netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
        /* register the network device */
        SET_NETDEV_DEV(ndev, &pdev->dev);
        ret = register_netdev(ndev);
        if (ret) {
-               pr_err("cpsw: error registering net device\n");
+               dev_err(&pdev->dev, "cpsw: error registering net device\n");
                free_netdev(ndev);
                ret = -ENODEV;
        }
@@ -2004,7 +2027,7 @@ static int cpsw_probe(struct platform_device *pdev)
 
        ndev = alloc_etherdev(sizeof(struct cpsw_priv));
        if (!ndev) {
-               pr_err("error allocating net_device\n");
+               dev_err(&pdev->dev, "error allocating net_device\n");
                return -ENOMEM;
        }
 
@@ -2019,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
        priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
        priv->irq_enabled = true;
        if (!priv->cpts) {
-               pr_err("error allocating cpts\n");
+               dev_err(&pdev->dev, "error allocating cpts\n");
                goto clean_ndev_ret;
        }
 
@@ -2032,7 +2055,7 @@ static int cpsw_probe(struct platform_device *pdev)
        pinctrl_pm_select_default_state(&pdev->dev);
 
        if (cpsw_probe_dt(&priv->data, pdev)) {
-               pr_err("cpsw: platform data missing\n");
+               dev_err(&pdev->dev, "cpsw: platform data missing\n");
                ret = -ENODEV;
                goto clean_runtime_disable_ret;
        }
@@ -2040,10 +2063,10 @@ static int cpsw_probe(struct platform_device *pdev)
 
        if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
                memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
-               pr_info("Detected MACID = %pM\n", priv->mac_addr);
+               dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
        } else {
                eth_random_addr(priv->mac_addr);
-               pr_info("Random MACID = %pM\n", priv->mac_addr);
+               dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
        }
 
        memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
@@ -2204,7 +2227,7 @@ static int cpsw_probe(struct platform_device *pdev)
        ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
        ndev->netdev_ops = &cpsw_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+       ndev->ethtool_ops = &cpsw_ethtool_ops;
        netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
        /* register the network device */
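
The cpsw hunks above replace bare pr_err()/pr_info() calls with dev_err()/dev_info(), so each message is automatically prefixed with the driver and device name instead of appearing anonymously in the log. A minimal sketch of the pattern, using a hypothetical foo_probe() rather than the driver's own code:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/printk.h>

static int foo_probe(struct platform_device *pdev)
{
        u32 prop;

        if (of_property_read_u32(pdev->dev.of_node, "slaves", &prop)) {
                /* pr_err() would not say which device the message is about;
                 * dev_err() prefixes it with the driver and device name.
                 */
                dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
                return -EINVAL;
        }

        return 0;
}
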
index 243513980b51511a9c3054d71c8a270d63949cb1..6b56f85951e581826afc152109d0eee4b53dd08d 100644 (file)
@@ -236,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work)
        schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
 }
 
-#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
-
-static void cpts_clk_init(struct cpts *cpts)
+static void cpts_clk_init(struct device *dev, struct cpts *cpts)
 {
-       cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+       cpts->refclk = devm_clk_get(dev, "cpts");
        if (IS_ERR(cpts->refclk)) {
-               pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+               dev_err(dev, "Failed to get cpts refclk\n");
                cpts->refclk = NULL;
                return;
        }
@@ -252,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts)
 static void cpts_clk_release(struct cpts *cpts)
 {
        clk_disable(cpts->refclk);
-       clk_put(cpts->refclk);
 }
 
 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -390,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts,
        for (i = 0; i < CPTS_MAX_EVENTS; i++)
                list_add(&cpts->pool_data[i].list, &cpts->pool);
 
-       cpts_clk_init(cpts);
+       cpts_clk_init(dev, cpts);
        cpts_write32(cpts, CPTS_EN, control);
        cpts_write32(cpts, TS_PEND_EN, int_enable);
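
The cpts change looks the clock up with devm_clk_get() against the consumer device instead of clk_get(NULL, ...) with a global name, so the reference is dropped automatically when the device is unbound and the explicit clk_put() disappears from cpts_clk_release(). A hedged sketch of the managed pattern (the function names and the clk_prepare_enable() pairing are illustrative, not lifted from cpts):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *refclk;

static int foo_clk_init(struct device *dev)
{
        /* Managed lookup: released automatically on driver unbind */
        refclk = devm_clk_get(dev, "cpts");
        if (IS_ERR(refclk))
                return PTR_ERR(refclk);

        return clk_prepare_enable(refclk);
}

static void foo_clk_release(void)
{
        /* Only stop the clock; no clk_put() is needed any more */
        clk_disable_unprepare(refclk);
}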
 
index 88ef27067bf24a8b2569f533b63ac223d52280fe..539dbdecd310e870ea98ae05bcd989b4aebc5dfb 100644 (file)
@@ -158,9 +158,9 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
        int bitmap_size;
        struct cpdma_desc_pool *pool;
 
-       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+       pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
-               return NULL;
+               goto fail;
 
        spin_lock_init(&pool->lock);
 
@@ -170,7 +170,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
        pool->num_desc  = size / pool->desc_size;
 
        bitmap_size  = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
-       pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
        if (!pool->bitmap)
                goto fail;
 
@@ -187,10 +187,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 
        if (pool->iomap)
                return pool;
-
 fail:
-       kfree(pool->bitmap);
-       kfree(pool);
        return NULL;
 }
 
@@ -203,7 +200,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
 
        spin_lock_irqsave(&pool->lock, flags);
        WARN_ON(pool->used_desc);
-       kfree(pool->bitmap);
        if (pool->cpumap) {
                dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
@@ -211,7 +207,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
                iounmap(pool->iomap);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
-       kfree(pool);
 }
 
 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -276,7 +271,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 {
        struct cpdma_ctlr *ctlr;
 
-       ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+       ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;
 
@@ -468,7 +463,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 
        cpdma_desc_pool_destroy(ctlr->pool);
        spin_unlock_irqrestore(&ctlr->lock, flags);
-       kfree(ctlr);
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -507,21 +501,22 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler)
 {
        struct cpdma_chan *chan;
-       int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+       int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
        unsigned long flags;
 
        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return NULL;
 
-       ret = -ENOMEM;
-       chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+       chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
-               goto err_chan_alloc;
+               return ERR_PTR(-ENOMEM);
 
        spin_lock_irqsave(&ctlr->lock, flags);
-       ret = -EBUSY;
-       if (ctlr->channels[chan_num])
-               goto err_chan_busy;
+       if (ctlr->channels[chan_num]) {
+               spin_unlock_irqrestore(&ctlr->lock, flags);
+               devm_kfree(ctlr->dev, chan);
+               return ERR_PTR(-EBUSY);
+       }
 
        chan->ctlr      = ctlr;
        chan->state     = CPDMA_STATE_IDLE;
@@ -551,12 +546,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
        ctlr->channels[chan_num] = chan;
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;
-
-err_chan_busy:
-       spin_unlock_irqrestore(&ctlr->lock, flags);
-       kfree(chan);
-err_chan_alloc:
-       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_create);
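
The cpdma hunks move the pool, controller and channel structures to devm_kzalloc(), which is why the kfree() calls vanish from cpdma_desc_pool_destroy(), cpdma_ctlr_destroy() and the error labels: the allocations are released when the underlying struct device goes away. The one manual step left is cpdma_chan_create() giving back its allocation early with devm_kfree() when the channel slot is already taken. A small sketch of that shape (names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct foo_chan {
        int num;
};

static struct foo_chan *foo_chan_create(struct device *dev, bool busy)
{
        struct foo_chan *chan;

        chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return ERR_PTR(-ENOMEM);

        if (busy) {
                /* Undo the allocation now instead of waiting for unbind */
                devm_kfree(dev, chan);
                return ERR_PTR(-EBUSY);
        }

        return chan;    /* freed automatically when dev is unbound */
}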
 
index 8f0e69ce07ca3e03cfbf4cf1b22c92c9af27beeb..f32d730f55cc9f5c512d0515fb0e72f4b9eb89a4 100644 (file)
@@ -1865,7 +1865,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
        struct emac_priv *priv;
        unsigned long hw_ram_addr;
        struct emac_platform_data *pdata;
-       struct device *emac_dev;
        struct cpdma_params dma_params;
        struct clk *emac_clk;
        unsigned long emac_bus_frequency;
@@ -1911,7 +1910,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
        priv->coal_intvl = 0;
        priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
 
-       emac_dev = &ndev->dev;
        /* Get EMAC platform data */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
@@ -1930,7 +1928,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
                hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
 
        memset(&dma_params, 0, sizeof(dma_params));
-       dma_params.dev                  = emac_dev;
+       dma_params.dev                  = &pdev->dev;
        dma_params.dmaregs              = priv->emac_base;
        dma_params.rxthresh             = priv->emac_base + 0x120;
        dma_params.rxfree               = priv->emac_base + 0x140;
@@ -1980,7 +1978,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
        }
 
        ndev->netdev_ops = &emac_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+       ndev->ethtool_ops = &ethtool_ops;
        netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
 
        /* register the network device */
@@ -1994,7 +1992,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 
 
        if (netif_msg_probe(priv)) {
-               dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
+               dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
                           "(regs: %p, irq: %d)\n",
                           (void *)priv->emac_base_phys, ndev->irq);
        }
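
Several hunks in this series (cpsw and davinci_emac here) drop the SET_ETHTOOL_OPS() macro in favour of assigning net_device->ethtool_ops directly; the macro was a thin wrapper around exactly that assignment and was being phased out of the tree at the time. The open-coded equivalent:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_ops foo_ethtool_ops;    /* driver-specific ops table */

static void foo_setup_ethtool(struct net_device *ndev)
{
        /* formerly: SET_ETHTOOL_OPS(ndev, &foo_ethtool_ops); */
        ndev->ethtool_ops = &foo_ethtool_ops;
}
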
index 0cca9dec5d8277542a4439aed4bb8953f3943b29..735dc53d4b0163be05eec83c83a4fb8bb4612497 100644 (file)
@@ -303,7 +303,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
                return -EINVAL;
 
        if (of_property_read_u32(node, "bus_freq", &prop)) {
-               pr_err("Missing bus_freq property in the DT.\n");
+               dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
                return -EINVAL;
        }
        data->bus_freq = prop;
@@ -321,15 +321,14 @@ static int davinci_mdio_probe(struct platform_device *pdev)
        struct phy_device *phy;
        int ret, addr;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       data->bus = mdiobus_alloc();
+       data->bus = devm_mdiobus_alloc(dev);
        if (!data->bus) {
                dev_err(dev, "failed to alloc mii bus\n");
-               ret = -ENOMEM;
-               goto bail_out;
+               return -ENOMEM;
        }
 
        if (dev->of_node) {
@@ -349,12 +348,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
        data->bus->parent       = dev;
        data->bus->priv         = data;
 
-       /* Select default pin state */
-       pinctrl_pm_select_default_state(&pdev->dev);
-
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
-       data->clk = clk_get(&pdev->dev, "fck");
+       data->clk = devm_clk_get(dev, "fck");
        if (IS_ERR(data->clk)) {
                dev_err(dev, "failed to get device clock\n");
                ret = PTR_ERR(data->clk);
@@ -367,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
        spin_lock_init(&data->lock);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "could not find register map resource\n");
-               ret = -ENOENT;
-               goto bail_out;
-       }
-
-       res = devm_request_mem_region(dev, res->start, resource_size(res),
-                                           dev_name(dev));
-       if (!res) {
-               dev_err(dev, "could not allocate register map resource\n");
-               ret = -ENXIO;
-               goto bail_out;
-       }
-
-       data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-       if (!data->regs) {
-               dev_err(dev, "could not map mdio registers\n");
-               ret = -ENOMEM;
+       data->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(data->regs)) {
+               ret = PTR_ERR(data->regs);
                goto bail_out;
        }
 
@@ -406,16 +387,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
        return 0;
 
 bail_out:
-       if (data->bus)
-               mdiobus_free(data->bus);
-
-       if (data->clk)
-               clk_put(data->clk);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       kfree(data);
-
        return ret;
 }
 
@@ -423,18 +397,12 @@ static int davinci_mdio_remove(struct platform_device *pdev)
 {
        struct davinci_mdio_data *data = platform_get_drvdata(pdev);
 
-       if (data->bus) {
+       if (data->bus)
                mdiobus_unregister(data->bus);
-               mdiobus_free(data->bus);
-       }
 
-       if (data->clk)
-               clk_put(data->clk);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       kfree(data);
-
        return 0;
 }
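
In davinci_mdio the platform_get_resource() NULL check, devm_request_mem_region() and devm_ioremap_nocache() collapse into a single devm_ioremap_resource() call, which validates the resource, claims the region and maps it, returning an ERR_PTR on failure. A minimal sketch with a hypothetical bar_probe():

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int bar_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* checks res, requests the region, maps it, and unwinds on unbind */
        regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        readl(regs);    /* illustrative register access */
        return 0;
}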
 
index 7e1c91d41a87ff2a4e065718d4caaf84c7db8020..5eca9bb185a0a9fede62bc406aa4abfadf08d2c7 100644 (file)
@@ -2192,7 +2192,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
 {
        int ret;
        int i;
-       int nz_addr = 0;
        struct net_device *dev;
        struct tile_net_priv *priv;
 
@@ -2212,7 +2211,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
 
        /* Initialize "priv". */
        priv = netdev_priv(dev);
-       memset(priv, 0, sizeof(*priv));
        priv->dev = dev;
        priv->channel = -1;
        priv->loopify_channel = -1;
@@ -2223,15 +2221,10 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
         * be done before the device is opened.  If the MAC is all zeroes,
         * we use a random address, since we're probably on the simulator.
         */
-       for (i = 0; i < 6; i++)
-               nz_addr |= mac[i];
-
-       if (nz_addr) {
-               memcpy(dev->dev_addr, mac, ETH_ALEN);
-               dev->addr_len = 6;
-       } else {
+       if (!is_zero_ether_addr(mac))
+               ether_addr_copy(dev->dev_addr, mac);
+       else
                eth_hw_addr_random(dev);
-       }
 
        /* Register the network device. */
        ret = register_netdev(dev);
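
The tile hunk swaps an open-coded "OR all six bytes together" zero test and memcpy() for is_zero_ether_addr() and ether_addr_copy(), and drops the manual dev->addr_len = 6 (alloc_etherdev()/ether_setup() already set addr_len for Ethernet devices). The helper-based form, as a standalone sketch:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void foo_set_mac(struct net_device *dev, const u8 *mac)
{
        if (!is_zero_ether_addr(mac))
                ether_addr_copy(dev->dev_addr, mac);    /* copies ETH_ALEN bytes */
        else
                eth_hw_addr_random(dev);    /* random, locally administered address */
}
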
index 8a049a2b44742aabe24221b12a6bcb2a4caf357f..f66ddaee0c877f1620ad123fd9ef8402703aa1a6 100644 (file)
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
 
 config VIA_RHINE
        tristate "VIA Rhine support"
-       depends on PCI
+       depends on (PCI || USE_OF)
        select CRC32
        select MII
        ---help---
index f61dc2b72bb2f43780ace58a503bd2e3b89f61a9..981be0154be393931d958f8310bd2e0572184d04 100644 (file)
@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -116,13 +120,6 @@ static const int multicast_filter_limit = 32;
 static const char version[] =
        "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
 
-/* This driver was written to use PCI memory space. Some early versions
-   of the Rhine may only work correctly with I/O space accesses. */
-#ifdef CONFIG_VIA_RHINE_MMIO
-#define USE_MMIO
-#else
-#endif
-
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 MODULE_LICENSE("GPL");
@@ -260,6 +257,12 @@ enum rhine_quirks {
        rq6patterns     = 0x0040,       /* 6 instead of 4 patterns for WOL */
        rqStatusWBRace  = 0x0080,       /* Tx Status Writeback Error possible */
        rqRhineI        = 0x0100,       /* See comment below */
+       rqIntPHY        = 0x0200,       /* Integrated PHY */
+       rqMgmt          = 0x0400,       /* Management adapter */
+       rqNeedEnMMIO    = 0x0800,       /* Whether the core needs to be
+                                        * switched from PIO mode to MMIO
+                                        * (only applies to PCI)
+                                        */
 };
 /*
  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
@@ -279,6 +282,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 
+/* OpenFirmware identifiers for platform-bus devices
+ * The .data field is currently only used to store quirks
+ */
+static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
+static struct of_device_id rhine_of_tbl[] = {
+       { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
+       { }     /* terminate list */
+};
+MODULE_DEVICE_TABLE(of, rhine_of_tbl);
 
 /* Offsets to the device registers. */
 enum register_offsets {
@@ -338,13 +350,11 @@ enum bcr1_bits {
        BCR1_MED1=0x80,         /* for VT6102 */
 };
 
-#ifdef USE_MMIO
 /* Registers we check that mmio and reg are the same. */
 static const int mmio_verify_registers[] = {
        RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
        0
 };
-#endif
 
 /* Bits in the interrupt status/mask registers. */
 enum intr_status_bits {
@@ -446,7 +456,7 @@ struct rhine_private {
        unsigned char *tx_bufs;
        dma_addr_t tx_bufs_dma;
 
-       struct pci_dev *pdev;
+       int irq;
        long pioaddr;
        struct net_device *dev;
        struct napi_struct napi;
@@ -649,20 +659,46 @@ static void rhine_chip_reset(struct net_device *dev)
                   "failed" : "succeeded");
 }
 
-#ifdef USE_MMIO
 static void enable_mmio(long pioaddr, u32 quirks)
 {
        int n;
-       if (quirks & rqRhineI) {
-               /* More recent docs say that this bit is reserved ... */
-               n = inb(pioaddr + ConfigA) | 0x20;
-               outb(n, pioaddr + ConfigA);
-       } else {
-               n = inb(pioaddr + ConfigD) | 0x80;
-               outb(n, pioaddr + ConfigD);
+
+       if (quirks & rqNeedEnMMIO) {
+               if (quirks & rqRhineI) {
+                       /* More recent docs say that this bit is reserved */
+                       n = inb(pioaddr + ConfigA) | 0x20;
+                       outb(n, pioaddr + ConfigA);
+               } else {
+                       n = inb(pioaddr + ConfigD) | 0x80;
+                       outb(n, pioaddr + ConfigD);
+               }
        }
 }
-#endif
+
+static inline int verify_mmio(struct device *hwdev,
+                             long pioaddr,
+                             void __iomem *ioaddr,
+                             u32 quirks)
+{
+       if (quirks & rqNeedEnMMIO) {
+               int i = 0;
+
+               /* Check that selected MMIO registers match the PIO ones */
+               while (mmio_verify_registers[i]) {
+                       int reg = mmio_verify_registers[i++];
+                       unsigned char a = inb(pioaddr+reg);
+                       unsigned char b = readb(ioaddr+reg);
+
+                       if (a != b) {
+                               dev_err(hwdev,
+                                       "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+                                       reg, a, b);
+                               return -EIO;
+                       }
+               }
+       }
+       return 0;
+}
 
 /*
  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
@@ -682,14 +718,12 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
        if (i > 512)
                pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
 
-#ifdef USE_MMIO
        /*
         * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
         * MMIO. If reloading EEPROM was done first this could be avoided, but
         * it is not known if that still works with the "win98-reboot" problem.
         */
        enable_mmio(pioaddr, rp->quirks);
-#endif
 
        /* Turn off EEPROM-controlled wake-up (magic packet) */
        if (rp->quirks & rqWOL)
@@ -701,7 +735,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 static void rhine_poll(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
-       const int irq = rp->pdev->irq;
+       const int irq = rp->irq;
 
        disable_irq(irq);
        rhine_interrupt(irq, dev);
@@ -846,7 +880,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
                msleep(5);
 
        /* Reload EEPROM controlled bytes cleared by soft reset */
-       rhine_reload_eeprom(pioaddr, dev);
+       if (dev_is_pci(dev->dev.parent))
+               rhine_reload_eeprom(pioaddr, dev);
 }
 
 static const struct net_device_ops rhine_netdev_ops = {
@@ -867,125 +902,37 @@ static const struct net_device_ops rhine_netdev_ops = {
 #endif
 };
 
-static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rhine_init_one_common(struct device *hwdev, u32 quirks,
+                                long pioaddr, void __iomem *ioaddr, int irq)
 {
        struct net_device *dev;
        struct rhine_private *rp;
-       int i, rc;
-       u32 quirks;
-       long pioaddr;
-       long memaddr;
-       void __iomem *ioaddr;
-       int io_size, phy_id;
+       int i, rc, phy_id;
        const char *name;
-#ifdef USE_MMIO
-       int bar = 1;
-#else
-       int bar = 0;
-#endif
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
-       pr_info_once("%s\n", version);
-#endif
-
-       io_size = 256;
-       phy_id = 0;
-       quirks = 0;
-       name = "Rhine";
-       if (pdev->revision < VTunknown0) {
-               quirks = rqRhineI;
-               io_size = 128;
-       }
-       else if (pdev->revision >= VT6102) {
-               quirks = rqWOL | rqForceReset;
-               if (pdev->revision < VT6105) {
-                       name = "Rhine II";
-                       quirks |= rqStatusWBRace;       /* Rhine-II exclusive */
-               }
-               else {
-                       phy_id = 1;     /* Integrated PHY, phy_id fixed to 1 */
-                       if (pdev->revision >= VT6105_B0)
-                               quirks |= rq6patterns;
-                       if (pdev->revision < VT6105M)
-                               name = "Rhine III";
-                       else
-                               name = "Rhine III (Management Adapter)";
-               }
-       }
-
-       rc = pci_enable_device(pdev);
-       if (rc)
-               goto err_out;
 
        /* this should always be supported */
-       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
        if (rc) {
-               dev_err(&pdev->dev,
-                       "32-bit PCI DMA addresses not supported by the card!?\n");
-               goto err_out_pci_disable;
-       }
-
-       /* sanity check */
-       if ((pci_resource_len(pdev, 0) < io_size) ||
-           (pci_resource_len(pdev, 1) < io_size)) {
-               rc = -EIO;
-               dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
-               goto err_out_pci_disable;
+               dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
+               goto err_out;
        }
 
-       pioaddr = pci_resource_start(pdev, 0);
-       memaddr = pci_resource_start(pdev, 1);
-
-       pci_set_master(pdev);
-
        dev = alloc_etherdev(sizeof(struct rhine_private));
        if (!dev) {
                rc = -ENOMEM;
-               goto err_out_pci_disable;
+               goto err_out;
        }
-       SET_NETDEV_DEV(dev, &pdev->dev);
+       SET_NETDEV_DEV(dev, hwdev);
 
        rp = netdev_priv(dev);
        rp->dev = dev;
        rp->quirks = quirks;
        rp->pioaddr = pioaddr;
-       rp->pdev = pdev;
+       rp->base = ioaddr;
+       rp->irq = irq;
        rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 
-       rc = pci_request_regions(pdev, DRV_NAME);
-       if (rc)
-               goto err_out_free_netdev;
-
-       ioaddr = pci_iomap(pdev, bar, io_size);
-       if (!ioaddr) {
-               rc = -EIO;
-               dev_err(&pdev->dev,
-                       "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
-                       pci_name(pdev), io_size, memaddr);
-               goto err_out_free_res;
-       }
-
-#ifdef USE_MMIO
-       enable_mmio(pioaddr, quirks);
-
-       /* Check that selected MMIO registers match the PIO ones */
-       i = 0;
-       while (mmio_verify_registers[i]) {
-               int reg = mmio_verify_registers[i++];
-               unsigned char a = inb(pioaddr+reg);
-               unsigned char b = readb(ioaddr+reg);
-               if (a != b) {
-                       rc = -EIO;
-                       dev_err(&pdev->dev,
-                               "MMIO do not match PIO [%02x] (%02x != %02x)\n",
-                               reg, a, b);
-                       goto err_out_unmap;
-               }
-       }
-#endif /* USE_MMIO */
-
-       rp->base = ioaddr;
+       phy_id = rp->quirks & rqIntPHY ? 1 : 0;
 
        u64_stats_init(&rp->tx_stats.syncp);
        u64_stats_init(&rp->rx_stats.syncp);
@@ -1030,7 +977,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rp->quirks & rqRhineI)
                dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-       if (pdev->revision >= VT6105M)
+       if (rp->quirks & rqMgmt)
                dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
                                 NETIF_F_HW_VLAN_CTAG_RX |
                                 NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +985,21 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* dev->name not defined before register_netdev()! */
        rc = register_netdev(dev);
        if (rc)
-               goto err_out_unmap;
+               goto err_out_free_netdev;
+
+       if (rp->quirks & rqRhineI)
+               name = "Rhine";
+       else if (rp->quirks & rqStatusWBRace)
+               name = "Rhine II";
+       else if (rp->quirks & rqMgmt)
+               name = "Rhine III (Management Adapter)";
+       else
+               name = "Rhine III";
 
        netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-                   name,
-#ifdef USE_MMIO
-                   memaddr,
-#else
-                   (long)ioaddr,
-#endif
-                   dev->dev_addr, pdev->irq);
+                   name, (long)ioaddr, dev->dev_addr, rp->irq);
 
-       pci_set_drvdata(pdev, dev);
+       dev_set_drvdata(hwdev, dev);
 
        {
                u16 mii_cmd;
@@ -1078,41 +1028,158 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        return 0;
 
+err_out_free_netdev:
+       free_netdev(dev);
+err_out:
+       return rc;
+}
+
+static int rhine_init_one_pci(struct pci_dev *pdev,
+                             const struct pci_device_id *ent)
+{
+       struct device *hwdev = &pdev->dev;
+       int rc;
+       long pioaddr, memaddr;
+       void __iomem *ioaddr;
+       int io_size = pdev->revision < VTunknown0 ? 128 : 256;
+
+/* This driver was written to use PCI memory space. Some early versions
+ * of the Rhine may only work correctly with I/O space accesses.
+ * TODO: determine for which revisions this is true and assign the flag
+ *      in code as opposed to this Kconfig option (???)
+ */
+#ifdef CONFIG_VIA_RHINE_MMIO
+       u32 quirks = rqNeedEnMMIO;
+#else
+       u32 quirks = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+       pr_info_once("%s\n", version);
+#endif
+
+       rc = pci_enable_device(pdev);
+       if (rc)
+               goto err_out;
+
+       if (pdev->revision < VTunknown0) {
+               quirks |= rqRhineI;
+       } else if (pdev->revision >= VT6102) {
+               quirks |= rqWOL | rqForceReset;
+               if (pdev->revision < VT6105) {
+                       quirks |= rqStatusWBRace;
+               } else {
+                       quirks |= rqIntPHY;
+                       if (pdev->revision >= VT6105_B0)
+                               quirks |= rq6patterns;
+                       if (pdev->revision >= VT6105M)
+                               quirks |= rqMgmt;
+               }
+       }
+
+       /* sanity check */
+       if ((pci_resource_len(pdev, 0) < io_size) ||
+           (pci_resource_len(pdev, 1) < io_size)) {
+               rc = -EIO;
+               dev_err(hwdev, "Insufficient PCI resources, aborting\n");
+               goto err_out_pci_disable;
+       }
+
+       pioaddr = pci_resource_start(pdev, 0);
+       memaddr = pci_resource_start(pdev, 1);
+
+       pci_set_master(pdev);
+
+       rc = pci_request_regions(pdev, DRV_NAME);
+       if (rc)
+               goto err_out_pci_disable;
+
+       ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
+       if (!ioaddr) {
+               rc = -EIO;
+               dev_err(hwdev,
+                       "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+                       dev_name(hwdev), io_size, memaddr);
+               goto err_out_free_res;
+       }
+
+       enable_mmio(pioaddr, quirks);
+
+       rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
+       if (rc)
+               goto err_out_unmap;
+
+       rc = rhine_init_one_common(&pdev->dev, quirks,
+                                  pioaddr, ioaddr, pdev->irq);
+       if (!rc)
+               return 0;
+
 err_out_unmap:
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
-err_out_free_netdev:
-       free_netdev(dev);
 err_out_pci_disable:
        pci_disable_device(pdev);
 err_out:
        return rc;
 }
 
+static int rhine_init_one_platform(struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       const u32 *quirks;
+       int irq;
+       struct resource *res;
+       void __iomem *ioaddr;
+
+       match = of_match_device(rhine_of_tbl, &pdev->dev);
+       if (!match)
+               return -EINVAL;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ioaddr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(ioaddr))
+               return PTR_ERR(ioaddr);
+
+       irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       if (!irq)
+               return -EINVAL;
+
+       quirks = match->data;
+       if (!quirks)
+               return -EINVAL;
+
+       return rhine_init_one_common(&pdev->dev, *quirks,
+                                    (long)ioaddr, ioaddr, irq);
+}
+
 static int alloc_ring(struct net_device* dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        void *ring;
        dma_addr_t ring_dma;
 
-       ring = pci_alloc_consistent(rp->pdev,
-                                   RX_RING_SIZE * sizeof(struct rx_desc) +
-                                   TX_RING_SIZE * sizeof(struct tx_desc),
-                                   &ring_dma);
+       ring = dma_alloc_coherent(hwdev,
+                                 RX_RING_SIZE * sizeof(struct rx_desc) +
+                                 TX_RING_SIZE * sizeof(struct tx_desc),
+                                 &ring_dma,
+                                 GFP_ATOMIC);
        if (!ring) {
                netdev_err(dev, "Could not allocate DMA memory\n");
                return -ENOMEM;
        }
        if (rp->quirks & rqRhineI) {
-               rp->tx_bufs = pci_alloc_consistent(rp->pdev,
-                                                  PKT_BUF_SZ * TX_RING_SIZE,
-                                                  &rp->tx_bufs_dma);
+               rp->tx_bufs = dma_alloc_coherent(hwdev,
+                                                PKT_BUF_SZ * TX_RING_SIZE,
+                                                &rp->tx_bufs_dma,
+                                                GFP_ATOMIC);
                if (rp->tx_bufs == NULL) {
-                       pci_free_consistent(rp->pdev,
-                                   RX_RING_SIZE * sizeof(struct rx_desc) +
-                                   TX_RING_SIZE * sizeof(struct tx_desc),
-                                   ring, ring_dma);
+                       dma_free_coherent(hwdev,
+                                         RX_RING_SIZE * sizeof(struct rx_desc) +
+                                         TX_RING_SIZE * sizeof(struct tx_desc),
+                                         ring, ring_dma);
                        return -ENOMEM;
                }
        }
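
Because the rhine private data no longer stores a struct pci_dev, the ring and skb management moves from the pci_*_consistent()/pci_map_single() wrappers to the generic DMA API keyed on dev->dev.parent, which works identically for the PCI and the new platform back end. A hedged sketch of a single TX buffer map/unmap in that style (the foo_* helpers are illustrative):

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static dma_addr_t foo_map_tx(struct net_device *dev, struct sk_buff *skb)
{
        struct device *hwdev = dev->dev.parent;    /* PCI or platform device */
        dma_addr_t handle;

        handle = dma_map_single(hwdev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(hwdev, handle))
                return 0;    /* caller treats 0 as "not mapped" */
        return handle;
}

static void foo_unmap_tx(struct net_device *dev, dma_addr_t handle, size_t len)
{
        dma_unmap_single(dev->dev.parent, handle, len, DMA_TO_DEVICE);
}
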
@@ -1128,16 +1195,17 @@ static int alloc_ring(struct net_device* dev)
 static void free_ring(struct net_device* dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
 
-       pci_free_consistent(rp->pdev,
-                           RX_RING_SIZE * sizeof(struct rx_desc) +
-                           TX_RING_SIZE * sizeof(struct tx_desc),
-                           rp->rx_ring, rp->rx_ring_dma);
+       dma_free_coherent(hwdev,
+                         RX_RING_SIZE * sizeof(struct rx_desc) +
+                         TX_RING_SIZE * sizeof(struct tx_desc),
+                         rp->rx_ring, rp->rx_ring_dma);
        rp->tx_ring = NULL;
 
        if (rp->tx_bufs)
-               pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
-                                   rp->tx_bufs, rp->tx_bufs_dma);
+               dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
+                                 rp->tx_bufs, rp->tx_bufs_dma);
 
        rp->tx_bufs = NULL;
 
@@ -1146,6 +1214,7 @@ static void free_ring(struct net_device* dev)
 static void alloc_rbufs(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        dma_addr_t next;
        int i;
 
@@ -1174,9 +1243,9 @@ static void alloc_rbufs(struct net_device *dev)
                        break;
 
                rp->rx_skbuff_dma[i] =
-                       pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
-                                      PCI_DMA_FROMDEVICE);
-               if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
+                       dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
+                                      DMA_FROM_DEVICE);
+               if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
                        rp->rx_skbuff_dma[i] = 0;
                        dev_kfree_skb(skb);
                        break;
@@ -1190,6 +1259,7 @@ static void alloc_rbufs(struct net_device *dev)
 static void free_rbufs(struct net_device* dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        int i;
 
        /* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1267,9 @@ static void free_rbufs(struct net_device* dev)
                rp->rx_ring[i].rx_status = 0;
                rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (rp->rx_skbuff[i]) {
-                       pci_unmap_single(rp->pdev,
+                       dma_unmap_single(hwdev,
                                         rp->rx_skbuff_dma[i],
-                                        rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                                        rp->rx_buf_sz, DMA_FROM_DEVICE);
                        dev_kfree_skb(rp->rx_skbuff[i]);
                }
                rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1300,7 @@ static void alloc_tbufs(struct net_device* dev)
 static void free_tbufs(struct net_device* dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        int i;
 
        for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1309,10 @@ static void free_tbufs(struct net_device* dev)
                rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (rp->tx_skbuff[i]) {
                        if (rp->tx_skbuff_dma[i]) {
-                               pci_unmap_single(rp->pdev,
+                               dma_unmap_single(hwdev,
                                                 rp->tx_skbuff_dma[i],
                                                 rp->tx_skbuff[i]->len,
-                                                PCI_DMA_TODEVICE);
+                                                DMA_TO_DEVICE);
                        }
                        dev_kfree_skb(rp->tx_skbuff[i]);
                }
@@ -1469,7 +1540,7 @@ static void init_registers(struct net_device *dev)
 
        rhine_set_rx_mode(dev);
 
-       if (rp->pdev->revision >= VT6105M)
+       if (rp->quirks & rqMgmt)
                rhine_init_cam_filter(dev);
 
        napi_enable(&rp->napi);
@@ -1581,16 +1652,15 @@ static int rhine_open(struct net_device *dev)
        void __iomem *ioaddr = rp->base;
        int rc;
 
-       rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
-                       dev);
+       rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                return rc;
 
-       netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+       netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
 
        rc = alloc_ring(dev);
        if (rc) {
-               free_irq(rp->pdev->irq, dev);
+               free_irq(rp->irq, dev);
                return rc;
        }
        alloc_rbufs(dev);
@@ -1659,6 +1729,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                  struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        void __iomem *ioaddr = rp->base;
        unsigned entry;
 
@@ -1695,9 +1766,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                                       rp->tx_bufs));
        } else {
                rp->tx_skbuff_dma[entry] =
-                       pci_map_single(rp->pdev, skb->data, skb->len,
-                                      PCI_DMA_TODEVICE);
-               if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
+                       dma_map_single(hwdev, skb->data, skb->len,
+                                      DMA_TO_DEVICE);
+               if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
                        dev_kfree_skb_any(skb);
                        rp->tx_skbuff_dma[entry] = 0;
                        dev->stats.tx_dropped++;
@@ -1788,6 +1859,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
 static void rhine_tx(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
 
        /* find and cleanup dirty tx descriptors */
@@ -1831,10 +1903,10 @@ static void rhine_tx(struct net_device *dev)
                }
                /* Free the original skb. */
                if (rp->tx_skbuff_dma[entry]) {
-                       pci_unmap_single(rp->pdev,
+                       dma_unmap_single(hwdev,
                                         rp->tx_skbuff_dma[entry],
                                         rp->tx_skbuff[entry]->len,
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
                }
                dev_consume_skb_any(rp->tx_skbuff[entry]);
                rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1935,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
 static int rhine_rx(struct net_device *dev, int limit)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        int count;
        int entry = rp->cur_rx % RX_RING_SIZE;
 
@@ -1924,19 +1997,19 @@ static int rhine_rx(struct net_device *dev, int limit)
                        if (pkt_len < rx_copybreak)
                                skb = netdev_alloc_skb_ip_align(dev, pkt_len);
                        if (skb) {
-                               pci_dma_sync_single_for_cpu(rp->pdev,
-                                                           rp->rx_skbuff_dma[entry],
-                                                           rp->rx_buf_sz,
-                                                           PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_cpu(hwdev,
+                                                       rp->rx_skbuff_dma[entry],
+                                                       rp->rx_buf_sz,
+                                                       DMA_FROM_DEVICE);
 
                                skb_copy_to_linear_data(skb,
                                                 rp->rx_skbuff[entry]->data,
                                                 pkt_len);
                                skb_put(skb, pkt_len);
-                               pci_dma_sync_single_for_device(rp->pdev,
-                                                              rp->rx_skbuff_dma[entry],
-                                                              rp->rx_buf_sz,
-                                                              PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_device(hwdev,
+                                                          rp->rx_skbuff_dma[entry],
+                                                          rp->rx_buf_sz,
+                                                          DMA_FROM_DEVICE);
                        } else {
                                skb = rp->rx_skbuff[entry];
                                if (skb == NULL) {
@@ -1945,10 +2018,10 @@ static int rhine_rx(struct net_device *dev, int limit)
                                }
                                rp->rx_skbuff[entry] = NULL;
                                skb_put(skb, pkt_len);
-                               pci_unmap_single(rp->pdev,
+                               dma_unmap_single(hwdev,
                                                 rp->rx_skbuff_dma[entry],
                                                 rp->rx_buf_sz,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                        }
 
                        if (unlikely(desc_length & DescTag))
@@ -1979,10 +2052,11 @@ static int rhine_rx(struct net_device *dev, int limit)
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        rp->rx_skbuff_dma[entry] =
-                               pci_map_single(rp->pdev, skb->data,
+                               dma_map_single(hwdev, skb->data,
                                               rp->rx_buf_sz,
-                                              PCI_DMA_FROMDEVICE);
-                       if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+                                              DMA_FROM_DEVICE);
+                       if (dma_mapping_error(hwdev,
+                                             rp->rx_skbuff_dma[entry])) {
                                dev_kfree_skb(skb);
                                rp->rx_skbuff_dma[entry] = 0;
                                break;
@@ -2103,7 +2177,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
                /* Too many to match, or accept all multicasts. */
                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-       } else if (rp->pdev->revision >= VT6105M) {
+       } else if (rp->quirks & rqMgmt) {
                int i = 0;
                u32 mCAMmask = 0;       /* 32 mCAMs (6105M and better) */
                netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2199,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
                iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
        }
        /* enable/disable VLAN receive filtering */
-       if (rp->pdev->revision >= VT6105M) {
+       if (rp->quirks & rqMgmt) {
                if (dev->flags & IFF_PROMISC)
                        BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
                else
@@ -2136,11 +2210,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-       struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
 
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-       strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+       strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2351,7 @@ static int rhine_close(struct net_device *dev)
        /* Stop the chip's Tx and Rx processes. */
        iowrite16(CmdStop, ioaddr + ChipCmd);
 
-       free_irq(rp->pdev->irq, dev);
+       free_irq(rp->irq, dev);
        free_rbufs(dev);
        free_tbufs(dev);
        free_ring(dev);
@@ -2286,7 +2360,7 @@ static int rhine_close(struct net_device *dev)
 }
 
 
-static void rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one_pci(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2374,21 @@ static void rhine_remove_one(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-static void rhine_shutdown (struct pci_dev *pdev)
+static int rhine_remove_one_platform(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct rhine_private *rp = netdev_priv(dev);
+
+       unregister_netdev(dev);
+
+       iounmap(rp->base);
+
+       free_netdev(dev);
+
+       return 0;
+}
+
+static void rhine_shutdown_pci(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
@@ -2354,8 +2442,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int rhine_suspend(struct device *device)
 {
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(device);
        struct rhine_private *rp = netdev_priv(dev);
 
        if (!netif_running(dev))
@@ -2367,23 +2454,21 @@ static int rhine_suspend(struct device *device)
 
        netif_device_detach(dev);
 
-       rhine_shutdown(pdev);
+       if (dev_is_pci(device))
+               rhine_shutdown_pci(to_pci_dev(device));
 
        return 0;
 }
 
 static int rhine_resume(struct device *device)
 {
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(device);
        struct rhine_private *rp = netdev_priv(dev);
 
        if (!netif_running(dev))
                return 0;
 
-#ifdef USE_MMIO
        enable_mmio(rp->pioaddr, rp->quirks);
-#endif
        rhine_power_init(dev);
        free_tbufs(dev);
        free_rbufs(dev);
@@ -2408,15 +2493,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
 
 #endif /* !CONFIG_PM_SLEEP */
 
-static struct pci_driver rhine_driver = {
+static struct pci_driver rhine_driver_pci = {
        .name           = DRV_NAME,
        .id_table       = rhine_pci_tbl,
-       .probe          = rhine_init_one,
-       .remove         = rhine_remove_one,
-       .shutdown       = rhine_shutdown,
+       .probe          = rhine_init_one_pci,
+       .remove         = rhine_remove_one_pci,
+       .shutdown       = rhine_shutdown_pci,
        .driver.pm      = RHINE_PM_OPS,
 };
 
+static struct platform_driver rhine_driver_platform = {
+       .probe          = rhine_init_one_platform,
+       .remove         = rhine_remove_one_platform,
+       .driver = {
+               .name   = DRV_NAME,
+               .owner  = THIS_MODULE,
+               .of_match_table = rhine_of_tbl,
+               .pm             = RHINE_PM_OPS,
+       }
+};
+
 static struct dmi_system_id rhine_dmi_table[] __initdata = {
        {
                .ident = "EPIA-M",
@@ -2437,6 +2533,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
 
 static int __init rhine_init(void)
 {
+       int ret_pci, ret_platform;
+
 /* when a module, this is printed whether or not devices are found in probe */
 #ifdef MODULE
        pr_info("%s\n", version);
@@ -2449,13 +2547,19 @@ static int __init rhine_init(void)
        else if (avoid_D3)
                pr_info("avoid_D3 set\n");
 
-       return pci_register_driver(&rhine_driver);
+       ret_pci = pci_register_driver(&rhine_driver_pci);
+       ret_platform = platform_driver_register(&rhine_driver_platform);
+       if ((ret_pci < 0) && (ret_platform < 0))
+               return ret_pci;
+
+       return 0;
 }
 
 
 static void __exit rhine_cleanup(void)
 {
-       pci_unregister_driver(&rhine_driver);
+       platform_driver_unregister(&rhine_driver_platform);
+       pci_unregister_driver(&rhine_driver_pci);
 }
 
 
index fa193c4688da78719257ac982af8be1f81b270c1..4ef818a7a6c623719f0507cfc64b56ef3de709d9 100644 (file)
@@ -75,7 +75,7 @@ int temac_indirect_busywait(struct temac_local *lp)
        long end = jiffies + 2;
 
        while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
-               if (end - jiffies <= 0) {
+               if (time_before_eq(end, jiffies)) {
                        WARN_ON(1);
                        return -ETIMEDOUT;
                }
index 64b4639f43b6bea6b0e69155a7cb7043a14abcc4..d4abf478e2bbf6ae25f5925f406d27923b2b949c 100644 (file)
@@ -22,7 +22,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
        long end = jiffies + 2;
        while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
                 XAE_MDIO_MCR_READY_MASK)) {
-               if (end - jiffies <= 0) {
+               if (time_before_eq(end, jiffies)) {
                        WARN_ON(1);
                        return -ETIMEDOUT;
                }
index 0d87c67a5ff7208e807a980c406a934214c9d4a6..8c4aed3053ebc0a3a3757dcae408f25249f8e630 100644 (file)
@@ -702,7 +702,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
        */
        while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
                        XEL_MDIOCTRL_MDIOSTS_MASK) {
-               if (end - jiffies <= 0) {
+               if (time_before_eq(end, jiffies)) {
                        WARN_ON(1);
                        return -ETIMEDOUT;
                }
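
All three Xilinx hunks make the same fix: "end - jiffies <= 0" mixes a signed deadline with the unsigned jiffies counter and is not safe across a jiffies wrap, so the loops now use time_before_eq(), which compares through a signed difference and handles wraparound. A minimal busy-wait sketch of the idiom (the read_status callback is a stand-in for the register read):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int foo_busywait(u32 (*read_status)(void), u32 ready_mask)
{
        unsigned long end = jiffies + 2;    /* deadline ~2 ticks away */

        while (!(read_status() & ready_mask)) {
                /* true once 'end' is at or before the current jiffies */
                if (time_before_eq(end, jiffies)) {
                        WARN_ON(1);
                        return -ETIMEDOUT;
                }
        }
        return 0;
}
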
index 57eb3f906d64be9508ae700804cff05d276e347a..6cc37c15e0bf98341131a8229bd7fbd2567994a8 100644 (file)
@@ -119,27 +119,14 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
 };
 
 /* Fwd declaration */
-struct hv_netvsc_packet;
 struct ndis_tcp_ip_checksum_info;
 
-/* Represent the xfer page packet which contains 1 or more netvsc packet */
-struct xferpage_packet {
-       struct list_head list_ent;
-       u32 status;
-
-       /* # of netvsc packets this xfer packet contains */
-       u32 count;
-
-       struct vmbus_channel *channel;
-};
-
 /*
  * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
  * within the RNDIS
  */
 struct hv_netvsc_packet {
        /* Bookkeeping stuff */
-       struct list_head list_ent;
        u32 status;
 
        struct hv_device *device;
@@ -149,24 +136,11 @@ struct hv_netvsc_packet {
        u16 q_idx;
        struct vmbus_channel *channel;
 
-       /*
-        * Valid only for receives when we break a xfer page packet
-        * into multiple netvsc packets
-        */
-       struct xferpage_packet *xfer_page_pkt;
+       u64 send_completion_tid;
+       void *send_completion_ctx;
+       void (*send_completion)(void *context);
 
-       union {
-               struct {
-                       u64 recv_completion_tid;
-                       void *recv_completion_ctx;
-                       void (*recv_completion)(void *context);
-               } recv;
-               struct {
-                       u64 send_completion_tid;
-                       void *send_completion_ctx;
-                       void (*send_completion)(void *context);
-               } send;
-       } completion;
+       u32 send_buf_index;
 
        /* This points to the memory after page_buf */
        struct rndis_message *rndis_msg;
@@ -610,11 +584,11 @@ struct nvsp_message {
 
 #define NETVSC_RECEIVE_BUFFER_SIZE             (1024*1024*16)  /* 16MB */
 #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY      (1024*1024*15)  /* 15MB */
+#define NETVSC_SEND_BUFFER_SIZE                        (1024 * 1024)   /* 1MB */
+#define NETVSC_INVALID_INDEX                   -1
 
-#define NETVSC_RECEIVE_BUFFER_ID               0xcafe
 
-/* Preallocated receive packets */
-#define NETVSC_RECEIVE_PACKETLIST_COUNT                256
+#define NETVSC_RECEIVE_BUFFER_ID               0xcafe
 
 #define NETVSC_PACKET_SIZE                      2048
 
@@ -630,12 +604,6 @@ struct netvsc_device {
        wait_queue_head_t wait_drain;
        bool start_remove;
        bool destroy;
-       /*
-        * List of free preallocated hv_netvsc_packet to represent receive
-        * packet
-        */
-       struct list_head recv_pkt_list;
-       spinlock_t recv_pkt_list_lock;
 
        /* Receive buffer allocated by us but manages by NetVSP */
        void *recv_buf;
@@ -644,6 +612,15 @@ struct netvsc_device {
        u32 recv_section_cnt;
        struct nvsp_1_receive_buffer_section *recv_section;
 
+       /* Send buffer allocated by us */
+       void *send_buf;
+       u32 send_buf_size;
+       u32 send_buf_gpadl_handle;
+       u32 send_section_cnt;
+       u32 send_section_size;
+       unsigned long *send_section_map;
+       int map_words;
+
        /* Used for NetVSP initialization protocol */
        struct completion channel_init_wait;
        struct nvsp_message channel_init_pkt;
@@ -814,6 +791,7 @@ enum ndis_per_pkt_info_type {
        IEEE_8021Q_INFO,
        ORIGINAL_PKTINFO,
        PACKET_CANCEL_ID,
+       NBL_HASH_VALUE = PACKET_CANCEL_ID,
        ORIGINAL_NET_BUFLIST,
        CACHED_NET_BUFLIST,
        SHORT_PKT_PADINFO,
@@ -960,6 +938,9 @@ struct ndis_tcp_lso_info {
 #define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
                sizeof(struct ndis_tcp_lso_info))
 
+#define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+               sizeof(u32))
+
 /* Format of Information buffer passed in a SetRequest for the OID */
 /* OID_GEN_RNDIS_CONFIG_PARAMETER. */
 struct rndis_config_parameter_info {
index e7e77f12bc38872001497378dc4cce7bb92cd0d1..c041f63a6d3053f51d5e3f6651bc1db0d0d6d25c 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/if_ether.h>
+#include <asm/sync_bitops.h>
 
 #include "hyperv_net.h"
 
@@ -80,7 +81,7 @@ get_in_err:
 }
 
 
-static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
+static int netvsc_destroy_buf(struct netvsc_device *net_device)
 {
        struct nvsp_message *revoke_packet;
        int ret = 0;
@@ -146,10 +147,62 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
                net_device->recv_section = NULL;
        }
 
+       /* Deal with the send buffer we may have set up.
+        * If we got a send section size, it means we received a
+        * SendSendBufferComplete msg (ie sent
+        * NvspMessage1TypeSendSendBuffer msg), therefore we need
+        * to send a revoke msg here.
+        */
+       if (net_device->send_section_size) {
+               /* Send the revoke send buffer */
+               revoke_packet = &net_device->revoke_packet;
+               memset(revoke_packet, 0, sizeof(struct nvsp_message));
+
+               revoke_packet->hdr.msg_type =
+                       NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
+               revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
+
+               ret = vmbus_sendpacket(net_device->dev->channel,
+                                      revoke_packet,
+                                      sizeof(struct nvsp_message),
+                                      (unsigned long)revoke_packet,
+                                      VM_PKT_DATA_INBAND, 0);
+               /* If we failed here, we might as well return and
+                * have a leak rather than continue and risk a bugcheck
+                */
+               if (ret != 0) {
+                       netdev_err(ndev, "unable to send "
+                                  "revoke send buffer to netvsp\n");
+                       return ret;
+               }
+       }
+       /* Teardown the gpadl on the vsp end */
+       if (net_device->send_buf_gpadl_handle) {
+               ret = vmbus_teardown_gpadl(net_device->dev->channel,
+                                          net_device->send_buf_gpadl_handle);
+
+               /* If we failed here, we might as well return and have a leak
+                * rather than continue and risk a bugcheck
+                */
+               if (ret != 0) {
+                       netdev_err(ndev,
+                                  "unable to teardown send buffer's gpadl\n");
+                       return ret;
+               }
+               net_device->send_buf_gpadl_handle = 0;
+       }
+       if (net_device->send_buf) {
+               /* Free up the send buffer */
+               free_pages((unsigned long)net_device->send_buf,
+                          get_order(net_device->send_buf_size));
+               net_device->send_buf = NULL;
+       }
+       kfree(net_device->send_section_map);
+
        return ret;
 }
 
-static int netvsc_init_recv_buf(struct hv_device *device)
+static int netvsc_init_buf(struct hv_device *device)
 {
        int ret = 0;
        int t;
@@ -248,10 +301,90 @@ static int netvsc_init_recv_buf(struct hv_device *device)
                goto cleanup;
        }
 
+       /* Now setup the send buffer.
+        */
+       net_device->send_buf =
+               (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+                                        get_order(net_device->send_buf_size));
+       if (!net_device->send_buf) {
+               netdev_err(ndev, "unable to allocate send "
+                          "buffer of size %d\n", net_device->send_buf_size);
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       /* Establish the gpadl handle for this buffer on this
+        * channel.  Note: This call uses the vmbus connection rather
+        * than the channel to establish the gpadl handle.
+        */
+       ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
+                                   net_device->send_buf_size,
+                                   &net_device->send_buf_gpadl_handle);
+       if (ret != 0) {
+               netdev_err(ndev,
+                          "unable to establish send buffer's gpadl\n");
+               goto cleanup;
+       }
+
+       /* Notify the NetVsp of the gpadl handle */
+       init_packet = &net_device->channel_init_pkt;
+       memset(init_packet, 0, sizeof(struct nvsp_message));
+       init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
+       init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
+               net_device->send_buf_gpadl_handle;
+       init_packet->msg.v1_msg.send_recv_buf.id = 0;
+
+       /* Send the gpadl notification request */
+       ret = vmbus_sendpacket(device->channel, init_packet,
+                              sizeof(struct nvsp_message),
+                              (unsigned long)init_packet,
+                              VM_PKT_DATA_INBAND,
+                              VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+       if (ret != 0) {
+               netdev_err(ndev,
+                          "unable to send send buffer's gpadl to netvsp\n");
+               goto cleanup;
+       }
+
+       t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
+       BUG_ON(t == 0);
+
+       /* Check the response */
+       if (init_packet->msg.v1_msg.
+           send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
+               netdev_err(ndev, "Unable to complete send buffer "
+                          "initialization with NetVsp - status %d\n",
+                          init_packet->msg.v1_msg.
+                          send_send_buf_complete.status);
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       /* Parse the response */
+       net_device->send_section_size = init_packet->msg.
+                               v1_msg.send_send_buf_complete.section_size;
+
+       /* Section count is simply the size divided by the section size.
+        */
+       net_device->send_section_cnt =
+               net_device->send_buf_size/net_device->send_section_size;
+
+       dev_info(&device->device, "Send section size: %d, Section count:%d\n",
+                net_device->send_section_size, net_device->send_section_cnt);
+
+       /* Setup state for managing the send buffer. */
+       net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
+                                            BITS_PER_LONG);
+
+       net_device->send_section_map =
+               kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
+       if (net_device->send_section_map == NULL) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
        goto exit;
 
 cleanup:
-       netvsc_destroy_recv_buf(net_device);
+       netvsc_destroy_buf(net_device);
 
 exit:
        return ret;
@@ -369,8 +502,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
                net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
        else
                net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+       net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
 
-       ret = netvsc_init_recv_buf(device);
+       ret = netvsc_init_buf(device);
 
 cleanup:
        return ret;
@@ -378,7 +512,7 @@ cleanup:
 
 static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
 {
-       netvsc_destroy_recv_buf(net_device);
+       netvsc_destroy_buf(net_device);
 }
 
 /*
@@ -387,7 +521,6 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
 int netvsc_device_remove(struct hv_device *device)
 {
        struct netvsc_device *net_device;
-       struct hv_netvsc_packet *netvsc_packet, *pos;
        unsigned long flags;
 
        net_device = hv_get_drvdata(device);
@@ -416,12 +549,6 @@ int netvsc_device_remove(struct hv_device *device)
        vmbus_close(device->channel);
 
        /* Release all resources */
-       list_for_each_entry_safe(netvsc_packet, pos,
-                                &net_device->recv_pkt_list, list_ent) {
-               list_del(&netvsc_packet->list_ent);
-               kfree(netvsc_packet);
-       }
-
        if (net_device->sub_cb_buf)
                vfree(net_device->sub_cb_buf);
 
@@ -447,6 +574,12 @@ static inline u32 hv_ringbuf_avail_percent(
        return avail_write * 100 / ring_info->ring_datasize;
 }
 
+static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
+                                        u32 index)
+{
+       sync_change_bit(index, net_device->send_section_map);
+}
+
 static void netvsc_send_completion(struct netvsc_device *net_device,
                                   struct hv_device *device,
                                   struct vmpacket_descriptor *packet)
@@ -454,6 +587,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
        struct nvsp_message *nvsp_packet;
        struct hv_netvsc_packet *nvsc_packet;
        struct net_device *ndev;
+       u32 send_index;
 
        ndev = net_device->ndev;
 
@@ -484,11 +618,13 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
 
                /* Notify the layer above us */
                if (nvsc_packet) {
+                       send_index = nvsc_packet->send_buf_index;
+                       if (send_index != NETVSC_INVALID_INDEX)
+                               netvsc_free_send_slot(net_device, send_index);
                        q_idx = nvsc_packet->q_idx;
                        channel = nvsc_packet->channel;
-                       nvsc_packet->completion.send.send_completion(
-                               nvsc_packet->completion.send.
-                               send_completion_ctx);
+                       nvsc_packet->send_completion(nvsc_packet->
+                                                    send_completion_ctx);
                }
 
                num_outstanding_sends =
@@ -512,6 +648,52 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
 
 }
 
+static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
+{
+       unsigned long index;
+       u32 max_words = net_device->map_words;
+       unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
+       u32 section_cnt = net_device->send_section_cnt;
+       int ret_val = NETVSC_INVALID_INDEX;
+       int i;
+       int prev_val;
+
+       for (i = 0; i < max_words; i++) {
+               if (!~(map_addr[i]))
+                       continue;
+               index = ffz(map_addr[i]);
+               prev_val = sync_test_and_set_bit(index, &map_addr[i]);
+               if (prev_val)
+                       continue;
+               if ((index + (i * BITS_PER_LONG)) >= section_cnt)
+                       break;
+               ret_val = (index + (i * BITS_PER_LONG));
+               break;
+       }
+       return ret_val;
+}
+
+u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+                           unsigned int section_index,
+                           struct hv_netvsc_packet *packet)
+{
+       char *start = net_device->send_buf;
+       char *dest = (start + (section_index * net_device->send_section_size));
+       int i;
+       u32 msg_size = 0;
+
+       for (i = 0; i < packet->page_buf_cnt; i++) {
+               char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
+               u32 offset = packet->page_buf[i].offset;
+               u32 len = packet->page_buf[i].len;
+
+               memcpy(dest, (src + offset), len);
+               msg_size += len;
+               dest += len;
+       }
+       return msg_size;
+}
+
 int netvsc_send(struct hv_device *device,
                        struct hv_netvsc_packet *packet)
 {
@@ -521,6 +703,10 @@ int netvsc_send(struct hv_device *device,
        struct net_device *ndev;
        struct vmbus_channel *out_channel = NULL;
        u64 req_id;
+       unsigned int section_index = NETVSC_INVALID_INDEX;
+       u32 msg_size = 0;
+       struct sk_buff *skb;
+
 
        net_device = get_outbound_net_device(device);
        if (!net_device)
@@ -536,12 +722,28 @@ int netvsc_send(struct hv_device *device,
                sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
        }
 
-       /* Not using send buffer section */
+       /* Attempt to send via sendbuf */
+       if (packet->total_data_buflen < net_device->send_section_size) {
+               section_index = netvsc_get_next_send_section(net_device);
+               if (section_index != NETVSC_INVALID_INDEX) {
+                       msg_size = netvsc_copy_to_send_buf(net_device,
+                                                          section_index,
+                                                          packet);
+                       skb = (struct sk_buff *)
+                             (unsigned long)packet->send_completion_tid;
+                       if (skb)
+                               dev_kfree_skb_any(skb);
+                       packet->page_buf_cnt = 0;
+               }
+       }
+       packet->send_buf_index = section_index;
+
+
        sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
-               0xFFFFFFFF;
-       sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+               section_index;
+       sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
 
-       if (packet->completion.send.send_completion)
+       if (packet->send_completion)
                req_id = (ulong)packet;
        else
                req_id = 0;
@@ -641,62 +843,6 @@ retry_send_cmplt:
        }
 }
 
-/* Send a receive completion packet to RNDIS device (ie NetVsp) */
-static void netvsc_receive_completion(void *context)
-{
-       struct hv_netvsc_packet *packet = context;
-       struct hv_device *device = packet->device;
-       struct vmbus_channel *channel;
-       struct netvsc_device *net_device;
-       u64 transaction_id = 0;
-       bool fsend_receive_comp = false;
-       unsigned long flags;
-       struct net_device *ndev;
-       u32 status = NVSP_STAT_NONE;
-
-       /*
-        * Even though it seems logical to do a GetOutboundNetDevice() here to
-        * send out receive completion, we are using GetInboundNetDevice()
-        * since we may have disable outbound traffic already.
-        */
-       net_device = get_inbound_net_device(device);
-       if (!net_device)
-               return;
-       ndev = net_device->ndev;
-
-       /* Overloading use of the lock. */
-       spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
-
-       if (packet->status != NVSP_STAT_SUCCESS)
-               packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
-
-       packet->xfer_page_pkt->count--;
-
-       /*
-        * Last one in the line that represent 1 xfer page packet.
-        * Return the xfer page packet itself to the freelist
-        */
-       if (packet->xfer_page_pkt->count == 0) {
-               fsend_receive_comp = true;
-               channel = packet->xfer_page_pkt->channel;
-               transaction_id = packet->completion.recv.recv_completion_tid;
-               status = packet->xfer_page_pkt->status;
-               list_add_tail(&packet->xfer_page_pkt->list_ent,
-                             &net_device->recv_pkt_list);
-
-       }
-
-       /* Put the packet back */
-       list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
-       spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
-       /* Send a receive completion for the xfer page packet */
-       if (fsend_receive_comp)
-               netvsc_send_recv_completion(device, channel, net_device,
-                                           transaction_id, status);
-
-}
-
 static void netvsc_receive(struct netvsc_device *net_device,
                        struct vmbus_channel *channel,
                        struct hv_device *device,
@@ -704,16 +850,13 @@ static void netvsc_receive(struct netvsc_device *net_device,
 {
        struct vmtransfer_page_packet_header *vmxferpage_packet;
        struct nvsp_message *nvsp_packet;
-       struct hv_netvsc_packet *netvsc_packet = NULL;
-       /* struct netvsc_driver *netvscDriver; */
-       struct xferpage_packet *xferpage_packet = NULL;
+       struct hv_netvsc_packet nv_pkt;
+       struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
+       u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
-       unsigned long flags;
        struct net_device *ndev;
 
-       LIST_HEAD(listHead);
-
        ndev = net_device->ndev;
 
        /*
@@ -746,78 +889,14 @@ static void netvsc_receive(struct netvsc_device *net_device,
                return;
        }
 
-       /*
-        * Grab free packets (range count + 1) to represent this xfer
-        * page packet. +1 to represent the xfer page packet itself.
-        * We grab it here so that we know exactly how many we can
-        * fulfil
-        */
-       spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
-       while (!list_empty(&net_device->recv_pkt_list)) {
-               list_move_tail(net_device->recv_pkt_list.next, &listHead);
-               if (++count == vmxferpage_packet->range_cnt + 1)
-                       break;
-       }
-       spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
-       /*
-        * We need at least 2 netvsc pkts (1 to represent the xfer
-        * page and at least 1 for the range) i.e. we can handled
-        * some of the xfer page packet ranges...
-        */
-       if (count < 2) {
-               netdev_err(ndev, "Got only %d netvsc pkt...needed "
-                       "%d pkts. Dropping this xfer page packet completely!\n",
-                       count, vmxferpage_packet->range_cnt + 1);
-
-               /* Return it to the freelist */
-               spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
-               for (i = count; i != 0; i--) {
-                       list_move_tail(listHead.next,
-                                      &net_device->recv_pkt_list);
-               }
-               spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
-                                      flags);
-
-               netvsc_send_recv_completion(device, channel, net_device,
-                                           vmxferpage_packet->d.trans_id,
-                                           NVSP_STAT_FAIL);
-
-               return;
-       }
-
-       /* Remove the 1st packet to represent the xfer page packet itself */
-       xferpage_packet = (struct xferpage_packet *)listHead.next;
-       list_del(&xferpage_packet->list_ent);
-       xferpage_packet->status = NVSP_STAT_SUCCESS;
-       xferpage_packet->channel = channel;
-
-       /* This is how much we can satisfy */
-       xferpage_packet->count = count - 1;
-
-       if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
-               netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
-                       "this xfer page...got %d\n",
-                       vmxferpage_packet->range_cnt, xferpage_packet->count);
-       }
+       count = vmxferpage_packet->range_cnt;
+       netvsc_packet->device = device;
+       netvsc_packet->channel = channel;
 
        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
-       for (i = 0; i < (count - 1); i++) {
-               netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
-               list_del(&netvsc_packet->list_ent);
-
+       for (i = 0; i < count; i++) {
                /* Initialize the netvsc packet */
                netvsc_packet->status = NVSP_STAT_SUCCESS;
-               netvsc_packet->xfer_page_pkt = xferpage_packet;
-               netvsc_packet->completion.recv.recv_completion =
-                                       netvsc_receive_completion;
-               netvsc_packet->completion.recv.recv_completion_ctx =
-                                       netvsc_packet;
-               netvsc_packet->device = device;
-               /* Save this so that we can send it back */
-               netvsc_packet->completion.recv.recv_completion_tid =
-                                       vmxferpage_packet->d.trans_id;
-
                netvsc_packet->data = (void *)((unsigned long)net_device->
                        recv_buf + vmxferpage_packet->ranges[i].byte_offset);
                netvsc_packet->total_data_buflen =
@@ -826,10 +905,12 @@ static void netvsc_receive(struct netvsc_device *net_device,
                /* Pass it to the upper layer */
                rndis_filter_receive(device, netvsc_packet);
 
-               netvsc_receive_completion(netvsc_packet->
-                               completion.recv.recv_completion_ctx);
+               if (netvsc_packet->status != NVSP_STAT_SUCCESS)
+                       status = NVSP_STAT_FAIL;
        }
 
+       netvsc_send_recv_completion(device, channel, net_device,
+                                   vmxferpage_packet->d.trans_id, status);
 }
 
 
@@ -956,11 +1037,9 @@ void netvsc_channel_cb(void *context)
 int netvsc_device_add(struct hv_device *device, void *additional_info)
 {
        int ret = 0;
-       int i;
        int ring_size =
        ((struct netvsc_device_info *)additional_info)->ring_size;
        struct netvsc_device *net_device;
-       struct hv_netvsc_packet *packet, *pos;
        struct net_device *ndev;
 
        net_device = alloc_net_device(device);
@@ -981,18 +1060,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
        ndev = net_device->ndev;
 
        /* Initialize the NetVSC channel extension */
-       spin_lock_init(&net_device->recv_pkt_list_lock);
-
-       INIT_LIST_HEAD(&net_device->recv_pkt_list);
-
-       for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
-               packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
-               if (!packet)
-                       break;
-
-               list_add_tail(&packet->list_ent,
-                             &net_device->recv_pkt_list);
-       }
        init_completion(&net_device->channel_init_wait);
 
        set_per_channel_state(device->channel, net_device->cb_buffer);
@@ -1028,16 +1095,8 @@ close:
 
 cleanup:
 
-       if (net_device) {
-               list_for_each_entry_safe(packet, pos,
-                                        &net_device->recv_pkt_list,
-                                        list_ent) {
-                       list_del(&packet->list_ent);
-                       kfree(packet);
-               }
-
+       if (net_device)
                kfree(net_device);
-       }
 
        return ret;
 }
index 093cf3fc46b8683390892c48fe7dc5a2382b4f98..4fd71b75e666418ab7063447160d561a294a0966 100644 (file)
@@ -224,9 +224,11 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
        if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
                return 0;
 
-       if (netvsc_set_hash(&hash, skb))
+       if (netvsc_set_hash(&hash, skb)) {
                q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
                        ndev->real_num_tx_queues;
+               skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+       }
 
        return q_idx;
 }
@@ -235,11 +237,12 @@ static void netvsc_xmit_completion(void *context)
 {
        struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
        struct sk_buff *skb = (struct sk_buff *)
-               (unsigned long)packet->completion.send.send_completion_tid;
+               (unsigned long)packet->send_completion_tid;
+       u32 index = packet->send_buf_index;
 
        kfree(packet);
 
-       if (skb)
+       if (skb && (index == NETVSC_INVALID_INDEX))
                dev_kfree_skb_any(skb);
 }
 
@@ -383,6 +386,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        struct ndis_tcp_lso_info *lso_info;
        int  hdr_offset;
        u32 net_trans_info;
+       u32 hash;
 
 
        /* We will atmost need two pages to describe the rndis
@@ -401,9 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        packet = kzalloc(sizeof(struct hv_netvsc_packet) +
                         (num_data_pgs * sizeof(struct hv_page_buffer)) +
                         sizeof(struct rndis_message) +
-                        NDIS_VLAN_PPI_SIZE +
-                        NDIS_CSUM_PPI_SIZE +
-                        NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
+                        NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
+                        NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
        if (!packet) {
                /* out of memory, drop packet */
                netdev_err(net, "unable to allocate hv_netvsc_packet\n");
@@ -425,9 +428,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                                (num_data_pgs * sizeof(struct hv_page_buffer)));
 
        /* Set the completion routine */
-       packet->completion.send.send_completion = netvsc_xmit_completion;
-       packet->completion.send.send_completion_ctx = packet;
-       packet->completion.send.send_completion_tid = (unsigned long)skb;
+       packet->send_completion = netvsc_xmit_completion;
+       packet->send_completion_ctx = packet;
+       packet->send_completion_tid = (unsigned long)skb;
 
        isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
 
@@ -442,6 +445,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
        rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
 
+       hash = skb_get_hash_raw(skb);
+       if (hash != 0 && net->real_num_tx_queues > 1) {
+               rndis_msg_size += NDIS_HASH_PPI_SIZE;
+               ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
+                                   NBL_HASH_VALUE);
+               *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
+       }
+
        if (isvlan) {
                struct ndis_pkt_8021q_info *vlan;
 
@@ -466,6 +477,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        if (skb_is_gso(skb))
                goto do_lso;
 
+       if ((skb->ip_summed == CHECKSUM_NONE) ||
+           (skb->ip_summed == CHECKSUM_UNNECESSARY))
+               goto do_send;
+
        rndis_msg_size += NDIS_CSUM_PPI_SIZE;
        ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
                            TCPIP_CHKSUM_PKTINFO);
@@ -638,9 +653,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       packet->vlan_tci);
 
-       skb_record_rx_queue(skb, packet->xfer_page_pkt->channel->
-                           offermsg.offer.sub_channel_index %
-                           net->real_num_rx_queues);
+       skb_record_rx_queue(skb, packet->channel->
+                           offermsg.offer.sub_channel_index);
 
        net->stats.rx_packets++;
        net->stats.rx_bytes += packet->total_data_buflen;
@@ -806,7 +820,7 @@ static int netvsc_probe(struct hv_device *dev,
        net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
                        NETIF_F_IP_CSUM | NETIF_F_TSO;
 
-       SET_ETHTOOL_OPS(net, &ethtool_ops);
+       net->ethtool_ops = &ethtool_ops;
        SET_NETDEV_DEV(net, &dev->device);
 
        /* Notify the netvsc driver of the new device */
@@ -823,8 +837,6 @@ static int netvsc_probe(struct hv_device *dev,
        nvdev = hv_get_drvdata(dev);
        netif_set_real_num_tx_queues(net, nvdev->num_chn);
        netif_set_real_num_rx_queues(net, nvdev->num_chn);
-       dev_info(&dev->device, "real num tx,rx queues:%u, %u\n",
-                net->real_num_tx_queues, net->real_num_rx_queues);
 
        ret = register_netdev(net);
        if (ret != 0) {
index d92cfbe4341036bbeffdd8db834d220d5b514b61..99c527adae5bf1ee154b2a02eb0f93a6df32e333 100644 (file)
@@ -236,7 +236,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
                        packet->page_buf[0].len;
        }
 
-       packet->completion.send.send_completion = NULL;
+       packet->send_completion = NULL;
 
        ret = netvsc_send(dev->net_dev->dev, packet);
        return ret;
@@ -401,8 +401,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
        pkt->total_data_buflen = rndis_pkt->data_len;
        pkt->data = (void *)((unsigned long)pkt->data + data_offset);
 
-       pkt->is_data_pkt = true;
-
        vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
        if (vlan) {
                pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
index e36f194673a45a2035a15830571e4e2c02039839..4517b149ed0786946e44eb1a699fe232c2fbd166 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
@@ -692,10 +693,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
        if (rc < 0)
                goto err_rx;
 
-       rc = at86rf230_start(dev);
-
-       return rc;
-
+       return at86rf230_start(dev);
 err_rx:
        at86rf230_start(dev);
 err:
@@ -963,33 +961,24 @@ static irqreturn_t at86rf230_isr_level(int irq, void *data)
        return at86rf230_isr(irq, data);
 }
 
-static int at86rf230_irq_polarity(struct at86rf230_local *lp, int pol)
-{
-       return at86rf230_write_subreg(lp, SR_IRQ_POLARITY, pol);
-}
-
 static int at86rf230_hw_init(struct at86rf230_local *lp)
 {
-       struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
-       int rc, irq_pol;
-       u8 status;
+       int rc, irq_pol, irq_type;
+       u8 dvdd;
        u8 csma_seed[2];
 
-       rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
-       if (rc)
-               return rc;
-
        rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
        if (rc)
                return rc;
 
+       irq_type = irq_get_trigger_type(lp->spi->irq);
        /* configure irq polarity, defaults to high active */
-       if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
+       if (irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
                irq_pol = IRQ_ACTIVE_LOW;
        else
                irq_pol = IRQ_ACTIVE_HIGH;
 
-       rc = at86rf230_irq_polarity(lp, irq_pol);
+       rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
        if (rc)
                return rc;
 
@@ -1017,10 +1006,10 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        /* Wait the next SLEEP cycle */
        msleep(100);
 
-       rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+       rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
        if (rc)
                return rc;
-       if (!status) {
+       if (!dvdd) {
                dev_err(&lp->spi->dev, "DVDD error\n");
                return -EINVAL;
        }
@@ -1032,7 +1021,6 @@ static struct at86rf230_platform_data *
 at86rf230_get_pdata(struct spi_device *spi)
 {
        struct at86rf230_platform_data *pdata;
-       const char *irq_type;
 
        if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
                return spi->dev.platform_data;
@@ -1044,19 +1032,6 @@ at86rf230_get_pdata(struct spi_device *spi)
        pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
        pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
 
-       pdata->irq_type = IRQF_TRIGGER_RISING;
-       of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
-       if (!strcmp(irq_type, "level-high"))
-               pdata->irq_type = IRQF_TRIGGER_HIGH;
-       else if (!strcmp(irq_type, "level-low"))
-               pdata->irq_type = IRQF_TRIGGER_LOW;
-       else if (!strcmp(irq_type, "edge-rising"))
-               pdata->irq_type = IRQF_TRIGGER_RISING;
-       else if (!strcmp(irq_type, "edge-falling"))
-               pdata->irq_type = IRQF_TRIGGER_FALLING;
-       else
-               dev_warn(&spi->dev, "wrong irq-type specified using edge-rising\n");
-
        spi->dev.platform_data = pdata;
 done:
        return pdata;
@@ -1071,7 +1046,7 @@ static int at86rf230_probe(struct spi_device *spi)
        u8 part = 0, version = 0, status;
        irq_handler_t irq_handler;
        work_func_t irq_worker;
-       int rc;
+       int rc, irq_type;
        const char *chip;
        struct ieee802154_ops *ops = NULL;
 
@@ -1087,27 +1062,17 @@ static int at86rf230_probe(struct spi_device *spi)
        }
 
        if (gpio_is_valid(pdata->rstn)) {
-               rc = gpio_request(pdata->rstn, "rstn");
+               rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
+                                          GPIOF_OUT_INIT_HIGH, "rstn");
                if (rc)
                        return rc;
        }
 
        if (gpio_is_valid(pdata->slp_tr)) {
-               rc = gpio_request(pdata->slp_tr, "slp_tr");
-               if (rc)
-                       goto err_slp_tr;
-       }
-
-       if (gpio_is_valid(pdata->rstn)) {
-               rc = gpio_direction_output(pdata->rstn, 1);
-               if (rc)
-                       goto err_gpio_dir;
-       }
-
-       if (gpio_is_valid(pdata->slp_tr)) {
-               rc = gpio_direction_output(pdata->slp_tr, 0);
+               rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr,
+                                          GPIOF_OUT_INIT_LOW, "slp_tr");
                if (rc)
-                       goto err_gpio_dir;
+                       return rc;
        }
 
        /* Reset */
@@ -1121,13 +1086,12 @@ static int at86rf230_probe(struct spi_device *spi)
 
        rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
        if (rc < 0)
-               goto err_gpio_dir;
+               return rc;
 
        if (man_id != 0x001f) {
                dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
                        man_id >> 8, man_id & 0xFF);
-               rc = -EINVAL;
-               goto err_gpio_dir;
+               return -EINVAL;
        }
 
        switch (part) {
@@ -1154,16 +1118,12 @@ static int at86rf230_probe(struct spi_device *spi)
        }
 
        dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
-       if (!ops) {
-               rc = -ENOTSUPP;
-               goto err_gpio_dir;
-       }
+       if (!ops)
+               return -ENOTSUPP;
 
        dev = ieee802154_alloc_device(sizeof(*lp), ops);
-       if (!dev) {
-               rc = -ENOMEM;
-               goto err_gpio_dir;
-       }
+       if (!dev)
+               return -ENOMEM;
 
        lp = dev->priv;
        lp->dev = dev;
@@ -1176,7 +1136,8 @@ static int at86rf230_probe(struct spi_device *spi)
        dev->extra_tx_headroom = 0;
        dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
 
-       if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+       irq_type = irq_get_trigger_type(spi->irq);
+       if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
                irq_worker = at86rf230_irqwork;
                irq_handler = at86rf230_isr;
        } else {
@@ -1202,75 +1163,65 @@ static int at86rf230_probe(struct spi_device *spi)
        if (rc)
                goto err_hw_init;
 
-       rc = request_irq(spi->irq, irq_handler,
-                        IRQF_SHARED | pdata->irq_type,
-                        dev_name(&spi->dev), lp);
+       /* Read irq status register to reset irq line */
+       rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
        if (rc)
                goto err_hw_init;
 
-       /* Read irq status register to reset irq line */
-       rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
+       rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED,
+                             dev_name(&spi->dev), lp);
        if (rc)
-               goto err_irq;
+               goto err_hw_init;
 
        rc = ieee802154_register_device(lp->dev);
        if (rc)
-               goto err_irq;
+               goto err_hw_init;
 
        return rc;
 
-err_irq:
-       free_irq(spi->irq, lp);
 err_hw_init:
        flush_work(&lp->irqwork);
-       spi_set_drvdata(spi, NULL);
        mutex_destroy(&lp->bmux);
        ieee802154_free_device(lp->dev);
 
-err_gpio_dir:
-       if (gpio_is_valid(pdata->slp_tr))
-               gpio_free(pdata->slp_tr);
-err_slp_tr:
-       if (gpio_is_valid(pdata->rstn))
-               gpio_free(pdata->rstn);
        return rc;
 }
 
 static int at86rf230_remove(struct spi_device *spi)
 {
        struct at86rf230_local *lp = spi_get_drvdata(spi);
-       struct at86rf230_platform_data *pdata = spi->dev.platform_data;
 
        /* mask all at86rf230 irq's */
        at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
        ieee802154_unregister_device(lp->dev);
-
-       free_irq(spi->irq, lp);
        flush_work(&lp->irqwork);
-
-       if (gpio_is_valid(pdata->slp_tr))
-               gpio_free(pdata->slp_tr);
-       if (gpio_is_valid(pdata->rstn))
-               gpio_free(pdata->rstn);
-
        mutex_destroy(&lp->bmux);
        ieee802154_free_device(lp->dev);
-
        dev_dbg(&spi->dev, "unregistered at86rf230\n");
+
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_OF)
-static struct of_device_id at86rf230_of_match[] = {
+static const struct of_device_id at86rf230_of_match[] = {
        { .compatible = "atmel,at86rf230", },
        { .compatible = "atmel,at86rf231", },
        { .compatible = "atmel,at86rf233", },
        { .compatible = "atmel,at86rf212", },
        { },
 };
-#endif
+MODULE_DEVICE_TABLE(of, at86rf230_of_match);
+
+static const struct spi_device_id at86rf230_device_id[] = {
+       { .name = "at86rf230", },
+       { .name = "at86rf231", },
+       { .name = "at86rf233", },
+       { .name = "at86rf212", },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, at86rf230_device_id);
 
 static struct spi_driver at86rf230_driver = {
+       .id_table = at86rf230_device_id,
        .driver = {
                .of_match_table = of_match_ptr(at86rf230_of_match),
                .name   = "at86rf230",
index b8d22173925dee1aed62df3ccd4d23deb44b976b..27d83207d24ce0de722144c74e98ce65601fcfc0 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/timer.h>
 #include <linux/platform_device.h>
 #include <linux/netdevice.h>
+#include <linux/device.h>
 #include <linux/spinlock.h>
 #include <net/mac802154.h>
 #include <net/wpan-phy.h>
@@ -228,7 +229,8 @@ static int fakelb_probe(struct platform_device *pdev)
        int err = -ENOMEM;
        int i;
 
-       priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
+       priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
+                           GFP_KERNEL);
        if (!priv)
                goto err_alloc;
 
@@ -248,7 +250,6 @@ static int fakelb_probe(struct platform_device *pdev)
 err_slave:
        list_for_each_entry(dp, &priv->list, list)
                fakelb_del(dp);
-       kfree(priv);
 err_alloc:
        return err;
 }
@@ -260,7 +261,6 @@ static int fakelb_remove(struct platform_device *pdev)
 
        list_for_each_entry_safe(dp, temp, &priv->list, list)
                fakelb_del(dp);
-       kfree(priv);
 
        return 0;
 }
index 3da44d5d91497801a141b373c60f8cd5890a1bf9..8d101d63abca9a48466edfed2db11ab54df32d63 100644 (file)
@@ -396,7 +396,8 @@ config MCS_FIR
 
 config SH_IRDA
        tristate "SuperH IrDA driver"
-       depends on IRDA && ARCH_SHMOBILE
+       depends on IRDA
+       depends on ARCH_SHMOBILE || COMPILE_TEST
        help
          Say Y here if your want to enable SuperH IrDA devices.
 
index e641bb2403624fdd91fb565ea646d70d03c2c5fd..11dbdf36d9c1b328ff70b6e5d0e5d2f7729dbb2a 100644 (file)
 #include "w83977af.h"
 #include "w83977af_ir.h"
 
-#ifdef  CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
-#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
-#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
-#endif
 #define CONFIG_USE_W977_PNP        /* Currently needed */
 #define PIO_MAX_SPEED       115200 
 
@@ -332,7 +328,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
                w977_write_reg(0x74, dma+1, efbase[i]);
 #else
                w977_write_reg(0x74, dma, efbase[i]);   
-#endif /*CONFIG_ARCH_NETWINDER */
+#endif /* CONFIG_ARCH_NETWINDER */
                w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */
        
                /* Set append hardware CRC, enable IR bank selection */ 
@@ -563,10 +559,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
 static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
 {
        __u8 set;
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
-       unsigned long flags;
-       __u8 hcr;
-#endif
         IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
 
        /* Save current set */
@@ -579,30 +571,13 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
        /* Choose transmit DMA channel  */ 
        switch_bank(iobase, SET2);
        outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
-       spin_lock_irqsave(&self->lock, flags);
-
-       disable_dma(self->io.dma);
-       clear_dma_ff(self->io.dma);
-       set_dma_mode(self->io.dma, DMA_MODE_READ);
-       set_dma_addr(self->io.dma, self->tx_buff_dma);
-       set_dma_count(self->io.dma, self->tx_buff.len);
-#else
        irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
                       DMA_MODE_WRITE); 
-#endif
        self->io.direction = IO_XMIT;
        
        /* Enable DMA */
        switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
-       hcr = inb(iobase+HCR);
-       outb(hcr | HCR_EN_DMA, iobase+HCR);
-       enable_dma(self->io.dma);
-       spin_unlock_irqrestore(&self->lock, flags);
-#else  
        outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
-#endif
 
        /* Restore set register */
        outb(set, iobase+SSR);
@@ -711,7 +686,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
 {
        int iobase;
        __u8 set;
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
        unsigned long flags;
        __u8 hcr;
 #endif
@@ -736,7 +711,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
        self->io.direction = IO_RECV;
        self->rx_buff.data = self->rx_buff.head;
 
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
        spin_lock_irqsave(&self->lock, flags);
 
        disable_dma(self->io.dma);
@@ -759,7 +734,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
        
        /* Enable DMA */
        switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
        hcr = inb(iobase+HCR);
        outb(hcr | HCR_EN_DMA, iobase+HCR);
        enable_dma(self->io.dma);
index 8b8220fcdd3d2e7448f1834d4462924d27b3d2c5..a665e902b9891096d76a427c1de26a61d49e3b3b 100644 (file)
@@ -44,9 +44,10 @@ struct macvlan_port {
        struct sk_buff_head     bc_queue;
        struct work_struct      bc_work;
        bool                    passthru;
-       int                     count;
 };
 
+#define MACVLAN_PORT_IS_EMPTY(port)    list_empty(&port->vlans)
+
 struct macvlan_skb_cb {
        const struct macvlan_dev *src;
 };
@@ -239,25 +240,28 @@ static void macvlan_process_broadcast(struct work_struct *w)
 static void macvlan_broadcast_enqueue(struct macvlan_port *port,
                                      struct sk_buff *skb)
 {
+       struct sk_buff *nskb;
        int err = -ENOMEM;
 
-       skb = skb_clone(skb, GFP_ATOMIC);
-       if (!skb)
+       nskb = skb_clone(skb, GFP_ATOMIC);
+       if (!nskb)
                goto err;
 
        spin_lock(&port->bc_queue.lock);
        if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
-               __skb_queue_tail(&port->bc_queue, skb);
+               __skb_queue_tail(&port->bc_queue, nskb);
                err = 0;
        }
        spin_unlock(&port->bc_queue.lock);
 
        if (err)
-               goto err;
+               goto free_nskb;
 
        schedule_work(&port->bc_work);
        return;
 
+free_nskb:
+       kfree_skb(nskb);
 err:
        atomic_long_inc(&skb->dev->rx_dropped);
 }
@@ -329,11 +333,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
        const struct macvlan_dev *vlan = netdev_priv(dev);
        const struct macvlan_port *port = vlan->port;
        const struct macvlan_dev *dest;
-       __u8 ip_summed = skb->ip_summed;
 
        if (vlan->mode == MACVLAN_MODE_BRIDGE) {
                const struct ethhdr *eth = (void *)skb->data;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                /* send to other bridge ports directly */
                if (is_multicast_ether_addr(eth->h_dest)) {
@@ -351,7 +353,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
 xmit_world:
-       skb->ip_summed = ip_summed;
        skb->dev = vlan->lowerdev;
        return dev_queue_xmit(skb);
 }
@@ -527,8 +528,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
 
-       if (change & IFF_ALLMULTI)
-               dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       if (dev->flags & IFF_UP) {
+               if (change & IFF_ALLMULTI)
+                       dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       }
 }
 
 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -584,6 +587,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+       return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
                                          struct netdev_queue *txq,
                                          void *_unused)
@@ -594,8 +602,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-       lockdep_set_class(&dev->addr_list_lock,
-                         &macvlan_netdev_addr_lock_key);
+       lockdep_set_class_and_subclass(&dev->addr_list_lock,
+                                      &macvlan_netdev_addr_lock_key,
+                                      macvlan_get_nest_level(dev));
        netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -628,8 +637,7 @@ static void macvlan_uninit(struct net_device *dev)
 
        free_percpu(vlan->pcpu_stats);
 
-       port->count -= 1;
-       if (!port->count)
+       if (MACVLAN_PORT_IS_EMPTY(port))
                macvlan_port_destroy(port->dev);
 }
 
@@ -790,6 +798,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
+       .ndo_get_lock_subclass  = macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -922,6 +931,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
+       vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
@@ -931,13 +941,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
 
        if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
-               if (port->count)
+               if (!MACVLAN_PORT_IS_EMPTY(port))
                        return -EINVAL;
                port->passthru = true;
                eth_hw_addr_inherit(dev, lowerdev);
        }
 
-       port->count += 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto destroy_port;
@@ -955,8 +964,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 unregister_netdev:
        unregister_netdevice(dev);
 destroy_port:
-       port->count -= 1;
-       if (!port->count)
+       if (MACVLAN_PORT_IS_EMPTY(port))
                macvlan_port_destroy(lowerdev);
 
        return err;
@@ -1091,6 +1099,13 @@ static int macvlan_device_event(struct notifier_block *unused,
                        netdev_update_features(vlan->dev);
                }
                break;
+       case NETDEV_CHANGEMTU:
+               list_for_each_entry(vlan, &port->vlans, list) {
+                       if (vlan->dev->mtu <= dev->mtu)
+                               continue;
+                       dev_set_mtu(vlan->dev, dev->mtu);
+               }
+               break;
        case NETDEV_UNREGISTER:
                /* twiddle thumbs on netns device moves */
                if (dev->reg_state != NETREG_UNREGISTERING)
index ff111a89e17f9c66561d79916d8d57e282c119d2..3381c4f91a8cc236df0df8be59bd586538480498 100644 (file)
@@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
                        segs = nskb;
                }
        } else {
+               /* If we receive a partial checksum and the tap side
+                * doesn't support checksum offload, compute the checksum.
+                * Note: it doesn't matter which checksum feature to
+                *        check, we either support them all or none.
+                */
+               if (skb->ip_summed == CHECKSUM_PARTIAL &&
+                   !(features & NETIF_F_ALL_CSUM) &&
+                   skb_checksum_help(skb))
+                       goto drop;
                skb_queue_tail(&q->sk.sk_receive_queue, skb);
        }
 
index 63aa9d9e34c52b1c2150fa7876ce3658a7c11fe5..27536aa8919950cc18ca0ac1b7c4965523839032 100644 (file)
@@ -348,7 +348,7 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
        memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
 
        ndev->netdev_ops = &ntb_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
+       ndev->ethtool_ops = &ntb_ethtool_ops;
 
        dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
        if (!dev->qp) {
index b256083aa69ebabbed9ae06079835a704b66ce1a..6c622aedbae111842b0e8ec86aede54653b84ec6 100644 (file)
@@ -253,8 +253,7 @@ static int __init atheros_init(void)
 
 static void __exit atheros_exit(void)
 {
-       return phy_drivers_unregister(at803x_driver,
-                                     ARRAY_SIZE(at803x_driver));
+       phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
 }
 
 module_init(atheros_init);
index ba55adfc7aaef00b7cfee22fe0fd2ff8eb67ffe8..d60d875cb4450ab6cf72114c35b4c816e2e031fd 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/phy_fixed.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 
 #define MII_REGS_NUM 29
 
@@ -31,7 +32,7 @@ struct fixed_mdio_bus {
 };
 
 struct fixed_phy {
-       int id;
+       int addr;
        u16 regs[MII_REGS_NUM];
        struct phy_device *phydev;
        struct fixed_phy_status status;
@@ -104,8 +105,8 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
        if (fp->status.asym_pause)
                lpa |= LPA_PAUSE_ASYM;
 
-       fp->regs[MII_PHYSID1] = fp->id >> 16;
-       fp->regs[MII_PHYSID2] = fp->id;
+       fp->regs[MII_PHYSID1] = 0;
+       fp->regs[MII_PHYSID2] = 0;
 
        fp->regs[MII_BMSR] = bmsr;
        fp->regs[MII_BMCR] = bmcr;
@@ -115,7 +116,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
        return 0;
 }
 
-static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
+static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
 {
        struct fixed_mdio_bus *fmb = bus->priv;
        struct fixed_phy *fp;
@@ -124,7 +125,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
                return -1;
 
        list_for_each_entry(fp, &fmb->phys, node) {
-               if (fp->id == phy_id) {
+               if (fp->addr == phy_addr) {
                        /* Issue callback if user registered it. */
                        if (fp->link_update) {
                                fp->link_update(fp->phydev->attached_dev,
@@ -138,7 +139,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
        return 0xFFFF;
 }
 
-static int fixed_mdio_write(struct mii_bus *bus, int phy_id, int reg_num,
+static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
                            u16 val)
 {
        return 0;
@@ -160,7 +161,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
                return -EINVAL;
 
        list_for_each_entry(fp, &fmb->phys, node) {
-               if (fp->id == phydev->phy_id) {
+               if (fp->addr == phydev->addr) {
                        fp->link_update = link_update;
                        fp->phydev = phydev;
                        return 0;
@@ -171,7 +172,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
 }
 EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
 
-int fixed_phy_add(unsigned int irq, int phy_id,
+int fixed_phy_add(unsigned int irq, int phy_addr,
                  struct fixed_phy_status *status)
 {
        int ret;
@@ -184,9 +185,9 @@ int fixed_phy_add(unsigned int irq, int phy_id,
 
        memset(fp->regs, 0xFF,  sizeof(fp->regs[0]) * MII_REGS_NUM);
 
-       fmb->irqs[phy_id] = irq;
+       fmb->irqs[phy_addr] = irq;
 
-       fp->id = phy_id;
+       fp->addr = phy_addr;
        fp->status = *status;
 
        ret = fixed_phy_update_regs(fp);
@@ -203,6 +204,66 @@ err_regs:
 }
 EXPORT_SYMBOL_GPL(fixed_phy_add);
 
+void fixed_phy_del(int phy_addr)
+{
+       struct fixed_mdio_bus *fmb = &platform_fmb;
+       struct fixed_phy *fp, *tmp;
+
+       list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
+               if (fp->addr == phy_addr) {
+                       list_del(&fp->node);
+                       kfree(fp);
+                       return;
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(fixed_phy_del);
+
+static int phy_fixed_addr;
+static DEFINE_SPINLOCK(phy_fixed_addr_lock);
+
+int fixed_phy_register(unsigned int irq,
+                      struct fixed_phy_status *status,
+                      struct device_node *np)
+{
+       struct fixed_mdio_bus *fmb = &platform_fmb;
+       struct phy_device *phy;
+       int phy_addr;
+       int ret;
+
+       /* Get the next available PHY address, up to PHY_MAX_ADDR */
+       spin_lock(&phy_fixed_addr_lock);
+       if (phy_fixed_addr == PHY_MAX_ADDR) {
+               spin_unlock(&phy_fixed_addr_lock);
+               return -ENOSPC;
+       }
+       phy_addr = phy_fixed_addr++;
+       spin_unlock(&phy_fixed_addr_lock);
+
+       ret = fixed_phy_add(PHY_POLL, phy_addr, status);
+       if (ret < 0)
+               return ret;
+
+       phy = get_phy_device(fmb->mii_bus, phy_addr, false);
+       if (!phy || IS_ERR(phy)) {
+               fixed_phy_del(phy_addr);
+               return -EINVAL;
+       }
+
+       of_node_get(np);
+       phy->dev.of_node = np;
+
+       ret = phy_device_register(phy);
+       if (ret) {
+               phy_device_free(phy);
+               of_node_put(np);
+               fixed_phy_del(phy_addr);
+               return ret;
+       }
+
+       return 0;
+}
+
 static int __init fixed_mdio_bus_init(void)
 {
        struct fixed_mdio_bus *fmb = &platform_fmb;
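
A minimal usage sketch for the new fixed_phy_register() helper above. The caller, the sketch_* name and the link parameters are illustrative assumptions, not part of this patch; a MAC driver emulating a fixed 1Gbit/s full-duplex link for a device-tree node would do roughly:

#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

static int sketch_add_fixed_link(struct device_node *np)
{
        /* illustrative link parameters; a real driver would read them from DT */
        struct fixed_phy_status status = {
                .link   = 1,
                .speed  = 1000,
                .duplex = 1,
        };

        /* picks the next free address on the fixed MDIO bus and registers
         * a phy_device whose of_node points at @np */
        return fixed_phy_register(PHY_POLL, &status, np);
}
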
index 9c4defdec67b09299f38f1b06bf8eacbccd007d1..5f1a2250018fec5ba01a2a1a1a11f2d736f544a9 100644 (file)
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
        if (pdev->dev.of_node) {
                pdata = mdio_gpio_of_get_data(pdev);
                bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+               if (bus_id < 0) {
+                       dev_warn(&pdev->dev, "failed to get alias id\n");
+                       bus_id = 0;
+               }
        } else {
                pdata = dev_get_platdata(&pdev->dev);
                bus_id = pdev->id;
index 76f54b32a120832f2ce212c129592a6f30ab83df..2e58aa54484c9ca4e3154e231a3af2766bc84933 100644 (file)
@@ -69,6 +69,73 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
 }
 EXPORT_SYMBOL(mdiobus_alloc_size);
 
+static void _devm_mdiobus_free(struct device *dev, void *res)
+{
+       mdiobus_free(*(struct mii_bus **)res);
+}
+
+static int devm_mdiobus_match(struct device *dev, void *res, void *data)
+{
+       struct mii_bus **r = res;
+
+       if (WARN_ON(!r || !*r))
+               return 0;
+
+       return *r == data;
+}
+
+/**
+ * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size()
+ * @dev:               Device to allocate mii_bus for
+ * @sizeof_priv:       Space to allocate for private structure.
+ *
+ * Managed mdiobus_alloc_size. mii_bus allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an mii_bus allocated with this function needs to be freed separately,
+ * devm_mdiobus_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated mii_bus on success, NULL on failure.
+ */
+struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
+{
+       struct mii_bus **ptr, *bus;
+
+       ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       /* use raw alloc_dr for kmalloc caller tracing */
+       bus = mdiobus_alloc_size(sizeof_priv);
+       if (bus) {
+               *ptr = bus;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return bus;
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size);
+
+/**
+ * devm_mdiobus_free - Resource-managed mdiobus_free()
+ * @dev:               Device this mii_bus belongs to
+ * @bus:               the mii_bus associated with the device
+ *
+ * Free mii_bus allocated with devm_mdiobus_alloc_size().
+ */
+void devm_mdiobus_free(struct device *dev, struct mii_bus *bus)
+{
+       int rc;
+
+       rc = devres_release(dev, _devm_mdiobus_free,
+                           devm_mdiobus_match, bus);
+       WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_free);
+
 /**
  * mdiobus_release - mii_bus device release callback
  * @d: the target struct device that contains the mii_bus
@@ -233,6 +300,12 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
        if (IS_ERR(phydev) || phydev == NULL)
                return phydev;
 
+       /*
+        * For DT, see if the auto-probed phy has a corresponding child
+        * in the bus node, and set the of_node pointer in this case.
+        */
+       of_mdiobus_link_phydev(bus, phydev);
+
        err = phy_device_register(phydev);
        if (err) {
                phy_device_free(phydev);
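
A minimal sketch of how a bus driver might use the devm_mdiobus_alloc_size() helper added above. The sketch_* names and the private structure are assumptions for illustration only:

#include <linux/phy.h>
#include <linux/platform_device.h>

struct sketch_mdio_priv {
        void __iomem *regs;
};

static int sketch_mdio_probe(struct platform_device *pdev)
{
        struct mii_bus *bus;

        /* freed automatically on driver detach; no mdiobus_free() needed */
        bus = devm_mdiobus_alloc_size(&pdev->dev,
                                      sizeof(struct sketch_mdio_priv));
        if (!bus)
                return -ENOMEM;

        bus->name = "sketch-mdio";
        bus->parent = &pdev->dev;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
        /* bus->read and bus->write would point at real accessors here */

        /* only the allocation is devres-managed: mdiobus_unregister()
         * is still required in the driver's remove path */
        return mdiobus_register(bus);
}
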
index 5ad971a55c5d9f21ffb3ded8e9d5704534095d21..bc7c7d2f75f26e41ccd205eeaa6402eee25e0233 100644 (file)
@@ -246,13 +246,13 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
        if (val1 != -1)
                newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
 
-       if (val2 != -1)
+       if (val2 != -2)
                newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
 
-       if (val3 != -1)
+       if (val3 != -3)
                newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
 
-       if (val4 != -1)
+       if (val4 != -4)
                newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
 
        return kszphy_extended_write(phydev, reg, newval);
@@ -283,6 +283,110 @@ static int ksz9021_config_init(struct phy_device *phydev)
        return 0;
 }
 
+#define MII_KSZ9031RN_MMD_CTRL_REG     0x0d
+#define MII_KSZ9031RN_MMD_REGDATA_REG  0x0e
+#define OP_DATA                                1
+#define KSZ9031_PS_TO_REG              60
+
+/* Extended registers */
+#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
+#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
+#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
+#define MII_KSZ9031RN_CLK_PAD_SKEW     8
+
+static int ksz9031_extended_write(struct phy_device *phydev,
+                                 u8 mode, u32 dev_addr, u32 regnum, u16 val)
+{
+       phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+       phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+       phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+       return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val);
+}
+
+static int ksz9031_extended_read(struct phy_device *phydev,
+                                u8 mode, u32 dev_addr, u32 regnum)
+{
+       phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+       phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+       phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+       return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG);
+}
+
+static int ksz9031_of_load_skew_values(struct phy_device *phydev,
+                                      struct device_node *of_node,
+                                      u16 reg, size_t field_sz,
+                                      char *field[], u8 numfields)
+{
+       int val[4] = {-1, -2, -3, -4};
+       int matches = 0;
+       u16 mask;
+       u16 maxval;
+       u16 newval;
+       int i;
+
+       for (i = 0; i < numfields; i++)
+               if (!of_property_read_u32(of_node, field[i], val + i))
+                       matches++;
+
+       if (!matches)
+               return 0;
+
+       if (matches < numfields)
+               newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+       else
+               newval = 0;
+
+       maxval = (field_sz == 4) ? 0xf : 0x1f;
+       for (i = 0; i < numfields; i++)
+               if (val[i] != -(i + 1)) {
+                       mask = 0xffff;
+                       mask ^= maxval << (field_sz * i);
+                       newval = (newval & mask) |
+                               (((val[i] / KSZ9031_PS_TO_REG) & maxval)
+                                       << (field_sz * i));
+               }
+
+       return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+}
+
+static int ksz9031_config_init(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct device_node *of_node = dev->of_node;
+       char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+       char *rx_data_skews[4] = {
+               "rxd0-skew-ps", "rxd1-skew-ps",
+               "rxd2-skew-ps", "rxd3-skew-ps"
+       };
+       char *tx_data_skews[4] = {
+               "txd0-skew-ps", "txd1-skew-ps",
+               "txd2-skew-ps", "txd3-skew-ps"
+       };
+       char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+
+       if (!of_node && dev->parent->of_node)
+               of_node = dev->parent->of_node;
+
+       if (of_node) {
+               ksz9031_of_load_skew_values(phydev, of_node,
+                               MII_KSZ9031RN_CLK_PAD_SKEW, 5,
+                               clk_skews, 2);
+
+               ksz9031_of_load_skew_values(phydev, of_node,
+                               MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
+                               control_skews, 2);
+
+               ksz9031_of_load_skew_values(phydev, of_node,
+                               MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
+                               rx_data_skews, 4);
+
+               ksz9031_of_load_skew_values(phydev, of_node,
+                               MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
+                               tx_data_skews, 4);
+       }
+       return 0;
+}
+
 #define KSZ8873MLL_GLOBAL_CONTROL_4    0x06
 #define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX     (1 << 6)
 #define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED      (1 << 4)
@@ -469,7 +573,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-       .config_init    = kszphy_config_init,
+       .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
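
To make the mask/shift arithmetic in ksz9031_of_load_skew_values() above concrete, a standalone sketch (the helper name and the example numbers are illustrative, not from the patch):

/* Merge one skew field into the current register value, mirroring the
 * loop body above.  Example: a 4-bit field at index i = 2 (say
 * "rxd2-skew-ps" = 420) gives mask = 0xffff ^ (0xf << 8) = 0xf0ff and
 * inserts (420 / KSZ9031_PS_TO_REG) << 8 = 0x0700.
 */
static u16 sketch_merge_skew(u16 oldval, u32 ps, int field_sz, int i)
{
        u16 maxval = (field_sz == 4) ? 0xf : 0x1f;
        u16 mask = 0xffff ^ (maxval << (field_sz * i));

        return (oldval & mask) |
               (((ps / KSZ9031_PS_TO_REG) & maxval) << (field_sz * i));
}
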
index 1b6d09aef42748bcbba6d4fe88ca68d6ea83c852..3bc079a67a3dc85a222e2b784f7f65ec55dc6b59 100644 (file)
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
-       int needs_aneg = 0, do_suspend = 0;
+       bool needs_aneg = false, do_suspend = false, do_resume = false;
        int err = 0;
 
        mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
        case PHY_PENDING:
                break;
        case PHY_UP:
-               needs_aneg = 1;
+               needs_aneg = true;
 
                phydev->link_timeout = PHY_AN_TIMEOUT;
 
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->adjust_link(phydev->attached_dev);
 
                } else if (0 == phydev->link_timeout--)
-                       needs_aneg = 1;
+                       needs_aneg = true;
                break;
        case PHY_NOLINK:
                err = phy_read_status(phydev);
@@ -765,6 +765,17 @@ void phy_state_machine(struct work_struct *work)
                        break;
 
                if (phydev->link) {
+                       if (AUTONEG_ENABLE == phydev->autoneg) {
+                               err = phy_aneg_done(phydev);
+                               if (err < 0)
+                                       break;
+
+                               if (!err) {
+                                       phydev->state = PHY_AN;
+                                       phydev->link_timeout = PHY_AN_TIMEOUT;
+                                       break;
+                               }
+                       }
                        phydev->state = PHY_RUNNING;
                        netif_carrier_on(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
@@ -780,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
                        netif_carrier_on(phydev->attached_dev);
                } else {
                        if (0 == phydev->link_timeout--)
-                               needs_aneg = 1;
+                               needs_aneg = true;
                }
 
                phydev->adjust_link(phydev->attached_dev);
@@ -816,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
                        phydev->link = 0;
                        netif_carrier_off(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
-                       do_suspend = 1;
+                       do_suspend = true;
                }
                break;
        case PHY_RESUMING:
@@ -865,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
                        }
                        phydev->adjust_link(phydev->attached_dev);
                }
+               do_resume = true;
                break;
        }
 
@@ -872,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
 
        if (needs_aneg)
                err = phy_start_aneg(phydev);
-
-       if (do_suspend)
+       else if (do_suspend)
                phy_suspend(phydev);
+       else if (do_resume)
+               phy_resume(phydev);
 
        if (err < 0)
                phy_error(phydev);
index 466ae3e063220179580c48d351ceaa0eac5ee615..35d753d22f78b91d643548029df5b2e6eea64d49 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/mdio.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/of.h>
 
 #include <asm/irq.h>
 
@@ -614,8 +615,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        err = phy_init_hw(phydev);
        if (err)
                phy_detach(phydev);
-
-       phy_resume(phydev);
+       else
+               phy_resume(phydev);
 
        return err;
 }
@@ -1072,9 +1073,6 @@ int genphy_config_init(struct phy_device *phydev)
        int val;
        u32 features;
 
-       /* For now, I'll claim that the generic driver supports
-        * all possible port types
-        */
        features = (SUPPORTED_TP | SUPPORTED_MII
                        | SUPPORTED_AUI | SUPPORTED_FIBRE |
                        SUPPORTED_BNC);
@@ -1107,8 +1105,8 @@ int genphy_config_init(struct phy_device *phydev)
                        features |= SUPPORTED_1000baseT_Half;
        }
 
-       phydev->supported = features;
-       phydev->advertising = features;
+       phydev->supported &= features;
+       phydev->advertising &= features;
 
        return 0;
 }
@@ -1169,6 +1167,38 @@ static int gen10g_resume(struct phy_device *phydev)
        return 0;
 }
 
+static void of_set_phy_supported(struct phy_device *phydev)
+{
+       struct device_node *node = phydev->dev.of_node;
+       u32 max_speed;
+
+       if (!IS_ENABLED(CONFIG_OF_MDIO))
+               return;
+
+       if (!node)
+               return;
+
+       if (!of_property_read_u32(node, "max-speed", &max_speed)) {
+               /* The default values for phydev->supported are provided by the PHY
+                * driver "features" member; we want to reset to sane defaults first
+                * before supporting higher speeds.
+                */
+               phydev->supported &= PHY_DEFAULT_FEATURES;
+
+               switch (max_speed) {
+               default:
+                       return;
+
+               case SPEED_1000:
+                       phydev->supported |= PHY_1000BT_FEATURES;
+               case SPEED_100:
+                       phydev->supported |= PHY_100BT_FEATURES;
+               case SPEED_10:
+                       phydev->supported |= PHY_10BT_FEATURES;
+               }
+       }
+}
+
 /**
  * phy_probe - probe and init a PHY device
  * @dev: device to probe and init
@@ -1203,7 +1233,8 @@ static int phy_probe(struct device *dev)
         * or both of these values
         */
        phydev->supported = phydrv->features;
-       phydev->advertising = phydrv->features;
+       of_set_phy_supported(phydev);
+       phydev->advertising = phydev->supported;
 
        /* Set the state to READY by default */
        phydev->state = PHY_READY;
@@ -1296,7 +1327,9 @@ static struct phy_driver genphy_driver[] = {
        .name           = "Generic PHY",
        .soft_reset     = genphy_soft_reset,
        .config_init    = genphy_config_init,
-       .features       = 0,
+       .features       = PHY_GBIT_FEATURES | SUPPORTED_MII |
+                         SUPPORTED_AUI | SUPPORTED_FIBRE |
+                         SUPPORTED_BNC,
        .config_aneg    = genphy_config_aneg,
        .aneg_done      = genphy_aneg_done,
        .read_status    = genphy_read_status,
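
A short sketch of what the new "max-speed" handling in of_set_phy_supported() above amounts to (sketch_* is an illustrative name, not in the patch): for a node with max-speed = <100> the missing breaks are intentional fall-through, so the mask is reduced to the 10/100 modes; the companion change to genphy_config_init(), which now ANDs rather than assigns, keeps that limit in place.

static u32 sketch_supported_for_max_speed_100(u32 driver_features)
{
        u32 supported = driver_features & PHY_DEFAULT_FEATURES;

        /* SPEED_100 case; falls through to the SPEED_10 features too */
        supported |= PHY_100BT_FEATURES;
        supported |= PHY_10BT_FEATURES;

        return supported;
}
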
index 11f34813e23fb5423ccf90cf9d25e208ed2275f3..180c49479c42f9b4a19f070056b782923de5084c 100644 (file)
@@ -249,8 +249,7 @@ static int __init smsc_init(void)
 
 static void __exit smsc_exit(void)
 {
-       return phy_drivers_unregister(smsc_phy_driver,
-               ARRAY_SIZE(smsc_phy_driver));
+       phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
 }
 
 MODULE_DESCRIPTION("SMSC PHY driver");
index 14372c65a7e8209b5f97da6416ef80d1f41a522c..5dc0935da99c52a07ba7c09da27503b8ee0b2695 100644 (file)
@@ -319,8 +319,7 @@ static int __init vsc82xx_init(void)
 
 static void __exit vsc82xx_exit(void)
 {
-       return phy_drivers_unregister(vsc82xx_driver,
-               ARRAY_SIZE(vsc82xx_driver));
+       phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
 }
 
 module_init(vsc82xx_init);
index e3923ebb693fccc276db45de7c7d9551ee6bc208..91d6c1272fcf0ae4a655fa74ca6b91fb578a6108 100644 (file)
@@ -757,7 +757,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                err = get_filter(argp, &code);
                if (err >= 0) {
-                       struct sock_fprog fprog = {
+                       struct sock_fprog_kern fprog = {
                                .len = err,
                                .filter = code,
                        };
@@ -778,7 +778,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                err = get_filter(argp, &code);
                if (err >= 0) {
-                       struct sock_fprog fprog = {
+                       struct sock_fprog_kern fprog = {
                                .len = err,
                                .filter = code,
                        };
index a8497183ff8b079985b7930fd5eb26dc9fb44813..dac7a0d9bb46e5d9d2385250f990a2a0acf5996c 100644 (file)
@@ -494,7 +494,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
        ndev->mtu = RIO_MAX_MSG_SIZE - 14;
        ndev->features = NETIF_F_LLTX;
        SET_NETDEV_DEV(ndev, &mport->dev);
-       SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
+       ndev->ethtool_ops = &rionet_ethtool_ops;
 
        spin_lock_init(&rnet->lock);
        spin_lock_init(&rnet->tx_lock);
index cc70ecfc70626789183e462c8b51d13f0c7fc8aa..ad4a94e9ff57c77574820fe3e188b12986feff55 100644 (file)
@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
        if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
                return;
 
-       spin_lock(&sl->lock);
+       spin_lock_bh(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-               spin_unlock(&sl->lock);
+               spin_unlock_bh(&sl->lock);
                sl_unlock(sl);
                return;
        }
@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
        actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
-       spin_unlock(&sl->lock);
+       spin_unlock_bh(&sl->lock);
 }
 
 static void sl_tx_timeout(struct net_device *dev)
index 33008c1d1d678756ae8fbae13238763c24cc603e..9a9ce8debefaaa5c70ac045f0ed7ab11034644cb 100644 (file)
@@ -968,7 +968,7 @@ static void team_port_disable(struct team *team,
 static void __team_compute_features(struct team *team)
 {
        struct team_port *port;
-       u32 vlan_features = TEAM_VLAN_FEATURES;
+       u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
 
@@ -2834,8 +2834,10 @@ static int team_device_event(struct notifier_block *unused,
        case NETDEV_UP:
                if (netif_carrier_ok(dev))
                        team_port_change_check(port, true);
+               break;
        case NETDEV_DOWN:
                team_port_change_check(port, false);
+               break;
        case NETDEV_CHANGE:
                if (netif_running(port->dev))
                        team_port_change_check(port,
index dbde3412ee5eafdeb39cd238e8af4f19368917c6..a58dfebb5512326db7065a433c37676c99820407 100644 (file)
@@ -49,7 +49,7 @@ struct lb_port_mapping {
 struct lb_priv_ex {
        struct team *team;
        struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
-       struct sock_fprog *orig_fprog;
+       struct sock_fprog_kern *orig_fprog;
        struct {
                unsigned int refresh_interval; /* in tenths of second */
                struct delayed_work refresh_dw;
@@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
        return 0;
 }
 
-static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
+static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
                          const void *data)
 {
-       struct sock_fprog *fprog;
+       struct sock_fprog_kern *fprog;
        struct sock_filter *filter = (struct sock_filter *) data;
 
        if (data_len % sizeof(struct sock_filter))
                return -EINVAL;
-       fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
+       fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
        if (!fprog)
                return -ENOMEM;
        fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
        return 0;
 }
 
-static void __fprog_destroy(struct sock_fprog *fprog)
+static void __fprog_destroy(struct sock_fprog_kern *fprog)
 {
        kfree(fprog->filter);
        kfree(fprog);
@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
        struct lb_priv *lb_priv = get_lb_priv(team);
        struct sk_filter *fp = NULL;
        struct sk_filter *orig_fp;
-       struct sock_fprog *fprog = NULL;
+       struct sock_fprog_kern *fprog = NULL;
        int err;
 
        if (ctx->data.bin_val.len) {
index ee328ba101e72a9e3f150d8c24068182be86abc5..98bad1fb1bfb1ce66ea4219c2e767256f05d6cbb 100644 (file)
@@ -498,12 +498,12 @@ static void tun_detach_all(struct net_device *dev)
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
-               wake_up_all(&tfile->wq.wait);
+               tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
                --tun->numqueues;
        }
        list_for_each_entry(tfile, &tun->disabled, next) {
-               wake_up_all(&tfile->wq.wait);
+               tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
        }
        BUG_ON(tun->numqueues != 0);
@@ -807,8 +807,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Notify and wake up reader process */
        if (tfile->flags & TUN_FASYNC)
                kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
-       wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
-                                  POLLRDNORM | POLLRDBAND);
+       tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 
        rcu_read_unlock();
        return NETDEV_TX_OK;
@@ -965,7 +964,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 
        tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
 
-       poll_wait(file, &tfile->wq.wait, wait);
+       poll_wait(file, sk_sleep(sk), wait);
 
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
@@ -1330,47 +1329,26 @@ done:
 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
                           const struct iovec *iv, ssize_t len, int noblock)
 {
-       DECLARE_WAITQUEUE(wait, current);
        struct sk_buff *skb;
        ssize_t ret = 0;
+       int peeked, err, off = 0;
 
        tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
-       if (unlikely(!noblock))
-               add_wait_queue(&tfile->wq.wait, &wait);
-       while (len) {
-               if (unlikely(!noblock))
-                       current->state = TASK_INTERRUPTIBLE;
+       if (!len)
+               return ret;
 
-               /* Read frames from the queue */
-               if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
-                       if (noblock) {
-                               ret = -EAGAIN;
-                               break;
-                       }
-                       if (signal_pending(current)) {
-                               ret = -ERESTARTSYS;
-                               break;
-                       }
-                       if (tun->dev->reg_state != NETREG_REGISTERED) {
-                               ret = -EIO;
-                               break;
-                       }
-
-                       /* Nothing to read, let's sleep */
-                       schedule();
-                       continue;
-               }
+       if (tun->dev->reg_state != NETREG_REGISTERED)
+               return -EIO;
 
+       /* Read frames from queue */
+       skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
+                                 &peeked, &off, &err);
+       if (skb) {
                ret = tun_put_user(tun, tfile, skb, iv, len);
                kfree_skb(skb);
-               break;
-       }
-
-       if (unlikely(!noblock)) {
-               current->state = TASK_RUNNING;
-               remove_wait_queue(&tfile->wq.wait, &wait);
-       }
+       } else
+               ret = err;
 
        return ret;
 }
@@ -2199,8 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
        tfile->flags = 0;
        tfile->ifindex = 0;
 
-       rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
        init_waitqueue_head(&tfile->wq.wait);
+       RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
 
        tfile->socket.file = file;
        tfile->socket.ops = &tun_socket_ops;
index 630caf48f63aab7c2023d4cadb7044d23a2af86d..8cfc3bb0c6a672a288784ab0dd5f09597265c39d 100644 (file)
@@ -793,7 +793,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
 
        netdev->netdev_ops = &catc_netdev_ops;
        netdev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
 
        catc->usbdev = usbdev;
        catc->netdev = netdev;
index c9f3281506af568e534a47789418b466a0a77adc..5ee7a1dbc023833a5907a6db48695600bb728fd9 100644 (file)
 #include <net/ipv6.h>
 #include <net/addrconf.h>
 
+/* alternative VLAN for IP session 0 if not untagged */
+#define MBIM_IPS0_VID  4094
+
 /* driver specific data - must match cdc_ncm usage */
 struct cdc_mbim_state {
        struct cdc_ncm_ctx *ctx;
        atomic_t pmcount;
        struct usb_driver *subdriver;
-       struct usb_interface *control;
-       struct usb_interface *data;
+       unsigned long _unused;
+       unsigned long flags;
+};
+
+/* flags for the cdc_mbim_state.flags field */
+enum cdc_mbim_flags {
+       FLAG_IPS0_VLAN = 1 << 0,        /* IP session 0 is tagged  */
 };
 
 /* using a counter to merge subdriver requests with our own into a combined state */
@@ -62,16 +70,91 @@ static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
        return cdc_mbim_manage_power(dev, status);
 }
 
+static int cdc_mbim_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       struct cdc_mbim_state *info = (void *)&dev->data;
+
+       /* creation of this VLAN is a request to tag IP session 0 */
+       if (vid == MBIM_IPS0_VID)
+               info->flags |= FLAG_IPS0_VLAN;
+       else
+               if (vid >= 512) /* we don't map these to MBIM session */
+                       return -EINVAL;
+       return 0;
+}
+
+static int cdc_mbim_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       struct cdc_mbim_state *info = (void *)&dev->data;
+
+       /* this is a request for an untagged IP session 0 */
+       if (vid == MBIM_IPS0_VID)
+               info->flags &= ~FLAG_IPS0_VLAN;
+       return 0;
+}
+
+static const struct net_device_ops cdc_mbim_netdev_ops = {
+       .ndo_open             = usbnet_open,
+       .ndo_stop             = usbnet_stop,
+       .ndo_start_xmit       = usbnet_start_xmit,
+       .ndo_tx_timeout       = usbnet_tx_timeout,
+       .ndo_change_mtu       = usbnet_change_mtu,
+       .ndo_set_mac_address  = eth_mac_addr,
+       .ndo_validate_addr    = eth_validate_addr,
+       .ndo_vlan_rx_add_vid  = cdc_mbim_rx_add_vid,
+       .ndo_vlan_rx_kill_vid = cdc_mbim_rx_kill_vid,
+};
+
+/* Change the control interface altsetting and update the .driver_info
+ * pointer if the matching entry after changing class codes points to
+ * a different struct
+ */
+static int cdc_mbim_set_ctrlalt(struct usbnet *dev, struct usb_interface *intf, u8 alt)
+{
+       struct usb_driver *driver = to_usb_driver(intf->dev.driver);
+       const struct usb_device_id *id;
+       struct driver_info *info;
+       int ret;
+
+       ret = usb_set_interface(dev->udev,
+                               intf->cur_altsetting->desc.bInterfaceNumber,
+                               alt);
+       if (ret)
+               return ret;
+
+       id = usb_match_id(intf, driver->id_table);
+       if (!id)
+               return -ENODEV;
+
+       info = (struct driver_info *)id->driver_info;
+       if (info != dev->driver_info) {
+               dev_dbg(&intf->dev, "driver_info updated to '%s'\n",
+                       info->description);
+               dev->driver_info = info;
+       }
+       return 0;
+}
 
 static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct cdc_ncm_ctx *ctx;
        struct usb_driver *subdriver = ERR_PTR(-ENODEV);
        int ret = -ENODEV;
-       u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf);
+       u8 data_altsetting = 1;
        struct cdc_mbim_state *info = (void *)&dev->data;
 
-       /* Probably NCM, defer for cdc_ncm_bind */
+       /* should we change control altsetting on an NCM/MBIM function? */
+       if (cdc_ncm_select_altsetting(intf) == CDC_NCM_COMM_ALTSETTING_MBIM) {
+               data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
+               ret = cdc_mbim_set_ctrlalt(dev, intf, CDC_NCM_COMM_ALTSETTING_MBIM);
+               if (ret)
+                       goto err;
+               ret = -ENODEV;
+       }
+
+       /* we will hit this for NCM/MBIM functions if prefer_mbim is false */
        if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
                goto err;
 
@@ -101,7 +184,10 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->flags |= IFF_NOARP;
 
        /* no need to put the VLAN tci in the packet headers */
-       dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX;
+       dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER;
+
+       /* monitor VLAN additions and removals */
+       dev->net->netdev_ops = &cdc_mbim_netdev_ops;
 err:
        return ret;
 }
@@ -120,6 +206,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
        cdc_ncm_unbind(dev, intf);
 }
 
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+       switch (proto) {
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+               return true;
+       }
+       return false;
+}
 
 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +224,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
        struct cdc_ncm_ctx *ctx = info->ctx;
        __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
        u16 tci = 0;
+       bool is_ip;
        u8 *c;
 
        if (!ctx)
@@ -137,29 +234,50 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                if (skb->len <= ETH_HLEN)
                        goto error;
 
+               /* Some applications using e.g. packet sockets will
+                * bypass the VLAN acceleration and create tagged
+                * ethernet frames directly.  We primarily look for
+                * the accelerated out-of-band tag, but fall back if
+                * required
+                */
+               skb_reset_mac_header(skb);
+               if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+                   __vlan_get_tag(skb, &tci) == 0) {
+                       is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+                       skb_pull(skb, VLAN_ETH_HLEN);
+               } else {
+                       is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+                       skb_pull(skb, ETH_HLEN);
+               }
+
+               /* Is IP session <0> tagged too? */
+               if (info->flags & FLAG_IPS0_VLAN) {
+                       /* drop all untagged packets */
+                       if (!tci)
+                               goto error;
+                       /* map MBIM_IPS0_VID to IPS<0> */
+                       if (tci == MBIM_IPS0_VID)
+                               tci = 0;
+               }
+
                /* mapping VLANs to MBIM sessions:
-                *   no tag     => IPS session <0>
+                *   no tag     => IPS session <0> if !FLAG_IPS0_VLAN
                 *   1 - 255    => IPS session <vlanid>
                 *   256 - 511  => DSS session <vlanid - 256>
-                *   512 - 4095 => unsupported, drop
+                *   512 - 4093 => unsupported, drop
+                *   4094       => IPS session <0> if FLAG_IPS0_VLAN
                 */
-               vlan_get_tag(skb, &tci);
 
                switch (tci & 0x0f00) {
                case 0x0000: /* VLAN ID 0 - 255 */
-                       /* verify that datagram is IPv4 or IPv6 */
-                       skb_reset_mac_header(skb);
-                       switch (eth_hdr(skb)->h_proto) {
-                       case htons(ETH_P_IP):
-                       case htons(ETH_P_IPV6):
-                               break;
-                       default:
+                       if (!is_ip)
                                goto error;
-                       }
                        c = (u8 *)&sign;
                        c[3] = tci;
                        break;
                case 0x0100: /* VLAN ID 256 - 511 */
+                       if (is_ip)
+                               goto error;
                        sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
                        c = (u8 *)&sign;
                        c[3] = tci;
@@ -169,7 +287,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                                  "unsupported tci=0x%04x\n", tci);
                        goto error;
                }
-               skb_pull(skb, ETH_HLEN);
        }
 
        spin_lock_bh(&ctx->mtx);
@@ -204,17 +321,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                return;
 
        /* need to send the NA on the VLAN dev, if any */
-       if (tci)
-               netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
-                                             tci);
-       else
+       rcu_read_lock();
+       if (tci) {
+               netdev = __vlan_find_dev_deep_rcu(dev->net, htons(ETH_P_8021Q),
+                                                 tci);
+               if (!netdev) {
+                       rcu_read_unlock();
+                       return;
+               }
+       } else {
                netdev = dev->net;
-       if (!netdev)
-               return;
+       }
+       dev_hold(netdev);
+       rcu_read_unlock();
 
        in6_dev = in6_dev_get(netdev);
        if (!in6_dev)
-               return;
+               goto out;
        is_router = !!in6_dev->cnf.forwarding;
        in6_dev_put(in6_dev);
 
@@ -224,6 +347,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                                 true /* solicited */,
                                 false /* override */,
                                 true /* inc_opt */);
+out:
+       dev_put(netdev);
 }
 
 static bool is_neigh_solicit(u8 *buf, size_t len)
@@ -243,7 +368,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
        __be16 proto = htons(ETH_P_802_3);
        struct sk_buff *skb = NULL;
 
-       if (tci < 256) { /* IPS session? */
+       if (tci < 256 || tci == MBIM_IPS0_VID) { /* IPS session? */
                if (len < sizeof(struct iphdr))
                        goto err;
 
@@ -295,6 +420,7 @@ static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
        struct usb_cdc_ncm_dpe16 *dpe16;
        int ndpoffset;
        int loopcount = 50; /* arbitrary max preventing infinite loop */
+       u32 payload = 0;
        u8 *c;
        u16 tci;
 
@@ -313,6 +439,9 @@ next_ndp:
        case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
                c = (u8 *)&ndp16->dwSignature;
                tci = c[3];
+               /* tag IPS<0> packets too if MBIM_IPS0_VID exists */
+               if (!tci && info->flags & FLAG_IPS0_VLAN)
+                       tci = MBIM_IPS0_VID;
                break;
        case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
                c = (u8 *)&ndp16->dwSignature;
@@ -354,6 +483,7 @@ next_ndp:
                        if (!skb)
                                goto error;
                        usbnet_skb_return(dev, skb);
+                       payload += len; /* count payload bytes in this NTB */
                }
        }
 err_ndp:
@@ -362,6 +492,10 @@ err_ndp:
        if (ndpoffset && loopcount--)
                goto next_ndp;
 
+       /* update stats */
+       ctx->rx_overhead += skb_in->len - payload;
+       ctx->rx_ntbs++;
+
        return 1;
 error:
        return 0;
index 549dbac710ed5f576f84cedf375df8588e5a7dc5..93c9ca9924ebe3c29078a7ffe269ba490a4f6dbc 100644 (file)
@@ -65,19 +65,270 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
 static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
 static struct usb_driver cdc_ncm_driver;
 
-static int cdc_ncm_setup(struct usbnet *dev)
+struct cdc_ncm_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int sizeof_stat;
+       int stat_offset;
+};
+
+#define CDC_NCM_STAT(str, m) { \
+               .stat_string = str, \
+               .sizeof_stat = sizeof(((struct cdc_ncm_ctx *)0)->m), \
+               .stat_offset = offsetof(struct cdc_ncm_ctx, m) }
+#define CDC_NCM_SIMPLE_STAT(m) CDC_NCM_STAT(__stringify(m), m)
+
+static const struct cdc_ncm_stats cdc_ncm_gstrings_stats[] = {
+       CDC_NCM_SIMPLE_STAT(tx_reason_ntb_full),
+       CDC_NCM_SIMPLE_STAT(tx_reason_ndp_full),
+       CDC_NCM_SIMPLE_STAT(tx_reason_timeout),
+       CDC_NCM_SIMPLE_STAT(tx_reason_max_datagram),
+       CDC_NCM_SIMPLE_STAT(tx_overhead),
+       CDC_NCM_SIMPLE_STAT(tx_ntbs),
+       CDC_NCM_SIMPLE_STAT(rx_overhead),
+       CDC_NCM_SIMPLE_STAT(rx_ntbs),
+};
+
+static int cdc_ncm_get_sset_count(struct net_device __always_unused *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(cdc_ncm_gstrings_stats);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void cdc_ncm_get_ethtool_stats(struct net_device *netdev,
+                                   struct ethtool_stats __always_unused *stats,
+                                   u64 *data)
 {
+       struct usbnet *dev = netdev_priv(netdev);
        struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
-       u32 val;
-       u8 flags;
-       u8 iface_no;
-       int err;
-       int eth_hlen;
-       u16 mbim_mtu;
-       u16 ntb_fmt_supported;
-       __le16 max_datagram_size;
+       int i;
+       char *p = NULL;
 
-       iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+       for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
+               p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset;
+               data[i] = (cdc_ncm_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       }
+}
+
+static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 stringset, u8 *data)
+{
+       u8 *p = data;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
+                       memcpy(p, cdc_ncm_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+       }
+}
+
+static int cdc_ncm_get_coalesce(struct net_device *netdev,
+                               struct ethtool_coalesce *ec)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+       /* assuming maximum sized dgrams and ignoring NDPs */
+       ec->rx_max_coalesced_frames = ctx->rx_max / ctx->max_datagram_size;
+       ec->tx_max_coalesced_frames = ctx->tx_max / ctx->max_datagram_size;
+
+       /* the timer will fire CDC_NCM_TIMER_PENDING_CNT times in a row */
+       ec->tx_coalesce_usecs = ctx->timer_interval / (NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT);
+       return 0;
+}
+
+static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
+
+static int cdc_ncm_set_coalesce(struct net_device *netdev,
+                               struct ethtool_coalesce *ec)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u32 new_rx_max = ctx->rx_max;
+       u32 new_tx_max = ctx->tx_max;
+
+       /* assuming maximum sized dgrams and a single NDP */
+       if (ec->rx_max_coalesced_frames)
+               new_rx_max = ec->rx_max_coalesced_frames * ctx->max_datagram_size;
+       if (ec->tx_max_coalesced_frames)
+               new_tx_max = ec->tx_max_coalesced_frames * ctx->max_datagram_size;
+
+       if (ec->tx_coalesce_usecs &&
+           (ec->tx_coalesce_usecs < CDC_NCM_TIMER_INTERVAL_MIN * CDC_NCM_TIMER_PENDING_CNT ||
+            ec->tx_coalesce_usecs > CDC_NCM_TIMER_INTERVAL_MAX * CDC_NCM_TIMER_PENDING_CNT))
+               return -EINVAL;
+
+       spin_lock_bh(&ctx->mtx);
+       ctx->timer_interval = ec->tx_coalesce_usecs * (NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT);
+       if (!ctx->timer_interval)
+               ctx->tx_timer_pending = 0;
+       spin_unlock_bh(&ctx->mtx);
+
+       /* inform device of new values */
+       if (new_rx_max != ctx->rx_max || new_tx_max != ctx->tx_max)
+               cdc_ncm_update_rxtx_max(dev, new_rx_max, new_tx_max);
+       return 0;
+}
+
+static const struct ethtool_ops cdc_ncm_ethtool_ops = {
+       .get_settings      = usbnet_get_settings,
+       .set_settings      = usbnet_set_settings,
+       .get_link          = usbnet_get_link,
+       .nway_reset        = usbnet_nway_reset,
+       .get_drvinfo       = usbnet_get_drvinfo,
+       .get_msglevel      = usbnet_get_msglevel,
+       .set_msglevel      = usbnet_set_msglevel,
+       .get_ts_info       = ethtool_op_get_ts_info,
+       .get_sset_count    = cdc_ncm_get_sset_count,
+       .get_strings       = cdc_ncm_get_strings,
+       .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
+       .get_coalesce      = cdc_ncm_get_coalesce,
+       .set_coalesce      = cdc_ncm_set_coalesce,
+};
+
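
To illustrate the coalesce conversions implemented above (the concrete numbers are assumptions for the example, not values from the patch): with max_datagram_size = 1514, asking for rx_max_coalesced_frames = 32 maps to an NTB input size of 32 * 1514 = 48448 bytes, and tx_coalesce_usecs scales to a timer_interval in nanoseconds split across CDC_NCM_TIMER_PENDING_CNT timer firings. In user space this surfaces through ethtool -c/-C, and the new counters through ethtool -S.

/* illustrative helpers mirroring cdc_ncm_get/set_coalesce() above */
static u32 sketch_frames_to_ntb_size(u32 frames, u32 max_datagram_size)
{
        return frames * max_datagram_size;      /* e.g. 32 * 1514 = 48448 */
}

static u32 sketch_usecs_to_timer_interval(u32 tx_coalesce_usecs)
{
        /* the tx timer fires CDC_NCM_TIMER_PENDING_CNT times per window */
        return tx_coalesce_usecs * (NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT);
}
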
+/* handle rx_max and tx_max changes */
+static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+       u32 val, max, min;
+
+       /* clamp new_rx to sane values */
+       min = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+       max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_RX, le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
+
+       /* dwNtbInMaxSize spec violation? Use MIN size for both limits */
+       if (max < min) {
+               dev_warn(&dev->intf->dev, "dwNtbInMaxSize=%u is too small. Using %u\n",
+                        le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize), min);
+               max = min;
+       }
+
+       val = clamp_t(u32, new_rx, min, max);
+       if (val != new_rx) {
+               dev_dbg(&dev->intf->dev, "rx_max must be in the [%u, %u] range. Using %u\n",
+                       min, max, val);
+       }
+
+       /* usbnet uses these values for sizing rx queues */
+       dev->rx_urb_size = val;
+
+       /* inform device about NTB input size changes */
+       if (val != ctx->rx_max) {
+               __le32 dwNtbInMaxSize = cpu_to_le32(val);
+
+               dev_info(&dev->intf->dev, "setting rx_max = %u\n", val);
+
+               /* need to unlink rx urbs before increasing buffer size */
+               if (netif_running(dev->net) && dev->rx_urb_size > ctx->rx_max)
+                       usbnet_unlink_rx_urbs(dev);
+
+               /* tell device to use new size */
+               if (usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
+                                    USB_TYPE_CLASS | USB_DIR_OUT
+                                    | USB_RECIP_INTERFACE,
+                                    0, iface_no, &dwNtbInMaxSize, 4) < 0)
+                       dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
+               else
+                       ctx->rx_max = val;
+       }
+
+       /* clamp new_tx to sane values */
+       min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+       max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+
+       /* some devices set dwNtbOutMaxSize too low for the above default */
+       min = min(min, max);
+
+       val = clamp_t(u32, new_tx, min, max);
+       if (val != new_tx) {
+               dev_dbg(&dev->intf->dev, "tx_max must be in the [%u, %u] range. Using %u\n",
+                       min, max, val);
+       }
+       if (val != ctx->tx_max)
+               dev_info(&dev->intf->dev, "setting tx_max = %u\n", val);
+
+       /* Adding a pad byte here if necessary simplifies the handling
+        * in cdc_ncm_fill_tx_frame, making tx_max always represent
+        * the real skb max size.
+        *
+        * We cannot use dev->maxpacket here because this is called from
+        * .bind which is called before usbnet sets up dev->maxpacket
+        */
+       if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+           val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+               val++;
+
+       /* we might need to flush any pending tx buffers if running */
+       if (netif_running(dev->net) && val > ctx->tx_max) {
+               netif_tx_lock_bh(dev->net);
+               usbnet_start_xmit(NULL, dev->net);
+               ctx->tx_max = val;
+               netif_tx_unlock_bh(dev->net);
+       } else {
+               ctx->tx_max = val;
+       }
+
+       dev->hard_mtu = ctx->tx_max;
+
+       /* max qlen depends on hard_mtu and rx_urb_size */
+       usbnet_update_max_qlen(dev);
+
+       /* never pad more than 3 full USB packets per transfer */
+       ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
+                                 CDC_NCM_MIN_TX_PKT, ctx->tx_max);
+}
+
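
A worked example for the tx_max handling above, with assumed numbers (512-byte bulk-OUT packets, dwNtbOutMaxSize = 32768): a clamped value of 16384 is an exact multiple of the packet size and is not the device limit itself, so one pad byte is added and tx_max becomes 16385; per the comment in the code, this keeps tx_max equal to the real maximum skb size handled later in cdc_ncm_fill_tx_frame().

/* illustrative only: mirrors the pad-byte rule in cdc_ncm_update_rxtx_max() */
static u32 sketch_pad_tx_max(u32 val, u32 ntb_out_max, u32 maxpacket)
{
        if (val != ntb_out_max && val % maxpacket == 0)
                val++;                  /* e.g. 16384 -> 16385 */
        return val;
}
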
+/* helpers for NCM and MBIM differences */
+static u8 cdc_ncm_flags(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+       if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
+               return ctx->mbim_desc->bmNetworkCapabilities;
+       if (ctx->func_desc)
+               return ctx->func_desc->bmNetworkCapabilities;
+       return 0;
+}
+
+static int cdc_ncm_eth_hlen(struct usbnet *dev)
+{
+       if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
+               return 0;
+       return ETH_HLEN;
+}
+
+static u32 cdc_ncm_min_dgram_size(struct usbnet *dev)
+{
+       if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
+               return CDC_MBIM_MIN_DATAGRAM_SIZE;
+       return CDC_NCM_MIN_DATAGRAM_SIZE;
+}
+
+static u32 cdc_ncm_max_dgram_size(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+       if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
+               return le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+       if (ctx->ether_desc)
+               return le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+       return CDC_NCM_MAX_DATAGRAM_SIZE;
+}
+
+/* initial one-time device setup.  MUST be called with the data interface
+ * in altsetting 0
+ */
+static int cdc_ncm_init(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+       int err;
 
        err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
                              USB_TYPE_CLASS | USB_DIR_IN
@@ -89,7 +340,36 @@ static int cdc_ncm_setup(struct usbnet *dev)
                return err; /* GET_NTB_PARAMETERS is required */
        }
 
-       /* read correct set of parameters according to device mode */
+       /* set CRC Mode */
+       if (cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_CRC_MODE) {
+               dev_dbg(&dev->intf->dev, "Setting CRC mode off\n");
+               err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
+                                      USB_TYPE_CLASS | USB_DIR_OUT
+                                      | USB_RECIP_INTERFACE,
+                                      USB_CDC_NCM_CRC_NOT_APPENDED,
+                                      iface_no, NULL, 0);
+               if (err < 0)
+                       dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
+       }
+
+       /* set NTB format, if both formats are supported.
+        *
+        * "The host shall only send this command while the NCM Data
+        *  Interface is in alternate setting 0."
+        */
+       if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
+                                               USB_CDC_NCM_NTB32_SUPPORTED) {
+               dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
+               err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+                                      USB_TYPE_CLASS | USB_DIR_OUT
+                                      | USB_RECIP_INTERFACE,
+                                      USB_CDC_NCM_NTB16_FORMAT,
+                                      iface_no, NULL, 0);
+               if (err < 0)
+                       dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
+       }
+
+       /* set initial device values */
        ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
        ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
        ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
@@ -97,72 +377,79 @@ static int cdc_ncm_setup(struct usbnet *dev)
        ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
        /* devices prior to NCM Errata shall set this field to zero */
        ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
-       ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
-
-       /* there are some minor differences in NCM and MBIM defaults */
-       if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
-               if (!ctx->mbim_desc)
-                       return -EINVAL;
-               eth_hlen = 0;
-               flags = ctx->mbim_desc->bmNetworkCapabilities;
-               ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
-               if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
-                       ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
-       } else {
-               if (!ctx->func_desc)
-                       return -EINVAL;
-               eth_hlen = ETH_HLEN;
-               flags = ctx->func_desc->bmNetworkCapabilities;
-               ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-               if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
-                       ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-       }
-
-       /* common absolute max for NCM and MBIM */
-       if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
-               ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
 
        dev_dbg(&dev->intf->dev,
                "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
                ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
-               ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
+               ctx->tx_ndp_modulus, ctx->tx_max_datagrams, cdc_ncm_flags(dev));
 
        /* max count of tx datagrams */
        if ((ctx->tx_max_datagrams == 0) ||
                        (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
                ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
 
-       /* verify maximum size of received NTB in bytes */
-       if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
-               dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
-                       USB_CDC_NCM_NTB_MIN_IN_SIZE);
-               ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
-       }
+       /* set up maximum NDP size */
+       ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
 
-       if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
-               dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n",
-                       CDC_NCM_NTB_MAX_SIZE_RX);
-               ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
-       }
+       /* initial coalescing timer interval */
+       ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
 
-       /* inform device about NTB input size changes */
-       if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
-               __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+       return 0;
+}
 
-               err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
-                                      USB_TYPE_CLASS | USB_DIR_OUT
-                                      | USB_RECIP_INTERFACE,
-                                      0, iface_no, &dwNtbInMaxSize, 4);
-               if (err < 0)
-                       dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
+/* set a new max datagram size */
+static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+       __le16 max_datagram_size;
+       u16 mbim_mtu;
+       int err;
+
+       /* set default based on descriptors */
+       ctx->max_datagram_size = clamp_t(u32, new_size,
+                                        cdc_ncm_min_dgram_size(dev),
+                                        CDC_NCM_MAX_DATAGRAM_SIZE);
+
+       /* inform the device about the selected Max Datagram Size? */
+       if (!(cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
+               goto out;
+
+       /* read current mtu value from device */
+       err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+                             USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+                             0, iface_no, &max_datagram_size, 2);
+       if (err < 0) {
+               dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+               goto out;
        }
 
-       /* verify maximum size of transmitted NTB in bytes */
-       if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) {
-               dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
-                       CDC_NCM_NTB_MAX_SIZE_TX);
-               ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+       if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
+               goto out;
+
+       max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+       err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
+                              USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
+                              0, iface_no, &max_datagram_size, 2);
+       if (err < 0)
+               dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+
+out:
+       /* set MTU to max supported by the device if necessary */
+       dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev));
+
+       /* do not exceed operator preferred MTU */
+       if (ctx->mbim_extended_desc) {
+               mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
+               if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
+                       dev->net->mtu = mbim_mtu;
        }
+}
+
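/* Illustrative sketch, not part of the diff: how the link MTU falls out of
 * cdc_ncm_set_dgram_size() above.  The datagram size is clamped to the
 * device limits, the Ethernet header length (zero for MBIM) is subtracted,
 * and a non-zero operator-preferred MBIM wMTU wins when it is smaller.
 * The helper name and parameters are hypothetical.
 */
static int ncm_sketch_derive_mtu(int cur_mtu, u32 max_datagram_size,
				 u32 eth_hlen, u16 mbim_wmtu)
{
	int mtu = min_t(int, cur_mtu, max_datagram_size - eth_hlen);

	if (mbim_wmtu != 0 && mbim_wmtu < mtu)
		mtu = mbim_wmtu;
	return mtu;
}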
+static void cdc_ncm_fix_modulus(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u32 val;
 
        /*
         * verify that the structure alignment is:
@@ -199,68 +486,31 @@ static int cdc_ncm_setup(struct usbnet *dev)
        }
 
        /* adjust TX-remainder according to NCM specification. */
-       ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) &
+       ctx->tx_remainder = ((ctx->tx_remainder - cdc_ncm_eth_hlen(dev)) &
                             (ctx->tx_modulus - 1));
+}
 
-       /* additional configuration */
-
-       /* set CRC Mode */
-       if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
-               err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
-                                      USB_TYPE_CLASS | USB_DIR_OUT
-                                      | USB_RECIP_INTERFACE,
-                                      USB_CDC_NCM_CRC_NOT_APPENDED,
-                                      iface_no, NULL, 0);
-               if (err < 0)
-                       dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
-       }
-
-       /* set NTB format, if both formats are supported */
-       if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
-               err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
-                                      USB_TYPE_CLASS | USB_DIR_OUT
-                                      | USB_RECIP_INTERFACE,
-                                      USB_CDC_NCM_NTB16_FORMAT,
-                                      iface_no, NULL, 0);
-               if (err < 0)
-                       dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
-       }
-
-       /* inform the device about the selected Max Datagram Size */
-       if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
-               goto out;
-
-       /* read current mtu value from device */
-       err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
-                             USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
-                             0, iface_no, &max_datagram_size, 2);
-       if (err < 0) {
-               dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
-               goto out;
-       }
-
-       if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
-               goto out;
+static int cdc_ncm_setup(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u32 def_rx, def_tx;
 
-       max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
-       err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
-                              USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
-                              0, iface_no, &max_datagram_size, 2);
-       if (err < 0)
-               dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+       /* be conservative when selecting initial buffer size to
+        * increase the number of hosts this will work for
+        */
+       def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX,
+                      le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
+       def_tx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_TX,
+                      le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
 
-out:
-       /* set MTU to max supported by the device if necessary */
-       if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
-               dev->net->mtu = ctx->max_datagram_size - eth_hlen;
+       /* clamp rx_max and tx_max and inform device */
+       cdc_ncm_update_rxtx_max(dev, def_rx, def_tx);
 
-       /* do not exceed operater preferred MTU */
-       if (ctx->mbim_extended_desc) {
-               mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
-               if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
-                       dev->net->mtu = mbim_mtu;
-       }
+       /* sanitize the modulus and remainder values */
+       cdc_ncm_fix_modulus(dev);
 
+       /* set max datagram size */
+       cdc_ncm_set_dgram_size(dev, cdc_ncm_max_dgram_size(dev));
        return 0;
 }
 
@@ -424,10 +674,21 @@ advance:
        }
 
        /* check if we got everything */
-       if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) {
-               dev_dbg(&intf->dev, "CDC descriptors missing\n");
+       if (!ctx->data) {
+               dev_dbg(&intf->dev, "CDC Union missing and no IAD found\n");
                goto error;
        }
+       if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) {
+               if (!ctx->mbim_desc) {
+                       dev_dbg(&intf->dev, "MBIM functional descriptor missing\n");
+                       goto error;
+               }
+       } else {
+               if (!ctx->ether_desc || !ctx->func_desc) {
+                       dev_dbg(&intf->dev, "NCM or ECM functional descriptors missing\n");
+                       goto error;
+               }
+       }
 
        /* claim data interface, if different from control */
        if (ctx->data != ctx->control) {
@@ -447,8 +708,8 @@ advance:
                goto error2;
        }
 
-       /* initialize data interface */
-       if (cdc_ncm_setup(dev))
+       /* initialize basic device settings */
+       if (cdc_ncm_init(dev))
                goto error2;
 
        /* configure data interface */
@@ -477,18 +738,11 @@ advance:
                dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
        }
 
-       /* usbnet use these values for sizing tx/rx queues */
-       dev->hard_mtu = ctx->tx_max;
-       dev->rx_urb_size = ctx->rx_max;
+       /* finish setting up the device specific data */
+       cdc_ncm_setup(dev);
 
-       /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
-        * outside the sane range. Adding a pad byte here if necessary
-        * simplifies the handling in cdc_ncm_fill_tx_frame, making
-        * tx_max always represent the real skb max size.
-        */
-       if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
-           ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
-               ctx->tx_max++;
+       /* override ethtool_ops */
+       dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
 
        return 0;
 
@@ -541,10 +795,10 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 }
 EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
 
-/* Select the MBIM altsetting iff it is preferred and available,
- * returning the number of the corresponding data interface altsetting
+/* Return the number of the MBIM control interface altsetting iff it
+ * is preferred and available.
  */
-u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
+u8 cdc_ncm_select_altsetting(struct usb_interface *intf)
 {
        struct usb_host_interface *alt;
 
@@ -563,15 +817,15 @@ u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
         *   the rules given in section 6 (USB Device Model) of this
         *   specification."
         */
-       if (prefer_mbim && intf->num_altsetting == 2) {
+       if (intf->num_altsetting < 2)
+               return intf->cur_altsetting->desc.bAlternateSetting;
+
+       if (prefer_mbim) {
                alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
-               if (alt && cdc_ncm_comm_intf_is_mbim(alt) &&
-                   !usb_set_interface(dev->udev,
-                                      intf->cur_altsetting->desc.bInterfaceNumber,
-                                      CDC_NCM_COMM_ALTSETTING_MBIM))
-                       return CDC_NCM_DATA_ALTSETTING_MBIM;
+               if (alt && cdc_ncm_comm_intf_is_mbim(alt))
+                       return CDC_NCM_COMM_ALTSETTING_MBIM;
        }
-       return CDC_NCM_DATA_ALTSETTING_NCM;
+       return CDC_NCM_COMM_ALTSETTING_NCM;
 }
 EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
 
@@ -580,12 +834,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
        int ret;
 
        /* MBIM backwards compatible function? */
-       cdc_ncm_select_altsetting(dev, intf);
-       if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+       if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
                return -ENODEV;
 
-       /* NCM data altsetting is always 1 */
-       ret = cdc_ncm_bind_common(dev, intf, 1);
+       /* The NCM data altsetting is fixed */
+       ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
 
        /*
         * We should get an event when network connection is "connected" or
@@ -628,7 +881,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
        cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
 
        /* verify that there is room for the NDP and the datagram (reserve) */
-       if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE)
+       if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
                return NULL;
 
        /* link to it */
@@ -638,7 +891,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
                nth16->wNdpIndex = cpu_to_le16(skb->len);
 
        /* push a new empty NDP */
-       ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE);
+       ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
        ndp16->dwSignature = sign;
        ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
        return ndp16;
@@ -683,6 +936,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 
                /* count total number of frames in this NTB */
                ctx->tx_curr_frame_num = 0;
+
+               /* reset payload counter for this skb_out */
+               ctx->tx_curr_frame_payload = 0;
        }
 
        for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
@@ -720,6 +976,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                                ctx->tx_rem_sign = sign;
                                skb = NULL;
                                ready2send = 1;
+                               ctx->tx_reason_ntb_full++;      /* count reason for transmitting */
                        }
                        break;
                }
@@ -733,12 +990,14 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
                ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
                memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
+               ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
                dev_kfree_skb_any(skb);
                skb = NULL;
 
                /* send now if this NDP is full */
                if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
                        ready2send = 1;
+                       ctx->tx_reason_ndp_full++;      /* count reason for transmitting */
                        break;
                }
        }
@@ -758,7 +1017,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                ctx->tx_curr_skb = skb_out;
                goto exit_no_skb;
 
-       } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
+       } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) {
                /* wait for more frames */
                /* push variables */
                ctx->tx_curr_skb = skb_out;
@@ -768,11 +1027,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                goto exit_no_skb;
 
        } else {
+               if (n == ctx->tx_max_datagrams)
+                       ctx->tx_reason_max_datagram++;  /* count reason for transmitting */
                /* frame goes out */
                /* variables will be reset at next call */
        }
 
-       /* If collected data size is less or equal CDC_NCM_MIN_TX_PKT
+       /* If collected data size is less than or equal to ctx->min_tx_pkt
         * bytes, we send buffers as it is. If we get more data, it
         * would be more efficient for USB HS mobile device with DMA
         * engine to receive a full size NTB, than canceling DMA
@@ -782,10 +1043,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
         * a ZLP after full sized NTBs.
         */
        if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
-           skb_out->len > CDC_NCM_MIN_TX_PKT)
+           skb_out->len > ctx->min_tx_pkt)
                memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
                       ctx->tx_max - skb_out->len);
-       else if ((skb_out->len % dev->maxpacket) == 0)
+       else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
                *skb_put(skb_out, 1) = 0;       /* force short packet */
 
        /* set final frame length */
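/* Illustrative sketch, not part of the diff: the padding rule applied a few
 * lines above.  Unless the driver advertises FLAG_SEND_ZLP, an NTB already
 * carrying more than min_tx_pkt bytes is padded out to the full negotiated
 * tx_max; otherwise one byte is appended whenever the length would be an
 * exact multiple of the endpoint's wMaxPacketSize, so the transfer ends in
 * a short packet rather than relying on a ZLP.  The helper name and
 * parameters are hypothetical.
 */
static u32 ncm_sketch_tx_pad_bytes(u32 ntb_len, u32 tx_max, u32 min_tx_pkt,
				   u32 usb_max_packet, bool sends_zlp)
{
	if (!sends_zlp && ntb_len > min_tx_pkt)
		return tx_max - ntb_len;	/* pad to full NTB size */

	if (ntb_len < tx_max && ntb_len % usb_max_packet == 0)
		return 1;			/* force a short packet */

	return 0;
}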
@@ -795,11 +1056,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
        /* return skb */
        ctx->tx_curr_skb = NULL;
        dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+       /* keep private stats: framing overhead and number of NTBs */
+       ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
+       ctx->tx_ntbs++;
+
+       /* usbnet has already counted all the framing overhead.
+        * Adjust the stats so that the tx_bytes counter shows real
+        * payload data instead.
+        */
+       dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
+
        return skb_out;
 
 exit_no_skb:
-       /* Start timer, if there is a remaining skb */
-       if (ctx->tx_curr_skb != NULL)
+       /* Start timer, if there is a remaining non-empty skb */
+       if (ctx->tx_curr_skb != NULL && n > 0)
                cdc_ncm_tx_timeout_start(ctx);
        return NULL;
 }
@@ -810,7 +1082,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
        /* start timer, if not already started */
        if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
                hrtimer_start(&ctx->tx_timer,
-                               ktime_set(0, CDC_NCM_TIMER_INTERVAL),
+                               ktime_set(0, ctx->timer_interval),
                                HRTIMER_MODE_REL);
 }
 
@@ -835,6 +1107,7 @@ static void cdc_ncm_txpath_bh(unsigned long param)
                cdc_ncm_tx_timeout_start(ctx);
                spin_unlock_bh(&ctx->mtx);
        } else if (dev->net != NULL) {
+               ctx->tx_reason_timeout++;       /* count reason for transmitting */
                spin_unlock_bh(&ctx->mtx);
                netif_tx_lock_bh(dev->net);
                usbnet_start_xmit(NULL, dev->net);
@@ -970,6 +1243,7 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
        struct usb_cdc_ncm_dpe16 *dpe16;
        int ndpoffset;
        int loopcount = 50; /* arbitrary max preventing infinite loop */
+       u32 payload = 0;
 
        ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
        if (ndpoffset < 0)
@@ -1022,6 +1296,7 @@ next_ndp:
                        skb->data = ((u8 *)skb_in->data) + offset;
                        skb_set_tail_pointer(skb, len);
                        usbnet_skb_return(dev, skb);
+                       payload += len; /* count payload bytes in this NTB */
                }
        }
 err_ndp:
@@ -1030,6 +1305,10 @@ err_ndp:
        if (ndpoffset && loopcount--)
                goto next_ndp;
 
+       /* update stats */
+       ctx->rx_overhead += skb_in->len - payload;
+       ctx->rx_ntbs++;
+
        return 1;
 error:
        return 0;
@@ -1049,14 +1328,14 @@ cdc_ncm_speed_change(struct usbnet *dev,
         */
        if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
                netif_info(dev, link, dev->net,
-                      "%u mbit/s downlink %u mbit/s uplink\n",
-                      (unsigned int)(rx_speed / 1000000U),
-                      (unsigned int)(tx_speed / 1000000U));
+                          "%u mbit/s downlink %u mbit/s uplink\n",
+                          (unsigned int)(rx_speed / 1000000U),
+                          (unsigned int)(tx_speed / 1000000U));
        } else {
                netif_info(dev, link, dev->net,
-                      "%u kbit/s downlink %u kbit/s uplink\n",
-                      (unsigned int)(rx_speed / 1000U),
-                      (unsigned int)(tx_speed / 1000U));
+                          "%u kbit/s downlink %u kbit/s uplink\n",
+                          (unsigned int)(rx_speed / 1000U),
+                          (unsigned int)(tx_speed / 1000U));
        }
 }
 
@@ -1086,11 +1365,10 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
                 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
                 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
                 */
-               ctx->connected = le16_to_cpu(event->wValue);
                netif_info(dev, link, dev->net,
                           "network connection: %sconnected\n",
-                          ctx->connected ? "" : "dis");
-               usbnet_link_change(dev, ctx->connected, 0);
+                          !!event->wValue ? "" : "dis");
+               usbnet_link_change(dev, !!event->wValue, 0);
                break;
 
        case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1110,23 +1388,11 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
        }
 }
 
-static int cdc_ncm_check_connect(struct usbnet *dev)
-{
-       struct cdc_ncm_ctx *ctx;
-
-       ctx = (struct cdc_ncm_ctx *)dev->data[0];
-       if (ctx == NULL)
-               return 1;       /* disconnected */
-
-       return !ctx->connected;
-}
-
 static const struct driver_info cdc_ncm_info = {
        .description = "CDC NCM",
        .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
-       .check_connect = cdc_ncm_check_connect,
        .manage_power = usbnet_manage_power,
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
@@ -1140,7 +1406,6 @@ static const struct driver_info wwan_info = {
                        | FLAG_WWAN,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
-       .check_connect = cdc_ncm_check_connect,
        .manage_power = usbnet_manage_power,
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
@@ -1154,7 +1419,6 @@ static const struct driver_info wwan_noarp_info = {
                        | FLAG_WWAN | FLAG_NOARP,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
-       .check_connect = cdc_ncm_check_connect,
        .manage_power = usbnet_manage_power,
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
index 660bd5ea9fc0b311918812af8d3959c830b96cf4..a3a05869309df6a1ac34cdb00c6ff4d031dc921b 100644 (file)
@@ -2425,7 +2425,7 @@ static void hso_net_init(struct net_device *net)
        net->type = ARPHRD_NONE;
        net->mtu = DEFAULT_MTU - 14;
        net->tx_queue_len = 10;
-       SET_ETHTOOL_OPS(net, &ops);
+       net->ethtool_ops = &ops;
 
        /* and initialize the semaphore */
        spin_lock_init(&hso_net->net_lock);
index 312178d7b698e06a6bc97ede9c7c1eb49510c322..f9822bc75425a9bacc2dcd8915344c6373778a2c 100644 (file)
@@ -172,24 +172,11 @@ err:
        return ret;
 }
 
-static int huawei_cdc_ncm_check_connect(struct usbnet *usbnet_dev)
-{
-       struct cdc_ncm_ctx *ctx;
-
-       ctx = (struct cdc_ncm_ctx *)usbnet_dev->data[0];
-
-       if (ctx == NULL)
-               return 1; /* disconnected */
-
-       return !ctx->connected;
-}
-
 static const struct driver_info huawei_cdc_ncm_info = {
        .description = "Huawei CDC NCM device",
        .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
        .bind = huawei_cdc_ncm_bind,
        .unbind = huawei_cdc_ncm_unbind,
-       .check_connect = huawei_cdc_ncm_check_connect,
        .manage_power = huawei_cdc_ncm_manage_power,
        .rx_fixup = cdc_ncm_rx_fixup,
        .tx_fixup = cdc_ncm_tx_fixup,
index 421934c83f1cdb3b1e7dbfbd5e0bd19d944a0da1..f72570708edb52db75031253e8e3a9d5fb811587 100644 (file)
@@ -524,7 +524,7 @@ static int ipheth_probe(struct usb_interface *intf,
        usb_set_intfdata(intf, dev);
 
        SET_NETDEV_DEV(netdev, &intf->dev);
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
 
        retval = register_netdev(netdev);
        if (retval) {
index a359d3bb7c5b125422cf59dea69c8a905099fed7..dcb6d33141e0640f545555848434d8efd7822878 100644 (file)
@@ -1171,7 +1171,7 @@ err_fw:
        netdev->netdev_ops = &kaweth_netdev_ops;
        netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
        netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
 
        /* kaweth is zeroed as part of alloc_netdev */
        INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
index 03e8a15d7deb74d328d5e563ed2077f62c8d9687..f840802159158b56a5b840d7abb83152fcd0c83f 100644 (file)
@@ -1159,7 +1159,7 @@ static int pegasus_probe(struct usb_interface *intf,
 
        net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
        net->netdev_ops = &pegasus_netdev_ops;
-       SET_ETHTOOL_OPS(net, &ops);
+       net->ethtool_ops = &ops;
        pegasus->mii.dev = net;
        pegasus->mii.mdio_read = mdio_read;
        pegasus->mii.mdio_write = mdio_write;
index e3458e3c44f146653048aba99295670caabd4db5..83208d4fdc5983aa963dbf1640732c17706ef9a2 100644 (file)
@@ -669,6 +669,22 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
+       {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
+       {QMI_FIXED_INTF(0x16d8, 0x6007, 0)},    /* CMOTech CHE-628S */
+       {QMI_FIXED_INTF(0x16d8, 0x6008, 0)},    /* CMOTech CMU-301 */
+       {QMI_FIXED_INTF(0x16d8, 0x6280, 0)},    /* CMOTech CHU-628 */
+       {QMI_FIXED_INTF(0x16d8, 0x7001, 0)},    /* CMOTech CHU-720S */
+       {QMI_FIXED_INTF(0x16d8, 0x7002, 0)},    /* CMOTech 7002 */
+       {QMI_FIXED_INTF(0x16d8, 0x7003, 4)},    /* CMOTech CHU-629K */
+       {QMI_FIXED_INTF(0x16d8, 0x7004, 3)},    /* CMOTech 7004 */
+       {QMI_FIXED_INTF(0x16d8, 0x7006, 5)},    /* CMOTech CGU-629 */
+       {QMI_FIXED_INTF(0x16d8, 0x700a, 4)},    /* CMOTech CHU-629S */
+       {QMI_FIXED_INTF(0x16d8, 0x7211, 0)},    /* CMOTech CHU-720I */
+       {QMI_FIXED_INTF(0x16d8, 0x7212, 0)},    /* CMOTech 7212 */
+       {QMI_FIXED_INTF(0x16d8, 0x7213, 0)},    /* CMOTech 7213 */
+       {QMI_FIXED_INTF(0x16d8, 0x7251, 1)},    /* CMOTech 7251 */
+       {QMI_FIXED_INTF(0x16d8, 0x7252, 1)},    /* CMOTech 7252 */
+       {QMI_FIXED_INTF(0x16d8, 0x7253, 1)},    /* CMOTech 7253 */
        {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -730,16 +746,28 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 19)},   /* Sierra Wireless MC7710 in QMI mode */
+       {QMI_FIXED_INTF(0x1199, 0x68c0, 8)},    /* Sierra Wireless MC73xx */
+       {QMI_FIXED_INTF(0x1199, 0x68c0, 10)},   /* Sierra Wireless MC73xx */
+       {QMI_FIXED_INTF(0x1199, 0x68c0, 11)},   /* Sierra Wireless MC73xx */
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
+       {QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
+       {QMI_FIXED_INTF(0x1199, 0x9041, 8)},    /* Sierra Wireless MC7305/MC7355 */
        {QMI_FIXED_INTF(0x1199, 0x9051, 8)},    /* Netgear AirCard 340U */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+       {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
        {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},    /* Olivetti Olicard 500 */
        {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
        {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},    /* Cinterion PHxx,PXxx */
+       {QMI_FIXED_INTF(0x413c, 0x81a2, 8)},    /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a3, 8)},    /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index 3fbfb0869030aeeb6c540766bdbf924d959abcd6..25431965a625a63d99fd3fa6d3167183ea45606a 100644 (file)
@@ -630,12 +630,10 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
        int ret;
        void *tmp;
 
-       tmp = kmalloc(size, GFP_KERNEL);
+       tmp = kmemdup(data, size, GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;
 
-       memcpy(tmp, data, size);
-
        ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
                               RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
                               value, index, tmp, size, 500);
@@ -3452,7 +3450,7 @@ static int rtl8152_probe(struct usb_interface *intf,
                              NETIF_F_TSO | NETIF_F_FRAGLIST |
                              NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
 
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
        netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
 
        tp->mii.dev = netdev;
index da2c4583bd2d9d4894ea64ae566a028e9da96b2b..6e87e5710048cf952c1505950d954a493b6b8878 100644 (file)
@@ -878,7 +878,7 @@ static int rtl8150_probe(struct usb_interface *intf,
        dev->netdev = netdev;
        netdev->netdev_ops = &rtl8150_netdev_ops;
        netdev->watchdog_timeo = RTL8150_TX_TIMEOUT;
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
        dev->intr_interval = 100;       /* 100ms */
 
        if (!alloc_all_urbs(dev)) {
index 7b687469199b58357a74490cb266e4d47534c097..7d9f84a91f37dd96fbd835d170dc8ed24c281b6c 100644 (file)
@@ -1285,7 +1285,7 @@ static int virtnet_set_channels(struct net_device *dev,
        if (channels->rx_count || channels->tx_count || channels->other_count)
                return -EINVAL;
 
-       if (queue_pairs > vi->max_queue_pairs)
+       if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
                return -EINVAL;
 
        get_online_cpus();
@@ -1646,7 +1646,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
 
-       SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
+       dev->ethtool_ops = &virtnet_ethtool_ops;
        SET_NETDEV_DEV(dev, &vdev->dev);
 
        /* Do we support "hardware" checksums? */
@@ -1724,6 +1724,13 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
                vi->has_cvq = true;
 
+       if (vi->any_header_sg) {
+               if (vi->mergeable_rx_bufs)
+                       dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+               else
+                       dev->needed_headroom = sizeof(struct virtio_net_hdr);
+       }
+
        /* Use single tx/rx queue pair as default */
        vi->curr_queue_pairs = 1;
        vi->max_queue_pairs = max_queue_pairs;
index 600ab56c0008bab49251505e38ca911b611da2c1..00e120296e923ef3dc5433112941e8d643bf7bf6 100644 (file)
@@ -635,5 +635,5 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
 
 void vmxnet3_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
+       netdev->ethtool_ops = &vmxnet3_ethtool_ops;
 }
index 82355d5d155a86921be733cc40deefcbaa6b7116..e68c8eb4ea8e297ad3c3b146a194a8ac84e4b1c6 100644 (file)
@@ -127,6 +127,7 @@ struct vxlan_dev {
        struct list_head  next;         /* vxlan's per namespace list */
        struct vxlan_sock *vn_sock;     /* listening socket */
        struct net_device *dev;
+       struct net        *net;         /* netns for packet i/o */
        struct vxlan_rdst default_dst;  /* default destination */
        union vxlan_addr  saddr;        /* source address */
        __be16            dst_port;
@@ -389,8 +390,8 @@ static inline size_t vxlan_nlmsg_size(void)
                + nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
-                            struct vxlan_fdb *fdb, int type)
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+                            struct vxlan_rdst *rd, int type)
 {
        struct net *net = dev_net(vxlan->dev);
        struct sk_buff *skb;
@@ -400,8 +401,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
        if (skb == NULL)
                goto errout;
 
-       err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
-                            first_remote_rtnl(fdb));
+       err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -427,10 +427,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
                .remote_vni = VXLAN_N_VID,
        };
 
-       INIT_LIST_HEAD(&f.remotes);
-       list_add_rcu(&remote.list, &f.remotes);
-
-       vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+       vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -438,11 +435,11 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
        struct vxlan_fdb f = {
                .state = NUD_STALE,
        };
+       struct vxlan_rdst remote = { };
 
-       INIT_LIST_HEAD(&f.remotes);
        memcpy(f.eth_addr, eth_addr, ETH_ALEN);
 
-       vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+       vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 /* Hash Ethernet address */
@@ -533,7 +530,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
 
 /* Add/update destinations for multicast */
 static int vxlan_fdb_append(struct vxlan_fdb *f,
-                           union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
+                           union vxlan_addr *ip, __be16 port, __u32 vni,
+                           __u32 ifindex, struct vxlan_rdst **rdp)
 {
        struct vxlan_rdst *rd;
 
@@ -551,6 +549,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 
        list_add_tail_rcu(&rd->list, &f->remotes);
 
+       *rdp = rd;
        return 1;
 }
 
@@ -690,6 +689,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                            __be16 port, __u32 vni, __u32 ifindex,
                            __u8 ndm_flags)
 {
+       struct vxlan_rdst *rd = NULL;
        struct vxlan_fdb *f;
        int notify = 0;
 
@@ -726,7 +726,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                if ((flags & NLM_F_APPEND) &&
                    (is_multicast_ether_addr(f->eth_addr) ||
                     is_zero_ether_addr(f->eth_addr))) {
-                       int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
+                       int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
+                                                 &rd);
 
                        if (rc < 0)
                                return rc;
@@ -756,15 +757,18 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                INIT_LIST_HEAD(&f->remotes);
                memcpy(f->eth_addr, mac, ETH_ALEN);
 
-               vxlan_fdb_append(f, ip, port, vni, ifindex);
+               vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
 
                ++vxlan->addrcnt;
                hlist_add_head_rcu(&f->hlist,
                                   vxlan_fdb_head(vxlan, mac));
        }
 
-       if (notify)
-               vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+       if (notify) {
+               if (rd == NULL)
+                       rd = first_remote_rtnl(f);
+               vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
+       }
 
        return 0;
 }
@@ -785,7 +789,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
                    "delete %pM\n", f->eth_addr);
 
        --vxlan->addrcnt;
-       vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
+       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
 
        hlist_del_rcu(&f->hlist);
        call_rcu(&f->rcu, vxlan_fdb_free);
@@ -919,6 +923,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
         */
        if (rd && !list_is_singular(&f->remotes)) {
                list_del_rcu(&rd->list);
+               vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
                kfree_rcu(rd, rcu);
                goto out;
        }
@@ -993,7 +998,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
                rdst->remote_ip = *src_ip;
                f->updated = jiffies;
-               vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+               vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
        } else {
                /* learned new entry */
                spin_lock(&vxlan->hash_lock);
@@ -1199,6 +1204,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
 
        remote_ip = &vxlan->default_dst.remote_ip;
        skb_reset_mac_header(skb);
+       skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
        skb->protocol = eth_type_trans(skb, vxlan->dev);
 
        /* Ignore packet loops (and multicast echo) */
@@ -1614,7 +1620,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
                           struct dst_entry *dst, struct sk_buff *skb,
                           struct net_device *dev, struct in6_addr *saddr,
                           struct in6_addr *daddr, __u8 prio, __u8 ttl,
-                          __be16 src_port, __be16 dst_port, __be32 vni)
+                          __be16 src_port, __be16 dst_port, __be32 vni,
+                          bool xnet)
 {
        struct ipv6hdr *ip6h;
        struct vxlanhdr *vxh;
@@ -1627,7 +1634,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
                skb->encapsulation = 1;
        }
 
-       skb_scrub_packet(skb, false);
+       skb_scrub_packet(skb, xnet);
 
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
                        + VXLAN_HLEN + sizeof(struct ipv6hdr)
@@ -1707,7 +1714,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 int vxlan_xmit_skb(struct vxlan_sock *vs,
                   struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port, __be32 vni)
+                  __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
 {
        struct vxlanhdr *vxh;
        struct udphdr *uh;
@@ -1756,7 +1763,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                return err;
 
        return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
-                            tos, ttl, df, false);
+                            tos, ttl, df, xnet);
 }
 EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 
@@ -1849,7 +1856,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                fl4.daddr = dst->sin.sin_addr.s_addr;
                fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
 
-               rt = ip_route_output_key(dev_net(dev), &fl4);
+               rt = ip_route_output_key(vxlan->net, &fl4);
                if (IS_ERR(rt)) {
                        netdev_dbg(dev, "no route to %pI4\n",
                                   &dst->sin.sin_addr.s_addr);
@@ -1870,7 +1877,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        struct vxlan_dev *dst_vxlan;
 
                        ip_rt_put(rt);
-                       dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+                       dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1883,7 +1890,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
                                     fl4.saddr, dst->sin.sin_addr.s_addr,
                                     tos, ttl, df, src_port, dst_port,
-                                    htonl(vni << 8));
+                                    htonl(vni << 8),
+                                    !net_eq(vxlan->net, dev_net(vxlan->dev)));
 
                if (err < 0)
                        goto rt_tx_error;
@@ -1923,7 +1931,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        struct vxlan_dev *dst_vxlan;
 
                        dst_release(ndst);
-                       dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+                       dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1934,7 +1942,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
                                      dev, &fl6.saddr, &fl6.daddr, 0, ttl,
-                                     src_port, dst_port, htonl(vni << 8));
+                                     src_port, dst_port, htonl(vni << 8),
+                                     !net_eq(vxlan->net, dev_net(vxlan->dev)));
 #endif
        }
 
@@ -2078,7 +2087,7 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 static int vxlan_init(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        struct vxlan_sock *vs;
 
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -2086,7 +2095,7 @@ static int vxlan_init(struct net_device *dev)
                return -ENOMEM;
 
        spin_lock(&vn->sock_lock);
-       vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
+       vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
        if (vs) {
                /* If we have a socket with same port already, reuse it */
                atomic_inc(&vs->refcnt);
@@ -2168,8 +2177,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
 /* Cleanup timer and forwarding table on shutdown */
 static int vxlan_stop(struct net_device *dev)
 {
-       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        struct vxlan_sock *vs = vxlan->vn_sock;
 
        if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
@@ -2198,7 +2207,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
        struct net_device *lowerdev;
        int max_mtu;
 
-       lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+       lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
        if (lowerdev == NULL)
                return eth_change_mtu(dev, new_mtu);
 
@@ -2281,7 +2290,6 @@ static void vxlan_setup(struct net_device *dev)
 
        dev->tx_queue_len = 0;
        dev->features   |= NETIF_F_LLTX;
-       dev->features   |= NETIF_F_NETNS_LOCAL;
        dev->features   |= NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features   |= NETIF_F_RXCSUM;
        dev->features   |= NETIF_F_GSO_SOFTWARE;
@@ -2574,7 +2582,7 @@ EXPORT_SYMBOL_GPL(vxlan_sock_add);
 static void vxlan_sock_work(struct work_struct *work)
 {
        struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
-       struct net *net = dev_net(vxlan->dev);
+       struct net *net = vxlan->net;
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        __be16 port = vxlan->dst_port;
        struct vxlan_sock *nvs;
@@ -2601,6 +2609,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
        if (!data[IFLA_VXLAN_ID])
                return -EINVAL;
 
+       vxlan->net = dev_net(dev);
+
        vni = nla_get_u32(data[IFLA_VXLAN_ID]);
        dst->remote_vni = vni;
 
@@ -2706,7 +2716,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
                return -EEXIST;
        }
 
-       SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
+       dev->ethtool_ops = &vxlan_ethtool_ops;
 
        /* create an fdb entry for a valid default destination */
        if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
@@ -2735,8 +2745,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
 
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
-       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
        spin_lock(&vn->sock_lock);
        if (!hlist_unhashed(&vxlan->hlist))
@@ -2901,8 +2911,33 @@ static __net_init int vxlan_init_net(struct net *net)
        return 0;
 }
 
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct vxlan_dev *vxlan, *next;
+       struct net_device *dev, *aux;
+       LIST_HEAD(list);
+
+       rtnl_lock();
+       for_each_netdev_safe(net, dev, aux)
+               if (dev->rtnl_link_ops == &vxlan_link_ops)
+                       unregister_netdevice_queue(dev, &list);
+
+       list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
+               /* If vxlan->dev is in the same netns, it has already been added
+                * to the list by the previous loop.
+                */
+               if (!net_eq(dev_net(vxlan->dev), net))
+                       unregister_netdevice_queue(vxlan->dev, &list);
+       }
+
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
+}
+
 static struct pernet_operations vxlan_net_ops = {
        .init = vxlan_init_net,
+       .exit = vxlan_exit_net,
        .id   = &vxlan_net_id,
        .size = sizeof(struct vxlan_net),
 };
index de3bbf43fc5ac14f41ee9d76eeacb1f059eeaab3..cdd45fb8a1f6892587abddf2abbf0963bd7a5653 100644 (file)
@@ -1322,10 +1322,6 @@ NOTE:  This is rather a useless action right now, as the
 
 static int sdla_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct frad_local *flp;
-
-       flp = netdev_priv(dev);
-
        if (netif_running(dev))
                return -EBUSY;
 
index 9c34d2fccfac61508705a4021f436e9a9024e936..9c78090e72f87dae118dff22fd366d5ce11bacd8 100644 (file)
@@ -500,26 +500,23 @@ int i2400m_pm_notifier(struct notifier_block *notifier,
  */
 int i2400m_pre_reset(struct i2400m *i2400m)
 {
-       int result;
        struct device *dev = i2400m_dev(i2400m);
 
        d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
        d_printf(1, dev, "pre-reset shut down\n");
 
-       result = 0;
        mutex_lock(&i2400m->init_mutex);
        if (i2400m->updown) {
                netif_tx_disable(i2400m->wimax_dev.net_dev);
                __i2400m_dev_stop(i2400m);
-               result = 0;
                /* down't set updown to zero -- this way
                 * post_reset can restore properly */
        }
        mutex_unlock(&i2400m->init_mutex);
        if (i2400m->bus_release)
                i2400m->bus_release(i2400m);
-       d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
-       return result;
+       d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(i2400m_pre_reset);
 
index 507d9a9ee69ad4b61ece2d334434691801682efe..f92050617ae682e02bb48b6676a16298ae2dfa4f 100644 (file)
@@ -1090,7 +1090,8 @@ static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
        return ret;
 }
 
-static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        u32 queues, bool drop)
 {
        struct ar5523 *ar = hw->priv;
 
index a1f0996288508e3cad8ecd8e03f33153980e593e..17d221abd58c0bb70d72a6fbf14f1e7422f8ced5 100644 (file)
@@ -175,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
        return 0;
 }
 
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
 {
        struct bmi_cmd cmd;
        union bmi_resp resp;
@@ -184,7 +184,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
        int ret;
 
        ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
-                  address, *param);
+                  address, param);
 
        if (ar->bmi.done_sent) {
                ath10k_warn("command disallowed\n");
@@ -193,7 +193,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
 
        cmd.id            = __cpu_to_le32(BMI_EXECUTE);
        cmd.execute.addr  = __cpu_to_le32(address);
-       cmd.execute.param = __cpu_to_le32(*param);
+       cmd.execute.param = __cpu_to_le32(param);
 
        ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
        if (ret) {
@@ -204,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
        if (resplen < sizeof(resp.execute)) {
                ath10k_warn("invalid execute response length (%d)\n",
                            resplen);
-               return ret;
+               return -EIO;
        }
 
-       *param = __le32_to_cpu(resp.execute.result);
+       *result = __le32_to_cpu(resp.execute.result);
+
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
        return 0;
 }
 
index 8d81ce1cec216c7b55fa1c0ab47b65cc41cafb0d..3a9bdf51c96a212bb98166e42b7f292179c999cf 100644 (file)
@@ -217,7 +217,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
                ret;                                                    \
        })
 
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
 int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
 int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
 int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
index a79499c8235009f701073c83b3974d66d183e001..1e4cad8632b527346915a9b3d0545216f881580c 100644 (file)
@@ -840,35 +840,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
                                   unsigned int ce_id,
-                                  struct ath10k_ce_pipe *ce_state,
                                   const struct ce_attr *attr)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_ce_ring *src_ring;
-       unsigned int nentries = attr->src_nentries;
-       unsigned int ce_nbytes;
-       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-       dma_addr_t base_addr;
-       char *ptr;
-
-       nentries = roundup_pow_of_two(nentries);
-
-       if (ce_state->src_ring) {
-               WARN_ON(ce_state->src_ring->nentries != nentries);
-               return 0;
-       }
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+       struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+       u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
 
-       ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-       ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-       if (ptr == NULL)
-               return -ENOMEM;
+       nentries = roundup_pow_of_two(attr->src_nentries);
 
-       ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
-       src_ring = ce_state->src_ring;
-
-       ptr += sizeof(struct ath10k_ce_ring);
-       src_ring->nentries = nentries;
-       src_ring->nentries_mask = nentries - 1;
+       memset(src_ring->per_transfer_context, 0,
+              nentries * sizeof(*src_ring->per_transfer_context));
 
        src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
        src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +860,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
                ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
        src_ring->write_index &= src_ring->nentries_mask;
 
-       src_ring->per_transfer_context = (void **)ptr;
+       ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+                                        src_ring->base_addr_ce_space);
+       ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+       ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+       ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+       ath10k_dbg(ATH10K_DBG_BOOT,
+                  "boot init ce src ring id %d entries %d base_addr %p\n",
+                  ce_id, nentries, src_ring->base_addr_owner_space);
+
+       return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+                                   unsigned int ce_id,
+                                   const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+       struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+       u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       nentries = roundup_pow_of_two(attr->dest_nentries);
+
+       memset(dest_ring->per_transfer_context, 0,
+              nentries * sizeof(*dest_ring->per_transfer_context));
+
+       dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+       dest_ring->sw_index &= dest_ring->nentries_mask;
+       dest_ring->write_index =
+               ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+       dest_ring->write_index &= dest_ring->nentries_mask;
+
+       ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+                                         dest_ring->base_addr_ce_space);
+       ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+       ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+       ath10k_dbg(ATH10K_DBG_BOOT,
+                  "boot ce dest ring id %d entries %d base_addr %p\n",
+                  ce_id, nentries, dest_ring->base_addr_owner_space);
+
+       return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+                        const struct ce_attr *attr)
+{
+       struct ath10k_ce_ring *src_ring;
+       u32 nentries = attr->src_nentries;
+       dma_addr_t base_addr;
+
+       nentries = roundup_pow_of_two(nentries);
+
+       src_ring = kzalloc(sizeof(*src_ring) +
+                          (nentries *
+                           sizeof(*src_ring->per_transfer_context)),
+                          GFP_KERNEL);
+       if (src_ring == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       src_ring->nentries = nentries;
+       src_ring->nentries_mask = nentries - 1;
 
        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        src_ring->base_addr_owner_space_unaligned =
-               pci_alloc_consistent(ar_pci->pdev,
-                                    (nentries * sizeof(struct ce_desc) +
-                                     CE_DESC_RING_ALIGN),
-                                    &base_addr);
+               dma_alloc_coherent(ar->dev,
+                                  (nentries * sizeof(struct ce_desc) +
+                                   CE_DESC_RING_ALIGN),
+                                  &base_addr, GFP_KERNEL);
        if (!src_ring->base_addr_owner_space_unaligned) {
-               kfree(ce_state->src_ring);
-               ce_state->src_ring = NULL;
-               return -ENOMEM;
+               kfree(src_ring);
+               return ERR_PTR(-ENOMEM);
        }
 
        src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -912,88 +960,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
                kmalloc((nentries * sizeof(struct ce_desc) +
                         CE_DESC_RING_ALIGN), GFP_KERNEL);
        if (!src_ring->shadow_base_unaligned) {
-               pci_free_consistent(ar_pci->pdev,
-                                   (nentries * sizeof(struct ce_desc) +
-                                    CE_DESC_RING_ALIGN),
-                                   src_ring->base_addr_owner_space,
-                                   src_ring->base_addr_ce_space);
-               kfree(ce_state->src_ring);
-               ce_state->src_ring = NULL;
-               return -ENOMEM;
+               dma_free_coherent(ar->dev,
+                                 (nentries * sizeof(struct ce_desc) +
+                                  CE_DESC_RING_ALIGN),
+                                 src_ring->base_addr_owner_space,
+                                 src_ring->base_addr_ce_space);
+               kfree(src_ring);
+               return ERR_PTR(-ENOMEM);
        }
 
        src_ring->shadow_base = PTR_ALIGN(
                        src_ring->shadow_base_unaligned,
                        CE_DESC_RING_ALIGN);
 
-       ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
-                                        src_ring->base_addr_ce_space);
-       ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
-       ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
-       ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
-       ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
-       ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
-       ath10k_dbg(ATH10K_DBG_BOOT,
-                  "boot ce src ring id %d entries %d base_addr %p\n",
-                  ce_id, nentries, src_ring->base_addr_owner_space);
-
-       return 0;
+       return src_ring;
 }
 
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
-                                   unsigned int ce_id,
-                                   struct ath10k_ce_pipe *ce_state,
-                                   const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+                         const struct ce_attr *attr)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_ring *dest_ring;
-       unsigned int nentries = attr->dest_nentries;
-       unsigned int ce_nbytes;
-       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+       u32 nentries;
        dma_addr_t base_addr;
-       char *ptr;
 
-       nentries = roundup_pow_of_two(nentries);
+       nentries = roundup_pow_of_two(attr->dest_nentries);
 
-       if (ce_state->dest_ring) {
-               WARN_ON(ce_state->dest_ring->nentries != nentries);
-               return 0;
-       }
-
-       ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-       ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-       if (ptr == NULL)
-               return -ENOMEM;
-
-       ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
-       dest_ring = ce_state->dest_ring;
+       dest_ring = kzalloc(sizeof(*dest_ring) +
+                           (nentries *
+                            sizeof(*dest_ring->per_transfer_context)),
+                           GFP_KERNEL);
+       if (dest_ring == NULL)
+               return ERR_PTR(-ENOMEM);
 
-       ptr += sizeof(struct ath10k_ce_ring);
        dest_ring->nentries = nentries;
        dest_ring->nentries_mask = nentries - 1;
 
-       dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
-       dest_ring->sw_index &= dest_ring->nentries_mask;
-       dest_ring->write_index =
-               ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
-       dest_ring->write_index &= dest_ring->nentries_mask;
-
-       dest_ring->per_transfer_context = (void **)ptr;
-
        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        dest_ring->base_addr_owner_space_unaligned =
-               pci_alloc_consistent(ar_pci->pdev,
-                                    (nentries * sizeof(struct ce_desc) +
-                                     CE_DESC_RING_ALIGN),
-                                    &base_addr);
+               dma_alloc_coherent(ar->dev,
+                                  (nentries * sizeof(struct ce_desc) +
+                                   CE_DESC_RING_ALIGN),
+                                  &base_addr, GFP_KERNEL);
        if (!dest_ring->base_addr_owner_space_unaligned) {
-               kfree(ce_state->dest_ring);
-               ce_state->dest_ring = NULL;
-               return -ENOMEM;
+               kfree(dest_ring);
+               return ERR_PTR(-ENOMEM);
        }
 
        dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1026,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                        dest_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);
 
-       ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
-                                         dest_ring->base_addr_ce_space);
-       ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
-       ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
-       ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
-       ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
-       ath10k_dbg(ATH10K_DBG_BOOT,
-                  "boot ce dest ring id %d entries %d base_addr %p\n",
-                  ce_id, nentries, dest_ring->base_addr_owner_space);
-
-       return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
-                                            unsigned int ce_id,
-                                            const struct ce_attr *attr)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
-       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
-       spin_lock_bh(&ar_pci->ce_lock);
-
-       ce_state->ar = ar;
-       ce_state->id = ce_id;
-       ce_state->ctrl_addr = ctrl_addr;
-       ce_state->attr_flags = attr->flags;
-       ce_state->src_sz_max = attr->src_sz_max;
-
-       spin_unlock_bh(&ar_pci->ce_lock);
-
-       return ce_state;
+       return dest_ring;
 }
 
 /*
@@ -1054,11 +1036,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
  * initialization. It may be that only one side or the other is
  * initialized by software/firmware.
  */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-                               unsigned int ce_id,
-                               const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+                       const struct ce_attr *attr)
 {
-       struct ath10k_ce_pipe *ce_state;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        int ret;
 
        /*
@@ -1074,64 +1056,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 
        ret = ath10k_pci_wake(ar);
        if (ret)
-               return NULL;
+               return ret;
 
-       ce_state = ath10k_ce_init_state(ar, ce_id, attr);
-       if (!ce_state) {
-               ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
-               goto out;
-       }
+       spin_lock_bh(&ar_pci->ce_lock);
+       ce_state->ar = ar;
+       ce_state->id = ce_id;
+       ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+       ce_state->attr_flags = attr->flags;
+       ce_state->src_sz_max = attr->src_sz_max;
+       spin_unlock_bh(&ar_pci->ce_lock);
 
        if (attr->src_nentries) {
-               ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+               ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
                if (ret) {
                        ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
                                   ce_id, ret);
-                       ath10k_ce_deinit(ce_state);
-                       ce_state = NULL;
                        goto out;
                }
        }
 
        if (attr->dest_nentries) {
-               ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+               ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
                if (ret) {
                        ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
                                   ce_id, ret);
-                       ath10k_ce_deinit(ce_state);
-                       ce_state = NULL;
                        goto out;
                }
        }
 
 out:
        ath10k_pci_sleep(ar);
-       return ce_state;
+       return ret;
 }
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+       int ret;
+
+       ret = ath10k_pci_wake(ar);
+       if (ret)
+               return;
+
+       ath10k_ce_deinit_src_ring(ar, ce_id);
+       ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+       ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+                        const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+       int ret;
+
+       if (attr->src_nentries) {
+               ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+               if (IS_ERR(ce_state->src_ring)) {
+                       ret = PTR_ERR(ce_state->src_ring);
+                       ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+                                  ce_id, ret);
+                       ce_state->src_ring = NULL;
+                       return ret;
+               }
+       }
+
+       if (attr->dest_nentries) {
+               ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+                                                               attr);
+               if (IS_ERR(ce_state->dest_ring)) {
+                       ret = PTR_ERR(ce_state->dest_ring);
+                       ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+                                  ce_id, ret);
+                       ce_state->dest_ring = NULL;
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
-       struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 
        if (ce_state->src_ring) {
                kfree(ce_state->src_ring->shadow_base_unaligned);
-               pci_free_consistent(ar_pci->pdev,
-                                   (ce_state->src_ring->nentries *
-                                    sizeof(struct ce_desc) +
-                                    CE_DESC_RING_ALIGN),
-                                   ce_state->src_ring->base_addr_owner_space,
-                                   ce_state->src_ring->base_addr_ce_space);
+               dma_free_coherent(ar->dev,
+                                 (ce_state->src_ring->nentries *
+                                  sizeof(struct ce_desc) +
+                                  CE_DESC_RING_ALIGN),
+                                 ce_state->src_ring->base_addr_owner_space,
+                                 ce_state->src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
        }
 
        if (ce_state->dest_ring) {
-               pci_free_consistent(ar_pci->pdev,
-                                   (ce_state->dest_ring->nentries *
-                                    sizeof(struct ce_desc) +
-                                    CE_DESC_RING_ALIGN),
-                                   ce_state->dest_ring->base_addr_owner_space,
-                                   ce_state->dest_ring->base_addr_ce_space);
+               dma_free_coherent(ar->dev,
+                                 (ce_state->dest_ring->nentries *
+                                  sizeof(struct ce_desc) +
+                                  CE_DESC_RING_ALIGN),
+                                 ce_state->dest_ring->base_addr_owner_space,
+                                 ce_state->dest_ring->base_addr_ce_space);
                kfree(ce_state->dest_ring);
        }
 
index 8eb7f99ed992277b0efb3e7ae4f971b8e4eb7557..fd0bc3561e42a9ea4d84644524b9f19b4cdfb207 100644 (file)
@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
        void *shadow_base_unaligned;
        struct ce_desc *shadow_base;
 
-       void **per_transfer_context;
+       /* keep last */
+       void *per_transfer_context[0];
 };
 
 struct ath10k_ce_pipe {
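
The [0]-sized trailing array lets the per-transfer context pointers be
co-allocated with the ring structure itself, which is why the member must stay
last. A minimal sketch of the allocation pattern used by the new
ath10k_ce_alloc_*_ring() helpers, assuming kernel context with ce.h included
(the function name is invented for illustration):

        #include <linux/slab.h>

        /* Sketch only: ring and its per_transfer_context[] in one allocation. */
        static struct ath10k_ce_ring *ce_ring_alloc(unsigned int nentries)
        {
                struct ath10k_ce_ring *ring;

                ring = kzalloc(sizeof(*ring) +
                               nentries * sizeof(*ring->per_transfer_context),
                               GFP_KERNEL);
                if (!ring)
                        return NULL;

                ring->nentries = nentries;
                ring->nentries_mask = nentries - 1;
                return ring;
        }
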
@@ -210,10 +211,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 
 /*==================CE Engine Initialization=======================*/
 
-/* Initialize an instance of a CE */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-                               unsigned int ce_id,
-                               const struct ce_attr *attr);
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+                       const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+                         const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
 
 /*==================CE Engine Shutdown=======================*/
 /*
@@ -236,8 +239,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                               unsigned int *nbytesp,
                               unsigned int *transfer_idp);
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
-
 /*==================CE Interrupt Handlers====================*/
 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
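
The split into alloc/init and deinit/free pairs separates software state
(allocated once) from hardware state (programmed on every power-up). A minimal
sketch of the call order suggested by the declarations above, with error paths
trimmed (the wrapper name is made up):

        /* Hypothetical caller exercising one copy engine pipe. */
        static int ce_pipe_example(struct ath10k *ar, const struct ce_attr *attr)
        {
                int ret;

                ret = ath10k_ce_alloc_pipe(ar, 0, attr);  /* probe: memory only   */
                if (ret)
                        return ret;

                ret = ath10k_ce_init_pipe(ar, 0, attr);   /* power up: program HW */
                if (ret) {
                        ath10k_ce_free_pipe(ar, 0);
                        return ret;
                }

                /* ... normal operation ... */

                ath10k_ce_deinit_pipe(ar, 0);             /* power down */
                ath10k_ce_free_pipe(ar, 0);               /* remove     */
                return 0;
        }
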
index ebc5fc2ede75cbac75da3a2f789a946d4e1274ad..75b3dfbd6509ff21448edf10897a4d7397c1bd2c 100644 (file)
@@ -249,30 +249,40 @@ exit:
 
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
-       u32 address = ar->hw_params.patch_load_addr;
-       u32 exec_param;
+       u32 result, address = ar->hw_params.patch_load_addr;
        int ret;
 
        /* OTP is optional */
 
-       if (!ar->otp_data || !ar->otp_len)
+       if (!ar->otp_data || !ar->otp_len) {
+               ath10k_warn("not running otp, calibration will be incorrect (otp_data %p otp_len %zd)!\n",
+                           ar->otp_data, ar->otp_len);
                return 0;
+       }
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+                  address, ar->otp_len);
 
        ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
        if (ret) {
                ath10k_err("could not write otp (%d)\n", ret);
-               goto exit;
+               return ret;
        }
 
-       exec_param = 0;
-       ret = ath10k_bmi_execute(ar, address, &exec_param);
+       ret = ath10k_bmi_execute(ar, address, 0, &result);
        if (ret) {
                ath10k_err("could not execute otp (%d)\n", ret);
-               goto exit;
+               return ret;
        }
 
-exit:
-       return ret;
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+       if (result != 0) {
+               ath10k_err("otp calibration failed: %d\n", result);
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 static int ath10k_download_fw(struct ath10k *ar)
@@ -389,8 +399,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
        /* first fetch the firmware file (firmware-*.bin) */
        ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
        if (IS_ERR(ar->firmware)) {
-               ath10k_err("Could not fetch firmware file '%s': %ld\n",
-                          name, PTR_ERR(ar->firmware));
+               ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
+                          ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
                return PTR_ERR(ar->firmware);
        }
 
@@ -401,14 +411,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
        magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
 
        if (len < magic_len) {
-               ath10k_err("firmware image too small to contain magic: %zu\n",
-                          len);
+               ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
+                          ar->hw_params.fw.dir, name, len);
                ret = -EINVAL;
                goto err;
        }
 
        if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
-               ath10k_err("Invalid firmware magic\n");
+               ath10k_err("invalid firmware magic\n");
                ret = -EINVAL;
                goto err;
        }
@@ -430,7 +440,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                data += sizeof(*hdr);
 
                if (len < ie_len) {
-                       ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+                       ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
                                   ie_id, len, ie_len);
                        ret = -EINVAL;
                        goto err;
@@ -513,8 +523,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
        }
 
        if (!ar->firmware_data || !ar->firmware_len) {
-               ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
-                           name);
+               ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+                           ar->hw_params.fw.dir, name);
                ret = -ENOMEDIUM;
                goto err;
        }
@@ -531,7 +541,9 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                                         ar->hw_params.fw.board);
        if (IS_ERR(ar->board)) {
                ret = PTR_ERR(ar->board);
-               ath10k_err("could not fetch board data (%d)\n", ret);
+               ath10k_err("could not fetch board data '%s/%s' (%d)\n",
+                          ar->hw_params.fw.dir, ar->hw_params.fw.board,
+                          ret);
                goto err;
        }
 
@@ -549,19 +561,21 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
 {
        int ret;
 
+       ar->fw_api = 2;
+       ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
        ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
-       if (ret == 0) {
-               ar->fw_api = 2;
-               goto out;
-       }
+       if (ret == 0)
+               goto success;
+
+       ar->fw_api = 1;
+       ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
        ret = ath10k_core_fetch_firmware_api_1(ar);
        if (ret)
                return ret;
 
-       ar->fw_api = 1;
-
-out:
+success:
        ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
 
        return 0;
@@ -572,16 +586,22 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
        int ret;
 
        ret = ath10k_download_board_data(ar);
-       if (ret)
+       if (ret) {
+               ath10k_err("failed to download board data: %d\n", ret);
                return ret;
+       }
 
        ret = ath10k_download_and_run_otp(ar);
-       if (ret)
+       if (ret) {
+               ath10k_err("failed to run otp: %d\n", ret);
                return ret;
+       }
 
        ret = ath10k_download_fw(ar);
-       if (ret)
+       if (ret) {
+               ath10k_err("failed to download firmware: %d\n", ret);
                return ret;
+       }
 
        return ret;
 }
@@ -660,8 +680,8 @@ static void ath10k_core_restart(struct work_struct *work)
 
        switch (ar->state) {
        case ATH10K_STATE_ON:
-               ath10k_halt(ar);
                ar->state = ATH10K_STATE_RESTARTING;
+               ath10k_halt(ar);
                ieee80211_restart_hw(ar->hw);
                break;
        case ATH10K_STATE_OFF:
@@ -835,9 +855,12 @@ int ath10k_core_start(struct ath10k *ar)
        INIT_LIST_HEAD(&ar->arvifs);
 
        if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
-               ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n",
-                           ar->hw_params.name, ar->target_version,
-                           ar->hw->wiphy->fw_version, ar->fw_api,
+               ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
+                           ar->hw_params.name,
+                           ar->target_version,
+                           ar->chip_id,
+                           ar->hw->wiphy->fw_version,
+                           ar->fw_api,
                            ar->htt.target_version_major,
                            ar->htt.target_version_minor);
 
@@ -885,7 +908,9 @@ void ath10k_core_stop(struct ath10k *ar)
        lockdep_assert_held(&ar->conf_mutex);
 
        /* try to suspend target */
-       ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+       if (ar->state != ATH10K_STATE_RESTARTING)
+               ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+
        ath10k_debug_stop(ar);
        ath10k_htc_stop(&ar->htc);
        ath10k_htt_detach(&ar->htt);
index 0e71979d837cf90888c74e4d85035c2a4d6fd4ef..2c1dfd71914688ec80bb4849031458943c986293 100644 (file)
@@ -119,6 +119,7 @@ struct ath10k_peer_stat {
        u8 peer_macaddr[ETH_ALEN];
        u32 peer_rssi;
        u32 peer_tx_rate;
+       u32 peer_rx_rate; /* 10x only */
 };
 
 struct ath10k_target_stats {
@@ -130,6 +131,12 @@ struct ath10k_target_stats {
        u32 cycle_count;
        u32 phy_err_count;
        u32 chan_tx_power;
+       u32 ack_rx_bad;
+       u32 rts_bad;
+       u32 rts_good;
+       u32 fcs_bad;
+       u32 no_beacons;
+       u32 mib_int_count;
 
        /* PDEV TX stats */
        s32 comp_queued;
@@ -260,6 +267,8 @@ struct ath10k_vif {
        u8 fixed_rate;
        u8 fixed_nss;
        u8 force_sgi;
+       bool use_cts_prot;
+       int num_legacy_stations;
 };
 
 struct ath10k_vif_iter {
@@ -419,13 +428,18 @@ struct ath10k {
        struct cfg80211_chan_def chandef;
 
        int free_vdev_map;
+       bool promisc;
+       bool monitor;
        int monitor_vdev_id;
-       bool monitor_enabled;
-       bool monitor_present;
+       bool monitor_started;
        unsigned int filter_flags;
        unsigned long dev_flags;
        u32 dfs_block_radar_events;
 
+       /* protected by conf_mutex */
+       bool radar_enabled;
+       int num_started_vdevs;
+
        struct wmi_pdev_set_wmm_params_arg wmm_params;
        struct completion install_key_done;
 
index 6debd281350aeb840978606212655fba6d6fb7a3..1b7ff4ba122ce42af61265eae30d8fb98d97201e 100644 (file)
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
        u8 *tmp = ev->data;
        struct ath10k_target_stats *stats;
        int num_pdev_stats, num_vdev_stats, num_peer_stats;
-       struct wmi_pdev_stats *ps;
+       struct wmi_pdev_stats_10x *ps;
        int i;
 
        spin_lock_bh(&ar->data_lock);
@@ -173,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
        num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
 
        if (num_pdev_stats) {
-               ps = (struct wmi_pdev_stats *)tmp;
+               ps = (struct wmi_pdev_stats_10x *)tmp;
 
                stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
                stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
@@ -228,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
                stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
                stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
 
-               tmp += sizeof(struct wmi_pdev_stats);
+               if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+                            ar->fw_features)) {
+                       stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
+                       stats->rts_bad = __le32_to_cpu(ps->rts_bad);
+                       stats->rts_good = __le32_to_cpu(ps->rts_good);
+                       stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
+                       stats->no_beacons = __le32_to_cpu(ps->no_beacons);
+                       stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
+                       tmp += sizeof(struct wmi_pdev_stats_10x);
+               } else {
+                       tmp += sizeof(struct wmi_pdev_stats_old);
+               }
        }
 
        /* 0 or max vdevs */
@@ -243,22 +254,29 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
        }
 
        if (num_peer_stats) {
-               struct wmi_peer_stats *peer_stats;
+               struct wmi_peer_stats_10x *peer_stats;
                struct ath10k_peer_stat *s;
 
                stats->peers = num_peer_stats;
 
                for (i = 0; i < num_peer_stats; i++) {
-                       peer_stats = (struct wmi_peer_stats *)tmp;
+                       peer_stats = (struct wmi_peer_stats_10x *)tmp;
                        s = &stats->peer_stat[i];
 
-                       WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
-                                                  s->peer_macaddr);
+                       memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
+                              ETH_ALEN);
                        s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
                        s->peer_tx_rate =
                                __le32_to_cpu(peer_stats->peer_tx_rate);
-
-                       tmp += sizeof(struct wmi_peer_stats);
+                       if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+                                    ar->fw_features)) {
+                               s->peer_rx_rate =
+                                       __le32_to_cpu(peer_stats->peer_rx_rate);
+                               tmp += sizeof(struct wmi_peer_stats_10x);
+
+                       } else {
+                               tmp += sizeof(struct wmi_peer_stats_old);
+                       }
                }
        }
 
@@ -272,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
        struct ath10k *ar = file->private_data;
        struct ath10k_target_stats *fw_stats;
        char *buf = NULL;
-       unsigned int len = 0, buf_len = 2500;
+       unsigned int len = 0, buf_len = 8000;
        ssize_t ret_cnt = 0;
        long left;
        int i;
@@ -320,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
                         "Cycle count", fw_stats->cycle_count);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "PHY error count", fw_stats->phy_err_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "RTS bad count", fw_stats->rts_bad);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "RTS good count", fw_stats->rts_good);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "FCS bad count", fw_stats->fcs_bad);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "No beacon count", fw_stats->no_beacons);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "MIB int count", fw_stats->mib_int_count);
 
        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -411,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
                         "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
 
        len += scnprintf(buf + len, buf_len - len, "\n");
-       len += scnprintf(buf + len, buf_len - len, "%30s\n",
-                        "ath10k PEER stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
+                        "ath10k PEER stats", fw_stats->peers);
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                                 "=================");
 
@@ -425,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
                len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                                 "Peer TX rate",
                                 fw_stats->peer_stat[i].peer_tx_rate);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "Peer RX rate",
+                                fw_stats->peer_stat[i].peer_rx_rate);
                len += scnprintf(buf + len, buf_len - len, "\n");
        }
        spin_unlock_bh(&ar->data_lock);
@@ -451,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
                                             char __user *user_buf,
                                             size_t count, loff_t *ppos)
 {
-       const char buf[] = "To simulate firmware crash write the keyword"
-                          " `crash` to this file.\nThis will force firmware"
-                          " to report a crash to the host system.\n";
+       const char buf[] = "To simulate firmware crash write one of the"
+                          " keywords to this file:\n `soft` - this will send"
+                          " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
+                          " supports that command.\n `hard` - this will send"
+                          " a command with illegal parameters to firmware,"
+                          " causing a firmware crash.\n";
+
        return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 }
 
+/* Simulate firmware crash:
+ * 'soft': Send a WMI command that causes a firmware hang; the hang is
+ * recoverable by a warm firmware reset.
+ * 'hard': Force a firmware crash by setting a vdev parameter on a disallowed
+ * vdev id. This is a hard firmware crash because it is recoverable only by a
+ * cold firmware reset.
+ */
 static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
                                              const char __user *user_buf,
                                              size_t count, loff_t *ppos)
 {
        struct ath10k *ar = file->private_data;
-       char buf[32] = {};
+       char buf[32];
        int ret;
 
        mutex_lock(&ar->conf_mutex);
 
        simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
-       if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
-               ret = -EINVAL;
-               goto exit;
-       }
+
+       /* make sure that buf is null terminated */
+       buf[sizeof(buf) - 1] = 0;
 
        if (ar->state != ATH10K_STATE_ON &&
            ar->state != ATH10K_STATE_RESTARTED) {
@@ -479,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
                goto exit;
        }
 
-       ath10k_info("simulating firmware crash\n");
+       /* drop the possible '\n' from the end */
+       if (buf[count - 1] == '\n') {
+               buf[count - 1] = 0;
+               count--;
+       }
 
-       ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
-       if (ret)
-               ath10k_warn("failed to force fw hang (%d)\n", ret);
+       if (!strcmp(buf, "soft")) {
+               ath10k_info("simulating soft firmware crash\n");
+               ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+       } else if (!strcmp(buf, "hard")) {
+               ath10k_info("simulating hard firmware crash\n");
+               ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
+                                       ar->wmi.vdev_param->rts_threshold, 0);
+       } else {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       if (ret) {
+               ath10k_warn("failed to simulate firmware crash: %d\n", ret);
+               goto exit;
+       }
 
-       if (ret == 0)
-               ret = count;
+       ret = count;
 
 exit:
        mutex_unlock(&ar->conf_mutex);
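
A minimal user-space sketch of exercising the new keywords, assuming the node
is exposed at the usual ath10k debugfs location (path and phy index are
assumptions; adjust for your system):

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                /* Assumed path based on the standard ath10k debugfs layout. */
                const char *path =
                        "/sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash";
                int fd = open(path, O_WRONLY);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                /* "soft" asks the firmware to assert; "hard" sends an illegal
                 * vdev parameter and forces a crash (see the comment above). */
                if (write(fd, "soft", strlen("soft")) < 0)
                        perror("write");

                close(fd);
                return 0;
        }
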
index 7f1bccd3597f1bb2a3b40d5a482f308e99e9e27c..5b58dbb174161a5ead5a8a5865fac70ba7e1b056 100644 (file)
@@ -157,6 +157,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
                        goto err_pull;
                }
                ep->tx_credits -= credits;
+               ath10k_dbg(ATH10K_DBG_HTC,
+                          "htc ep %d consumed %d credits (total %d)\n",
+                          eid, credits, ep->tx_credits);
                spin_unlock_bh(&htc->tx_lock);
        }
 
@@ -185,6 +188,9 @@ err_credits:
        if (ep->tx_credit_flow_enabled) {
                spin_lock_bh(&htc->tx_lock);
                ep->tx_credits += credits;
+               ath10k_dbg(ATH10K_DBG_HTC,
+                          "htc ep %d reverted %d credits back (total %d)\n",
+                          eid, credits, ep->tx_credits);
                spin_unlock_bh(&htc->tx_lock);
 
                if (ep->ep_ops.ep_tx_credits)
@@ -234,12 +240,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
                if (report->eid >= ATH10K_HTC_EP_COUNT)
                        break;
 
-               ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
-                          report->eid, report->credits);
-
                ep = &htc->endpoint[report->eid];
                ep->tx_credits += report->credits;
 
+               ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+                          report->eid, report->credits, ep->tx_credits);
+
                if (ep->ep_ops.ep_tx_credits) {
                        spin_unlock_bh(&htc->tx_lock);
                        ep->ep_ops.ep_tx_credits(htc->ar);
index 654867fc1ae73bbd7a13cf4dc61f8ac89a0b7823..645a563e3fb9675a5c545277cec88b68a757fff0 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/bug.h>
 #include <linux/interrupt.h>
 #include <linux/dmapool.h>
+#include <net/mac80211.h>
 
 #include "htc.h"
 #include "rx_desc.h"
@@ -1172,23 +1173,6 @@ struct htt_peer_unmap_event {
        u16 peer_id;
 };
 
-struct htt_rx_info {
-       struct sk_buff *skb;
-       enum htt_rx_mpdu_status status;
-       enum htt_rx_mpdu_encrypt_type encrypt_type;
-       s8 signal;
-       struct {
-               u8 info0;
-               u32 info1;
-               u32 info2;
-       } rate;
-
-       u32 tsf;
-       bool fcs_err;
-       bool amsdu_more;
-       bool mic_err;
-};
-
 struct ath10k_htt_txbuf {
        struct htt_data_tx_desc_frag frags[2];
        struct ath10k_htc_hdr htc_hdr;
@@ -1289,6 +1273,9 @@ struct ath10k_htt {
        struct tasklet_struct txrx_compl_task;
        struct sk_buff_head tx_compl_q;
        struct sk_buff_head rx_compl_q;
+
+       /* rx_status template */
+       struct ieee80211_rx_status rx_status;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
index cdcbe2de95f97d602cb086c301f0778aad5bc49c..f85a3cf6da3103d6f909b80715a1d0763fce7a39 100644 (file)
@@ -297,6 +297,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
        }
 }
 
+/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   u8 **fw_desc, int *fw_desc_len,
                                   struct sk_buff **head_msdu,
@@ -310,7 +311,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 
        if (htt->rx_confused) {
                ath10k_warn("htt is confused. refusing rx\n");
-               return 0;
+               return -1;
        }
 
        msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
@@ -442,6 +443,9 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
        }
        *tail_msdu = msdu;
 
+       if (*head_msdu == NULL)
+               msdu_chaining = -1;
+
        /*
         * Don't refill the ring yet.
         *
@@ -636,6 +640,190 @@ struct amsdu_subframe_hdr {
        __be16 len;
 } __packed;
 
+static const u8 rx_legacy_rate_idx[] = {
+       3,      /* 0x00  - 11Mbps  */
+       2,      /* 0x01  - 5.5Mbps */
+       1,      /* 0x02  - 2Mbps   */
+       0,      /* 0x03  - 1Mbps   */
+       3,      /* 0x04  - 11Mbps  */
+       2,      /* 0x05  - 5.5Mbps */
+       1,      /* 0x06  - 2Mbps   */
+       0,      /* 0x07  - 1Mbps   */
+       10,     /* 0x08  - 48Mbps  */
+       8,      /* 0x09  - 24Mbps  */
+       6,      /* 0x0A  - 12Mbps  */
+       4,      /* 0x0B  - 6Mbps   */
+       11,     /* 0x0C  - 54Mbps  */
+       9,      /* 0x0D  - 36Mbps  */
+       7,      /* 0x0E  - 18Mbps  */
+       5,      /* 0x0F  - 9Mbps   */
+};
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+                                 enum ieee80211_band band,
+                                 u8 info0, u32 info1, u32 info2,
+                                 struct ieee80211_rx_status *status)
+{
+       u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+       u8 preamble = 0;
+
+       /* Check if valid fields */
+       if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+               return;
+
+       preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+
+       switch (preamble) {
+       case HTT_RX_LEGACY:
+               cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+               rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+               rate_idx = 0;
+
+               if (rate < 0x08 || rate > 0x0F)
+                       break;
+
+               switch (band) {
+               case IEEE80211_BAND_2GHZ:
+                       if (cck)
+                               rate &= ~BIT(3);
+                       rate_idx = rx_legacy_rate_idx[rate];
+                       break;
+               case IEEE80211_BAND_5GHZ:
+                       rate_idx = rx_legacy_rate_idx[rate];
+                       /* We register the same rate table with the
+                          HW (ath10k_rates[]); on 5GHz the CCK rates
+                          are skipped, hence the -4 offset. */
+                       rate_idx -= 4;
+                       break;
+               default:
+                       break;
+               }
+
+               status->rate_idx = rate_idx;
+               break;
+       case HTT_RX_HT:
+       case HTT_RX_HT_WITH_TXBF:
+               /* HT-SIG - Table 20-11 in info1 and info2 */
+               mcs = info1 & 0x1F;
+               nss = mcs >> 3;
+               bw = (info1 >> 7) & 1;
+               sgi = (info2 >> 7) & 1;
+
+               status->rate_idx = mcs;
+               status->flag |= RX_FLAG_HT;
+               if (sgi)
+                       status->flag |= RX_FLAG_SHORT_GI;
+               if (bw)
+                       status->flag |= RX_FLAG_40MHZ;
+               break;
+       case HTT_RX_VHT:
+       case HTT_RX_VHT_WITH_TXBF:
+               /* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2
+                  TODO: check this */
+               mcs = (info2 >> 4) & 0x0F;
+               nss = ((info1 >> 10) & 0x07) + 1;
+               bw = info1 & 3;
+               sgi = info2 & 1;
+
+               status->rate_idx = mcs;
+               status->vht_nss = nss;
+
+               if (sgi)
+                       status->flag |= RX_FLAG_SHORT_GI;
+
+               switch (bw) {
+               /* 20MHZ */
+               case 0:
+                       break;
+               /* 40MHZ */
+               case 1:
+                       status->flag |= RX_FLAG_40MHZ;
+                       break;
+               /* 80MHZ */
+               case 2:
+                       status->vht_flag |= RX_VHT_FLAG_80MHZ;
+               }
+
+               status->flag |= RX_FLAG_VHT;
+               break;
+       default:
+               break;
+       }
+}
+
+static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
+                                     struct ieee80211_rx_status *rx_status,
+                                     struct sk_buff *skb,
+                                     enum htt_rx_mpdu_encrypt_type enctype)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+       if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) {
+               rx_status->flag &= ~(RX_FLAG_DECRYPTED |
+                                    RX_FLAG_IV_STRIPPED |
+                                    RX_FLAG_MMIC_STRIPPED);
+               return;
+       }
+
+       rx_status->flag |= RX_FLAG_DECRYPTED |
+                          RX_FLAG_IV_STRIPPED |
+                          RX_FLAG_MMIC_STRIPPED;
+       hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
+                                          ~IEEE80211_FCTL_PROTECTED);
+}
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+                                   struct ieee80211_rx_status *status)
+{
+       struct ieee80211_channel *ch;
+
+       spin_lock_bh(&ar->data_lock);
+       ch = ar->scan_channel;
+       if (!ch)
+               ch = ar->rx_channel;
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!ch)
+               return false;
+
+       status->band = ch->band;
+       status->freq = ch->center_freq;
+
+       return true;
+}
+
+static void ath10k_process_rx(struct ath10k *ar,
+                             struct ieee80211_rx_status *rx_status,
+                             struct sk_buff *skb)
+{
+       struct ieee80211_rx_status *status;
+
+       status = IEEE80211_SKB_RXCB(skb);
+       *status = *rx_status;
+
+       ath10k_dbg(ATH10K_DBG_DATA,
+                  "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i\n",
+                  skb,
+                  skb->len,
+                  status->flag == 0 ? "legacy" : "",
+                  status->flag & RX_FLAG_HT ? "ht" : "",
+                  status->flag & RX_FLAG_VHT ? "vht" : "",
+                  status->flag & RX_FLAG_40MHZ ? "40" : "",
+                  status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+                  status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+                  status->rate_idx,
+                  status->vht_nss,
+                  status->freq,
+                  status->band, status->flag,
+                  !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+                  !!(status->flag & RX_FLAG_MMIC_ERROR));
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+                       skb->data, skb->len);
+
+       ieee80211_rx(ar->hw, skb);
+}
+
 static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
 {
        /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
@@ -643,11 +831,12 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
 }
 
 static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
-                               struct htt_rx_info *info)
+                               struct ieee80211_rx_status *rx_status,
+                               struct sk_buff *skb_in)
 {
        struct htt_rx_desc *rxd;
+       struct sk_buff *skb = skb_in;
        struct sk_buff *first;
-       struct sk_buff *skb = info->skb;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
        struct ieee80211_hdr *hdr;
@@ -728,24 +917,27 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
                        break;
                }
 
-               info->skb = skb;
-               info->encrypt_type = enctype;
+               skb_in = skb;
+               ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype);
                skb = skb->next;
-               info->skb->next = NULL;
+               skb_in->next = NULL;
 
                if (skb)
-                       info->amsdu_more = true;
+                       rx_status->flag |= RX_FLAG_AMSDU_MORE;
+               else
+                       rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
 
-               ath10k_process_rx(htt->ar, info);
+               ath10k_process_rx(htt->ar, rx_status, skb_in);
        }
 
        /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
         * monitor interface active for sniffing purposes. */
 }
 
-static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
+                              struct ieee80211_rx_status *rx_status,
+                              struct sk_buff *skb)
 {
-       struct sk_buff *skb = info->skb;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum rx_msdu_decap_format fmt;
@@ -808,66 +1000,9 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
                break;
        }
 
-       info->skb = skb;
-       info->encrypt_type = enctype;
-
-       ath10k_process_rx(htt->ar, info);
-}
-
-static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
-{
-       struct htt_rx_desc *rxd;
-       u32 flags;
-
-       rxd = (void *)skb->data - sizeof(*rxd);
-       flags = __le32_to_cpu(rxd->attention.flags);
-
-       if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
-               return true;
-
-       return false;
-}
-
-static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
-{
-       struct htt_rx_desc *rxd;
-       u32 flags;
-
-       rxd = (void *)skb->data - sizeof(*rxd);
-       flags = __le32_to_cpu(rxd->attention.flags);
-
-       if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
-               return true;
-
-       return false;
-}
-
-static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
-{
-       struct htt_rx_desc *rxd;
-       u32 flags;
-
-       rxd = (void *)skb->data - sizeof(*rxd);
-       flags = __le32_to_cpu(rxd->attention.flags);
-
-       if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
-               return true;
-
-       return false;
-}
-
-static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
-{
-       struct htt_rx_desc *rxd;
-       u32 flags;
-
-       rxd = (void *)skb->data - sizeof(*rxd);
-       flags = __le32_to_cpu(rxd->attention.flags);
-
-       if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
-               return true;
+       ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype);
 
-       return false;
+       ath10k_process_rx(htt->ar, rx_status, skb);
 }
 
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -952,21 +1087,73 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
        return 0;
 }
 
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
+                                       struct sk_buff *head,
+                                       enum htt_rx_mpdu_status status,
+                                       bool channel_set,
+                                       u32 attention)
+{
+       if (head->len == 0) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "htt rx dropping due to zero-len\n");
+               return false;
+       }
+
+       if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "htt rx dropping due to decrypt-err\n");
+               return false;
+       }
+
+       if (!channel_set) {
+               ath10k_warn("no channel configured; ignoring frame!\n");
+               return false;
+       }
+
+       /* Skip mgmt frames while we handle this in WMI */
+       if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+           attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
+               ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+               return false;
+       }
+
+       if (status != HTT_RX_IND_MPDU_STATUS_OK &&
+           status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+           status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
+           !htt->ar->monitor_started) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "htt rx ignoring frame w/ status %d\n",
+                          status);
+               return false;
+       }
+
+       if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "htt rx CAC running\n");
+               return false;
+       }
+
+       return true;
+}
+
 static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
 {
-       struct htt_rx_info info;
+       struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
+       struct htt_rx_desc *rxd;
+       enum htt_rx_mpdu_status status;
        struct ieee80211_hdr *hdr;
        int num_mpdu_ranges;
+       u32 attention;
        int fw_desc_len;
        u8 *fw_desc;
+       bool channel_set;
        int i, j;
+       int ret;
 
        lockdep_assert_held(&htt->rx_ring.lock);
 
-       memset(&info, 0, sizeof(info));
-
        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
        fw_desc = (u8 *)&rx->fw_desc;
 
@@ -974,106 +1161,90 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
 
+       /* Fill this once, while this is per-ppdu */
+       if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
+               memset(rx_status, 0, sizeof(*rx_status));
+               rx_status->signal  = ATH10K_DEFAULT_NOISE_FLOOR +
+                                    rx->ppdu.combined_rssi;
+       }
+
+       if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+               /* TSF available only in 32-bit */
+               rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
+               rx_status->flag |= RX_FLAG_MACTIME_END;
+       }
+
+       channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
+
+       if (channel_set) {
+               ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
+                                     rx->ppdu.info0,
+                                     __le32_to_cpu(rx->ppdu.info1),
+                                     __le32_to_cpu(rx->ppdu.info2),
+                                     rx_status);
+       }
+
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
                        rx, sizeof(*rx) +
                        (sizeof(struct htt_rx_indication_mpdu_range) *
                                num_mpdu_ranges));
 
        for (i = 0; i < num_mpdu_ranges; i++) {
-               info.status = mpdu_ranges[i].mpdu_range_status;
+               status = mpdu_ranges[i].mpdu_range_status;
 
                for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
                        struct sk_buff *msdu_head, *msdu_tail;
-                       enum htt_rx_mpdu_status status;
-                       int msdu_chaining;
 
                        msdu_head = NULL;
                        msdu_tail = NULL;
-                       msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
-                                                        &fw_desc,
-                                                        &fw_desc_len,
-                                                        &msdu_head,
-                                                        &msdu_tail);
-
-                       if (!msdu_head) {
-                               ath10k_warn("htt rx no data!\n");
-                               continue;
-                       }
-
-                       if (msdu_head->len == 0) {
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx dropping due to zero-len\n");
-                               ath10k_htt_rx_free_msdu_chain(msdu_head);
-                               continue;
-                       }
-
-                       if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx dropping due to decrypt-err\n");
+                       ret = ath10k_htt_rx_amsdu_pop(htt,
+                                                     &fw_desc,
+                                                     &fw_desc_len,
+                                                     &msdu_head,
+                                                     &msdu_tail);
+
+                       if (ret < 0) {
+                               ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
+                                           ret);
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
 
-                       status = info.status;
-
-                       /* Skip mgmt frames while we handle this in WMI */
-                       if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
-                           ath10k_htt_rx_is_mgmt(msdu_head)) {
-                               ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
-                               ath10k_htt_rx_free_msdu_chain(msdu_head);
-                               continue;
-                       }
+                       rxd = container_of((void *)msdu_head->data,
+                                          struct htt_rx_desc,
+                                          msdu_payload);
+                       attention = __le32_to_cpu(rxd->attention.flags);
 
-                       if (status != HTT_RX_IND_MPDU_STATUS_OK &&
-                           status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
-                           status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
-                           !htt->ar->monitor_enabled) {
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx ignoring frame w/ status %d\n",
-                                          status);
+                       if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
+                                                        status,
+                                                        channel_set,
+                                                        attention)) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
 
-                       if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx CAC running\n");
+                       if (ret > 0 &&
+                           ath10k_unchain_msdu(msdu_head) < 0) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
 
-                       if (msdu_chaining &&
-                           (ath10k_unchain_msdu(msdu_head) < 0)) {
-                               ath10k_htt_rx_free_msdu_chain(msdu_head);
-                               continue;
-                       }
-
-                       info.skb     = msdu_head;
-                       info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
-                       info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
-
-                       if (info.fcs_err)
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx has FCS err\n");
-
-                       if (info.mic_err)
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx has MIC err\n");
-
-                       info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
-                       info.signal += rx->ppdu.combined_rssi;
+                       if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
+                               rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+                       else
+                               rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
 
-                       info.rate.info0 = rx->ppdu.info0;
-                       info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
-                       info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
-                       info.tsf = __le32_to_cpu(rx->ppdu.tsf);
+                       if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
+                               rx_status->flag |= RX_FLAG_MMIC_ERROR;
+                       else
+                               rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
 
                        hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
 
                        if (ath10k_htt_rx_hdr_is_amsdu(hdr))
-                               ath10k_htt_rx_amsdu(htt, &info);
+                               ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
                        else
-                               ath10k_htt_rx_msdu(htt, &info);
+                               ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
                }
        }
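
The reworked handler recovers the HTT RX descriptor by walking back from msdu_head->data to the struct that contains it, then reads the attention flags straight from that descriptor and mirrors the FCS/MIC error bits into mac80211's rx_status. A minimal, self-contained sketch of the container_of() pattern involved (simplified stand-in types, not the driver's actual descriptor layout):

#include <stddef.h>
#include <stdio.h>

/* same arithmetic as the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* stand-in for struct htt_rx_desc: attention word followed by payload */
struct fake_rx_desc {
	unsigned int attention_flags;
	unsigned char msdu_payload[64];	/* skb->data points here */
};

int main(void)
{
	struct fake_rx_desc desc = { .attention_flags = 0x5 };
	unsigned char *skb_data = desc.msdu_payload;

	/* walk back from the payload pointer to the enclosing descriptor,
	 * as the patch does with (void *)msdu_head->data */
	struct fake_rx_desc *rxd =
		container_of((void *)skb_data, struct fake_rx_desc, msdu_payload);

	printf("attention flags: 0x%x\n", rxd->attention_flags);
	return 0;
}
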
 
@@ -1084,11 +1255,12 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                                struct htt_rx_fragment_indication *frag)
 {
        struct sk_buff *msdu_head, *msdu_tail;
+       enum htt_rx_mpdu_encrypt_type enctype;
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;
-       struct htt_rx_info info = {};
+       struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct ieee80211_hdr *hdr;
-       int msdu_chaining;
+       int ret;
        bool tkip_mic_err;
        bool decrypt_err;
        u8 *fw_desc;
@@ -1102,19 +1274,15 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
        msdu_tail = NULL;
 
        spin_lock_bh(&htt->rx_ring.lock);
-       msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
-                                               &msdu_head, &msdu_tail);
+       ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+                                     &msdu_head, &msdu_tail);
        spin_unlock_bh(&htt->rx_ring.lock);
 
        ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
 
-       if (!msdu_head) {
-               ath10k_warn("htt rx frag no data\n");
-               return;
-       }
-
-       if (msdu_chaining || msdu_head != msdu_tail) {
-               ath10k_warn("aggregation with fragmentation?!\n");
+       if (ret) {
+               ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
+                           ret);
                ath10k_htt_rx_free_msdu_chain(msdu_head);
                return;
        }
@@ -1136,57 +1304,54 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                goto end;
        }
 
-       info.skb = msdu_head;
-       info.status = HTT_RX_IND_MPDU_STATUS_OK;
-       info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
-                               RX_MPDU_START_INFO0_ENCRYPT_TYPE);
-       info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
+       enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                    RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+       ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype);
+       msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
 
-       if (tkip_mic_err) {
+       if (tkip_mic_err)
                ath10k_warn("tkip mic error\n");
-               info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
-       }
 
        if (decrypt_err) {
                ath10k_warn("decryption err in fragmented rx\n");
-               dev_kfree_skb_any(info.skb);
+               dev_kfree_skb_any(msdu_head);
                goto end;
        }
 
-       if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+       if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
                hdrlen = ieee80211_hdrlen(hdr->frame_control);
-               paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+               paramlen = ath10k_htt_rx_crypto_param_len(enctype);
 
                /* It is more efficient to move the header than the payload */
-               memmove((void *)info.skb->data + paramlen,
-                       (void *)info.skb->data,
+               memmove((void *)msdu_head->data + paramlen,
+                       (void *)msdu_head->data,
                        hdrlen);
-               skb_pull(info.skb, paramlen);
-               hdr = (struct ieee80211_hdr *)info.skb->data;
+               skb_pull(msdu_head, paramlen);
+               hdr = (struct ieee80211_hdr *)msdu_head->data;
        }
 
        /* remove trailing FCS */
        trim  = 4;
 
        /* remove crypto trailer */
-       trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+       trim += ath10k_htt_rx_crypto_tail_len(enctype);
 
        /* last fragment of TKIP frags has MIC */
        if (!ieee80211_has_morefrags(hdr->frame_control) &&
-           info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+           enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                trim += 8;
 
-       if (trim > info.skb->len) {
+       if (trim > msdu_head->len) {
                ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
-               dev_kfree_skb_any(info.skb);
+               dev_kfree_skb_any(msdu_head);
                goto end;
        }
 
-       skb_trim(info.skb, info.skb->len - trim);
+       skb_trim(msdu_head, msdu_head->len - trim);
 
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
-                       info.skb->data, info.skb->len);
-       ath10k_process_rx(htt->ar, &info);
+                       msdu_head->data, msdu_head->len);
+       ath10k_process_rx(htt->ar, rx_status, msdu_head);
 
 end:
        if (fw_desc_len > 0) {
index 35fc44e281f57968171283d7d336cce5b20eddac..007e855f4ba99f9067725a11b85fdeadb3412483 100644 (file)
@@ -28,6 +28,7 @@
 #define QCA988X_HW_2_0_CHIP_ID_REV     0x2
 #define QCA988X_HW_2_0_FW_DIR          "ath10k/QCA988X/hw2.0"
 #define QCA988X_HW_2_0_FW_FILE         "firmware.bin"
+#define QCA988X_HW_2_0_FW_2_FILE       "firmware-2.bin"
 #define QCA988X_HW_2_0_OTP_FILE                "otp.bin"
 #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
 #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
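
The new QCA988X_HW_2_0_FW_2_FILE define names an alternative "firmware-2.bin" image alongside the existing "firmware.bin"; presumably the loader can then try the newer image first and fall back to the old one. A hedged sketch of that try-then-fall-back flow using the standard request_firmware() interface (the helper name, the paths built from the defines above, and the error handling are illustrative, not the driver's actual fetch code):

#include <linux/device.h>
#include <linux/firmware.h>

static int example_fetch_fw(struct device *dev, const struct firmware **fw)
{
	int ret;

	/* prefer the newer image under QCA988X_HW_2_0_FW_DIR */
	ret = request_firmware(fw, "ath10k/QCA988X/hw2.0/firmware-2.bin", dev);
	if (ret == 0)
		return 0;

	/* fall back to the original single blob */
	return request_firmware(fw, "ath10k/QCA988X/hw2.0/firmware.bin", dev);
}
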
index 511a2f81e7afc190419623235cdbefe9a66e4039..7026f021ccbb00240aab98d91a59f4b8fe4bea78 100644 (file)
@@ -165,7 +165,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
                        first_errno = ret;
 
                if (ret)
-                       ath10k_warn("could not remove peer wep key %d (%d)\n",
+                       ath10k_warn("failed to remove peer wep key %d: %d\n",
                                    i, ret);
 
                peer->keys[i] = NULL;
@@ -213,7 +213,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
                        first_errno = ret;
 
                if (ret)
-                       ath10k_warn("could not remove key for %pM\n", addr);
+                       ath10k_warn("failed to remove key for %pM: %d\n",
+                                   addr, ret);
        }
 
        return first_errno;
@@ -323,14 +324,14 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 
        ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
        if (ret) {
-               ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
+               ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
                            addr, vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
        if (ret) {
-               ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
+               ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
                            addr, vdev_id, ret);
                return ret;
        }
@@ -351,7 +352,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
        ret = ath10k_wmi_pdev_set_param(ar, param,
                                        ATH10K_KICKOUT_THRESHOLD);
        if (ret) {
-               ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
+               ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -360,7 +361,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
                                        ATH10K_KEEPALIVE_MIN_IDLE);
        if (ret) {
-               ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
+               ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -369,7 +370,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
                                        ATH10K_KEEPALIVE_MAX_IDLE);
        if (ret) {
-               ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
+               ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -378,7 +379,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
                                        ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
        if (ret) {
-               ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+               ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -488,92 +489,20 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
        return 0;
 }
 
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
+static bool ath10k_monitor_is_enabled(struct ath10k *ar)
 {
-       struct ath10k *ar = arvif->ar;
-       struct cfg80211_chan_def *chandef = &ar->chandef;
-       struct wmi_vdev_start_request_arg arg = {};
-       int ret = 0;
-
        lockdep_assert_held(&ar->conf_mutex);
 
-       reinit_completion(&ar->vdev_setup_done);
-
-       arg.vdev_id = arvif->vdev_id;
-       arg.dtim_period = arvif->dtim_period;
-       arg.bcn_intval = arvif->beacon_interval;
-
-       arg.channel.freq = chandef->chan->center_freq;
-       arg.channel.band_center_freq1 = chandef->center_freq1;
-       arg.channel.mode = chan_to_phymode(chandef);
-
-       arg.channel.min_power = 0;
-       arg.channel.max_power = chandef->chan->max_power * 2;
-       arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
-       arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
-
-       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-               arg.ssid = arvif->u.ap.ssid;
-               arg.ssid_len = arvif->u.ap.ssid_len;
-               arg.hidden_ssid = arvif->u.ap.hidden_ssid;
-
-               /* For now allow DFS for AP mode */
-               arg.channel.chan_radar =
-                       !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
-       } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
-               arg.ssid = arvif->vif->bss_conf.ssid;
-               arg.ssid_len = arvif->vif->bss_conf.ssid_len;
-       }
-
        ath10k_dbg(ATH10K_DBG_MAC,
-                  "mac vdev %d start center_freq %d phymode %s\n",
-                  arg.vdev_id, arg.channel.freq,
-                  ath10k_wmi_phymode_str(arg.channel.mode));
-
-       ret = ath10k_wmi_vdev_start(ar, &arg);
-       if (ret) {
-               ath10k_warn("WMI vdev %i start failed: ret %d\n",
-                           arg.vdev_id, ret);
-               return ret;
-       }
-
-       ret = ath10k_vdev_setup_sync(ar);
-       if (ret) {
-               ath10k_warn("vdev %i setup failed %d\n",
-                           arg.vdev_id, ret);
-               return ret;
-       }
+                  "mac monitor refs: promisc %d monitor %d cac %d\n",
+                  ar->promisc, ar->monitor,
+                  test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
 
-       return ret;
+       return ar->promisc || ar->monitor ||
+              test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
 }
 
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
-{
-       struct ath10k *ar = arvif->ar;
-       int ret;
-
-       lockdep_assert_held(&ar->conf_mutex);
-
-       reinit_completion(&ar->vdev_setup_done);
-
-       ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
-       if (ret) {
-               ath10k_warn("WMI vdev %i stop failed: ret %d\n",
-                           arvif->vdev_id, ret);
-               return ret;
-       }
-
-       ret = ath10k_vdev_setup_sync(ar);
-       if (ret) {
-               ath10k_warn("vdev %i setup sync failed %d\n",
-                           arvif->vdev_id, ret);
-               return ret;
-       }
-
-       return ret;
-}
-
-static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
+static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 {
        struct cfg80211_chan_def *chandef = &ar->chandef;
        struct ieee80211_channel *channel = chandef->chan;
@@ -582,11 +511,6 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (!ar->monitor_present) {
-               ath10k_warn("mac montor stop -- monitor is not present\n");
-               return -EINVAL;
-       }
-
        arg.vdev_id = vdev_id;
        arg.channel.freq = channel->center_freq;
        arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -604,88 +528,75 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 
        ret = ath10k_wmi_vdev_start(ar, &arg);
        if (ret) {
-               ath10k_warn("Monitor vdev %i start failed: ret %d\n",
+               ath10k_warn("failed to request monitor vdev %i start: %d\n",
                            vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn("Monitor vdev %i setup failed %d\n",
+               ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
                            vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
        if (ret) {
-               ath10k_warn("Monitor vdev %i up failed: %d\n",
+               ath10k_warn("failed to put up monitor vdev %i: %d\n",
                            vdev_id, ret);
                goto vdev_stop;
        }
 
        ar->monitor_vdev_id = vdev_id;
-       ar->monitor_enabled = true;
 
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+                  ar->monitor_vdev_id);
        return 0;
 
 vdev_stop:
        ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
        if (ret)
-               ath10k_warn("Monitor vdev %i stop failed: %d\n",
+               ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
                            ar->monitor_vdev_id, ret);
 
        return ret;
 }
 
-static int ath10k_monitor_stop(struct ath10k *ar)
+static int ath10k_monitor_vdev_stop(struct ath10k *ar)
 {
        int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (!ar->monitor_present) {
-               ath10k_warn("mac montor stop -- monitor is not present\n");
-               return -EINVAL;
-       }
-
-       if (!ar->monitor_enabled) {
-               ath10k_warn("mac montor stop -- monitor is not enabled\n");
-               return -EINVAL;
-       }
-
        ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
        if (ret)
-               ath10k_warn("Monitor vdev %i down failed: %d\n",
+               ath10k_warn("failed to put down monitor vdev %i: %d\n",
                            ar->monitor_vdev_id, ret);
 
        ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
        if (ret)
-               ath10k_warn("Monitor vdev %i stop failed: %d\n",
+               ath10k_warn("failed to to request monitor vdev %i stop: %d\n",
                            ar->monitor_vdev_id, ret);
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret)
-               ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
+               ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
                            ar->monitor_vdev_id, ret);
 
-       ar->monitor_enabled = false;
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+                  ar->monitor_vdev_id);
        return ret;
 }
 
-static int ath10k_monitor_create(struct ath10k *ar)
+static int ath10k_monitor_vdev_create(struct ath10k *ar)
 {
        int bit, ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (ar->monitor_present) {
-               ath10k_warn("Monitor mode already enabled\n");
-               return 0;
-       }
-
        bit = ffs(ar->free_vdev_map);
        if (bit == 0) {
-               ath10k_warn("No free VDEV slots\n");
+               ath10k_warn("failed to find free vdev id for monitor vdev\n");
                return -ENOMEM;
        }
 
@@ -696,7 +607,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
                                     WMI_VDEV_TYPE_MONITOR,
                                     0, ar->mac_addr);
        if (ret) {
-               ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
+               ath10k_warn("failed to request monitor vdev %i creation: %d\n",
                            ar->monitor_vdev_id, ret);
                goto vdev_fail;
        }
@@ -704,7 +615,6 @@ static int ath10k_monitor_create(struct ath10k *ar)
        ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
                   ar->monitor_vdev_id);
 
-       ar->monitor_present = true;
        return 0;
 
 vdev_fail:
@@ -715,48 +625,123 @@ vdev_fail:
        return ret;
 }
 
-static int ath10k_monitor_destroy(struct ath10k *ar)
+static int ath10k_monitor_vdev_delete(struct ath10k *ar)
 {
        int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (!ar->monitor_present)
-               return 0;
-
        ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
        if (ret) {
-               ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
+               ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n",
                            ar->monitor_vdev_id, ret);
                return ret;
        }
 
        ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
-       ar->monitor_present = false;
 
        ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
                   ar->monitor_vdev_id);
        return ret;
 }
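
Monitor vdev ids come out of the ar->free_vdev_map bitmap: ffs() returns the lowest set bit (1-based, so 0 means no slot is free) when the vdev is created, and deletion ORs the bit back in, as seen above. A small standalone illustration of that allocate/release pattern (plain C stand-ins, not driver state):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int free_vdev_map = 0xff;	/* eight free vdev slots */
	int bit, vdev_id;

	bit = ffs(free_vdev_map);
	if (bit == 0) {
		puts("no free vdev slots");	/* the -ENOMEM path above */
		return 1;
	}

	vdev_id = bit - 1;			/* ids are zero-based */
	free_vdev_map &= ~(1u << vdev_id);	/* claim the slot */
	printf("allocated vdev id %d\n", vdev_id);

	free_vdev_map |= 1u << vdev_id;		/* release it on delete */
	return 0;
}
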
 
-static int ath10k_start_cac(struct ath10k *ar)
+static int ath10k_monitor_start(struct ath10k *ar)
 {
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+       if (!ath10k_monitor_is_enabled(ar)) {
+               ath10k_warn("trying to start monitor with no references\n");
+               return 0;
+       }
 
-       ret = ath10k_monitor_create(ar);
+       if (ar->monitor_started) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
+               return 0;
+       }
+
+       ret = ath10k_monitor_vdev_create(ar);
        if (ret) {
-               clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+               ath10k_warn("failed to create monitor vdev: %d\n", ret);
                return ret;
        }
 
-       ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+       ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
+       if (ret) {
+               ath10k_warn("failed to start monitor vdev: %d\n", ret);
+               ath10k_monitor_vdev_delete(ar);
+               return ret;
+       }
+
+       ar->monitor_started = true;
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
+
+       return 0;
+}
+
+static void ath10k_monitor_stop(struct ath10k *ar)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (ath10k_monitor_is_enabled(ar)) {
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac monitor will be stopped later\n");
+               return;
+       }
+
+       if (!ar->monitor_started) {
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac monitor probably failed to start earlier\n");
+               return;
+       }
+
+       ret = ath10k_monitor_vdev_stop(ar);
+       if (ret)
+               ath10k_warn("failed to stop monitor vdev: %d\n", ret);
+
+       ret = ath10k_monitor_vdev_delete(ar);
+       if (ret)
+               ath10k_warn("failed to delete monitor vdev: %d\n", ret);
+
+       ar->monitor_started = false;
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
+}
+
+static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       u32 vdev_param, rts_cts = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       vdev_param = ar->wmi.vdev_param->enable_rtscts;
+
+       if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
+               rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+
+       if (arvif->num_legacy_stations > 0)
+               rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
+                             WMI_RTSCTS_PROFILE);
+
+       return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+                                        rts_cts);
+}
+
+static int ath10k_start_cac(struct ath10k *ar)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+       ret = ath10k_monitor_start(ar);
        if (ret) {
+               ath10k_warn("failed to start monitor (cac): %d\n", ret);
                clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
-               ath10k_monitor_destroy(ar);
                return ret;
        }
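
The patch replaces the old monitor_present/monitor_enabled pair with a reference-style model: promiscuous mode, the monitor interface and a running CAC each hold an implicit reference, ath10k_monitor_start() brings the vdev up on the first reference, and ath10k_monitor_stop() only tears it down once no reference remains. A self-contained model of that behaviour (plain C, a deliberately simplified stand-in for the driver's flags and locking):

#include <stdbool.h>
#include <stdio.h>

struct state {
	bool promisc, monitor, cac_running;	/* the "references" */
	bool monitor_started;			/* actual monitor vdev state */
};

static bool monitor_is_enabled(const struct state *s)
{
	return s->promisc || s->monitor || s->cac_running;
}

static void monitor_recalc(struct state *s)
{
	if (monitor_is_enabled(s) && !s->monitor_started) {
		s->monitor_started = true;	/* create + start the vdev */
		puts("monitor started");
	} else if (!monitor_is_enabled(s) && s->monitor_started) {
		s->monitor_started = false;	/* stop + delete the vdev */
		puts("monitor stopped");
	}
}

int main(void)
{
	struct state s = { false, false, false, false };

	s.promisc = true;	monitor_recalc(&s);	/* first ref: starts   */
	s.cac_running = true;	monitor_recalc(&s);	/* already running     */
	s.promisc = false;	monitor_recalc(&s);	/* CAC still holds it  */
	s.cac_running = false;	monitor_recalc(&s);	/* last ref gone: stop */
	return 0;
}
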
 
@@ -774,58 +759,26 @@ static int ath10k_stop_cac(struct ath10k *ar)
        if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
                return 0;
 
-       ath10k_monitor_stop(ar);
-       ath10k_monitor_destroy(ar);
        clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+       ath10k_monitor_stop(ar);
 
        ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
 
        return 0;
 }
 
-static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state)
+static void ath10k_recalc_radar_detection(struct ath10k *ar)
 {
-       switch (dfs_state) {
-       case NL80211_DFS_USABLE:
-               return "USABLE";
-       case NL80211_DFS_UNAVAILABLE:
-               return "UNAVAILABLE";
-       case NL80211_DFS_AVAILABLE:
-               return "AVAILABLE";
-       default:
-               WARN_ON(1);
-               return "bug";
-       }
-}
-
-static void ath10k_config_radar_detection(struct ath10k *ar)
-{
-       struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
-       bool radar = ar->hw->conf.radar_enabled;
-       bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
-       enum nl80211_dfs_state dfs_state = chan->dfs_state;
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       ath10k_dbg(ATH10K_DBG_MAC,
-                  "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
-                  chan->center_freq, radar, chan_radar,
-                  ath10k_dfs_state(dfs_state));
-
-       /*
-        * It's safe to call it even if CAC is not started.
-        * This call here guarantees changing channel, etc. will stop CAC.
-        */
        ath10k_stop_cac(ar);
 
-       if (!radar)
+       if (!ar->radar_enabled)
                return;
 
-       if (!chan_radar)
-               return;
-
-       if (dfs_state != NL80211_DFS_USABLE)
+       if (ar->num_started_vdevs > 0)
                return;
 
        ret = ath10k_start_cac(ar);
@@ -835,11 +788,106 @@ static void ath10k_config_radar_detection(struct ath10k *ar)
                 * radiation is not allowed, make this channel DFS_UNAVAILABLE
                 * by indicating that radar was detected.
                 */
-               ath10k_warn("failed to start CAC (%d)\n", ret);
+               ath10k_warn("failed to start CAC: %d\n", ret);
                ieee80211_radar_detected(ar->hw);
        }
 }
 
+static int ath10k_vdev_start(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       struct cfg80211_chan_def *chandef = &ar->chandef;
+       struct wmi_vdev_start_request_arg arg = {};
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       reinit_completion(&ar->vdev_setup_done);
+
+       arg.vdev_id = arvif->vdev_id;
+       arg.dtim_period = arvif->dtim_period;
+       arg.bcn_intval = arvif->beacon_interval;
+
+       arg.channel.freq = chandef->chan->center_freq;
+       arg.channel.band_center_freq1 = chandef->center_freq1;
+       arg.channel.mode = chan_to_phymode(chandef);
+
+       arg.channel.min_power = 0;
+       arg.channel.max_power = chandef->chan->max_power * 2;
+       arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+       arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               arg.ssid = arvif->u.ap.ssid;
+               arg.ssid_len = arvif->u.ap.ssid_len;
+               arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+               /* For now allow DFS for AP mode */
+               arg.channel.chan_radar =
+                       !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+       } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+               arg.ssid = arvif->vif->bss_conf.ssid;
+               arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+       }
+
+       ath10k_dbg(ATH10K_DBG_MAC,
+                  "mac vdev %d start center_freq %d phymode %s\n",
+                  arg.vdev_id, arg.channel.freq,
+                  ath10k_wmi_phymode_str(arg.channel.mode));
+
+       ret = ath10k_wmi_vdev_start(ar, &arg);
+       if (ret) {
+               ath10k_warn("failed to start WMI vdev %i: %d\n",
+                           arg.vdev_id, ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
+                           arg.vdev_id, ret);
+               return ret;
+       }
+
+       ar->num_started_vdevs++;
+       ath10k_recalc_radar_detection(ar);
+
+       return ret;
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       reinit_completion(&ar->vdev_setup_done);
+
+       ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+       if (ret) {
+               ath10k_warn("failed to stop WMI vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("failed to syncronise setup for vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       WARN_ON(ar->num_started_vdevs == 0);
+
+       if (ar->num_started_vdevs != 0) {
+               ar->num_started_vdevs--;
+               ath10k_recalc_radar_detection(ar);
+       }
+
+       return ret;
+}
+
 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
                                struct ieee80211_bss_conf *info)
 {
@@ -880,7 +928,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
        ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
                                 arvif->bssid);
        if (ret) {
-               ath10k_warn("Failed to bring up vdev %d: %i\n",
+               ath10k_warn("failed to bring up vdev %d: %i\n",
                            arvif->vdev_id, ret);
                ath10k_vdev_stop(arvif);
                return;
@@ -904,7 +952,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
        if (!info->ibss_joined) {
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
                if (ret)
-                       ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
+                       ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
                                    self_peer, arvif->vdev_id, ret);
 
                if (is_zero_ether_addr(arvif->bssid))
@@ -913,7 +961,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
                                         arvif->bssid);
                if (ret) {
-                       ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
+                       ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
                                    arvif->bssid, arvif->vdev_id, ret);
                        return;
                }
@@ -925,7 +973,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
 
        ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
        if (ret) {
-               ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n",
+               ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
                            self_peer, arvif->vdev_id, ret);
                return;
        }
@@ -934,7 +982,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
        ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
                                        ATH10K_DEFAULT_ATIM);
        if (ret)
-               ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
+               ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
                            arvif->vdev_id, ret);
 }
 
@@ -961,7 +1009,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
                                                  conf->dynamic_ps_timeout);
                if (ret) {
-                       ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
+                       ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
                        return ret;
                }
@@ -974,8 +1022,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 
        ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
        if (ret) {
-               ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
-                           psmode, arvif->vdev_id);
+               ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
+                           psmode, arvif->vdev_id, ret);
                return ret;
        }
 
@@ -1429,7 +1477,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
        ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
        if (!ap_sta) {
-               ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
+               ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
                            bss_conf->bssid, arvif->vdev_id);
                rcu_read_unlock();
                return;
@@ -1442,7 +1490,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
        ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
                                        bss_conf, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
+               ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
                            bss_conf->bssid, arvif->vdev_id, ret);
                rcu_read_unlock();
                return;
@@ -1452,7 +1500,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
        ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
+               ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
                            bss_conf->bssid, arvif->vdev_id, ret);
                return;
        }
@@ -1473,7 +1521,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
        ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
        if (ret) {
-               ath10k_warn("VDEV: %d up failed: ret %d\n",
+               ath10k_warn("failed to set vdev %d up: %d\n",
                            arvif->vdev_id, ret);
                return;
        }
@@ -1524,7 +1572,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
 }
 
 static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
-                               struct ieee80211_sta *sta)
+                               struct ieee80211_sta *sta, bool reassoc)
 {
        struct wmi_peer_assoc_complete_arg peer_arg;
        int ret = 0;
@@ -1533,34 +1581,46 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
 
        ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
        if (ret) {
-               ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
+               ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
                            sta->addr, arvif->vdev_id, ret);
                return ret;
        }
 
+       peer_arg.peer_reassoc = reassoc;
        ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
+               ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
                            sta->addr, arvif->vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
        if (ret) {
-               ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
+               ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
+                           arvif->vdev_id, ret);
                return ret;
        }
 
+       if (!sta->wme) {
+               arvif->num_legacy_stations++;
+               ret  = ath10k_recalc_rtscts_prot(arvif);
+               if (ret) {
+                       ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
+       }
+
        ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
        if (ret) {
-               ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
+               ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
        if (ret) {
-               ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
+               ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
                            sta->addr, arvif->vdev_id, ret);
                return ret;
        }
@@ -1575,9 +1635,19 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
 
        lockdep_assert_held(&ar->conf_mutex);
 
+       if (!sta->wme) {
+               arvif->num_legacy_stations--;
+               ret = ath10k_recalc_rtscts_prot(arvif);
+               if (ret) {
+                       ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
+       }
+
        ret = ath10k_clear_peer_keys(arvif, sta->addr);
        if (ret) {
-               ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
+               ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
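
Association and disassociation now keep a per-vif count of legacy (non-WME) stations, and every change re-runs ath10k_recalc_rtscts_prot() so RTS/CTS protection is enabled whenever CTS protection is requested or at least one legacy peer is present. A standalone sketch of that bookkeeping (plain C stand-ins, no WMI calls):

#include <stdbool.h>
#include <stdio.h>

struct vif {
	bool use_cts_prot;
	int num_legacy_stations;
};

/* mirrors the predicate in ath10k_recalc_rtscts_prot() above */
static bool needs_rtscts(const struct vif *v)
{
	return v->use_cts_prot || v->num_legacy_stations > 0;
}

static void station_assoc(struct vif *v, bool wme)
{
	if (!wme)
		v->num_legacy_stations++;
	printf("assoc:    rts/cts %s\n", needs_rtscts(v) ? "on" : "off");
}

static void station_disassoc(struct vif *v, bool wme)
{
	if (!wme)
		v->num_legacy_stations--;
	printf("disassoc: rts/cts %s\n", needs_rtscts(v) ? "on" : "off");
}

int main(void)
{
	struct vif v = { false, 0 };

	station_assoc(&v, false);	/* legacy peer joins  -> protection on  */
	station_disassoc(&v, false);	/* last legacy leaves -> protection off */
	return 0;
}
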
@@ -1685,19 +1755,44 @@ static int ath10k_update_channel_list(struct ath10k *ar)
        return ret;
 }
 
+static enum wmi_dfs_region
+ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
+{
+       switch (dfs_region) {
+       case NL80211_DFS_UNSET:
+               return WMI_UNINIT_DFS_DOMAIN;
+       case NL80211_DFS_FCC:
+               return WMI_FCC_DFS_DOMAIN;
+       case NL80211_DFS_ETSI:
+               return WMI_ETSI_DFS_DOMAIN;
+       case NL80211_DFS_JP:
+               return WMI_MKK4_DFS_DOMAIN;
+       }
+       return WMI_UNINIT_DFS_DOMAIN;
+}
+
 static void ath10k_regd_update(struct ath10k *ar)
 {
        struct reg_dmn_pair_mapping *regpair;
        int ret;
+       enum wmi_dfs_region wmi_dfs_reg;
+       enum nl80211_dfs_regions nl_dfs_reg;
 
        lockdep_assert_held(&ar->conf_mutex);
 
        ret = ath10k_update_channel_list(ar);
        if (ret)
-               ath10k_warn("could not update channel list (%d)\n", ret);
+               ath10k_warn("failed to update channel list: %d\n", ret);
 
        regpair = ar->ath_common.regulatory.regpair;
 
+       if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+               nl_dfs_reg = ar->dfs_detector->region;
+               wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
+       } else {
+               wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
+       }
+
        /* Target allows setting up per-band regdomain but ath_common provides
         * a combined one only */
        ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1705,9 +1800,10 @@ static void ath10k_regd_update(struct ath10k *ar)
                                            regpair->reg_domain, /* 2ghz */
                                            regpair->reg_domain, /* 5ghz */
                                            regpair->reg_2ghz_ctl,
-                                           regpair->reg_5ghz_ctl);
+                                           regpair->reg_5ghz_ctl,
+                                           wmi_dfs_reg);
        if (ret)
-               ath10k_warn("could not set pdev regdomain (%d)\n", ret);
+               ath10k_warn("failed to set pdev regdomain: %d\n", ret);
 }
 
 static void ath10k_reg_notifier(struct wiphy *wiphy,
@@ -1725,7 +1821,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
                result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
                                                          request->dfs_region);
                if (!result)
-                       ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n",
+                       ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
                                    request->dfs_region);
        }
 
@@ -1759,10 +1855,10 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
        if (info->control.vif)
                return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
 
-       if (ar->monitor_enabled)
+       if (ar->monitor_started)
                return ar->monitor_vdev_id;
 
-       ath10k_warn("could not resolve vdev id\n");
+       ath10k_warn("failed to resolve vdev id\n");
        return 0;
 }
 
@@ -1803,7 +1899,9 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
                                        arvif->ar->wmi.vdev_param->def_keyid,
                                        keyidx);
        if (ret) {
-               ath10k_warn("could not update wep keyidx (%d)\n", ret);
+               ath10k_warn("failed to update wep key index for vdev %d: %d\n",
+                           arvif->vdev_id,
+                           ret);
                return;
        }
 
@@ -1879,7 +1977,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
                             ar->fw_features)) {
                        if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
                            ATH10K_MAX_NUM_MGMT_PENDING) {
-                               ath10k_warn("wmi mgmt_tx queue limit reached\n");
+                               ath10k_warn("reached WMI management tranmist queue limit\n");
                                ret = -EBUSY;
                                goto exit;
                        }
@@ -1903,7 +2001,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
 
 exit:
        if (ret) {
-               ath10k_warn("tx failed (%d). dropping packet.\n", ret);
+               ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
                ieee80211_free_txskb(ar->hw, skb);
        }
 }
@@ -1964,7 +2062,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                if (!peer) {
                        ret = ath10k_peer_create(ar, vdev_id, peer_addr);
                        if (ret)
-                               ath10k_warn("peer %pM on vdev %d not created (%d)\n",
+                               ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
                                            peer_addr, vdev_id, ret);
                }
 
@@ -1984,7 +2082,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                if (!peer) {
                        ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
                        if (ret)
-                               ath10k_warn("peer %pM on vdev %d not deleted (%d)\n",
+                               ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
                                            peer_addr, vdev_id, ret);
                }
 
@@ -2018,7 +2116,8 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
 
                ret = ath10k_wmi_mgmt_tx(ar, skb);
                if (ret) {
-                       ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+                       ath10k_warn("failed to transmit management frame via WMI: %d\n",
+                                   ret);
                        ieee80211_free_txskb(ar->hw, skb);
                }
        }
@@ -2043,7 +2142,7 @@ void ath10k_reset_scan(unsigned long ptr)
                return;
        }
 
-       ath10k_warn("scan timeout. resetting. fw issue?\n");
+       ath10k_warn("scan timed out, firmware problem?\n");
 
        if (ar->scan.is_roc)
                ieee80211_remain_on_channel_expired(ar->hw);
@@ -2079,7 +2178,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
 
        ret = ath10k_wmi_stop_scan(ar, &arg);
        if (ret) {
-               ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+               ath10k_warn("failed to stop wmi scan: %d\n", ret);
                spin_lock_bh(&ar->data_lock);
                ar->scan.in_progress = false;
                ath10k_offchan_tx_purge(ar);
@@ -2099,7 +2198,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
 
        spin_lock_bh(&ar->data_lock);
        if (ar->scan.in_progress) {
-               ath10k_warn("could not stop scan. its still in progress\n");
+               ath10k_warn("failed to stop scan, it's still in progress\n");
                ar->scan.in_progress = false;
                ath10k_offchan_tx_purge(ar);
                ret = -ETIMEDOUT;
@@ -2192,9 +2291,17 @@ static void ath10k_tx(struct ieee80211_hw *hw,
  */
 void ath10k_halt(struct ath10k *ar)
 {
+       struct ath10k_vif *arvif;
+
        lockdep_assert_held(&ar->conf_mutex);
 
-       ath10k_stop_cac(ar);
+       if (ath10k_monitor_is_enabled(ar)) {
+               clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+               ar->promisc = false;
+               ar->monitor = false;
+               ath10k_monitor_stop(ar);
+       }
+
        del_timer_sync(&ar->scan.timeout);
        ath10k_offchan_tx_purge(ar);
        ath10k_mgmt_over_wmi_tx_purge(ar);
@@ -2208,6 +2315,17 @@ void ath10k_halt(struct ath10k *ar)
                ar->scan.in_progress = false;
                ieee80211_scan_completed(ar->hw, true);
        }
+
+       list_for_each_entry(arvif, &ar->arvifs, list) {
+               if (!arvif->beacon)
+                       continue;
+
+               dma_unmap_single(arvif->ar->dev,
+                                ATH10K_SKB_CB(arvif->beacon)->paddr,
+                                arvif->beacon->len, DMA_TO_DEVICE);
+               dev_kfree_skb_any(arvif->beacon);
+               arvif->beacon = NULL;
+       }
        spin_unlock_bh(&ar->data_lock);
 }
 
@@ -2226,14 +2344,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
 
        ret = ath10k_hif_power_up(ar);
        if (ret) {
-               ath10k_err("could not init hif (%d)\n", ret);
+               ath10k_err("Could not init hif: %d\n", ret);
                ar->state = ATH10K_STATE_OFF;
                goto exit;
        }
 
        ret = ath10k_core_start(ar);
        if (ret) {
-               ath10k_err("could not init core (%d)\n", ret);
+               ath10k_err("Could not init core: %d\n", ret);
                ath10k_hif_power_down(ar);
                ar->state = ATH10K_STATE_OFF;
                goto exit;
@@ -2246,13 +2364,11 @@ static int ath10k_start(struct ieee80211_hw *hw)
 
        ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
        if (ret)
-               ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
-                           ret);
+               ath10k_warn("failed to enable PMF QOS: %d\n", ret);
 
        ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
        if (ret)
-               ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
-                           ret);
+               ath10k_warn("failed to enable dynamic BW: %d\n", ret);
 
        /*
         * By default FW set ARP frames ac to voice (6). In that case ARP
@@ -2266,11 +2382,12 @@ static int ath10k_start(struct ieee80211_hw *hw)
        ret = ath10k_wmi_pdev_set_param(ar,
                                        ar->wmi.pdev_param->arp_ac_override, 0);
        if (ret) {
-               ath10k_warn("could not set arp ac override parameter: %d\n",
+               ath10k_warn("failed to set arp ac override parameter: %d\n",
                            ret);
                goto exit;
        }
 
+       ar->num_started_vdevs = 0;
        ath10k_regd_update(ar);
        ret = 0;
 
@@ -2309,7 +2426,7 @@ static int ath10k_config_ps(struct ath10k *ar)
        list_for_each_entry(arvif, &ar->arvifs, list) {
                ret = ath10k_mac_vif_setup_ps(arvif);
                if (ret) {
-                       ath10k_warn("could not setup powersave (%d)\n", ret);
+                       ath10k_warn("failed to setup powersave: %d\n", ret);
                        break;
                }
        }
@@ -2343,7 +2460,6 @@ static const char *chandef_get_width(enum nl80211_chan_width width)
 static void ath10k_config_chan(struct ath10k *ar)
 {
        struct ath10k_vif *arvif;
-       bool monitor_was_enabled;
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
@@ -2357,10 +2473,8 @@ static void ath10k_config_chan(struct ath10k *ar)
 
        /* First stop monitor interface. Some FW versions crash if there's a
         * lone monitor interface. */
-       monitor_was_enabled = ar->monitor_enabled;
-
-       if (ar->monitor_enabled)
-               ath10k_monitor_stop(ar);
+       if (ar->monitor_started)
+               ath10k_monitor_vdev_stop(ar);
 
        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (!arvif->is_started)
@@ -2371,7 +2485,7 @@ static void ath10k_config_chan(struct ath10k *ar)
 
                ret = ath10k_vdev_stop(arvif);
                if (ret) {
-                       ath10k_warn("could not stop vdev %d (%d)\n",
+                       ath10k_warn("failed to stop vdev %d: %d\n",
                                    arvif->vdev_id, ret);
                        continue;
                }
@@ -2388,7 +2502,7 @@ static void ath10k_config_chan(struct ath10k *ar)
 
                ret = ath10k_vdev_start(arvif);
                if (ret) {
-                       ath10k_warn("could not start vdev %d (%d)\n",
+                       ath10k_warn("failed to start vdev %d: %d\n",
                                    arvif->vdev_id, ret);
                        continue;
                }
@@ -2399,14 +2513,14 @@ static void ath10k_config_chan(struct ath10k *ar)
                ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
                                         arvif->bssid);
                if (ret) {
-                       ath10k_warn("could not bring vdev up %d (%d)\n",
+                       ath10k_warn("failed to bring vdev up %d: %d\n",
                                    arvif->vdev_id, ret);
                        continue;
                }
        }
 
-       if (monitor_was_enabled)
-               ath10k_monitor_start(ar, ar->monitor_vdev_id);
+       if (ath10k_monitor_is_enabled(ar))
+               ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
 }
 
 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2420,15 +2534,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
 
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
                ath10k_dbg(ATH10K_DBG_MAC,
-                          "mac config channel %d mhz flags 0x%x\n",
+                          "mac config channel %dMHz flags 0x%x radar %d\n",
                           conf->chandef.chan->center_freq,
-                          conf->chandef.chan->flags);
+                          conf->chandef.chan->flags,
+                          conf->radar_enabled);
 
                spin_lock_bh(&ar->data_lock);
                ar->rx_channel = conf->chandef.chan;
                spin_unlock_bh(&ar->data_lock);
 
-               ath10k_config_radar_detection(ar);
+               ar->radar_enabled = conf->radar_enabled;
+               ath10k_recalc_radar_detection(ar);
 
                if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
                        ar->chandef = conf->chandef;
@@ -2444,14 +2560,14 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
                ret = ath10k_wmi_pdev_set_param(ar, param,
                                                hw->conf.power_level * 2);
                if (ret)
-                       ath10k_warn("mac failed to set 2g txpower %d (%d)\n",
+                       ath10k_warn("failed to set 2g txpower %d: %d\n",
                                    hw->conf.power_level, ret);
 
                param = ar->wmi.pdev_param->txpower_limit5g;
                ret = ath10k_wmi_pdev_set_param(ar, param,
                                                hw->conf.power_level * 2);
                if (ret)
-                       ath10k_warn("mac failed to set 5g txpower %d (%d)\n",
+                       ath10k_warn("failed to set 5g txpower %d: %d\n",
                                    hw->conf.power_level, ret);
        }
 
@@ -2459,10 +2575,19 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
                ath10k_config_ps(ar);
 
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-               if (conf->flags & IEEE80211_CONF_MONITOR)
-                       ret = ath10k_monitor_create(ar);
-               else
-                       ret = ath10k_monitor_destroy(ar);
+               if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) {
+                       ar->monitor = true;
+                       ret = ath10k_monitor_start(ar);
+                       if (ret) {
+                               ath10k_warn("failed to start monitor (config): %d\n",
+                                           ret);
+                               ar->monitor = false;
+                       }
+               } else if (!(conf->flags & IEEE80211_CONF_MONITOR) &&
+                          ar->monitor) {
+                       ar->monitor = false;
+                       ath10k_monitor_stop(ar);
+               }
        }
 
        mutex_unlock(&ar->conf_mutex);
@@ -2497,12 +2622,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
        INIT_LIST_HEAD(&arvif->list);
 
-       if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
-               ath10k_warn("Only one monitor interface allowed\n");
-               ret = -EBUSY;
-               goto err;
-       }
-
        bit = ffs(ar->free_vdev_map);
        if (bit == 0) {
                ret = -EBUSY;
@@ -2545,7 +2664,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
                                     arvif->vdev_subtype, vif->addr);
        if (ret) {
-               ath10k_warn("WMI vdev %i create failed: ret %d\n",
+               ath10k_warn("failed to create WMI vdev %i: %d\n",
                            arvif->vdev_id, ret);
                goto err;
        }
@@ -2557,7 +2676,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
                                        arvif->def_wep_key_idx);
        if (ret) {
-               ath10k_warn("Failed to set vdev %i default keyid: %d\n",
+               ath10k_warn("failed to set vdev %i default key id: %d\n",
                            arvif->vdev_id, ret);
                goto err_vdev_delete;
        }
@@ -2567,7 +2686,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                                        ATH10K_HW_TXRX_NATIVE_WIFI);
        /* 10.X firmware does not support this VDEV parameter. Do not warn */
        if (ret && ret != -EOPNOTSUPP) {
-               ath10k_warn("Failed to set vdev %i TX encap: %d\n",
+               ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
                            arvif->vdev_id, ret);
                goto err_vdev_delete;
        }
@@ -2575,14 +2694,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
                if (ret) {
-                       ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
+                       ath10k_warn("failed to create vdev %i peer for AP: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_vdev_delete;
                }
 
                ret = ath10k_mac_set_kickout(arvif);
                if (ret) {
-                       ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
+                       ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
@@ -2594,7 +2713,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
                if (ret) {
-                       ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
+                       ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
@@ -2604,7 +2723,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
                if (ret) {
-                       ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
+                       ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
@@ -2614,7 +2733,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
                if (ret) {
-                       ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
+                       ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
@@ -2622,21 +2741,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
 
        ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
        if (ret) {
-               ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
+               ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
                            arvif->vdev_id, ret);
                goto err_peer_delete;
        }
 
        ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
        if (ret) {
-               ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
+               ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
                            arvif->vdev_id, ret);
                goto err_peer_delete;
        }
 
-       if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
-               ar->monitor_present = true;
-
        mutex_unlock(&ar->conf_mutex);
        return 0;
 
@@ -2668,6 +2784,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
 
        spin_lock_bh(&ar->data_lock);
        if (arvif->beacon) {
+               dma_unmap_single(arvif->ar->dev,
+                                ATH10K_SKB_CB(arvif->beacon)->paddr,
+                                arvif->beacon->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(arvif->beacon);
                arvif->beacon = NULL;
        }
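
The hunk above frees a beacon skb that was DMA-mapped for transmission, so the added dma_unmap_single() releases the streaming mapping before the buffer is handed back; otherwise every interface teardown with a beacon still pending would leak a DMA mapping. A minimal kernel-style sketch of that ordering follows; beacon_slot is a made-up container (the real driver keeps the bus address in its skb control block), not an ath10k structure.

/* Illustrative only: unmap first, free second, clear the pointer last so
 * the slot can never be unmapped or freed twice.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct beacon_slot {
	struct sk_buff *skb;	/* mapped beacon frame, or NULL */
	dma_addr_t paddr;	/* bus address from dma_map_single() */
};

static void beacon_slot_release(struct device *dev, struct beacon_slot *slot)
{
	if (!slot->skb)
		return;

	dma_unmap_single(dev, slot->paddr, slot->skb->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(slot->skb);
	slot->skb = NULL;
}
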
@@ -2679,7 +2798,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
                if (ret)
-                       ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
+                       ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
                                    arvif->vdev_id, ret);
 
                kfree(arvif->u.ap.noa_data);
@@ -2690,12 +2809,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
 
        ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
        if (ret)
-               ath10k_warn("WMI vdev %i delete failed: %d\n",
+               ath10k_warn("failed to delete WMI vdev %i: %d\n",
                            arvif->vdev_id, ret);
 
-       if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
-               ar->monitor_present = false;
-
        ath10k_peer_cleanup(ar, arvif->vdev_id);
 
        mutex_unlock(&ar->conf_mutex);
@@ -2728,28 +2844,17 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
        *total_flags &= SUPPORTED_FILTERS;
        ar->filter_flags = *total_flags;
 
-       /* Monitor must not be started if it wasn't created first.
-        * Promiscuous mode may be started on a non-monitor interface - in
-        * such case the monitor vdev is not created so starting the
-        * monitor makes no sense. Since ath10k uses no special RX filters
-        * (only BSS filter in STA mode) there's no need for any special
-        * action here. */
-       if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
-           !ar->monitor_enabled && ar->monitor_present) {
-               ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
-                          ar->monitor_vdev_id);
-
-               ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
-               if (ret)
-                       ath10k_warn("Unable to start monitor mode\n");
-       } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
-                  ar->monitor_enabled && ar->monitor_present) {
-               ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
-                          ar->monitor_vdev_id);
-
-               ret = ath10k_monitor_stop(ar);
-               if (ret)
-                       ath10k_warn("Unable to stop monitor mode\n");
+       if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) {
+               ar->promisc = true;
+               ret = ath10k_monitor_start(ar);
+               if (ret) {
+                       ath10k_warn("failed to start monitor (promisc): %d\n",
+                                   ret);
+                       ar->promisc = false;
+               }
+       } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) {
+               ar->promisc = false;
+               ath10k_monitor_stop(ar);
        }
 
        mutex_unlock(&ar->conf_mutex);
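
With the monitor-vdev bookkeeping removed, promiscuous filtering in the hunk above becomes an edge-triggered toggle: start capture only on a false-to-true transition of ar->promisc, stop it on the opposite edge, and roll the flag back when starting fails so the next filter update can retry. A standalone sketch of the same pattern, where start_capture()/stop_capture() are stand-ins rather than ath10k helpers:

/* Edge-triggered promiscuous toggle; the stubs stand in for the driver's
 * monitor start/stop calls. */
#include <stdbool.h>
#include <stdio.h>

static bool promisc;				/* mirrors ar->promisc */

static int start_capture(void) { return 0; }	/* pretend success */
static void stop_capture(void) { }

static void update_filter(bool want_promisc)
{
	if (want_promisc && !promisc) {
		promisc = true;
		if (start_capture() != 0) {
			fprintf(stderr, "failed to start capture\n");
			promisc = false;	/* allow a retry next time */
		}
	} else if (!want_promisc && promisc) {
		promisc = false;
		stop_capture();
	}
}

int main(void)
{
	update_filter(true);	/* starts capture once */
	update_filter(true);	/* no-op: already promiscuous */
	update_filter(false);	/* stops capture */
	return 0;
}
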
@@ -2780,7 +2885,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                           arvif->vdev_id, arvif->beacon_interval);
 
                if (ret)
-                       ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
+                       ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2793,7 +2898,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
                                                WMI_BEACON_STAGGERED_MODE);
                if (ret)
-                       ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
+                       ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2808,7 +2913,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                arvif->dtim_period);
                if (ret)
-                       ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
+                       ath10k_warn("failed to set dtim period for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2829,7 +2934,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                        ret = ath10k_peer_create(ar, arvif->vdev_id,
                                                 info->bssid);
                        if (ret)
-                               ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
+                               ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
                                            info->bssid, arvif->vdev_id, ret);
 
                        if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2868,20 +2973,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ath10k_control_beaconing(arvif, info);
 
        if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-               u32 cts_prot;
-               if (info->use_cts_prot)
-                       cts_prot = 1;
-               else
-                       cts_prot = 0;
-
+               arvif->use_cts_prot = info->use_cts_prot;
                ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
-                          arvif->vdev_id, cts_prot);
+                          arvif->vdev_id, info->use_cts_prot);
 
-               vdev_param = ar->wmi.vdev_param->enable_rtscts;
-               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
-                                               cts_prot);
+               ret = ath10k_recalc_rtscts_prot(arvif);
                if (ret)
-                       ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
+                       ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2900,7 +2998,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                slottime);
                if (ret)
-                       ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
+                       ath10k_warn("failed to set erp slot for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2919,7 +3017,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                preamble);
                if (ret)
-                       ath10k_warn("Failed to set preamble for vdev %d: %i\n",
+                       ath10k_warn("failed to set preamble for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2990,7 +3088,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
 
        ret = ath10k_start_scan(ar, &arg);
        if (ret) {
-               ath10k_warn("could not start hw scan (%d)\n", ret);
+               ath10k_warn("failed to start hw scan: %d\n", ret);
                spin_lock_bh(&ar->data_lock);
                ar->scan.in_progress = false;
                spin_unlock_bh(&ar->data_lock);
@@ -3010,8 +3108,7 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
        mutex_lock(&ar->conf_mutex);
        ret = ath10k_abort_scan(ar);
        if (ret) {
-               ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
-                           ret);
+               ath10k_warn("failed to abort scan: %d\n", ret);
                ieee80211_scan_completed(hw, 1 /* aborted */);
        }
        mutex_unlock(&ar->conf_mutex);
@@ -3089,7 +3186,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        if (!peer) {
                if (cmd == SET_KEY) {
-                       ath10k_warn("cannot install key for non-existent peer %pM\n",
+                       ath10k_warn("failed to install key for non-existent peer %pM\n",
                                    peer_addr);
                        ret = -EOPNOTSUPP;
                        goto exit;
@@ -3112,7 +3209,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        ret = ath10k_install_key(arvif, key, cmd, peer_addr);
        if (ret) {
-               ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
+               ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
                            arvif->vdev_id, peer_addr, ret);
                goto exit;
        }
@@ -3127,7 +3224,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                peer->keys[key->keyidx] = NULL;
        else if (peer == NULL)
                /* impossible unless FW goes crazy */
-               ath10k_warn("peer %pM disappeared!\n", peer_addr);
+               ath10k_warn("Peer %pM disappeared!\n", peer_addr);
        spin_unlock_bh(&ar->data_lock);
 
 exit:
@@ -3195,6 +3292,16 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
                                    sta->addr, smps, err);
        }
 
+       if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+                          sta->addr);
+
+               err = ath10k_station_assoc(ar, arvif, sta, true);
+               if (err)
+                       ath10k_warn("failed to reassociate station: %pM\n",
+                                   sta->addr);
+       }
+
        mutex_unlock(&ar->conf_mutex);
 }
 
@@ -3236,7 +3343,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                        max_num_peers = TARGET_NUM_PEERS;
 
                if (ar->num_peers >= max_num_peers) {
-                       ath10k_warn("Number of peers exceeded: peers number %d (max peers %d)\n",
+                       ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
                                    ar->num_peers, max_num_peers);
                        ret = -ENOBUFS;
                        goto exit;
@@ -3248,7 +3355,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 
                ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
                if (ret)
-                       ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+                       ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
        } else if ((old_state == IEEE80211_STA_NONE &&
                    new_state == IEEE80211_STA_NOTEXIST)) {
@@ -3260,7 +3367,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                           arvif->vdev_id, sta->addr);
                ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
                if (ret)
-                       ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
+                       ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
 
                if (vif->type == NL80211_IFTYPE_STATION)
@@ -3275,9 +3382,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
                           sta->addr);
 
-               ret = ath10k_station_assoc(ar, arvif, sta);
+               ret = ath10k_station_assoc(ar, arvif, sta, false);
                if (ret)
-                       ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
+                       ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTH &&
@@ -3291,7 +3398,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 
                ret = ath10k_station_disassoc(ar, arvif, sta);
                if (ret)
-                       ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
+                       ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
        }
 exit:
@@ -3339,7 +3446,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
                                          WMI_STA_PS_PARAM_UAPSD,
                                          arvif->u.sta.uapsd);
        if (ret) {
-               ath10k_warn("could not set uapsd params %d\n", ret);
+               ath10k_warn("failed to set uapsd params: %d\n", ret);
                goto exit;
        }
 
@@ -3352,7 +3459,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
                                          WMI_STA_PS_PARAM_RX_WAKE_POLICY,
                                          value);
        if (ret)
-               ath10k_warn("could not set rx wake param %d\n", ret);
+               ath10k_warn("failed to set rx wake param: %d\n", ret);
 
 exit:
        return ret;
@@ -3402,13 +3509,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
        /* FIXME: FW accepts wmm params per hw, not per vif */
        ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
        if (ret) {
-               ath10k_warn("could not set wmm params %d\n", ret);
+               ath10k_warn("failed to set wmm params: %d\n", ret);
                goto exit;
        }
 
        ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
        if (ret)
-               ath10k_warn("could not set sta uapsd %d\n", ret);
+               ath10k_warn("failed to set sta uapsd: %d\n", ret);
 
 exit:
        mutex_unlock(&ar->conf_mutex);
@@ -3461,7 +3568,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
 
        ret = ath10k_start_scan(ar, &arg);
        if (ret) {
-               ath10k_warn("could not start roc scan (%d)\n", ret);
+               ath10k_warn("failed to start roc scan: %d\n", ret);
                spin_lock_bh(&ar->data_lock);
                ar->scan.in_progress = false;
                spin_unlock_bh(&ar->data_lock);
@@ -3470,7 +3577,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
 
        ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
        if (ret == 0) {
-               ath10k_warn("could not switch to channel for roc scan\n");
+               ath10k_warn("failed to switch to channel for roc scan\n");
                ath10k_abort_scan(ar);
                ret = -ETIMEDOUT;
                goto exit;
@@ -3511,7 +3618,7 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 
                ret = ath10k_mac_set_rts(arvif, value);
                if (ret) {
-                       ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+                       ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
                                    arvif->vdev_id, ret);
                        break;
                }
@@ -3534,7 +3641,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
 
                ret = ath10k_mac_set_rts(arvif, value);
                if (ret) {
-                       ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+                       ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
                                    arvif->vdev_id, ret);
                        break;
                }
@@ -3544,7 +3651,8 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
        return ret;
 }
 
-static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        u32 queues, bool drop)
 {
        struct ath10k *ar = hw->priv;
        bool skip;
@@ -3573,7 +3681,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
                }), ATH10K_FLUSH_TIMEOUT_HZ);
 
        if (ret <= 0 || skip)
-               ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
+               ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
                            skip, ar->state, ret);
 
 skip:
@@ -3608,7 +3716,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
 
        ret = ath10k_hif_suspend(ar);
        if (ret) {
-               ath10k_warn("could not suspend hif (%d)\n", ret);
+               ath10k_warn("failed to suspend hif: %d\n", ret);
                goto resume;
        }
 
@@ -3617,7 +3725,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
 resume:
        ret = ath10k_wmi_pdev_resume_target(ar);
        if (ret)
-               ath10k_warn("could not resume target (%d)\n", ret);
+               ath10k_warn("failed to resume target: %d\n", ret);
 
        ret = 1;
 exit:
@@ -3634,14 +3742,14 @@ static int ath10k_resume(struct ieee80211_hw *hw)
 
        ret = ath10k_hif_resume(ar);
        if (ret) {
-               ath10k_warn("could not resume hif (%d)\n", ret);
+               ath10k_warn("failed to resume hif: %d\n", ret);
                ret = 1;
                goto exit;
        }
 
        ret = ath10k_wmi_pdev_resume_target(ar);
        if (ret) {
-               ath10k_warn("could not resume target (%d)\n", ret);
+               ath10k_warn("failed to resume target: %d\n", ret);
                ret = 1;
                goto exit;
        }
@@ -3964,7 +4072,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
                                        vdev_param, fixed_rate);
        if (ret) {
-               ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n",
+               ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
                            fixed_rate, ret);
                ret = -EINVAL;
                goto exit;
@@ -3977,7 +4085,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
                                        vdev_param, fixed_nss);
 
        if (ret) {
-               ath10k_warn("Could not set fixed_nss param %d: %d\n",
+               ath10k_warn("failed to set fixed nss param %d: %d\n",
                            fixed_nss, ret);
                ret = -EINVAL;
                goto exit;
@@ -3990,7 +4098,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
                                        force_sgi);
 
        if (ret) {
-               ath10k_warn("Could not set sgi param %d: %d\n",
+               ath10k_warn("failed to set sgi param %d: %d\n",
                            force_sgi, ret);
                ret = -EINVAL;
                goto exit;
@@ -4026,7 +4134,7 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
        }
 
        if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
-               ath10k_warn("Could not force SGI usage for default rate settings\n");
+               ath10k_warn("failed to force SGI usage for default rate settings\n");
                return -EINVAL;
        }
 
@@ -4072,8 +4180,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
                        bw = WMI_PEER_CHWIDTH_80MHZ;
                        break;
                case IEEE80211_STA_RX_BW_160:
-                       ath10k_warn("mac sta rc update for %pM: invalid bw %d\n",
-                                   sta->addr, sta->bandwidth);
+                       ath10k_warn("Invalid bandwidth %d in rc update for %pM\n",
+                                   sta->bandwidth, sta->addr);
                        bw = WMI_PEER_CHWIDTH_20MHZ;
                        break;
                }
@@ -4099,8 +4207,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
                        smps = WMI_PEER_SMPS_DYNAMIC;
                        break;
                case IEEE80211_SMPS_NUM_MODES:
-                       ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n",
-                                   sta->addr, sta->smps_mode);
+                       ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
+                                   sta->smps_mode, sta->addr);
                        smps = WMI_PEER_SMPS_PS_NONE;
                        break;
                }
@@ -4108,15 +4216,6 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
                arsta->smps = smps;
        }
 
-       if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
-               /* FIXME: Not implemented. Probably the only way to do it would
-                * be to re-assoc the peer. */
-               changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
-               ath10k_dbg(ATH10K_DBG_MAC,
-                          "mac sta rc update for %pM: changing supported rates not implemented\n",
-                          sta->addr);
-       }
-
        arsta->changed |= changed;
 
        spin_unlock_bh(&ar->data_lock);
@@ -4516,7 +4615,6 @@ int ath10k_mac_register(struct ath10k *ar)
                        IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                        IEEE80211_HW_HAS_RATE_CONTROL |
                        IEEE80211_HW_SUPPORTS_STATIC_SMPS |
-                       IEEE80211_HW_WANT_MONITOR_VIF |
                        IEEE80211_HW_AP_LINK_PS |
                        IEEE80211_HW_SPECTRUM_MGMT;
 
@@ -4570,19 +4668,19 @@ int ath10k_mac_register(struct ath10k *ar)
                                                             NL80211_DFS_UNSET);
 
                if (!ar->dfs_detector)
-                       ath10k_warn("dfs pattern detector init failed\n");
+                       ath10k_warn("failed to initialise DFS pattern detector\n");
        }
 
        ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
                            ath10k_reg_notifier);
        if (ret) {
-               ath10k_err("Regulatory initialization failed: %i\n", ret);
+               ath10k_err("failed to initialise regulatory: %i\n", ret);
                goto err_free;
        }
 
        ret = ieee80211_register_hw(ar->hw);
        if (ret) {
-               ath10k_err("ieee80211 registration failed: %d\n", ret);
+               ath10k_err("failed to register ieee80211: %d\n", ret);
                goto err_free;
        }
 
index 9d242d801d9d354f772b74257826427f0a598180..66b1f3017f2b4a616da98cf61010987b9a7beb72 100644 (file)
@@ -39,15 +39,27 @@ enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_MSI = 2,
 };
 
-static unsigned int ath10k_target_ps;
+enum ath10k_pci_reset_mode {
+       ATH10K_PCI_RESET_AUTO = 0,
+       ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_target_ps;
 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
 
-module_param(ath10k_target_ps, uint, 0644);
-MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
+MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
 
 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
 
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for the target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+
 #define QCA988X_2_0_DEVICE_ID  (0x003c)
 
 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@@ -346,9 +358,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
-       data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
-                                                        orig_nbytes,
-                                                        &ce_data_base);
+       data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+                                                      orig_nbytes,
+                                                      &ce_data_base,
+                                                      GFP_ATOMIC);
 
        if (!data_buf) {
                ret = -ENOMEM;
@@ -442,12 +455,12 @@ done:
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else
-               ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
-                          __func__, address);
+               ath10k_warn("failed to read diag value at 0x%x: %d\n",
+                           address, ret);
 
        if (data_buf)
-               pci_free_consistent(ar_pci->pdev, orig_nbytes,
-                                   data_buf, ce_data_base);
+               dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+                                 ce_data_base);
 
        return ret;
 }
@@ -490,9 +503,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
-       data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
-                                                        orig_nbytes,
-                                                        &ce_data_base);
+       data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+                                                      orig_nbytes,
+                                                      &ce_data_base,
+                                                      GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
@@ -588,13 +602,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 
 done:
        if (data_buf) {
-               pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
-                                   ce_data_base);
+               dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+                                 ce_data_base);
        }
 
        if (ret != 0)
-               ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
-                          address);
+               ath10k_warn("failed to write diag value at 0x%x: %d\n",
+                           address, ret);
 
        return ret;
 }
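
Both diagnostic paths above swap the legacy pci_alloc_consistent()/pci_free_consistent() wrappers for the generic DMA API; the result is the same coherent buffer plus bus address, but the generic calls take a struct device and an explicit GFP flag. A kernel-style sketch of the pairing, with a made-up 256-byte size and 'dev' standing for the PCI device's struct device:

/* Sketch of the coherent-buffer pairing used by the diag read/write paths. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int diag_buffer_roundtrip(struct device *dev)
{
	dma_addr_t bus_addr;
	void *cpu_addr;
	size_t len = 256;

	cpu_addr = dma_alloc_coherent(dev, len, &bus_addr, GFP_ATOMIC);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand bus_addr to the copy engine, access the data via cpu_addr ... */

	dma_free_coherent(dev, len, cpu_addr, bus_addr);
	return 0;
}
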
@@ -803,6 +817,9 @@ unlock:
 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
 }
 
@@ -854,6 +871,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
 {
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
+
        if (!force) {
                int resources;
                /*
@@ -880,7 +899,7 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
 
        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
@@ -938,6 +957,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
 {
        int ret = 0;
 
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
+
        /* polling for received messages not supported */
        *dl_is_polled = 0;
 
@@ -997,6 +1018,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
 {
        int ul_is_polled, dl_is_polled;
 
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
@@ -1098,6 +1121,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret, ret_early;
 
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
+
        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);
 
@@ -1233,18 +1258,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
 
 static void ath10k_pci_ce_deinit(struct ath10k *ar)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_pci_pipe *pipe_info;
-       int pipe_num;
+       int i;
 
-       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-               pipe_info = &ar_pci->pipe_info[pipe_num];
-               if (pipe_info->ce_hdl) {
-                       ath10k_ce_deinit(pipe_info->ce_hdl);
-                       pipe_info->ce_hdl = NULL;
-                       pipe_info->buf_sz = 0;
-               }
-       }
+       for (i = 0; i < CE_COUNT; i++)
+               ath10k_ce_deinit_pipe(ar, i);
 }
 
 static void ath10k_pci_hif_stop(struct ath10k *ar)
@@ -1252,7 +1269,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
 
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
 
        ret = ath10k_ce_disable_interrupts(ar);
        if (ret)
@@ -1697,30 +1714,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_pci_alloc_ce(struct ath10k *ar)
+{
+       int i, ret;
+
+       for (i = 0; i < CE_COUNT; i++) {
+               ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+               if (ret) {
+                       ath10k_err("failed to allocate copy engine pipe %d: %d\n",
+                                  i, ret);
+                       return ret;
+               }
+       }
 
+       return 0;
+}
+
+static void ath10k_pci_free_ce(struct ath10k *ar)
+{
+       int i;
+
+       for (i = 0; i < CE_COUNT; i++)
+               ath10k_ce_free_pipe(ar, i);
+}
 
 static int ath10k_pci_ce_init(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
-       int pipe_num;
+       int pipe_num, ret;
 
        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
+               pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
                pipe_info->pipe_num = pipe_num;
                pipe_info->hif_ce_state = ar;
                attr = &host_ce_config_wlan[pipe_num];
 
-               pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
-               if (pipe_info->ce_hdl == NULL) {
-                       ath10k_err("failed to initialize CE for pipe: %d\n",
-                                  pipe_num);
-
-                       /* It is safe to call it here. It checks if ce_hdl is
-                        * valid for each pipe */
-                       ath10k_pci_ce_deinit(ar);
-                       return -1;
+               ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
+               if (ret) {
+                       ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+                                  pipe_num, ret);
+                       return ret;
                }
 
                if (pipe_num == CE_COUNT - 1) {
@@ -1741,16 +1777,15 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       u32 fw_indicator_address, fw_indicator;
+       u32 fw_indicator;
 
        ath10k_pci_wake(ar);
 
-       fw_indicator_address = ar_pci->fw_indicator_address;
-       fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
+       fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
 
        if (fw_indicator & FW_IND_EVENT_PENDING) {
                /* ACK: clear Target-side pending event */
-               ath10k_pci_write32(ar, fw_indicator_address,
+               ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
                                   fw_indicator & ~FW_IND_EVENT_PENDING);
 
                if (ar_pci->started) {
@@ -1769,11 +1804,10 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
 
 static int ath10k_pci_warm_reset(struct ath10k *ar)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 val;
 
-       ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
 
        ret = ath10k_do_pci_wake(ar);
        if (ret) {
@@ -1801,7 +1835,7 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
        msleep(100);
 
        /* clear fw indicator */
-       ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
+       ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
 
        /* clear target LF timer interrupts */
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
@@ -1934,7 +1968,9 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
                irq_mode = "legacy";
 
        if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
-               ath10k_info("pci irq %s\n", irq_mode);
+               ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
+                           irq_mode, ath10k_pci_irq_mode,
+                           ath10k_pci_reset_mode);
 
        return 0;
 
@@ -1956,6 +1992,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
 {
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
+
        /*
         * Hardware CUS232 version 2 has some issues with cold reset and the
         * preferred (and safer) way to perform a device reset is through a
@@ -1966,9 +2004,14 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
         */
        ret = __ath10k_pci_hif_power_up(ar, false);
        if (ret) {
-               ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
+               ath10k_warn("failed to power up target using warm reset: %d\n",
                            ret);
 
+               if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
+                       return ret;
+
+               ath10k_warn("trying cold reset\n");
+
                ret = __ath10k_pci_hif_power_up(ar, true);
                if (ret) {
                        ath10k_err("failed to power up target using cold reset too (%d)\n",
@@ -1984,12 +2027,14 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
+
        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_deinit_irq(ar);
+       ath10k_pci_ce_deinit(ar);
        ath10k_pci_warm_reset(ar);
 
-       ath10k_pci_ce_deinit(ar);
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
 }
@@ -2137,7 +2182,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
 static void ath10k_pci_early_irq_tasklet(unsigned long data)
 {
        struct ath10k *ar = (struct ath10k *)data;
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 fw_ind;
        int ret;
 
@@ -2148,9 +2192,9 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
                return;
        }
 
-       fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
+       fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
        if (fw_ind & FW_IND_EVENT_PENDING) {
-               ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
+               ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
                                   fw_ind & ~FW_IND_EVENT_PENDING);
 
                /* Some structures are unavailable during early boot or at
@@ -2385,33 +2429,66 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int wait_limit = 300; /* 3 sec */
+       unsigned long timeout;
        int ret;
+       u32 val;
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
 
        ret = ath10k_pci_wake(ar);
        if (ret) {
-               ath10k_err("failed to wake up target: %d\n", ret);
+               ath10k_err("failed to wake up target for init: %d\n", ret);
                return ret;
        }
 
-       while (wait_limit-- &&
-              !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
-                FW_IND_INITIALIZED)) {
+       timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
+
+       do {
+               val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+
+               ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
+
+               /* target should never return this */
+               if (val == 0xffffffff)
+                       continue;
+
+               /* the device has crashed so don't bother trying anymore */
+               if (val & FW_IND_EVENT_PENDING)
+                       break;
+
+               if (val & FW_IND_INITIALIZED)
+                       break;
+
                if (ar_pci->num_msi_intrs == 0)
                        /* Fix potential race by repeating CORE_BASE writes */
-                       iowrite32(PCIE_INTR_FIRMWARE_MASK |
-                                 PCIE_INTR_CE_MASK_ALL,
-                                 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-                                                PCIE_INTR_ENABLE_ADDRESS));
+                       ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
+                                              PCIE_INTR_FIRMWARE_MASK |
+                                              PCIE_INTR_CE_MASK_ALL);
+
                mdelay(10);
-       }
+       } while (time_before(jiffies, timeout));
 
-       if (wait_limit < 0) {
-               ath10k_err("target stalled\n");
+       if (val == 0xffffffff) {
+               ath10k_err("failed to read device register, device is gone\n");
                ret = -EIO;
                goto out;
        }
 
+       if (val & FW_IND_EVENT_PENDING) {
+               ath10k_warn("device has crashed during init\n");
+               ret = -ECOMM;
+               goto out;
+       }
+
+       if (!(val & FW_IND_INITIALIZED)) {
+               ath10k_err("failed to receive initialized event from target: %08x\n",
+                          val);
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
+
 out:
        ath10k_pci_sleep(ar);
        return ret;
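
The rewritten wait loop above replaces a fixed iteration count with a jiffies deadline and classifies the indicator value on exit: all-ones means the device fell off the bus, a pending firmware event means it crashed, FW_IND_INITIALIZED means success, and anything else at the deadline is a timeout. A userspace sketch of the same deadline-and-classify shape, where read_indicator() and the bit constants are stand-ins for the firmware indicator register:

/* Deadline-based polling with outcome classification, modelled on the
 * target-init wait above; the register access is stubbed out. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define CRASH_BIT	0x1u
#define READY_BIT	0x2u
#define WAIT_SECONDS	3

static uint32_t read_indicator(void) { return READY_BIT; }	/* pretend ready */

static int wait_for_ready(void)
{
	time_t deadline = time(NULL) + WAIT_SECONDS;
	uint32_t val = 0;

	do {
		val = read_indicator();
		if (val == 0xffffffffu)
			continue;	/* bus returned all-ones: device gone */
		if (val & (CRASH_BIT | READY_BIT))
			break;		/* crashed or ready: stop polling */
	} while (time(NULL) < deadline);

	if (val == 0xffffffffu)
		return -EIO;		/* never read a sane register value */
	if (val & CRASH_BIT)
		return -ECOMM;		/* device crashed during init */
	if (!(val & READY_BIT))
		return -ETIMEDOUT;	/* deadline hit without readiness */

	printf("target initialised (0x%x)\n", val);
	return 0;
}

int main(void)
{
	return wait_for_ready() ? 1 : 0;
}
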
@@ -2422,6 +2499,8 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
        int i, ret;
        u32 val;
 
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
+
        ret = ath10k_do_pci_wake(ar);
        if (ret) {
                ath10k_err("failed to wake up target: %d\n",
@@ -2453,6 +2532,9 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
        }
 
        ath10k_do_pci_sleep(ar);
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
        return 0;
 }
 
@@ -2484,7 +2566,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        struct ath10k_pci *ar_pci;
        u32 lcr_val, chip_id;
 
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+       ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
 
        ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
        if (ar_pci == NULL)
@@ -2503,7 +2585,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                goto err_ar_pci;
        }
 
-       if (ath10k_target_ps)
+       if (ath10k_pci_target_ps)
                set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
 
        ath10k_pci_dump_features(ar_pci);
@@ -2516,7 +2598,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        }
 
        ar_pci->ar = ar;
-       ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
        atomic_set(&ar_pci->keep_awake_count, 0);
 
        pci_set_drvdata(pdev, ar);
@@ -2594,16 +2675,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
        ath10k_do_pci_sleep(ar);
 
+       ret = ath10k_pci_alloc_ce(ar);
+       if (ret) {
+               ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
+               goto err_iomap;
+       }
+
        ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
 
        ret = ath10k_core_register(ar, chip_id);
        if (ret) {
                ath10k_err("failed to register driver core: %d\n", ret);
-               goto err_iomap;
+               goto err_free_ce;
        }
 
        return 0;
 
+err_free_ce:
+       ath10k_pci_free_ce(ar);
 err_iomap:
        pci_iounmap(pdev, mem);
 err_master:
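
The probe changes above slot the new copy-engine allocation in front of core registration and tear it down through the added err_free_ce label, keeping the acquire-in-order, release-in-reverse shape of the error unwind. A standalone sketch of that goto-unwind pattern, with alloc_pipes()/free_pipes()/register_core() as placeholders for the driver calls:

/* Acquire in order, release in reverse on failure; the helpers are stubs. */
#include <stdio.h>

static int alloc_pipes(void) { return 0; }
static void free_pipes(void) { }
static int register_core(void) { return -1; }	/* pretend registration fails */

static int probe(void)
{
	int ret;

	ret = alloc_pipes();
	if (ret)
		goto err;

	ret = register_core();
	if (ret) {
		fprintf(stderr, "failed to register core: %d\n", ret);
		goto err_free_pipes;
	}

	return 0;

err_free_pipes:
	free_pipes();
err:
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
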
@@ -2626,7 +2715,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
        struct ath10k *ar = pci_get_drvdata(pdev);
        struct ath10k_pci *ar_pci;
 
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+       ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
 
        if (!ar)
                return;
@@ -2639,6 +2728,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
        tasklet_kill(&ar_pci->msi_fw_err);
 
        ath10k_core_unregister(ar);
+       ath10k_pci_free_ce(ar);
 
        pci_iounmap(pdev, ar_pci->mem);
        pci_release_region(pdev, BAR_NUM);
@@ -2680,6 +2770,5 @@ module_exit(ath10k_pci_exit);
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
index b43fdb4f731973544e1a55f700c779e4191d76bb..dfdebb4157aa177acde13ea1149b5a7490c5593e 100644 (file)
@@ -189,9 +189,6 @@ struct ath10k_pci {
 
        struct ath10k_hif_cb msg_callbacks_current;
 
-       /* Target address used to signal a pending firmware event */
-       u32 fw_indicator_address;
-
        /* Copy Engine used for Diagnostic Accesses */
        struct ath10k_ce_pipe *ce_diag;
 
index 0541dd939ce9d8be7dc7e195ef952526abab0255..82669a77e553b8f6902c7dc03f99e24b42a3dea6 100644 (file)
@@ -100,189 +100,6 @@ exit:
                wake_up(&htt->empty_tx_wq);
 }
 
-static const u8 rx_legacy_rate_idx[] = {
-       3,      /* 0x00  - 11Mbps  */
-       2,      /* 0x01  - 5.5Mbps */
-       1,      /* 0x02  - 2Mbps   */
-       0,      /* 0x03  - 1Mbps   */
-       3,      /* 0x04  - 11Mbps  */
-       2,      /* 0x05  - 5.5Mbps */
-       1,      /* 0x06  - 2Mbps   */
-       0,      /* 0x07  - 1Mbps   */
-       10,     /* 0x08  - 48Mbps  */
-       8,      /* 0x09  - 24Mbps  */
-       6,      /* 0x0A  - 12Mbps  */
-       4,      /* 0x0B  - 6Mbps   */
-       11,     /* 0x0C  - 54Mbps  */
-       9,      /* 0x0D  - 36Mbps  */
-       7,      /* 0x0E  - 18Mbps  */
-       5,      /* 0x0F  - 9Mbps   */
-};
-
-static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
-                            enum ieee80211_band band,
-                            struct ieee80211_rx_status *status)
-{
-       u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
-       u8 info0 = info->rate.info0;
-       u32 info1 = info->rate.info1;
-       u32 info2 = info->rate.info2;
-       u8 preamble = 0;
-
-       /* Check if valid fields */
-       if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
-               return;
-
-       preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
-
-       switch (preamble) {
-       case HTT_RX_LEGACY:
-               cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
-               rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
-               rate_idx = 0;
-
-               if (rate < 0x08 || rate > 0x0F)
-                       break;
-
-               switch (band) {
-               case IEEE80211_BAND_2GHZ:
-                       if (cck)
-                               rate &= ~BIT(3);
-                       rate_idx = rx_legacy_rate_idx[rate];
-                       break;
-               case IEEE80211_BAND_5GHZ:
-                       rate_idx = rx_legacy_rate_idx[rate];
-                       /* We are using same rate table registering
-                          HW - ath10k_rates[]. In case of 5GHz skip
-                          CCK rates, so -4 here */
-                       rate_idx -= 4;
-                       break;
-               default:
-                       break;
-               }
-
-               status->rate_idx = rate_idx;
-               break;
-       case HTT_RX_HT:
-       case HTT_RX_HT_WITH_TXBF:
-               /* HT-SIG - Table 20-11 in info1 and info2 */
-               mcs = info1 & 0x1F;
-               nss = mcs >> 3;
-               bw = (info1 >> 7) & 1;
-               sgi = (info2 >> 7) & 1;
-
-               status->rate_idx = mcs;
-               status->flag |= RX_FLAG_HT;
-               if (sgi)
-                       status->flag |= RX_FLAG_SHORT_GI;
-               if (bw)
-                       status->flag |= RX_FLAG_40MHZ;
-               break;
-       case HTT_RX_VHT:
-       case HTT_RX_VHT_WITH_TXBF:
-               /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
-                  TODO check this */
-               mcs = (info2 >> 4) & 0x0F;
-               nss = ((info1 >> 10) & 0x07) + 1;
-               bw = info1 & 3;
-               sgi = info2 & 1;
-
-               status->rate_idx = mcs;
-               status->vht_nss = nss;
-
-               if (sgi)
-                       status->flag |= RX_FLAG_SHORT_GI;
-
-               switch (bw) {
-               /* 20MHZ */
-               case 0:
-                       break;
-               /* 40MHZ */
-               case 1:
-                       status->flag |= RX_FLAG_40MHZ;
-                       break;
-               /* 80MHZ */
-               case 2:
-                       status->vht_flag |= RX_VHT_FLAG_80MHZ;
-               }
-
-               status->flag |= RX_FLAG_VHT;
-               break;
-       default:
-               break;
-       }
-}
-
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
-{
-       struct ieee80211_rx_status *status;
-       struct ieee80211_channel *ch;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
-
-       status = IEEE80211_SKB_RXCB(info->skb);
-       memset(status, 0, sizeof(*status));
-
-       if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
-               status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
-                               RX_FLAG_MMIC_STRIPPED;
-               hdr->frame_control = __cpu_to_le16(
-                               __le16_to_cpu(hdr->frame_control) &
-                               ~IEEE80211_FCTL_PROTECTED);
-       }
-
-       if (info->mic_err)
-               status->flag |= RX_FLAG_MMIC_ERROR;
-
-       if (info->fcs_err)
-               status->flag |= RX_FLAG_FAILED_FCS_CRC;
-
-       if (info->amsdu_more)
-               status->flag |= RX_FLAG_AMSDU_MORE;
-
-       status->signal = info->signal;
-
-       spin_lock_bh(&ar->data_lock);
-       ch = ar->scan_channel;
-       if (!ch)
-               ch = ar->rx_channel;
-       spin_unlock_bh(&ar->data_lock);
-
-       if (!ch) {
-               ath10k_warn("no channel configured; ignoring frame!\n");
-               dev_kfree_skb_any(info->skb);
-               return;
-       }
-
-       process_rx_rates(ar, info, ch->band, status);
-       status->band = ch->band;
-       status->freq = ch->center_freq;
-
-       if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
-               /* TSF available only in 32-bit */
-               status->mactime = info->tsf & 0xffffffff;
-               status->flag |= RX_FLAG_MACTIME_END;
-       }
-
-       ath10k_dbg(ATH10K_DBG_DATA,
-                  "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
-                  info->skb,
-                  info->skb->len,
-                  status->flag == 0 ? "legacy" : "",
-                  status->flag & RX_FLAG_HT ? "ht" : "",
-                  status->flag & RX_FLAG_VHT ? "vht" : "",
-                  status->flag & RX_FLAG_40MHZ ? "40" : "",
-                  status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
-                  status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
-                  status->rate_idx,
-                  status->vht_nss,
-                  status->freq,
-                  status->band, status->flag, info->fcs_err);
-       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
-                       info->skb->data, info->skb->len);
-
-       ieee80211_rx(ar->hw, info->skb);
-}
-
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
                                     const u8 *addr)
 {
index 356dc9c04c9e3981feaeb04d8df4e8f45fc36f7f..aee3e20058f814f2e7a774005d84d77c684442f4 100644 (file)
@@ -21,7 +21,6 @@
 
 void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
                          const struct htt_tx_done *tx_done);
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
                                     const u8 *addr);
index cb1f7b5bcf4cdefa774411e09fa39b80728d82e4..72cc4f20d102c39bfc7d7c750b4aeb62465cc981 100644 (file)
@@ -1362,13 +1362,10 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
        struct sk_buff *bcn;
        int ret, vdev_id = 0;
 
-       ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
-
        ev = (struct wmi_host_swba_event *)skb->data;
        map = __le32_to_cpu(ev->vdev_map);
 
-       ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
-                  "-vdev map 0x%x\n",
+       ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
                   ev->vdev_map);
 
        for (; map; map >>= 1, vdev_id++) {
@@ -1385,12 +1382,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                bcn_info = &ev->bcn_info[i];
 
                ath10k_dbg(ATH10K_DBG_MGMT,
-                          "-bcn_info[%d]:\n"
-                          "--tim_len %d\n"
-                          "--tim_mcast %d\n"
-                          "--tim_changed %d\n"
-                          "--tim_num_ps_pending %d\n"
-                          "--tim_bitmap 0x%08x%08x%08x%08x\n",
+                          "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
                           i,
                           __le32_to_cpu(bcn_info->tim_info.tim_len),
                           __le32_to_cpu(bcn_info->tim_info.tim_mcast),
@@ -1439,6 +1431,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                                         ATH10K_SKB_CB(arvif->beacon)->paddr,
                                         arvif->beacon->len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(arvif->beacon);
+                       arvif->beacon = NULL;
                }
 
                ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
@@ -1448,6 +1441,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                                        ATH10K_SKB_CB(bcn)->paddr);
                if (ret) {
                        ath10k_warn("failed to map beacon: %d\n", ret);
+                       dev_kfree_skb_any(bcn);
                        goto skip;
                }
 
@@ -2393,8 +2387,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
        return 0;
 }
 
-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
-                                 u16 rd5g, u16 ctl2g, u16 ctl5g)
+static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+                                             u16 rd2g, u16 rd5g, u16 ctl2g,
+                                             u16 ctl5g)
 {
        struct wmi_pdev_set_regdomain_cmd *cmd;
        struct sk_buff *skb;
@@ -2418,6 +2413,46 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
                                   ar->wmi.cmd->pdev_set_regdomain_cmdid);
 }
 
+static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+                                            u16 rd2g, u16 rd5g,
+                                            u16 ctl2g, u16 ctl5g,
+                                            enum wmi_dfs_region dfs_reg)
+{
+       struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+       cmd->reg_domain = __cpu_to_le32(rd);
+       cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+       cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+       cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+       cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+       cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+                  rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+                                 u16 rd5g, u16 ctl2g, u16 ctl5g,
+                                 enum wmi_dfs_region dfs_reg)
+{
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+               return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+                                                       ctl2g, ctl5g, dfs_reg);
+       else
+               return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+                                                        ctl2g, ctl5g);
+}
+
 int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
                                const struct wmi_channel_arg *arg)
 {
@@ -3456,8 +3491,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
                __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
 
        ath10k_dbg(ATH10K_DBG_WMI,
-                  "wmi peer assoc vdev %d addr %pM\n",
-                  arg->vdev_id, arg->addr);
+                  "wmi peer assoc vdev %d addr %pM (%s)\n",
+                  arg->vdev_id, arg->addr,
+                  arg->peer_reassoc ? "reassociate" : "new");
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
 }
 
index 4fcc96aa9513b89a45e11fd9c2d8da79414204c9..ae838221af65d46efd817f18f5bb872034e1c5c5 100644 (file)
@@ -198,16 +198,6 @@ struct wmi_mac_addr {
        } __packed;
 } __packed;
 
-/* macro to convert MAC address from WMI word format to char array */
-#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
-       (c_macaddr)[0] =  ((pwmi_mac_addr)->word0) & 0xff; \
-       (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
-       (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
-       (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
-       (c_macaddr)[4] =  ((pwmi_mac_addr)->word1) & 0xff; \
-       (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
-       } while (0)
-
 struct wmi_cmd_map {
        u32 init_cmdid;
        u32 start_scan_cmdid;
@@ -2185,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {
        __le32 conformance_test_limit_5G;
 } __packed;
 
+enum wmi_dfs_region {
+       /* Uninitialized dfs domain */
+       WMI_UNINIT_DFS_DOMAIN = 0,
+
+       /* FCC3 dfs domain */
+       WMI_FCC_DFS_DOMAIN = 1,
+
+       /* ETSI dfs domain */
+       WMI_ETSI_DFS_DOMAIN = 2,
+
+       /* Japan dfs domain */
+       WMI_MKK4_DFS_DOMAIN = 3,
+};
+
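Editor's note: the new dfs_reg argument to ath10k_wmi_pdev_set_regdomain() is expected to carry one of the wmi_dfs_region values above. A minimal sketch of how a caller might translate the cfg80211/nl80211 DFS region into this enum; the helper name is illustrative and not part of this patch:

    static enum wmi_dfs_region
    ath10k_map_dfs_region(enum nl80211_dfs_regions region)
    {
            switch (region) {
            case NL80211_DFS_FCC:
                    return WMI_FCC_DFS_DOMAIN;
            case NL80211_DFS_ETSI:
                    return WMI_ETSI_DFS_DOMAIN;
            case NL80211_DFS_JP:
                    return WMI_MKK4_DFS_DOMAIN;
            case NL80211_DFS_UNSET:
            default:
                    return WMI_UNINIT_DFS_DOMAIN;
            }
    }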
+struct wmi_pdev_set_regdomain_cmd_10x {
+       __le32 reg_domain;
+       __le32 reg_domain_2G;
+       __le32 reg_domain_5G;
+       __le32 conformance_test_limit_2G;
+       __le32 conformance_test_limit_5G;
+
+       /* dfs domain from wmi_dfs_region */
+       __le32 dfs_domain;
+} __packed;
+
 /* Command to set/unset chip in quiet mode */
 struct wmi_pdev_set_quiet_cmd {
        /* period in TUs */
@@ -2210,6 +2225,19 @@ enum ath10k_protmode {
        ATH10K_PROT_RTSCTS   = 2,    /* RTS-CTS */
 };
 
+enum wmi_rtscts_profile {
+       WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+       WMI_RTSCTS_FOR_SECOND_RATESERIES,
+       WMI_RTSCTS_ACROSS_SW_RETRIES
+};
+
+#define WMI_RTSCTS_ENABLED             1
+#define WMI_RTSCTS_SET_MASK            0x0f
+#define WMI_RTSCTS_SET_LSB             0
+
+#define WMI_RTSCTS_PROFILE_MASK                0xf0
+#define WMI_RTSCTS_PROFILE_LSB         4
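Editor's note: the mask/LSB pairs above describe how the RTS/CTS enable bit (bits 0-3) and the wmi_rtscts_profile value (bits 4-7) pack into a single parameter word. A minimal sketch of the intended composition, with an illustrative variable name:

    u32 rtscts = 0;

    /* bits 0-3: enable flag, bits 4-7: profile, per the masks above */
    rtscts |= (WMI_RTSCTS_ENABLED << WMI_RTSCTS_SET_LSB) &
              WMI_RTSCTS_SET_MASK;
    rtscts |= (WMI_RTSCTS_FOR_SECOND_RATESERIES << WMI_RTSCTS_PROFILE_LSB) &
              WMI_RTSCTS_PROFILE_MASK;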
+
 enum wmi_beacon_gen_mode {
        WMI_BEACON_STAGGERED_MODE = 0,
        WMI_BEACON_BURST_MODE = 1
@@ -2682,6 +2710,9 @@ struct wal_dbg_tx_stats {
        /* wal pdev resets  */
        __le32 pdev_resets;
 
+       /* frames dropped due to non-availability of stateless TIDs */
+       __le32 stateless_tid_alloc_failure;
+
        __le32 phy_underrun;
 
        /* MPDU is more than txop limit */
@@ -2738,13 +2769,21 @@ enum wmi_stats_id {
        WMI_REQUEST_AP_STAT     = 0x02
 };
 
+struct wlan_inst_rssi_args {
+       __le16 cfg_retry_count;
+       __le16 retry_count;
+};
+
 struct wmi_request_stats_cmd {
        __le32 stats_id;
 
-       /*
-        * Space to add parameters like
-        * peer mac addr
-        */
+       __le32 vdev_id;
+
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+
+       /* Instantaneous RSSI arguments */
+       struct wlan_inst_rssi_args inst_rssi_args;
 } __packed;
 
 /* Suspend option */
@@ -2795,7 +2834,7 @@ struct wmi_stats_event {
  * PDEV statistics
  * TODO: add all PDEV stats here
  */
-struct wmi_pdev_stats {
+struct wmi_pdev_stats_old {
        __le32 chan_nf;        /* Channel noise floor */
        __le32 tx_frame_count; /* TX frame count */
        __le32 rx_frame_count; /* RX frame count */
@@ -2806,6 +2845,23 @@ struct wmi_pdev_stats {
        struct wal_dbg_stats wal; /* WAL dbg stats */
 } __packed;
 
+struct wmi_pdev_stats_10x {
+       __le32 chan_nf;        /* Channel noise floor */
+       __le32 tx_frame_count; /* TX frame count */
+       __le32 rx_frame_count; /* RX frame count */
+       __le32 rx_clear_count; /* rx clear count */
+       __le32 cycle_count;    /* cycle count */
+       __le32 phy_err_count;  /* Phy error count */
+       __le32 chan_tx_pwr;    /* channel tx power */
+       struct wal_dbg_stats wal; /* WAL dbg stats */
+       __le32 ack_rx_bad;
+       __le32 rts_bad;
+       __le32 rts_good;
+       __le32 fcs_bad;
+       __le32 no_beacons;
+       __le32 mib_int_count;
+} __packed;
+
 /*
  * VDEV statistics
  * TODO: add all VDEV stats here
@@ -2818,10 +2874,17 @@ struct wmi_vdev_stats {
  * peer statistics.
  * TODO: add more stats
  */
-struct wmi_peer_stats {
+struct wmi_peer_stats_old {
+       struct wmi_mac_addr peer_macaddr;
+       __le32 peer_rssi;
+       __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_peer_stats_10x {
        struct wmi_mac_addr peer_macaddr;
        __le32 peer_rssi;
        __le32 peer_tx_rate;
+       __le32 peer_rx_rate;
 } __packed;
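Editor's note: splitting the pdev/peer stats into _old and _10x layouts implies the stats event parser must pick the structure by firmware flavour, much like the regdomain command dispatch earlier in this file. A sketch only, with illustrative names (data, noise_floor); the actual parsing change lives in wmi.c and is not shown here:

    /* sketch: choose the stats layout based on the firmware flavour */
    if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
            const struct wmi_pdev_stats_10x *ps = (const void *)data;

            /* 10.x adds rts_bad/rts_good, fcs_bad, no_beacons, ... */
            noise_floor = __le32_to_cpu(ps->chan_nf);
    } else {
            const struct wmi_pdev_stats_old *ps = (const void *)data;

            noise_floor = __le32_to_cpu(ps->chan_nf);
    }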
 
 struct wmi_vdev_create_cmd {
@@ -4202,7 +4265,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
 int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
 int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
 int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
-                                 u16 rd5g, u16 ctl2g, u16 ctl5g);
+                                 u16 rd5g, u16 ctl2g, u16 ctl5g,
+                                 enum wmi_dfs_region dfs_reg);
 int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
 int ath10k_wmi_cmd_init(struct ath10k *ar);
 int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
index 1a2973b7acf2500f5c97c992315793d0b4b9a9a7..0fce1c76638e9c9ae103e01ac6752dced8b1670d 100644 (file)
@@ -3709,8 +3709,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                        AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
                        AR5K_TPC);
        } else {
-               ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX |
-                       AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
+               ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
+                       AR5K_PHY_TXPOWER_RATE_MAX);
        }
 
        return 0;
index e39e5860a2e9347a2d44318e69b07410c4bd2dc5..9c125ff083f73de2c766bd5ccd6f3dc16aad7ae2 100644 (file)
@@ -1,11 +1,19 @@
 config ATH6KL
        tristate "Atheros mobile chipsets support"
+       depends on CFG80211
+        ---help---
+         This module adds core support for wireless adapters based on
+         Atheros AR6003 and AR6004 chipsets. You still need separate
+         bus drivers for USB and SDIO to be able to use real devices.
+
+         If you choose to build it as a module, it will be called
+         ath6kl_core. Please note that AR6002 and AR6001 are not
+         supported by this driver.
 
 config ATH6KL_SDIO
        tristate "Atheros ath6kl SDIO support"
        depends on ATH6KL
        depends on MMC
-       depends on CFG80211
        ---help---
          This module adds support for wireless adapters based on
          Atheros AR6003 and AR6004 chipsets running over SDIO. If you
@@ -17,25 +25,31 @@ config ATH6KL_USB
        tristate "Atheros ath6kl USB support"
        depends on ATH6KL
        depends on USB
-       depends on CFG80211
        ---help---
          This module adds support for wireless adapters based on
-         Atheros AR6004 chipset running over USB. This is still under
-         implementation and it isn't functional. If you choose to
-         build it as a module, it will be called ath6kl_usb.
+         Atheros AR6004 chipset and chipsets based on it running over
+         USB. If you choose to build it as a module, it will be
+         called ath6kl_usb.
 
 config ATH6KL_DEBUG
        bool "Atheros ath6kl debugging"
        depends on ATH6KL
        ---help---
-         Enables debug support
+         Enables ath6kl debug support, including debug messages
+         enabled with the debug_mask module parameter and a
+         debugfs interface.
+
+         If unsure, say Y to make it easier to debug problems.
 
 config ATH6KL_TRACING
        bool "Atheros ath6kl tracing support"
        depends on ATH6KL
        depends on EVENT_TRACING
        ---help---
-         Select this to ath6kl use tracing infrastructure.
+         Select this to let ath6kl use the tracing infrastructure,
+         which can be enabled with the help of trace-cmd, for
+         example. All debug messages and commands are delivered
+         using individually enablable trace points.
 
          If unsure, say Y to make it easier to debug problems.
 
@@ -47,3 +61,5 @@ config ATH6KL_REGDOMAIN
          Enabling this makes it possible to change the regdomain in
          the firmware. This can be only enabled if regulatory requirements
          are taken into account.
+
+         If unsure, say N.
index c2c6f460495859ae3517c4f20daa2c15caebdde0..0e26f4a34fda329910ecc278de9fe7bea8fa6c57 100644 (file)
@@ -724,8 +724,9 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                                   "added bss %pM to cfg80211\n", bssid);
                kfree(ie);
-       } else
+       } else {
                ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
+       }
 
        return bss;
 }
@@ -970,7 +971,6 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
                                          ssid_list[i].flag,
                                          ssid_list[i].ssid.ssid_len,
                                          ssid_list[i].ssid.ssid);
-
        }
 
        /* Make sure no old entries are left behind */
@@ -1759,7 +1759,7 @@ static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
 }
 
 static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
-                             u8 *mac, struct station_info *sinfo)
+                             const u8 *mac, struct station_info *sinfo)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ath6kl_vif *vif = netdev_priv(dev);
@@ -1897,7 +1897,6 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
 
        /* Configure the patterns that we received from the user. */
        for (i = 0; i < wow->n_patterns; i++) {
-
                /*
                 * Convert given nl80211 specific mask value to equivalent
                 * driver specific mask value and send it to the chip along
@@ -2850,8 +2849,9 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
        if (p.prwise_crypto_type == 0) {
                p.prwise_crypto_type = NONE_CRYPT;
                ath6kl_set_cipher(vif, 0, true);
-       } else if (info->crypto.n_ciphers_pairwise == 1)
+       } else if (info->crypto.n_ciphers_pairwise == 1) {
                ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
+       }
 
        switch (info->crypto.cipher_group) {
        case WLAN_CIPHER_SUITE_WEP40:
@@ -2897,7 +2897,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
        }
 
        if (info->inactivity_timeout) {
-
                inactivity_timeout = info->inactivity_timeout;
 
                if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
@@ -2975,7 +2974,7 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 
 static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
-                             u8 *mac)
+                             const u8 *mac)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ath6kl_vif *vif = netdev_priv(dev);
@@ -2986,7 +2985,8 @@ static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
 }
 
 static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
-                                u8 *mac, struct station_parameters *params)
+                                const u8 *mac,
+                                struct station_parameters *params)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ath6kl_vif *vif = netdev_priv(dev);
index 4b46adbe8c923b7dd410c919dc3cbc289541ea4f..b0b6520427600a05687e299318f78f873e3fd948 100644 (file)
@@ -45,9 +45,9 @@ module_param(testmode, uint, 0644);
 module_param(recovery_enable, uint, 0644);
 module_param(heart_beat_poll, uint, 0644);
 MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
-MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic"   \
-                "polling. This also specifies the polling interval in"  \
-                "msecs. Set reocvery_enable for this to be effective");
+MODULE_PARM_DESC(heart_beat_poll,
+                "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
+
 
 void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
 {
index dbfd17d0a5faa33fa390b150d8d39e03a42d3746..55c4064dd5067f26b8dfaf54689f85c9eb67a88e 100644 (file)
@@ -172,7 +172,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
                           struct ath6kl_irq_proc_registers *irq_proc_reg,
                           struct ath6kl_irq_enable_reg *irq_enable_reg)
 {
-
        ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
 
        if (irq_proc_reg != NULL) {
@@ -219,7 +218,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
                                   "GMBOX lookahead alias 1:   0x%x\n",
                                   irq_proc_reg->rx_gmbox_lkahd_alias[1]);
                }
-
        }
 
        if (irq_enable_reg != NULL) {
@@ -1396,7 +1394,6 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
                                                const char __user *user_buf,
                                                size_t count, loff_t *ppos)
 {
-
        struct ath6kl *ar = file->private_data;
        struct ath6kl_vif *vif;
        char buf[200];
@@ -1575,7 +1572,6 @@ static ssize_t ath6kl_delete_qos_write(struct file *file,
                                const char __user *user_buf,
                                size_t count, loff_t *ppos)
 {
-
        struct ath6kl *ar = file->private_data;
        struct ath6kl_vif *vif;
        char buf[100];
index ca9ba005f2871f3e42bbc914bb5ca90f6e8b90e9..e194c10d9f0071725c1c5eba917347ae8209b0a0 100644 (file)
@@ -97,8 +97,8 @@ static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
                struct ath6kl_irq_proc_registers *irq_proc_reg,
                struct ath6kl_irq_enable_reg *irq_en_reg)
 {
-
 }
+
 static inline void dump_cred_dist_stats(struct htc_target *target)
 {
 }
index fea7709b5dda59aa26fa0fbc5eb70caccea77918..18c070850a09b870900624a83ee5576551281cdb 100644 (file)
@@ -37,7 +37,6 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
        buf = req->virt_dma_buf;
 
        for (i = 0; i < req->scat_entries; i++) {
-
                if (from_dma)
                        memcpy(req->scat_list[i].buf, buf,
                               req->scat_list[i].len);
@@ -116,7 +115,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
                            le32_to_cpu(regdump_val[i + 2]),
                            le32_to_cpu(regdump_val[i + 3]));
        }
-
 }
 
 static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
@@ -701,5 +699,4 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
 
 fail_setup:
        return status;
-
 }
index 61f6b21fb0aeeaa4e6ef20dfca3baddf6ad638cd..dc6bd8cd9b837d85155d494a0afd2b32b7775240 100644 (file)
@@ -197,9 +197,9 @@ struct hif_scatter_req {
        /* bounce buffer for upper layers to copy to/from */
        u8 *virt_dma_buf;
 
-       struct hif_scatter_item scat_list[1];
-
        u32 scat_q_depth;
+
+       struct hif_scatter_item scat_list[0];
 };
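Editor's note: turning scat_list[] into a zero-length array only works because scat_q_depth is moved ahead of it (a flexible array member must be the last field), and it relies on the matching allocation fix in sdio.c further below, which drops the old "n_scat_entry - 1" correction. The intended sizing is roughly this sketch:

    /* sketch: reserve n_scat_entry slots for the trailing scat_list[] */
    scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
    s_req = kzalloc(sizeof(*s_req) + scat_list_sz, GFP_KERNEL);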
 
 struct ath6kl_irq_proc_registers {
index 65e5b719093d47943b3135e902cf68001eefd988..e481f14b98787e88354278d26e3c59615ff2851c 100644 (file)
@@ -112,9 +112,9 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
                if (cur_ep_dist->endpoint == ENDPOINT_0)
                        continue;
 
-               if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+               if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
                        cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
-               else {
+               } else {
                        /*
                         * For the remaining data endpoints, we assume that
                         * each cred_per_msg are the same. We use a simple
@@ -129,7 +129,6 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
                        count = (count * 3) >> 2;
                        count = max(count, cur_ep_dist->cred_per_msg);
                        cur_ep_dist->cred_norm = count;
-
                }
 
                ath6kl_dbg(ATH6KL_DBG_CREDIT,
@@ -549,7 +548,6 @@ static int htc_check_credits(struct htc_target *target,
                             enum htc_endpoint_id eid, unsigned int len,
                             int *req_cred)
 {
-
        *req_cred = (len > target->tgt_cred_sz) ?
                     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
 
@@ -608,7 +606,6 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
        unsigned int len;
 
        while (true) {
-
                flags = 0;
 
                if (list_empty(&endpoint->txq))
@@ -889,7 +886,6 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
                ac = target->dev->ar->ep2ac_map[endpoint->eid];
 
        while (true) {
-
                if (list_empty(&endpoint->txq))
                        break;
 
@@ -1190,7 +1186,6 @@ static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
                list_add_tail(&packet->list, &container);
                htc_tx_complete(endpoint, &container);
        }
-
 }
 
 static void ath6kl_htc_flush_txep_all(struct htc_target *target)
@@ -1394,7 +1389,6 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
 
        ep_cb = ep->ep_cb;
        for (j = 0; j < n_msg; j++) {
-
                /*
                 * Reset flag, any packets allocated using the
                 * rx_alloc() API cannot be recycled on
@@ -1424,9 +1418,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
                                }
                        }
 
-                       if (list_empty(&ep->rx_bufq))
+                       if (list_empty(&ep->rx_bufq)) {
                                packet = NULL;
-                       else {
+                       } else {
                                packet = list_first_entry(&ep->rx_bufq,
                                                struct htc_packet, list);
                                list_del(&packet->list);
@@ -1487,7 +1481,6 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
        spin_lock_bh(&target->rx_lock);
 
        for (i = 0; i < msg; i++) {
-
                htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
 
                if (htc_hdr->eid >= ENDPOINT_MAX) {
@@ -1708,7 +1701,6 @@ static int htc_parse_trailer(struct htc_target *target,
                lk_ahd = (struct htc_lookahead_report *) record_buf;
                if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
                    next_lk_ahds) {
-
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
                                   lk_ahd->pre_valid, lk_ahd->post_valid);
@@ -1755,7 +1747,6 @@ static int htc_parse_trailer(struct htc_target *target,
        }
 
        return 0;
-
 }
 
 static int htc_proc_trailer(struct htc_target *target,
@@ -1776,7 +1767,6 @@ static int htc_proc_trailer(struct htc_target *target,
        status = 0;
 
        while (len > 0) {
-
                if (len < sizeof(struct htc_record_hdr)) {
                        status = -ENOMEM;
                        break;
@@ -2098,7 +2088,6 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
                }
 
                if (!fetched_pkts) {
-
                        packet = list_first_entry(rx_pktq, struct htc_packet,
                                                   list);
 
@@ -2173,7 +2162,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
        look_aheads[0] = msg_look_ahead;
 
        while (true) {
-
                /*
                 * First lookahead sets the expected endpoint IDs for all
                 * packets in a bundle.
@@ -2825,8 +2813,9 @@ static int ath6kl_htc_reset(struct htc_target *target)
                        packet->buf = packet->buf_start;
                        packet->endpoint = ENDPOINT_0;
                        list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
-               } else
+               } else {
                        list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+               }
        }
 
        return 0;
index 67aa924ed8b317f9d5240ad1560a820553790c24..756fe52a12c8ad5a3496de58bbfec01d0ad9820b 100644 (file)
@@ -137,7 +137,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
                        credits_required = 0;
 
                } else {
-
                        if (ep->cred_dist.credits < credits_required)
                                break;
 
@@ -169,7 +168,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
                /* queue this packet into the caller's queue */
                list_add_tail(&packet->list, queue);
        }
-
 }
 
 static void get_htc_packet(struct htc_target *target,
@@ -279,7 +277,6 @@ static int htc_issue_packets(struct htc_target *target,
                        list_add(&packet->list, pkt_queue);
                        break;
                }
-
        }
 
        if (status != 0) {
@@ -385,7 +382,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
                         */
                        list_for_each_entry_safe(packet, tmp_pkt,
                                                 txq, list) {
-
                                ath6kl_dbg(ATH6KL_DBG_HTC,
                                           "%s: Indicat overflowed TX pkts: %p\n",
                                           __func__, packet);
@@ -403,7 +399,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
                                        list_move_tail(&packet->list,
                                                       &send_queue);
                                }
-
                        }
 
                        if (list_empty(&send_queue)) {
@@ -454,7 +449,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
         * enough transmit resources.
         */
        while (true) {
-
                if (get_queue_depth(&ep->txq) == 0)
                        break;
 
@@ -495,8 +489,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
                }
 
                spin_lock_bh(&target->tx_lock);
-
        }
+
        /* done with this endpoint, we can clear the count */
        ep->tx_proc_cnt = 0;
        spin_unlock_bh(&target->tx_lock);
@@ -1106,7 +1100,6 @@ free_skb:
        dev_kfree_skb(skb);
 
        return status;
-
 }
 
 static void htc_flush_rx_queue(struct htc_target *target,
@@ -1258,7 +1251,6 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
                tx_alloc = 0;
 
        } else {
-
                tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
                if (tx_alloc == 0) {
                        status = -ENOMEM;
index 4f316bdcbab58da3911c6a2365da1989ba412744..d5ef211f261c2c19e6e8deeef985dc2b83130794 100644 (file)
@@ -1192,7 +1192,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
 
        if (board_ext_address &&
            ar->fw_board_len == (board_data_size + board_ext_data_size)) {
-
                /* write extended board data */
                ath6kl_dbg(ATH6KL_DBG_BOOT,
                           "writing extended board data to 0x%x (%d B)\n",
index 5839fc23bdc789d5013f1c89e48f0f65eff37efe..d56554674da47924477c02423ad08c50caef2918 100644 (file)
@@ -571,7 +571,6 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
 
 static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
 {
-
        struct ath6kl *ar = vif->ar;
 
        vif->profile.ch = cpu_to_le16(channel);
@@ -600,7 +599,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
 
 static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
 {
-
        struct ath6kl_vif *vif;
        int res = 0;
 
@@ -692,9 +690,9 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
                cfg80211_michael_mic_failure(vif->ndev, sta->mac,
                                             NL80211_KEYTYPE_PAIRWISE, keyid,
                                             tsc, GFP_KERNEL);
-       } else
+       } else {
                ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
-
+       }
 }
 
 static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
@@ -1093,8 +1091,9 @@ static int ath6kl_open(struct net_device *dev)
        if (test_bit(CONNECTED, &vif->flags)) {
                netif_carrier_on(dev);
                netif_wake_queue(dev);
-       } else
+       } else {
                netif_carrier_off(dev);
+       }
 
        return 0;
 }
@@ -1146,7 +1145,6 @@ static int ath6kl_set_features(struct net_device *dev,
                        dev->features = features | NETIF_F_RXCSUM;
                        return err;
                }
-
        }
 
        return err;
index 7126bdd4236c2b1e78dd3a9d2c600dbadbb53f0e..339d89f14d32b1991c3d3d646f460b90f41411bb 100644 (file)
@@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
        int i, scat_req_sz, scat_list_sz, size;
        u8 *virt_buf;
 
-       scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
+       scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
        scat_req_sz = sizeof(*s_req) + scat_list_sz;
 
        if (!virt_scat)
@@ -425,8 +425,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
                        memcpy(tbuf, buf, len);
 
                bounced = true;
-       } else
+       } else {
                tbuf = buf;
+       }
 
        ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
        if ((request & HIF_READ) && bounced)
@@ -441,9 +442,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
 static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
                                      struct bus_request *req)
 {
-       if (req->scat_req)
+       if (req->scat_req) {
                ath6kl_sdio_scat_rw(ar_sdio, req);
-       else {
+       } else {
                void *context;
                int status;
 
@@ -656,7 +657,6 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
        list_add_tail(&s_req->list, &ar_sdio->scat_req);
 
        spin_unlock_bh(&ar_sdio->scat_lock);
-
 }
 
 /* scatter gather read write request */
@@ -674,9 +674,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
                   "hif-scatter: total len: %d scatter entries: %d\n",
                   scat_req->len, scat_req->scat_entries);
 
-       if (request & HIF_SYNCHRONOUS)
+       if (request & HIF_SYNCHRONOUS) {
                status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
-       else {
+       } else {
                spin_lock_bh(&ar_sdio->wr_async_lock);
                list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
@@ -856,7 +856,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
 
        if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
            (!ar->suspend_mode && wow)) {
-
                ret = ath6kl_set_sdio_pm_caps(ar);
                if (ret)
                        goto cut_pwr;
@@ -878,7 +877,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
 
        if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
            !ar->suspend_mode || try_deepsleep) {
-
                flags = sdio_get_host_pm_caps(func);
                if (!(flags & MMC_PM_KEEP_POWER))
                        goto cut_pwr;
@@ -1061,7 +1059,6 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
 
        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
-
                /*
                 * Hit the credit counter with a 4-byte access, the first byte
                 * read will hit the counter and cause a decrement, while the
index a580a629a0da6ba24b251dab8147e7de74c34f7c..d5eeeae7711b253c7dcddbf4214613cd5aff86f7 100644 (file)
@@ -289,7 +289,7 @@ struct host_interest {
        u32 hi_hp_rx_traffic_ratio;                    /* 0xd8 */
 
        /* test applications flags */
-       u32 hi_test_apps_related    ;                  /* 0xdc */
+       u32 hi_test_apps_related;                      /* 0xdc */
        /* location of test script */
        u32 hi_ota_testscript;                         /* 0xe0 */
        /* location of CAL data */
index ebb24045a8ae6cbcac844a239d11a05ee6f86e14..40432fe7a5d2cc112a4ce6d68bb8fd43a115e01f 100644 (file)
@@ -125,8 +125,9 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
                *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
                spin_unlock_bh(&conn->psq_lock);
                return false;
-       } else if (!conn->apsd_info)
+       } else if (!conn->apsd_info) {
                return false;
+       }
 
        if (test_bit(WMM_ENABLED, &vif->flags)) {
                ether_type = be16_to_cpu(datap->h_proto);
@@ -316,8 +317,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
                cookie = NULL;
                ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
                           skb, skb->len);
-       } else
+       } else {
                cookie = ath6kl_alloc_cookie(ar);
+       }
 
        if (cookie == NULL) {
                spin_unlock_bh(&ar->lock);
@@ -359,7 +361,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
        struct ath6kl_vif *vif = netdev_priv(dev);
        u32 map_no = 0;
        u16 htc_tag = ATH6KL_DATA_PKT_TAG;
-       u8 ac = 99 ; /* initialize to unmapped ac */
+       u8 ac = 99; /* initialize to unmapped ac */
        bool chk_adhoc_ps_mapping = false;
        int ret;
        struct wmi_tx_meta_v2 meta_v2;
@@ -449,8 +451,9 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
                        if (ret)
                                goto fail_tx;
                }
-       } else
+       } else {
                goto fail_tx;
+       }
 
        spin_lock_bh(&ar->lock);
 
@@ -702,7 +705,6 @@ void ath6kl_tx_complete(struct htc_target *target,
 
        /* reap completed packets */
        while (!list_empty(packet_queue)) {
-
                packet = list_first_entry(packet_queue, struct htc_packet,
                                          list);
                list_del(&packet->list);
@@ -1089,8 +1091,9 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
                        else
                                skb_queue_tail(&rxtid->q, node->skb);
                        node->skb = NULL;
-               } else
+               } else {
                        stats->num_hole++;
+               }
 
                rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
                idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@@ -1211,7 +1214,7 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
                return is_queued;
 
        spin_lock_bh(&rxtid->lock);
-       for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+       for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
                if (rxtid->hold_q[idx].skb) {
                        /*
                         * There is a frame in the queue and no
@@ -1265,7 +1268,6 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
        is_apsdq_empty_at_start = is_apsdq_empty;
 
        while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
-
                spin_lock_bh(&conn->psq_lock);
                skb = skb_dequeue(&conn->apsdq);
                is_apsdq_empty = skb_queue_empty(&conn->apsdq);
@@ -1606,16 +1608,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
                        if (!conn)
                                return;
                        aggr_conn = conn->aggr_conn;
-               } else
+               } else {
                        aggr_conn = vif->aggr_cntxt->aggr_conn;
+               }
 
                if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
                                          is_amsdu, skb)) {
                        /* aggregation code will handle the skb */
                        return;
                }
-       } else if (!is_broadcast_ether_addr(datap->h_dest))
+       } else if (!is_broadcast_ether_addr(datap->h_dest)) {
                vif->net_stats.multicast++;
+       }
 
        ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
 }
@@ -1710,8 +1714,9 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
-       } else
+       } else {
                aggr_conn = vif->aggr_cntxt->aggr_conn;
+       }
 
        if (!aggr_conn)
                return;
@@ -1766,7 +1771,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
                skb_queue_head_init(&rxtid->q);
                spin_lock_init(&rxtid->lock);
        }
-
 }
 
 struct aggr_info *aggr_init(struct ath6kl_vif *vif)
@@ -1806,8 +1810,9 @@ void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
-       } else
+       } else {
                aggr_conn = vif->aggr_cntxt->aggr_conn;
+       }
 
        if (!aggr_conn)
                return;
index 56c3fd5cef65a07e915d63d8c0dc24727a87406b..3afc5a463d06f822f339250deecfb1cefadea592 100644 (file)
@@ -236,7 +236,6 @@ static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
                        break;
                kfree(urb_context);
        }
-
 }
 
 static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
@@ -245,7 +244,6 @@ static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
 
        for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
                ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
-
 }
 
 static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
index 8b4ce28e3ce8f51f7cda070364394a117d5aef66..4d7f9e4712e991deea8553f7505a7f79b9a6f6e7 100644 (file)
@@ -289,8 +289,9 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
                           ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
                                        sizeof(struct ath6kl_llc_snap_hdr),
                                        layer2_priority);
-               } else
+               } else {
                        usr_pri = layer2_priority & 0x7;
+               }
 
                /*
                 * Queue the EAPOL frames in the same WMM_AC_VO queue
@@ -359,8 +360,9 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
                hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
                                   sizeof(u32));
                skb_pull(skb, hdr_size);
-       } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
+       } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
                skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
+       }
 
        datap = skb->data;
        llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
@@ -936,7 +938,6 @@ ath6kl_regd_find_country_by_rd(u16 regdmn)
 
 static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
 {
-
        struct ath6kl_wmi_regdomain *ev;
        struct country_code_to_enum_rd *country = NULL;
        struct reg_dmn_pair_mapping *regpair = NULL;
@@ -946,10 +947,9 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
        ev = (struct ath6kl_wmi_regdomain *) datap;
        reg_code = le32_to_cpu(ev->reg_code);
 
-       if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG)
+       if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
                country = ath6kl_regd_find_country((u16) reg_code);
-       else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
-
+       } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
                regpair = ath6kl_get_regpair((u16) reg_code);
                country = ath6kl_regd_find_country_by_rd((u16) reg_code);
                if (regpair)
@@ -1499,7 +1499,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
 
        if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
            (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
-
                ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
                tsinfo = le16_to_cpu(ts->tsinfo);
                tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -1530,7 +1529,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
         * for delete qos stream from AP
         */
        else if (reply->cac_indication == CAC_INDICATION_DELETE) {
-
                ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
                tsinfo = le16_to_cpu(ts->tsinfo);
                ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -2322,7 +2320,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
        return ret;
 }
 
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk)
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk)
 {
        struct sk_buff *skb;
        struct wmi_add_krk_cmd *cmd;
@@ -2479,7 +2477,6 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
                goto free_data_skb;
 
        for (index = 0; index < num_pri_streams; index++) {
-
                if (WARN_ON(!data_sync_bufs[index].skb))
                        goto free_data_skb;
 
@@ -2704,7 +2701,6 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
 
        for (i = 0; i < WMM_NUM_AC; i++) {
                if (stream_exist & (1 << i)) {
-
                        /*
                         * FIXME: Is this lock & unlock inside
                         * for loop correct? may need rework.
@@ -2870,8 +2866,9 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
        if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
                ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
                cmd->asleep = cpu_to_le32(1);
-       } else
+       } else {
                cmd->awake = cpu_to_le32(1);
+       }
 
        ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
                                  WMI_SET_HOST_SLEEP_MODE_CMDID,
index b5f226503baf7e37ea15c7953878f7a05185375f..7809afbb3e937e65a3f5045069815e79f551ac1c 100644 (file)
@@ -898,7 +898,6 @@ struct wmi_start_scan_cmd {
  *  flags here
  */
 enum wmi_scan_ctrl_flags_bits {
-
        /* set if can scan in the connect cmd */
        CONNECT_SCAN_CTRL_FLAGS = 0x01,
 
@@ -2617,7 +2616,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
                          u8 *key_material,
                          u8 key_op_ctrl, u8 *mac_addr,
                          enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk);
 int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
 int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
                            const u8 *pmkid, bool set);
index 8e1c7b0fe76c178567fb2c03e70bd8d4e303f337..8fcd586d1c3980c413c8499cb3aefd518f507f69 100644 (file)
@@ -53,7 +53,8 @@ obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
 obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
 ath9k_common-y:=       common.o \
                        common-init.o \
-                       common-beacon.o
+                       common-beacon.o \
+                       common-debug.o
 
 ath9k_htc-y += htc_hst.o \
                hif_usb.o \
index a0398fe3eb284f94e650f4541e18bfcd6713e5ef..be3eb2a8d602ee9096cf2f7b798d4ea5807dba82 100644 (file)
@@ -86,7 +86,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
        int irq;
        int ret = 0;
        struct ath_hw *ah;
-       struct ath_common *common;
        char hw_name[64];
 
        if (!dev_get_platdata(&pdev->dev)) {
@@ -146,9 +145,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
        wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
                   hw_name, (unsigned long)mem, irq);
 
-       common = ath9k_hw_common(sc->sc_ah);
-       /* Will be cleared in ath9k_start() */
-       set_bit(ATH_OP_INVALID, &common->op_flags);
        return 0;
 
  err_irq:
index 6d47783f2e5b7ecfd4343c29c8ad4bcbff7252aa..ba502a2d199bc2c85e00718d7f1cda1f71e90e43 100644 (file)
@@ -155,6 +155,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
                ATH9K_ANI_RSSI_THR_LOW,
                ATH9K_ANI_RSSI_THR_HIGH);
 
+       if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL)
+               immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
+
        if (!scan)
                aniState->ofdmNoiseImmunityLevel = immunityLevel;
 
@@ -235,6 +238,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
                BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
                ATH9K_ANI_RSSI_THR_HIGH);
 
+       if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL)
+               immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+
        if (ah->opmode == NL80211_IFTYPE_STATION &&
            BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
            immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
index 0a6163e9248c0fdefa0f0e78a695201fb8a7d0f2..c38399bc9aa96e84fce4929319599e1d4db694c7 100644 (file)
@@ -410,7 +410,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009e54, 0x00000000},
index f76139bbb74f0fcf144e22dd382be140c48cf08b..2c42ff05efa38f507cdd0f54905d8b79c0c77d32 100644 (file)
@@ -592,7 +592,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009fc0, 0x803e4788},
index 0ac8be96097f2e70f436f75aadb4be1b2d2bc44b..2154efcd3900514af944619a174db08d2514a140 100644 (file)
@@ -231,7 +231,7 @@ static const u32 ar9331_1p2_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009fc0, 0x803e4788},
index a01f0edb65182a16b95181038786c9d8073a1cab..b995ffe88b33bb8af6ca6a7aaa47e23adee34857 100644 (file)
@@ -318,7 +318,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009e54, 0x00000000},
@@ -348,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
        {0x0000a370, 0x00000000},
        {0x0000a390, 0x00000001},
        {0x0000a394, 0x00000444},
-       {0x0000a398, 0x00000000},
-       {0x0000a39c, 0x210d0401},
-       {0x0000a3a0, 0xab9a7144},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
        {0x0000a3a4, 0x00000000},
        {0x0000a3a8, 0xaaaaaaaa},
        {0x0000a3ac, 0x3c466478},
index 3c9113d9b1bc3cfe3aa7e9e07be8efa1a2df4d14..8e5c3b9786e3ac3fab8cf42e0d80bf84f268d4d2 100644 (file)
@@ -257,9 +257,9 @@ static const u32 qca953x_1p0_baseband_core[][2] = {
        {0x0000a370, 0x00000000},
        {0x0000a390, 0x00000001},
        {0x0000a394, 0x00000444},
-       {0x0000a398, 0x1f020503},
-       {0x0000a39c, 0x29180c03},
-       {0x0000a3a0, 0x9a8b6844},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
        {0x0000a3a4, 0x000000ff},
        {0x0000a3a8, 0x6a6a6a6a},
        {0x0000a3ac, 0x6a6a6a6a},
index e6aec2c0207ff43754a79a236a769a33bacf401e..a5ca65240af30b8980b5732a610d73ef155063c9 100644 (file)
@@ -90,7 +90,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009e54, 0x00000000},
index 44d74495c4de1465dbf42cba08b6bff03090023a..b20469425865d21299b4d34b1572c866de878bf2 100644 (file)
@@ -23,8 +23,8 @@
 #include <linux/leds.h>
 #include <linux/completion.h>
 
-#include "debug.h"
 #include "common.h"
+#include "debug.h"
 #include "mci.h"
 #include "dfs.h"
 #include "spectral.h"
@@ -114,6 +114,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 #define ATH_TXFIFO_DEPTH           8
 #define ATH_TX_ERROR               0x01
 
+/* Stop tx traffic 1ms before the GO goes away */
+#define ATH_P2P_PS_STOP_TIME       1000
+
 #define IEEE80211_SEQ_SEQ_SHIFT    4
 #define IEEE80211_SEQ_MAX          4096
 #define IEEE80211_WEP_IVLEN        3
@@ -251,7 +254,6 @@ struct ath_atx_tid {
 
        s8 bar_index;
        bool sched;
-       bool paused;
        bool active;
 };
 
@@ -272,6 +274,7 @@ struct ath_node {
 #ifdef CONFIG_ATH9K_STATION_STATISTICS
        struct ath_rx_rate_stats rx_rate_stats;
 #endif
+       u8 key_idx[4];
 };
 
 struct ath_tx_control {
@@ -367,11 +370,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 /********/
 
 struct ath_vif {
+       struct ieee80211_vif *vif;
        struct ath_node mcast_node;
        int av_bslot;
        bool primary_sta_vif;
        __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
        struct ath_buf *av_bcbuf;
+
+       /* P2P Client */
+       struct ieee80211_noa_data noa;
 };
 
 struct ath9k_vif_iter_data {
@@ -464,6 +471,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
 void ath_update_survey_nf(struct ath_softc *sc, int channel);
 void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
 void ath_ps_full_sleep(unsigned long data);
+void ath9k_p2p_ps_timer(void *priv);
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif);
 
 /**********/
 /* BTCOEX */
@@ -714,6 +723,9 @@ struct ath_softc {
        struct completion paprd_complete;
        wait_queue_head_t tx_wait;
 
+       struct ath_gen_timer *p2p_ps_timer;
+       struct ath_vif *p2p_ps_vif;
+
        unsigned long driver_data;
 
        u8 gtt_cnt;
index bd9e634879e69d4b2b33d741f7c4a8c51801ba9f..e387f0b2954a0cf5500610b330f744a5b68cd69c 100644 (file)
@@ -537,8 +537,6 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
        cur_conf->dtim_period = bss_conf->dtim_period;
        cur_conf->dtim_count = 1;
        cur_conf->ibss_creator = bss_conf->ibss_creator;
-       cur_conf->bmiss_timeout =
-               ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
 
        /*
         * It looks like mac80211 may end up using beacon interval of zero in
@@ -549,6 +547,9 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
        if (cur_conf->beacon_interval == 0)
                cur_conf->beacon_interval = 100;
 
+       cur_conf->bmiss_timeout =
+               ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
        /*
         * We don't parse dtim period from mac80211 during the driver
         * initialization as it breaks association with hidden-ssid
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c
new file mode 100644 (file)
index 0000000..3b289f9
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "common.h"
+
+static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct ath_hw *ah = file->private_data;
+       u32 len = 0, size = 6000;
+       char *buf;
+       size_t retval;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+static const struct file_operations fops_modal_eeprom = {
+       .read = read_file_modal_eeprom,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+
+void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+                                 struct ath_hw *ah)
+{
+       debugfs_create_file("modal_eeprom", S_IRUSR, debugfs_phy, ah,
+                           &fops_modal_eeprom);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_modal_eeprom);
+
+static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       struct ath_hw *ah = file->private_data;
+       u32 len = 0, size = 1500;
+       ssize_t retval = 0;
+       char *buf;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+static const struct file_operations fops_base_eeprom = {
+       .read = read_file_base_eeprom,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+                                struct ath_hw *ah)
+{
+       debugfs_create_file("base_eeprom", S_IRUSR, debugfs_phy, ah,
+                           &fops_base_eeprom);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_base_eeprom);
+
+void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+                            struct ath_rx_status *rs)
+{
+#define RX_PHY_ERR_INC(c) rxstats->phy_err_stats[c]++
+#define RX_CMN_STAT_INC(c) (rxstats->c++)
+
+       RX_CMN_STAT_INC(rx_pkts_all);
+       rxstats->rx_bytes_all += rs->rs_datalen;
+
+       if (rs->rs_status & ATH9K_RXERR_CRC)
+               RX_CMN_STAT_INC(crc_err);
+       if (rs->rs_status & ATH9K_RXERR_DECRYPT)
+               RX_CMN_STAT_INC(decrypt_crc_err);
+       if (rs->rs_status & ATH9K_RXERR_MIC)
+               RX_CMN_STAT_INC(mic_err);
+       if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
+               RX_CMN_STAT_INC(pre_delim_crc_err);
+       if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
+               RX_CMN_STAT_INC(post_delim_crc_err);
+       if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
+               RX_CMN_STAT_INC(decrypt_busy_err);
+
+       if (rs->rs_status & ATH9K_RXERR_PHY) {
+               RX_CMN_STAT_INC(phy_err);
+               if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
+                       RX_PHY_ERR_INC(rs->rs_phyerr);
+       }
+
+#undef RX_CMN_STAT_INC
+#undef RX_PHY_ERR_INC
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_stat_rx);
+
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+                             size_t count, loff_t *ppos)
+{
+#define RXS_ERR(s, e)                                  \
+       do {                                            \
+               len += scnprintf(buf + len, size - len, \
+                                "%18s : %10u\n", s,    \
+                                rxstats->e);           \
+       } while (0)
+
+       struct ath_rx_stats *rxstats = file->private_data;
+       char *buf;
+       unsigned int len = 0, size = 1600;
+       ssize_t retval = 0;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       RXS_ERR("PKTS-ALL", rx_pkts_all);
+       RXS_ERR("BYTES-ALL", rx_bytes_all);
+       RXS_ERR("BEACONS", rx_beacons);
+       RXS_ERR("FRAGS", rx_frags);
+       RXS_ERR("SPECTRAL", rx_spectral);
+
+       RXS_ERR("CRC ERR", crc_err);
+       RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
+       RXS_ERR("PHY ERR", phy_err);
+       RXS_ERR("MIC ERR", mic_err);
+       RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
+       RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
+       RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
+       RXS_ERR("LENGTH-ERR", rx_len_err);
+       RXS_ERR("OOM-ERR", rx_oom_err);
+       RXS_ERR("RATE-ERR", rx_rate_err);
+       RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
+
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+
+#undef RXS_ERR
+}
+
+static const struct file_operations fops_recv = {
+       .read = read_file_recv,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+                         struct ath_rx_stats *rxstats)
+{
+       debugfs_create_file("recv", S_IRUSR, debugfs_phy, rxstats,
+                           &fops_recv);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_recv);
+
+static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+#define PHY_ERR(s, p) \
+       len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+                        rxstats->phy_err_stats[p]);
+
+       struct ath_rx_stats *rxstats = file->private_data;
+       char *buf;
+       unsigned int len = 0, size = 1600;
+       ssize_t retval = 0;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
+       PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
+       PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
+       PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
+       PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
+       PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
+       PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
+       PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
+       PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
+       PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+       PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+       PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+       PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
+       PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
+       PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
+       PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+       PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
+       PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
+       PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+       PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
+       PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
+       PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+       PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
+       PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
+       PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+       PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+
+#undef PHY_ERR
+}
+
+static const struct file_operations fops_phy_err = {
+       .read = read_file_phy_err,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+                            struct ath_rx_stats *rxstats)
+{
+       debugfs_create_file("phy_err", S_IRUSR, debugfs_phy, rxstats,
+                           &fops_phy_err);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_phy_err);
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h
new file mode 100644 (file)
index 0000000..7c97884
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+
+/**
+ * struct ath_rx_stats - RX Statistics
+ * @rx_pkts_all:  No. of total frames received, including ones that
+       may have had errors.
+ * @rx_bytes_all:  No. of total bytes received, including ones that
+       may have had errors.
+ * @crc_err: No. of frames with incorrect CRC value
+ * @decrypt_crc_err: No. of frames whose CRC check failed after
+       decryption process completed
+ * @phy_err: No. of frames whose reception failed because the PHY
+       encountered an error
+ * @mic_err: No. of frames that failed TKIP MIC verification
+ * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
+ * @post_delim_crc_err: Post-Frame delimiter CRC error detections
+ * @decrypt_busy_err: Decryption interruptions counter
+ * @phy_err_stats: Individual PHY error statistics
+ * @rx_len_err:  No. of frames discarded due to bad length.
+ * @rx_oom_err:  No. of frames dropped due to OOM issues.
+ * @rx_rate_err:  No. of frames dropped due to rate errors.
+ * @rx_too_many_frags_err:  Frames dropped due to too-many-frags received.
+ * @rx_beacons:  No. of beacons received.
+ * @rx_frags:  No. of rx-fragments received.
+ * @rx_spectral: No. of spectral packets received.
+ */
+struct ath_rx_stats {
+       u32 rx_pkts_all;
+       u32 rx_bytes_all;
+       u32 crc_err;
+       u32 decrypt_crc_err;
+       u32 phy_err;
+       u32 mic_err;
+       u32 pre_delim_crc_err;
+       u32 post_delim_crc_err;
+       u32 decrypt_busy_err;
+       u32 phy_err_stats[ATH9K_PHYERR_MAX];
+       u32 rx_len_err;
+       u32 rx_oom_err;
+       u32 rx_rate_err;
+       u32 rx_too_many_frags_err;
+       u32 rx_beacons;
+       u32 rx_frags;
+       u32 rx_spectral;
+};
+
+void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+                                 struct ath_hw *ah);
+void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+                                struct ath_hw *ah);
+void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+                            struct ath_rx_status *rs);
+void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+                         struct ath_rx_stats *rxstats);
+void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+                            struct ath_rx_stats *rxstats);
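
The prototypes above are the shared debugfs entry points that the remaining hunks of this patch wire into both ath9k and ath9k_htc. As a rough illustration only (the driver-private struct, its field names and the function names below are hypothetical, not part of this patch; the real call sites are in ath9k_init_debug(), ath9k_htc_init_debug() and the RX paths shown further down), a driver would hand its debugfs dentry and its embedded struct ath_rx_stats to the common helpers like this:

	/* Hypothetical driver-side glue; my_priv and its fields are illustrative. */
	struct my_priv {
		struct dentry *debugfs_phy;	/* per-PHY debugfs directory */
		struct ath_rx_stats rxstats;	/* shared RX counters from common-debug.h */
		struct ath_hw *ah;		/* HW state, needed for the EEPROM dumps */
	};

	static void my_init_debug(struct my_priv *priv)
	{
		/* Create the shared "recv" and "phy_err" debugfs files. */
		ath9k_cmn_debug_recv(priv->debugfs_phy, &priv->rxstats);
		ath9k_cmn_debug_phy_err(priv->debugfs_phy, &priv->rxstats);

		/* The base/modal EEPROM dumps also move to the common module. */
		ath9k_cmn_debug_base_eeprom(priv->debugfs_phy, priv->ah);
		ath9k_cmn_debug_modal_eeprom(priv->debugfs_phy, priv->ah);
	}

	/* On the RX path, per-frame error accounting goes through one helper. */
	static void my_rx_err_stat(struct my_priv *priv, struct ath_rx_status *rs)
	{
		ath9k_cmn_debug_stat_rx(&priv->rxstats, rs);
	}
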
index ca38116838f00e5206c5ec35b3589c9bc4cbba58..ffc454b18637588f37964857c5a1b4997cc2ee5c 100644 (file)
@@ -23,6 +23,7 @@
 
 #include "common-init.h"
 #include "common-beacon.h"
+#include "common-debug.h"
 
 /* Common header for Atheros 802.11n base driver cores */
 
index 780ff1bee6f69ceac8729a9b75913c0bdad6c28b..6cc42be48d4e60e68f3e2603ebfc465f89159a0d 100644 (file)
@@ -948,151 +948,11 @@ static const struct file_operations fops_reset = {
        .llseek = default_llseek,
 };
 
-static ssize_t read_file_recv(struct file *file, char __user *user_buf,
-                             size_t count, loff_t *ppos)
-{
-#define RXS_ERR(s, e)                                      \
-       do {                                                \
-               len += scnprintf(buf + len, size - len,     \
-                                "%18s : %10u\n", s,        \
-                                sc->debug.stats.rxstats.e);\
-       } while (0)
-
-       struct ath_softc *sc = file->private_data;
-       char *buf;
-       unsigned int len = 0, size = 1600;
-       ssize_t retval = 0;
-
-       buf = kzalloc(size, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       RXS_ERR("PKTS-ALL", rx_pkts_all);
-       RXS_ERR("BYTES-ALL", rx_bytes_all);
-       RXS_ERR("BEACONS", rx_beacons);
-       RXS_ERR("FRAGS", rx_frags);
-       RXS_ERR("SPECTRAL", rx_spectral);
-
-       RXS_ERR("CRC ERR", crc_err);
-       RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
-       RXS_ERR("PHY ERR", phy_err);
-       RXS_ERR("MIC ERR", mic_err);
-       RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
-       RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
-       RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
-       RXS_ERR("LENGTH-ERR", rx_len_err);
-       RXS_ERR("OOM-ERR", rx_oom_err);
-       RXS_ERR("RATE-ERR", rx_rate_err);
-       RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
-
-       if (len > size)
-               len = size;
-
-       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-       kfree(buf);
-
-       return retval;
-
-#undef RXS_ERR
-}
-
 void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
 {
-#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
-
-       RX_STAT_INC(rx_pkts_all);
-       sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
-
-       if (rs->rs_status & ATH9K_RXERR_CRC)
-               RX_STAT_INC(crc_err);
-       if (rs->rs_status & ATH9K_RXERR_DECRYPT)
-               RX_STAT_INC(decrypt_crc_err);
-       if (rs->rs_status & ATH9K_RXERR_MIC)
-               RX_STAT_INC(mic_err);
-       if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
-               RX_STAT_INC(pre_delim_crc_err);
-       if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
-               RX_STAT_INC(post_delim_crc_err);
-       if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
-               RX_STAT_INC(decrypt_busy_err);
-
-       if (rs->rs_status & ATH9K_RXERR_PHY) {
-               RX_STAT_INC(phy_err);
-               if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
-                       RX_PHY_ERR_INC(rs->rs_phyerr);
-       }
-
-#undef RX_PHY_ERR_INC
+       ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
 }
 
-static const struct file_operations fops_recv = {
-       .read = read_file_recv,
-       .open = simple_open,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
-
-static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
-                                size_t count, loff_t *ppos)
-{
-#define PHY_ERR(s, p) \
-       len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
-                        sc->debug.stats.rxstats.phy_err_stats[p]);
-
-       struct ath_softc *sc = file->private_data;
-       char *buf;
-       unsigned int len = 0, size = 1600;
-       ssize_t retval = 0;
-
-       buf = kzalloc(size, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
-       PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
-       PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
-       PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
-       PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
-       PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
-       PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
-       PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
-       PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
-       PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
-       PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
-       PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
-       PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
-       PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
-       PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
-       PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
-       PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
-       PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
-       PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
-       PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
-       PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
-       PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
-       PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
-       PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
-       PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
-       PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
-
-       if (len > size)
-               len = size;
-
-       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-       kfree(buf);
-
-       return retval;
-
-#undef PHY_ERR
-}
-
-static const struct file_operations fops_phy_err = {
-       .read = read_file_phy_err,
-       .open = simple_open,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
-
 static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
 {
@@ -1268,62 +1128,6 @@ static const struct file_operations fops_dump_nfcal = {
        .llseek = default_llseek,
 };
 
-static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
-                                    size_t count, loff_t *ppos)
-{
-       struct ath_softc *sc = file->private_data;
-       struct ath_hw *ah = sc->sc_ah;
-       u32 len = 0, size = 1500;
-       ssize_t retval = 0;
-       char *buf;
-
-       buf = kzalloc(size, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
-
-       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-       kfree(buf);
-
-       return retval;
-}
-
-static const struct file_operations fops_base_eeprom = {
-       .read = read_file_base_eeprom,
-       .open = simple_open,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
-
-static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
-                                     size_t count, loff_t *ppos)
-{
-       struct ath_softc *sc = file->private_data;
-       struct ath_hw *ah = sc->sc_ah;
-       u32 len = 0, size = 6000;
-       char *buf;
-       size_t retval;
-
-       buf = kzalloc(size, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
-
-       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-       kfree(buf);
-
-       return retval;
-}
-
-static const struct file_operations fops_modal_eeprom = {
-       .read = read_file_modal_eeprom,
-       .open = simple_open,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
-
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
@@ -1524,10 +1328,10 @@ int ath9k_init_debug(struct ath_hw *ah)
                            &fops_misc);
        debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
                            &fops_reset);
-       debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
-                           &fops_recv);
-       debugfs_create_file("phy_err", S_IRUSR, sc->debug.debugfs_phy, sc,
-                           &fops_phy_err);
+
+       ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
+       ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
+
        debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
                          &ah->rxchainmask);
        debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
@@ -1547,10 +1351,10 @@ int ath9k_init_debug(struct ath_hw *ah)
                            &fops_regdump);
        debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc,
                            &fops_dump_nfcal);
-       debugfs_create_file("base_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
-                           &fops_base_eeprom);
-       debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
-                           &fops_modal_eeprom);
+
+       ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
+       ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
+
        debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
                           sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
        debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
index 559a68c2709cc882d5edf646417dd7348d8fffc8..53ae15bd0c9d4ac8b40a7a9de3689c607d56c3cd 100644 (file)
@@ -221,50 +221,6 @@ struct ath_rx_rate_stats {
        } cck_stats[4];
 };
 
-/**
- * struct ath_rx_stats - RX Statistics
- * @rx_pkts_all:  No. of total frames received, including ones that
-       may have had errors.
- * @rx_bytes_all:  No. of total bytes received, including ones that
-       may have had errors.
- * @crc_err: No. of frames with incorrect CRC value
- * @decrypt_crc_err: No. of frames whose CRC check failed after
-       decryption process completed
- * @phy_err: No. of frames whose reception failed because the PHY
-       encountered an error
- * @mic_err: No. of frames with incorrect TKIP MIC verification failure
- * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
- * @post_delim_crc_err: Post-Frame delimiter CRC error detections
- * @decrypt_busy_err: Decryption interruptions counter
- * @phy_err_stats: Individual PHY error statistics
- * @rx_len_err:  No. of frames discarded due to bad length.
- * @rx_oom_err:  No. of frames dropped due to OOM issues.
- * @rx_rate_err:  No. of frames dropped due to rate errors.
- * @rx_too_many_frags_err:  Frames dropped due to too-many-frags received.
- * @rx_beacons:  No. of beacons received.
- * @rx_frags:  No. of rx-fragements received.
- * @rx_spectral: No of spectral packets received.
- */
-struct ath_rx_stats {
-       u32 rx_pkts_all;
-       u32 rx_bytes_all;
-       u32 crc_err;
-       u32 decrypt_crc_err;
-       u32 phy_err;
-       u32 mic_err;
-       u32 pre_delim_crc_err;
-       u32 post_delim_crc_err;
-       u32 decrypt_busy_err;
-       u32 phy_err_stats[ATH9K_PHYERR_MAX];
-       u32 rx_len_err;
-       u32 rx_oom_err;
-       u32 rx_rate_err;
-       u32 rx_too_many_frags_err;
-       u32 rx_beacons;
-       u32 rx_frags;
-       u32 rx_spectral;
-};
-
 #define ANT_MAIN 0
 #define ANT_ALT  1
 
index d76e6e0120d2c28236163f56de4471c46f397c60..ffca918ff16aff4be941d572ac19d4f152e5b2c2 100644 (file)
@@ -72,7 +72,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
                ath_txq_lock(sc, txq);
                if (tid->active) {
                        len += scnprintf(buf + len, size - len,
-                                        "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+                                        "%3d%11d%10d%10d%10d%10d%9d%6d\n",
                                         tid->tidno,
                                         tid->seq_start,
                                         tid->seq_next,
@@ -80,8 +80,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
                                         tid->baw_head,
                                         tid->baw_tail,
                                         tid->bar_index,
-                                        tid->sched,
-                                        tid->paused);
+                                        tid->sched);
                }
                ath_txq_unlock(sc, txq);
        }
index 857bb28b389411ea2e1586abd3f5c749c0127327..e0c740dcfea8e27438eb95f5fd6d2a48e1423131 100644 (file)
@@ -178,12 +178,14 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
        pe.ts = mactime;
        if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
                struct dfs_pattern_detector *pd = sc->dfs_detector;
-               static u64 last_ts;
+#ifdef CONFIG_ATH9K_DEBUGFS
                ath_dbg(common, DFS,
                        "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
                        "width=%d, rssi=%d, delta_ts=%llu\n",
-                       pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts);
-               last_ts = pe.ts;
+                       pe.freq, pe.ts, pe.width, pe.rssi,
+                       pe.ts - sc->debug.stats.dfs_stats.last_ts);
+               sc->debug.stats.dfs_stats.last_ts = pe.ts;
+#endif
                DFS_STAT_INC(sc, pulses_processed);
                if (pd != NULL && pd->add_pulse(pd, &pe)) {
                        DFS_STAT_INC(sc, radar_detected);
index 7936c9126a20d5dee79cae53bb2a06cdb78ed881..d9486867a5e00f089de4adb08989a59470ad26d7 100644 (file)
@@ -51,6 +51,7 @@ struct ath_dfs_stats {
        /* pattern detection stats */
        u32 pulses_processed;
        u32 radar_detected;
+       u64 last_ts;
 };
 
 #if defined(CONFIG_ATH9K_DFS_DEBUGFS)
index dab1f0cab9937d17fd0df75d88c3eafb0ff8ec54..09a5d72f3ff5b4e9cf27b6c0305193e738c1aba5 100644 (file)
@@ -325,14 +325,14 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
 
 #define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
 #define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
-#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
-#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c += a)
+#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
+#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
 #define CAB_STAT_INC   priv->debug.tx_stats.cab_queued++
 
 #define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
 
 void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
-                          struct ath_htc_rx_status *rxs);
+                          struct ath_rx_status *rs);
 
 struct ath_tx_stats {
        u32 buf_queued;
@@ -345,25 +345,18 @@ struct ath_tx_stats {
        u32 queue_stats[IEEE80211_NUM_ACS];
 };
 
-struct ath_rx_stats {
+struct ath_skbrx_stats {
        u32 skb_allocated;
        u32 skb_completed;
        u32 skb_completed_bytes;
        u32 skb_dropped;
-       u32 err_crc;
-       u32 err_decrypt_crc;
-       u32 err_mic;
-       u32 err_pre_delim;
-       u32 err_post_delim;
-       u32 err_decrypt_busy;
-       u32 err_phy;
-       u32 err_phy_stats[ATH9K_PHYERR_MAX];
 };
 
 struct ath9k_debug {
        struct dentry *debugfs_phy;
        struct ath_tx_stats tx_stats;
        struct ath_rx_stats rx_stats;
+       struct ath_skbrx_stats skbrx_stats;
 };
 
 void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
@@ -385,7 +378,7 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
 #define TX_QSTAT_INC(c) do { } while (0)
 
 static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
-                                        struct ath_htc_rx_status *rxs)
+                                        struct ath_rx_status *rs)
 {
 }
 
index fb071ee4fcfb3dd5969005a40537908ec92791c6..8b529e4b8ac4f37019426102b5d8d4b428882701 100644 (file)
@@ -243,39 +243,14 @@ static const struct file_operations fops_xmit = {
 };
 
 void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
-                          struct ath_htc_rx_status *rxs)
+                            struct ath_rx_status *rs)
 {
-#define RX_PHY_ERR_INC(c) priv->debug.rx_stats.err_phy_stats[c]++
-
-       if (rxs->rs_status & ATH9K_RXERR_CRC)
-               priv->debug.rx_stats.err_crc++;
-       if (rxs->rs_status & ATH9K_RXERR_DECRYPT)
-               priv->debug.rx_stats.err_decrypt_crc++;
-       if (rxs->rs_status & ATH9K_RXERR_MIC)
-               priv->debug.rx_stats.err_mic++;
-       if (rxs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
-               priv->debug.rx_stats.err_pre_delim++;
-       if (rxs->rs_status & ATH9K_RX_DELIM_CRC_POST)
-               priv->debug.rx_stats.err_post_delim++;
-       if (rxs->rs_status & ATH9K_RX_DECRYPT_BUSY)
-               priv->debug.rx_stats.err_decrypt_busy++;
-
-       if (rxs->rs_status & ATH9K_RXERR_PHY) {
-               priv->debug.rx_stats.err_phy++;
-               if (rxs->rs_phyerr < ATH9K_PHYERR_MAX)
-                       RX_PHY_ERR_INC(rxs->rs_phyerr);
-       }
-
-#undef RX_PHY_ERR_INC
+       ath9k_cmn_debug_stat_rx(&priv->debug.rx_stats, rs);
 }
 
-static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+static ssize_t read_file_skb_rx(struct file *file, char __user *user_buf,
                              size_t count, loff_t *ppos)
 {
-#define PHY_ERR(s, p)                                                  \
-       len += scnprintf(buf + len, size - len, "%20s : %10u\n", s,     \
-                        priv->debug.rx_stats.err_phy_stats[p]);
-
        struct ath9k_htc_priv *priv = file->private_data;
        char *buf;
        unsigned int len = 0, size = 1500;
@@ -287,63 +262,13 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 
        len += scnprintf(buf + len, size - len,
                         "%20s : %10u\n", "SKBs allocated",
-                        priv->debug.rx_stats.skb_allocated);
+                        priv->debug.skbrx_stats.skb_allocated);
        len += scnprintf(buf + len, size - len,
                         "%20s : %10u\n", "SKBs completed",
-                        priv->debug.rx_stats.skb_completed);
+                        priv->debug.skbrx_stats.skb_completed);
        len += scnprintf(buf + len, size - len,
                         "%20s : %10u\n", "SKBs Dropped",
-                        priv->debug.rx_stats.skb_dropped);
-
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10u\n", "CRC ERR",
-                        priv->debug.rx_stats.err_crc);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10u\n", "DECRYPT CRC ERR",
-                        priv->debug.rx_stats.err_decrypt_crc);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10u\n", "MIC ERR",
-                        priv->debug.rx_stats.err_mic);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10u\n", "PRE-DELIM CRC ERR",
-                        priv->debug.rx_stats.err_pre_delim);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10u\n", "POST-DELIM CRC ERR",
-                        priv->debug.rx_stats.err_post_delim);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10u\n", "DECRYPT BUSY ERR",
-                        priv->debug.rx_stats.err_decrypt_busy);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10u\n", "TOTAL PHY ERR",
-                        priv->debug.rx_stats.err_phy);
-
-
-       PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
-       PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
-       PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
-       PHY_ERR("RATE", ATH9K_PHYERR_RATE);
-       PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
-       PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
-       PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
-       PHY_ERR("TOR", ATH9K_PHYERR_TOR);
-       PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
-       PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
-       PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
-       PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
-       PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
-       PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
-       PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
-       PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
-       PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
-       PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
-       PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
-       PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
-       PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
-       PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
-       PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
-       PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
-       PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
-       PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+                        priv->debug.skbrx_stats.skb_dropped);
 
        if (len > size)
                len = size;
@@ -352,12 +277,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
        kfree(buf);
 
        return retval;
-
-#undef PHY_ERR
 }
 
-static const struct file_operations fops_recv = {
-       .read = read_file_recv,
+static const struct file_operations fops_skb_rx = {
+       .read = read_file_skb_rx,
        .open = simple_open,
        .owner = THIS_MODULE,
        .llseek = default_llseek,
@@ -486,423 +409,6 @@ static const struct file_operations fops_debug = {
        .llseek = default_llseek,
 };
 
-static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
-                                    size_t count, loff_t *ppos)
-{
-       struct ath9k_htc_priv *priv = file->private_data;
-       struct ath_common *common = ath9k_hw_common(priv->ah);
-       struct base_eep_header *pBase = NULL;
-       unsigned int len = 0, size = 1500;
-       ssize_t retval = 0;
-       char *buf;
-
-       pBase = ath9k_htc_get_eeprom_base(priv);
-
-       if (pBase == NULL) {
-               ath_err(common, "Unknown EEPROM type\n");
-               return 0;
-       }
-
-       buf = kzalloc(size, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n", "Major Version",
-                        pBase->version >> 12);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n", "Minor Version",
-                        pBase->version & 0xFFF);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n", "Checksum",
-                        pBase->checksum);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n", "Length",
-                        pBase->length);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n", "RegDomain1",
-                        pBase->regDmn[0]);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n", "RegDomain2",
-                        pBase->regDmn[1]);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "TX Mask", pBase->txMask);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "RX Mask", pBase->rxMask);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Allow 5GHz",
-                        !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Allow 2GHz",
-                        !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Disable 2GHz HT20",
-                        !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Disable 2GHz HT40",
-                        !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Disable 5Ghz HT20",
-                        !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Disable 5Ghz HT40",
-                        !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Big Endian",
-                        !!(pBase->eepMisc & 0x01));
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Cal Bin Major Ver",
-                        (pBase->binBuildNumber >> 24) & 0xFF);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Cal Bin Minor Ver",
-                        (pBase->binBuildNumber >> 16) & 0xFF);
-       len += scnprintf(buf + len, size - len,
-                        "%20s : %10d\n",
-                        "Cal Bin Build",
-                        (pBase->binBuildNumber >> 8) & 0xFF);
-
-       /*
-        * UB91 specific data.
-        */
-       if (AR_SREV_9271(priv->ah)) {
-               struct base_eep_header_4k *pBase4k =
-                       &priv->ah->eeprom.map4k.baseEepHeader;
-
-               len += scnprintf(buf + len, size - len,
-                                "%20s : %10d\n",
-                                "TX Gain type",
-                                pBase4k->txGainType);
-       }
-
-       /*
-        * UB95 specific data.
-        */
-       if (priv->ah->hw_version.usbdev == AR9287_USB) {
-               struct base_eep_ar9287_header *pBase9287 =
-                       &priv->ah->eeprom.map9287.baseEepHeader;
-
-               len += scnprintf(buf + len, size - len,
-                                "%20s : %10ddB\n",
-                                "Power Table Offset",
-                                pBase9287->pwrTableOffset);
-
-               len += scnprintf(buf + len, size - len,
-                                "%20s : %10d\n",
-                                "OpenLoop Power Ctrl",
-                                pBase9287->openLoopPwrCntl);
-       }
-
-       len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-                        pBase->macAddr);
-       if (len > size)
-               len = size;
-
-       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-       kfree(buf);
-
-       return retval;
-}
-
-static const struct file_operations fops_base_eeprom = {
-       .read = read_file_base_eeprom,
-       .open = simple_open,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
-
-static ssize_t read_4k_modal_eeprom(struct file *file,
-                                   char __user *user_buf,
-                                   size_t count, loff_t *ppos)
-{
-#define PR_EEP(_s, _val)                                               \
-       do {                                                            \
-               len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
-                                _s, (_val));                           \
-       } while (0)
-
-       struct ath9k_htc_priv *priv = file->private_data;
-       struct modal_eep_4k_header *pModal = &priv->ah->eeprom.map4k.modalHeader;
-       unsigned int len = 0, size = 2048;
-       ssize_t retval = 0;
-       char *buf;
-
-       buf = kzalloc(size, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
-       PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
-       PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
-       PR_EEP("Switch Settle", pModal->switchSettling);
-       PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
-       PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
-       PR_EEP("ADC Desired size", pModal->adcDesiredSize);
-       PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
-       PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
-       PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
-       PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
-       PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
-       PR_EEP("CCA Threshold)", pModal->thresh62);
-       PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
-       PR_EEP("xpdGain", pModal->xpdGain);
-       PR_EEP("External PD", pModal->xpd);
-       PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
-       PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
-       PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
-       PR_EEP("O/D Bias Version", pModal->version);
-       PR_EEP("CCK OutputBias", pModal->ob_0);
-       PR_EEP("BPSK OutputBias", pModal->ob_1);
-       PR_EEP("QPSK OutputBias", pModal->ob_2);
-       PR_EEP("16QAM OutputBias", pModal->ob_3);
-       PR_EEP("64QAM OutputBias", pModal->ob_4);
-       PR_EEP("CCK Driver1_Bias", pModal->db1_0);
-       PR_EEP("BPSK Driver1_Bias", pModal->db1_1);
-       PR_EEP("QPSK Driver1_Bias", pModal->db1_2);
-       PR_EEP("16QAM Driver1_Bias", pModal->db1_3);
-       PR_EEP("64QAM Driver1_Bias", pModal->db1_4);
-       PR_EEP("CCK Driver2_Bias", pModal->db2_0);
-       PR_EEP("BPSK Driver2_Bias", pModal->db2_1);
-       PR_EEP("QPSK Driver2_Bias", pModal->db2_2);
-       PR_EEP("16QAM Driver2_Bias", pModal->db2_3);
-       PR_EEP("64QAM Driver2_Bias", pModal->db2_4);
-       PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
-       PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
-       PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
-       PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
-       PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
-       PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
-       PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
-       PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
-       PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
-       PR_EEP("Ant. Diversity ctl1", pModal->antdiv_ctl1);
-       PR_EEP("Ant. Diversity ctl2", pModal->antdiv_ctl2);
-       PR_EEP("TX Diversity", pModal->tx_diversity);
-
-       if (len > size)
-               len = size;
-
-       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-       kfree(buf);
-
-       return retval;
-
-#undef PR_EEP
-}
-
-static ssize_t read_def_modal_eeprom(struct file *file,
-                                    char __user *user_buf,
-                                    size_t count, loff_t *ppos)
-{
-#define PR_EEP(_s, _val)                                               \
-       do {                                                            \
-               if (pBase->opCapFlags & AR5416_OPFLAGS_11G) {           \
-                       pModal = &priv->ah->eeprom.def.modalHeader[1];  \
-                       len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
-                                        _s, (_val), "|");              \
-               }                                                       \
-               if (pBase->opCapFlags & AR5416_OPFLAGS_11A) {           \
-                       pModal = &priv->ah->eeprom.def.modalHeader[0];  \
-                       len += scnprintf(buf + len, size - len, "%9d\n",\
-                                       (_val));                        \
-               }                                                       \
-       } while (0)
-
-       struct ath9k_htc_priv *priv = file->private_data;
-       struct base_eep_header *pBase = &priv->ah->eeprom.def.baseEepHeader;
-       struct modal_eep_header *pModal = NULL;
-       unsigned int len = 0, size = 3500;
-       ssize_t retval = 0;
-       char *buf;
-
-       buf = kzalloc(size, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       len += scnprintf(buf + len, size - len,
-                        "%31s %15s\n", "2G", "5G");
-       len += scnprintf(buf + len, size - len,
-                        "%32s %16s\n", "====", "====\n");
-
-       PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
-       PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
-       PR_EEP("Chain2 Ant. Control", pModal->antCtrlChain[2]);
-       PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
-       PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
-       PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
-       PR_EEP("Chain2 Ant. Gain", pModal->antennaGainCh[2]);
-       PR_EEP("Switch Settle", pModal->switchSettling);
-       PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
-       PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
-       PR_EEP("Chain2 TxRxAtten", pModal->txRxAttenCh[2]);
-       PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
-       PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
-       PR_EEP("Chain2 RxTxMargin", pModal->rxTxMarginCh[2]);
-       PR_EEP("ADC Desired size", pModal->adcDesiredSize);
-       PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
-       PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
-       PR_EEP("Chain1 xlna Gain", pModal->xlnaGainCh[1]);
-       PR_EEP("Chain2 xlna Gain", pModal->xlnaGainCh[2]);
-       PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
-       PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
-       PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
-       PR_EEP("CCA Threshold)", pModal->thresh62);
-       PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
-       PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
-       PR_EEP("Chain2 NF Threshold", pModal->noiseFloorThreshCh[2]);
-       PR_EEP("xpdGain", pModal->xpdGain);
-       PR_EEP("External PD", pModal->xpd);
-       PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
-       PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
-       PR_EEP("Chain2 I Coefficient", pModal->iqCalICh[2]);
-       PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
-       PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
-       PR_EEP("Chain2 Q Coefficient", pModal->iqCalQCh[2]);
-       PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
-       PR_EEP("Chain0 OutputBias", pModal->ob);
-       PR_EEP("Chain0 DriverBias", pModal->db);
-       PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
-       PR_EEP("2chain pwr decrease", pModal->pwrDecreaseFor2Chain);
-       PR_EEP("3chain pwr decrease", pModal->pwrDecreaseFor3Chain);
-       PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
-       PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
-       PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
-       PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
-       PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
-       PR_EEP("Chain2 bswAtten", pModal->bswAtten[2]);
-       PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
-       PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
-       PR_EEP("Chain2 bswMargin", pModal->bswMargin[2]);
-       PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
-       PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
-       PR_EEP("Chain1 xatten2Db", pModal->xatten2Db[1]);
-       PR_EEP("Chain2 xatten2Db", pModal->xatten2Db[2]);
-       PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
-       PR_EEP("Chain1 xatten2Margin", pModal->xatten2Margin[1]);
-       PR_EEP("Chain2 xatten2Margin", pModal->xatten2Margin[2]);
-       PR_EEP("Chain1 OutputBias", pModal->ob_ch1);
-       PR_EEP("Chain1 DriverBias", pModal->db_ch1);
-       PR_EEP("LNA Control", pModal->lna_ctl);
-       PR_EEP("XPA Bias Freq0", pModal->xpaBiasLvlFreq[0]);
-       PR_EEP("XPA Bias Freq1", pModal->xpaBiasLvlFreq[1]);
-       PR_EEP("XPA Bias Freq2", pModal->xpaBiasLvlFreq[2]);
-
-       if (len > size)
-               len = size;
-
-       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-       kfree(buf);
-
-       return retval;
-
-#undef PR_EEP
-}
-
-static ssize_t read_9287_modal_eeprom(struct file *file,
-                                     char __user *user_buf,
-                                     size_t count, loff_t *ppos)
-{
-#define PR_EEP(_s, _val)                                               \
-       do {                                                            \
-               len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
-                                _s, (_val));                           \
-       } while (0)
-
-       struct ath9k_htc_priv *priv = file->private_data;
-       struct modal_eep_ar9287_header *pModal = &priv->ah->eeprom.map9287.modalHeader;
-       unsigned int len = 0, size = 3000;
-       ssize_t retval = 0;
-       char *buf;
-
-       buf = kzalloc(size, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
-       PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
-       PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
-       PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
-       PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
-       PR_EEP("Switch Settle", pModal->switchSettling);
-       PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
-       PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
-       PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
-       PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
-       PR_EEP("ADC Desired size", pModal->adcDesiredSize);
-       PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
-       PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
-       PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
-       PR_EEP("CCA Threshold)", pModal->thresh62);
-       PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
-       PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
-       PR_EEP("xpdGain", pModal->xpdGain);
-       PR_EEP("External PD", pModal->xpd);
-       PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
-       PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
-       PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
-       PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
-       PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
-       PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
-       PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
-       PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
-       PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
-       PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
-       PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
-       PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
-       PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
-       PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
-       PR_EEP("AR92x7 Version", pModal->version);
-       PR_EEP("DriverBias1", pModal->db1);
-       PR_EEP("DriverBias2", pModal->db1);
-       PR_EEP("CCK OutputBias", pModal->ob_cck);
-       PR_EEP("PSK OutputBias", pModal->ob_psk);
-       PR_EEP("QAM OutputBias", pModal->ob_qam);
-       PR_EEP("PAL_OFF OutputBias", pModal->ob_pal_off);
-
-       if (len > size)
-               len = size;
-
-       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-       kfree(buf);
-
-       return retval;
-
-#undef PR_EEP
-}
-
-static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
-                                     size_t count, loff_t *ppos)
-{
-       struct ath9k_htc_priv *priv = file->private_data;
-
-       if (AR_SREV_9271(priv->ah))
-               return read_4k_modal_eeprom(file, user_buf, count, ppos);
-       else if (priv->ah->hw_version.usbdev == AR9280_USB)
-               return read_def_modal_eeprom(file, user_buf, count, ppos);
-       else if (priv->ah->hw_version.usbdev == AR9287_USB)
-               return read_9287_modal_eeprom(file, user_buf, count, ppos);
-
-       return 0;
-}
-
-static const struct file_operations fops_modal_eeprom = {
-       .read = read_file_modal_eeprom,
-       .open = simple_open,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
-
-
 /* Ethtool support for get-stats */
 #define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
 static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -947,6 +453,8 @@ int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
 
 #define STXBASE priv->debug.tx_stats
 #define SRXBASE priv->debug.rx_stats
+#define SKBTXBASE priv->debug.tx_stats
+#define SKBRXBASE priv->debug.skbrx_stats
 #define ASTXQ(a)                                       \
        data[i++] = STXBASE.a[IEEE80211_AC_BE];         \
        data[i++] = STXBASE.a[IEEE80211_AC_BK];         \
@@ -960,24 +468,24 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
        struct ath9k_htc_priv *priv = hw->priv;
        int i = 0;
 
-       data[i++] = STXBASE.skb_success;
-       data[i++] = STXBASE.skb_success_bytes;
-       data[i++] = SRXBASE.skb_completed;
-       data[i++] = SRXBASE.skb_completed_bytes;
+       data[i++] = SKBTXBASE.skb_success;
+       data[i++] = SKBTXBASE.skb_success_bytes;
+       data[i++] = SKBRXBASE.skb_completed;
+       data[i++] = SKBRXBASE.skb_completed_bytes;
 
        ASTXQ(queue_stats);
 
-       data[i++] = SRXBASE.err_crc;
-       data[i++] = SRXBASE.err_decrypt_crc;
-       data[i++] = SRXBASE.err_phy;
-       data[i++] = SRXBASE.err_mic;
-       data[i++] = SRXBASE.err_pre_delim;
-       data[i++] = SRXBASE.err_post_delim;
-       data[i++] = SRXBASE.err_decrypt_busy;
+       data[i++] = SRXBASE.crc_err;
+       data[i++] = SRXBASE.decrypt_crc_err;
+       data[i++] = SRXBASE.phy_err;
+       data[i++] = SRXBASE.mic_err;
+       data[i++] = SRXBASE.pre_delim_crc_err;
+       data[i++] = SRXBASE.post_delim_crc_err;
+       data[i++] = SRXBASE.decrypt_busy_err;
 
-       data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_RADAR];
-       data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_OFDM_TIMING];
-       data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_CCK_TIMING];
+       data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_RADAR];
+       data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_OFDM_TIMING];
+       data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_CCK_TIMING];
 
        WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
 }
@@ -1001,18 +509,21 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
                            priv, &fops_tgt_rx_stats);
        debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy,
                            priv, &fops_xmit);
-       debugfs_create_file("recv", S_IRUSR, priv->debug.debugfs_phy,
-                           priv, &fops_recv);
+       debugfs_create_file("skb_rx", S_IRUSR, priv->debug.debugfs_phy,
+                           priv, &fops_skb_rx);
+
+       ath9k_cmn_debug_recv(priv->debug.debugfs_phy, &priv->debug.rx_stats);
+       ath9k_cmn_debug_phy_err(priv->debug.debugfs_phy, &priv->debug.rx_stats);
+
        debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy,
                            priv, &fops_slot);
        debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy,
                            priv, &fops_queue);
        debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy,
                            priv, &fops_debug);
-       debugfs_create_file("base_eeprom", S_IRUSR, priv->debug.debugfs_phy,
-                           priv, &fops_base_eeprom);
-       debugfs_create_file("modal_eeprom", S_IRUSR, priv->debug.debugfs_phy,
-                           priv, &fops_modal_eeprom);
+
+       ath9k_cmn_debug_base_eeprom(priv->debug.debugfs_phy, priv->ah);
+       ath9k_cmn_debug_modal_eeprom(priv->debug.debugfs_phy, priv->ah);
 
        return 0;
 }
index f46cd0250e488217ca4aa517be12924e3b61c6a8..5627917c5ff761137061467e1b9d71f281ce0652 100644 (file)
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
        if ((vif->type == NL80211_IFTYPE_AP ||
             vif->type == NL80211_IFTYPE_MESH_POINT) &&
-           bss_conf->enable_beacon)
+           bss_conf->enable_beacon) {
                priv->reconfig_beacon = true;
+               priv->rearm_ani = true;
+       }
 
        if (bss_conf->assoc) {
                priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
        ath9k_htc_ps_wakeup(priv);
 
+       ath9k_htc_stop_ani(priv);
        del_timer_sync(&priv->tx.cleanup_timer);
        ath9k_htc_tx_drain(priv);
 
index 289f3d8924b5735cb773d2b1d527b5cf106b5d64..bb86eb2ffc953545d759647d70cacc86a4227022 100644 (file)
@@ -996,8 +996,6 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
                goto rx_next;
        }
 
-       ath9k_htc_err_stat_rx(priv, rxstatus);
-
        /* Get the RX status information */
 
        memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
@@ -1005,6 +1003,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
        /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
         * After this, we can drop this part of skb. */
        rx_status_htc_to_ath(&rx_stats, rxstatus);
+       ath9k_htc_err_stat_rx(priv, &rx_stats);
        rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
        skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
 
index c8a9dfab1fee2ea4778046b186428ba3f014e054..2a8ed8375ec0584771f4f7dcc833d17131ec34b2 100644 (file)
@@ -26,7 +26,6 @@
 #include "ar9003_mac.h"
 #include "ar9003_mci.h"
 #include "ar9003_phy.h"
-#include "debug.h"
 #include "ath9k.h"
 
 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
@@ -246,6 +245,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
                return;
        case AR9300_DEVID_AR953X:
                ah->hw_version.macVersion = AR_SREV_VERSION_9531;
+               if (ah->get_mac_revision)
+                       ah->hw_version.macRev = ah->get_mac_revision();
                return;
        }
 
index cbbb02a6b13b463c9bfdf8b1bad21f45cde0b4cd..1af77081181e122a138451e1765611c28a94d1a9 100644 (file)
@@ -508,7 +508,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        sc->tx99_power = MAX_RATE_POWER + 1;
        init_waitqueue_head(&sc->tx_wait);
 
-       if (!pdata) {
+       if (!pdata || pdata->use_eeprom) {
                ah->ah_flags |= AH_USE_EEPROM;
                sc->sc_ah->led_pin = -1;
        } else {
@@ -589,6 +589,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        if (ret)
                goto err_btcoex;
 
+       sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
+               NULL, sc, AR_FIRST_NDP_TIMER);
+
        ath9k_cmn_init_crypto(sc->sc_ah);
        ath9k_init_misc(sc);
        ath_fill_led_pin(sc);
@@ -644,13 +647,13 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
 
 static const struct ieee80211_iface_limit if_limits[] = {
        { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
-                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                 BIT(NL80211_IFTYPE_WDS) },
        { .max = 8,     .types =
 #ifdef CONFIG_MAC80211_MESH
                                 BIT(NL80211_IFTYPE_MESH_POINT) |
 #endif
-                                BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_AP) },
+       { .max = 1,     .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                 BIT(NL80211_IFTYPE_P2P_GO) },
 };
 
@@ -711,7 +714,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
-       hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+       hw->wiphy->features |= (NL80211_FEATURE_ACTIVE_MONITOR |
+                               NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE);
 
        if (!config_enabled(CONFIG_ATH9K_TX99)) {
                hw->wiphy->interface_modes =
@@ -783,6 +787,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);
 
+       /* Will be cleared in ath9k_start() */
+       set_bit(ATH_OP_INVALID, &common->op_flags);
+
        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
@@ -852,6 +859,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
 {
        int i = 0;
 
+       if (sc->p2p_ps_timer)
+               ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
+
        ath9k_deinit_btcoex(sc);
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
index 51ce36f108f9a8f0f6619e6b73e613722c2a72a8..275205ab5f15ea15a00f341942efbe6f3bdc6adc 100644 (file)
@@ -958,3 +958,25 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
        return;
 }
 EXPORT_SYMBOL(ath9k_hw_set_interrupts);
+
+#define ATH9K_HW_MAX_DCU       10
+#define ATH9K_HW_SLICE_PER_DCU 16
+#define ATH9K_HW_BIT_IN_SLICE  16
+void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set)
+{
+       int dcu_idx;
+       u32 filter;
+
+       for (dcu_idx = 0; dcu_idx < 10; dcu_idx++) {
+               filter = SM(set, AR_D_TXBLK_WRITE_COMMAND);
+               filter |= SM(dcu_idx, AR_D_TXBLK_WRITE_DCU);
+               filter |= SM((destidx / ATH9K_HW_SLICE_PER_DCU),
+                            AR_D_TXBLK_WRITE_SLICE);
+               filter |= BIT(destidx % ATH9K_HW_BIT_IN_SLICE);
+               ath_dbg(ath9k_hw_common(ah), PS,
+                       "DCU%d staid %d set %d txfilter %08x\n",
+                       dcu_idx, destidx, set, filter);
+               REG_WRITE(ah, AR_D_TXBLK_BASE, filter);
+       }
+}
+EXPORT_SYMBOL(ath9k_hw_set_tx_filter);
index 89df634e81f9a703d6dcf5eefee8881ef738570b..da768675753595c3c27ef3fd920327af3c1a547e 100644 (file)
@@ -729,6 +729,7 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
 void ath9k_hw_abortpcurecv(struct ath_hw *ah);
 bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
 int ath9k_hw_beaconq_setup(struct ath_hw *ah);
+void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set);
 
 /* Interrupt Handling */
 bool ath9k_hw_intrpend(struct ath_hw *ah);
index d69853b848ce1f10167275c4e85d6026ee41c5f1..6965ceac7bc6d0713041ef97867cf11dfcbda5b5 100644 (file)
@@ -261,6 +261,8 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
        sc->gtt_cnt = 0;
        ieee80211_wake_queues(sc->hw);
 
+       ath9k_p2p_ps_timer(sc);
+
        return true;
 }
 
@@ -419,6 +421,7 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
        an->sc = sc;
        an->sta = sta;
        an->vif = vif;
+       memset(&an->key_idx, 0, sizeof(an->key_idx));
 
        ath_tx_node_init(sc, an);
 }
@@ -1119,6 +1122,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_assign_slot(sc, vif);
 
+       avp->vif = vif;
+
        an->sc = sc;
        an->sta = NULL;
        an->vif = vif;
@@ -1163,6 +1168,29 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        return 0;
 }
 
+static void
+ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       s32 tsf, target_tsf;
+
+       if (!avp || !avp->noa.has_next_tsf)
+               return;
+
+       ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
+
+       tsf = ath9k_hw_gettsf32(sc->sc_ah);
+
+       target_tsf = avp->noa.next_tsf;
+       if (!avp->noa.absent)
+               target_tsf -= ATH_P2P_PS_STOP_TIME;
+
+       if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
+               target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
+
+       ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
+}
+
 static void ath9k_remove_interface(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
 {
@@ -1174,6 +1202,13 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&sc->mutex);
 
+       spin_lock_bh(&sc->sc_pcu_lock);
+       if (avp == sc->p2p_ps_vif) {
+               sc->p2p_ps_vif = NULL;
+               ath9k_update_p2p_ps_timer(sc, NULL);
+       }
+       spin_unlock_bh(&sc->sc_pcu_lock);
+
        sc->nvifs--;
        sc->tx99_vif = NULL;
 
@@ -1427,8 +1462,10 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
                return 0;
 
        key = ath_key_config(common, vif, sta, &ps_key);
-       if (key > 0)
+       if (key > 0) {
                an->ps_key = key;
+               an->key_idx[0] = key;
+       }
 
        return 0;
 }
@@ -1446,6 +1483,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
 
        ath_key_delete(common, &ps_key);
        an->ps_key = 0;
+       an->key_idx[0] = 0;
 }
 
 static int ath9k_sta_remove(struct ieee80211_hw *hw,
@@ -1460,6 +1498,19 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
        return 0;
 }
 
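+/* Toggle the hardware TX filter for every key index recorded for this station. */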
+static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
+                                   struct ath_node *an,
+                                   bool set)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+               if (!an->key_idx[i])
+                       continue;
+               ath9k_hw_set_tx_filter(ah, an->key_idx[i], set);
+       }
+}
+
 static void ath9k_sta_notify(struct ieee80211_hw *hw,
                         struct ieee80211_vif *vif,
                         enum sta_notify_cmd cmd,
@@ -1472,8 +1523,10 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
        case STA_NOTIFY_SLEEP:
                an->sleeping = true;
                ath_tx_aggr_sleep(sta, sc, an);
+               ath9k_sta_set_tx_filter(sc->sc_ah, an, true);
                break;
        case STA_NOTIFY_AWAKE:
+               ath9k_sta_set_tx_filter(sc->sc_ah, an, false);
                an->sleeping = false;
                ath_tx_aggr_wakeup(sc, an);
                break;
@@ -1529,7 +1582,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       int ret = 0;
+       struct ath_node *an = NULL;
+       int ret = 0, i;
 
        if (ath9k_modparam_nohwcrypt)
                return -ENOSPC;
@@ -1551,13 +1605,16 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
 
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
-       ath_dbg(common, CONFIG, "Set HW Key\n");
+       ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
+       if (sta)
+               an = (struct ath_node *)sta->drv_priv;
 
        switch (cmd) {
        case SET_KEY:
                if (sta)
                        ath9k_del_ps_key(sc, vif, sta);
 
+               key->hw_key_idx = 0;
                ret = ath_key_config(common, vif, sta, key);
                if (ret >= 0) {
                        key->hw_key_idx = ret;
@@ -1570,9 +1627,27 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
                                key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
                        ret = 0;
                }
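+               /* Track the hw key index per station so the TX filter can follow sleep/wake transitions. */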
+               if (an && key->hw_key_idx) {
+                       for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+                               if (an->key_idx[i])
+                                       continue;
+                               an->key_idx[i] = key->hw_key_idx;
+                               break;
+                       }
+                       WARN_ON(i == ARRAY_SIZE(an->key_idx));
+               }
                break;
        case DISABLE_KEY:
                ath_key_delete(common, key);
+               if (an) {
+                       for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+                               if (an->key_idx[i] != key->hw_key_idx)
+                                       continue;
+                               an->key_idx[i] = 0;
+                               break;
+                       }
+               }
+               key->hw_key_idx = 0;
                break;
        default:
                ret = -EINVAL;
@@ -1636,6 +1711,72 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
                ath9k_set_assoc_state(sc, vif);
 }
 
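+/* Generic timer callback: advance the P2P NoA schedule, re-arm the timer
+ * and mark the BSS peer asleep/awake to match the absence period.
+ */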
+void ath9k_p2p_ps_timer(void *priv)
+{
+       struct ath_softc *sc = priv;
+       struct ath_vif *avp = sc->p2p_ps_vif;
+       struct ieee80211_vif *vif;
+       struct ieee80211_sta *sta;
+       struct ath_node *an;
+       u32 tsf;
+
+       if (!avp)
+               return;
+
+       tsf = ath9k_hw_gettsf32(sc->sc_ah);
+       if (!avp->noa.absent)
+               tsf += ATH_P2P_PS_STOP_TIME;
+
+       if (!avp->noa.has_next_tsf ||
+           avp->noa.next_tsf - tsf > BIT(31))
+               ieee80211_update_p2p_noa(&avp->noa, tsf);
+
+       ath9k_update_p2p_ps_timer(sc, avp);
+
+       rcu_read_lock();
+
+       vif = avp->vif;
+       sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+       if (!sta)
+               goto out;
+
+       an = (void *) sta->drv_priv;
+       if (an->sleeping == !!avp->noa.absent)
+               goto out;
+
+       an->sleeping = avp->noa.absent;
+       if (an->sleeping)
+               ath_tx_aggr_sleep(sta, sc, an);
+       else
+               ath_tx_aggr_wakeup(sc, an);
+
+out:
+       rcu_read_unlock();
+}
+
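+/* Record the P2P client vif; unless a beacon sync is still pending, parse
+ * its NoA attribute and arm the powersave timer.
+ */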
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+       struct ath_vif *avp = (void *)vif->drv_priv;
+       unsigned long flags;
+       u32 tsf;
+
+       if (!sc->p2p_ps_timer)
+               return;
+
+       if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
+               return;
+
+       sc->p2p_ps_vif = avp;
+
+       spin_lock_irqsave(&sc->sc_pm_lock, flags);
+       if (!(sc->ps_flags & PS_BEACON_SYNC)) {
+               tsf = ath9k_hw_gettsf32(sc->sc_ah);
+               ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
+               ath9k_update_p2p_ps_timer(sc, avp);
+       }
+       spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+}
+
 static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_bss_conf *bss_conf,
@@ -1710,6 +1851,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
+       if (changed & BSS_CHANGED_P2P_PS) {
+               spin_lock_bh(&sc->sc_pcu_lock);
+               ath9k_update_p2p_ps(sc, vif);
+               spin_unlock_bh(&sc->sc_pcu_lock);
+       }
+
        if (changed & CHECK_ANI)
                ath_check_ani(sc);
 
@@ -1883,7 +2030,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
        return !!npend;
 }
 
-static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       u32 queues, bool drop)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
index 25304adece571d9d4e498df9398103a49ad08a7e..4dec09e565ed865470ff6bc5738e86c21d523906 100644 (file)
@@ -686,7 +686,7 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        struct ath9k_platform_data *pdata = sc->dev->platform_data;
 
-       if (pdata) {
+       if (pdata && !pdata->use_eeprom) {
                if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
                        ath_err(common,
                                "%s: eeprom read failed, offset %08x is out of range\n",
@@ -784,7 +784,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct ath_softc *sc;
        struct ieee80211_hw *hw;
-       struct ath_common *common;
        u8 csz;
        u32 val;
        int ret = 0;
@@ -877,10 +876,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
                   hw_name, (unsigned long)sc->mem, pdev->irq);
 
-       /* Will be cleared in ath9k_start() */
-       common = ath9k_hw_common(sc->sc_ah);
-       set_bit(ATH_OP_INVALID, &common->op_flags);
-
        return 0;
 
 err_init:
@@ -919,6 +914,7 @@ static int ath_pci_suspend(struct device *device)
         */
        ath9k_stop_btcoex(sc);
        ath9k_hw_disable(sc->sc_ah);
+       del_timer_sync(&sc->sleep_timer);
        ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
 
        return 0;
index 6c9accdb52e4140076d7378f530e975c34f68433..43ae199601f7ad8c301b16e6babef99fc742cda3 100644 (file)
@@ -538,7 +538,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
                sc->ps_flags &= ~PS_BEACON_SYNC;
                ath_dbg(common, PS,
                        "Reconfigure beacon timers based on synchronized timestamp\n");
-               ath9k_set_beacon(sc);
+               if (!(WARN_ON_ONCE(sc->cur_beacon_conf.beacon_interval == 0)))
+                       ath9k_set_beacon(sc);
+               if (sc->p2p_ps_vif)
+                       ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
        }
 
        if (ath_beacon_dtim_pending_cab(skb)) {
@@ -975,6 +978,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
        u64 tsf = 0;
        unsigned long flags;
        dma_addr_t new_buf_addr;
+       unsigned int budget = 512;
 
        if (edma)
                dma_type = DMA_BIDIRECTIONAL;
@@ -1113,15 +1117,17 @@ requeue_drop_frag:
                }
 requeue:
                list_add_tail(&bf->list, &sc->rx.rxbuf);
-               if (flush)
-                       continue;
 
                if (edma) {
                        ath_rx_edma_buf_link(sc, qtype);
                } else {
                        ath_rx_buf_relink(sc, bf);
-                       ath9k_hw_rxena(ah);
+                       if (!flush)
+                               ath9k_hw_rxena(ah);
                }
+
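+               /* Stop after a fixed number of frames so one tasklet run stays bounded. */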
+               if (!budget--)
+                       break;
        } while (1);
 
        if (!(ah->imask & ATH9K_INT_RXEOL)) {
index b1fd3fa84983df0e0b85d86b86b02c158883fd35..f1bbce3f7774ee16a9e29b8bb62f72ade1a9d99f 100644 (file)
 #define AR_D_QCUMASK         0x000003FF
 #define AR_D_QCUMASK_RESV0   0xFFFFFC00
 
-#define AR_D_TXBLK_CMD  0x1038
-#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i))
-
 #define AR_D0_LCL_IFS     0x1040
 #define AR_D1_LCL_IFS     0x1044
 #define AR_D2_LCL_IFS     0x1048
index 87cbec47fb48371403daaa70b32c1b9bc40ce1ec..66acb2cbd9df3cc45c307bb6b38118436da3d01a 100644 (file)
@@ -107,9 +107,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 {
        struct ath_atx_ac *ac = tid->ac;
 
-       if (tid->paused)
-               return;
-
        if (tid->sched)
                return;
 
@@ -1407,7 +1404,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
        ath_tx_tid_change_state(sc, txtid);
 
        txtid->active = true;
-       txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;
        txtid->bar_index = -1;
 
@@ -1427,7 +1423,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 
        ath_txq_lock(sc, txq);
        txtid->active = false;
-       txtid->paused = false;
        ath_tx_flush_tid(sc, txtid);
        ath_tx_tid_change_state(sc, txtid);
        ath_txq_unlock_complete(sc, txq);
@@ -1487,7 +1482,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
                ath_txq_lock(sc, txq);
                ac->clear_ps_filter = true;
 
-               if (!tid->paused && ath_tid_has_buffered(tid)) {
+               if (ath_tid_has_buffered(tid)) {
                        ath_tx_queue_tid(txq, tid);
                        ath_txq_schedule(sc, txq);
                }
@@ -1510,7 +1505,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
        ath_txq_lock(sc, txq);
 
        tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
-       tid->paused = false;
 
        if (ath_tid_has_buffered(tid)) {
                ath_tx_queue_tid(txq, tid);
@@ -1544,8 +1538,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
                        continue;
 
                tid = ATH_AN_2_TID(an, i);
-               if (tid->paused)
-                       continue;
 
                ath_txq_lock(sc, tid->ac->txq);
                while (nframes > 0) {
@@ -1844,9 +1836,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                        list_del(&tid->list);
                        tid->sched = false;
 
-                       if (tid->paused)
-                               continue;
-
                        if (ath_tx_sched_aggr(sc, txq, tid, &stop))
                                sent = true;
 
@@ -2698,7 +2687,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
                tid->baw_size  = WME_MAX_BA;
                tid->baw_head  = tid->baw_tail = 0;
                tid->sched     = false;
-               tid->paused    = false;
                tid->active        = false;
                __skb_queue_head_init(&tid->buf_q);
                __skb_queue_head_init(&tid->retry_q);
index 4c8cdb097b6599c08816a4e184537d764bc9ef43..f8ded84b7be8c5e3b6038910a8829364a5e5ba1e 100644 (file)
@@ -1707,7 +1707,9 @@ found:
        return 0;
 }
 
-static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void carl9170_op_flush(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             u32 queues, bool drop)
 {
        struct ar9170 *ar = hw->priv;
        unsigned int vid;
index ca115f33746f228bc4f9007e0397b567f644e2a8..f35c7f30f9a6f66f234a4816260d0ea473fc573e 100644 (file)
@@ -1076,8 +1076,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
 
        carl9170_set_state(ar, CARL9170_STOPPED);
 
-       return request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
+       err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
                &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
+       if (err) {
+               usb_put_dev(udev);
+               usb_put_dev(udev);
+               carl9170_free(ar);
+       }
+       return err;
 }
 
 static void carl9170_usb_disconnect(struct usb_interface *intf)
index a1a69c5db409262e3f63a1f1246d6590e448cbfa..650be79c7ac97070f0fccd62cea916ce56d8c097 100644 (file)
@@ -73,9 +73,52 @@ static const struct radar_types etsi_radar_types_v15 = {
        .radar_types            = etsi_radar_ref_types_v15,
 };
 
-/* for now, we support ETSI radar types, FCC and JP are TODO */
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB)      \
+{                                                              \
+       ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX),               \
+       PMIN - PRI_TOLERANCE,                                   \
+       PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF,             \
+       PPB_THRESH(PPB), PRI_TOLERANCE,                         \
+}
+
+static const struct radar_detector_specs fcc_radar_ref_types[] = {
+       FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+       FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
+       FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
+       FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
+       FCC_PATTERN(4, 50, 100, 1000, 2000, 20, 1),
+       FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types fcc_radar_types = {
+       .region                 = NL80211_DFS_FCC,
+       .num_radar_types        = ARRAY_SIZE(fcc_radar_ref_types),
+       .radar_types            = fcc_radar_ref_types,
+};
+
+#define JP_PATTERN FCC_PATTERN
+static const struct radar_detector_specs jp_radar_ref_types[] = {
+       JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+       JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
+       JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
+       JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
+       JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
+       JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
+       JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
+       JP_PATTERN(7, 50, 100, 1000, 2000, 20, 1),
+       JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types jp_radar_types = {
+       .region                 = NL80211_DFS_JP,
+       .num_radar_types        = ARRAY_SIZE(jp_radar_ref_types),
+       .radar_types            = jp_radar_ref_types,
+};
+
 static const struct radar_types *dfs_domains[] = {
        &etsi_radar_types_v15,
+       &fcc_radar_types,
+       &jp_radar_types,
 };
 
 /**
index 4806a49cb61b139e0250e8152e27c91b79b50a0e..6e699d050d1e637f3185be85103d9f66f0c97314 100644 (file)
@@ -172,7 +172,7 @@ static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
 
 static int wil_cfg80211_get_station(struct wiphy *wiphy,
                                    struct net_device *ndev,
-                                   u8 *mac, struct station_info *sinfo)
+                                   const u8 *mac, struct station_info *sinfo)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        int rc;
@@ -671,7 +671,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
 }
 
 static int wil_cfg80211_del_station(struct wiphy *wiphy,
-                                   struct net_device *dev, u8 *mac)
+                                   struct net_device *dev, const u8 *mac)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
index 5824cd41e4bac6d387087ab84739df3225b8f799..73593aa3cd9813e2bd6849b969427f83f1a9a578 100644 (file)
@@ -338,7 +338,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
        }
 
        if (isr)
-               wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+               wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
 
        wil->isr_misc = 0;
 
index 95f4efe9ef37c652722a5ca381af4529a6ea65e9..670cc6de3b4cec171e36af8a05497b9acacf86a2 100644 (file)
@@ -81,7 +81,7 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
        memset(&sta->stats, 0, sizeof(sta->stats));
 }
 
-static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
 {
        int cid = -ENOENT;
        struct net_device *ndev = wil_to_ndev(wil);
@@ -252,7 +252,7 @@ int wil_priv_init(struct wil6210_priv *wil)
        return 0;
 }
 
-void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
 {
        del_timer_sync(&wil->connect_timer);
        _wil6210_disconnect(wil, bssid);
@@ -363,8 +363,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
                wil_err(wil, "Firmware not ready\n");
                return -ETIME;
        } else {
-               wil_dbg_misc(wil, "FW ready after %d ms\n",
-                            jiffies_to_msecs(to-left));
+               wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
+                        jiffies_to_msecs(to-left), wil->hw_version);
        }
        return 0;
 }
index f1e1bb338d681e71c96b130363bf04387c3ffa85..0660884183070d7d46876e0a07effb2c1650e005 100644 (file)
@@ -74,8 +74,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
        if (rc)
                goto release_irq;
 
-       wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
-
        return 0;
 
  release_irq:
index d04629fe053f5e2f864bc1342ebdadf144d17008..ec29954bd44dd1d8de1cf46ce736cc4f8157127e 100644 (file)
@@ -91,6 +91,22 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
 
        spin_lock(&r->reorder_lock);
 
+       /* Due to a race between the WMI event that reports BACK establishment
+        * and data Rx, a few packets may be passed up before the reorder
+        * buffer gets allocated. Catch up by pretending the SSN is whatever
+        * we see in the first Rx packet.
+        */
+       if (r->first_time) {
+               r->first_time = false;
+               if (seq != r->head_seq_num) {
+                       wil_err(wil, "Error: first frame with wrong sequence %d, should be %d. Fixing...\n",
+                               seq, r->head_seq_num);
+                       r->head_seq_num = seq;
+                       r->ssn = seq;
+               }
+       }
+
        /* frame with out of date sequence number */
        if (seq_less(seq, r->head_seq_num)) {
                dev_kfree_skb(skb);
@@ -162,6 +178,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
        r->head_seq_num = ssn;
        r->buf_size = size;
        r->stored_mpdu_num = 0;
+       r->first_time = true;
        return r;
 }
 
index 2a2dec75f02606c9ea71ecd95d846287520157d5..3427ac4a4fa136127d25046c24d61eae161032d2 100644 (file)
@@ -35,7 +35,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 #define WIL6210_MEM_SIZE (2*1024*1024UL)
 
 #define WIL6210_RX_RING_SIZE   (128)
-#define WIL6210_TX_RING_SIZE   (128)
+#define WIL6210_TX_RING_SIZE   (512)
 #define WIL6210_MAX_TX_RINGS   (24) /* HW limit */
 #define WIL6210_MAX_CID                (8) /* HW limit */
 #define WIL6210_NAPI_BUDGET    (16) /* arbitrary */
@@ -301,6 +301,7 @@ struct wil_tid_ampdu_rx {
        u16 buf_size;
        u16 timeout;
        u8 dialog_token;
+       bool first_time; /* is this the first time this buffer is used? */
 };
 
 struct wil6210_stats {
@@ -507,7 +508,7 @@ void wil_wdev_free(struct wil6210_priv *wil);
 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
 int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
 int wmi_pcp_stop(struct wil6210_priv *wil);
-void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid);
 
 int wil_rx_init(struct wil6210_priv *wil);
 void wil_rx_fini(struct wil6210_priv *wil);
index 2ba56eef0c457d4397c27fa84a1291b011d3884f..e9a11cb3428ad1c44d4f33b18aeab3198a937120 100644 (file)
@@ -192,7 +192,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        might_sleep();
 
        if (!test_bit(wil_status_fwready, &wil->status)) {
-               wil_err(wil, "FW not ready\n");
+               wil_err(wil, "WMI: cannot send command while FW not ready\n");
                return -EAGAIN;
        }
 
@@ -276,8 +276,8 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
        wil->fw_version = le32_to_cpu(evt->sw_version);
        wil->n_mids = evt->numof_additional_mids;
 
-       wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
-                   evt->mac, wil->n_mids);
+       wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+                evt->mac, wil->n_mids);
 
        if (!is_valid_ether_addr(ndev->dev_addr)) {
                memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@@ -290,7 +290,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
 static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
                             int len)
 {
-       wil_dbg_wmi(wil, "WMI: FW ready\n");
+       wil_dbg_wmi(wil, "WMI: got FW ready event\n");
 
        set_bit(wil_status_fwready, &wil->status);
        /* reuse wmi_ready for the firmware ready indication */
@@ -348,7 +348,7 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
 {
        if (wil->scan_request) {
                struct wmi_scan_complete_event *data = d;
-               bool aborted = (data->status != 0);
+               bool aborted = (data->status != WMI_SCAN_SUCCESS);
 
                wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
                cfg80211_scan_done(wil->scan_request, aborted);
@@ -802,6 +802,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
                .network_type = wmi_nettype,
                .disable_sec_offload = 1,
                .channel = chan - 1,
+               .pcp_max_assoc_sta = WIL6210_MAX_CID,
        };
        struct {
                struct wil6210_mbox_hdr_wmi wmi;
index 50b8528394f4050b8098098e9bbdd0520ae1bf2a..17334c852866c3cc8a061eecb829052ebd9415cf 100644 (file)
@@ -28,7 +28,7 @@
 #define __WILOCITY_WMI_H__
 
 /* General */
-
+#define WILOCITY_MAX_ASSOC_STA (8)
 #define WMI_MAC_LEN            (6)
 #define WMI_PROX_RANGE_NUM     (3)
 
@@ -219,15 +219,6 @@ struct wmi_disconnect_sta_cmd {
        __le16 disconnect_reason;
 } __packed;
 
-/*
- * WMI_RECONNECT_CMDID
- */
-struct wmi_reconnect_cmd {
-       u8 channel;                     /* hint */
-       u8 reserved;
-       u8 bssid[WMI_MAC_LEN];          /* mandatory if set */
-} __packed;
-
 
 /*
  * WMI_SET_PMK_CMDID
@@ -296,11 +287,13 @@ enum wmi_scan_type {
        WMI_LONG_SCAN           = 0,
        WMI_SHORT_SCAN          = 1,
        WMI_PBC_SCAN            = 2,
+       WMI_ACTIVE_SCAN         = 3,
+       WMI_DIRECT_SCAN         = 4,
 };
 
 struct wmi_start_scan_cmd {
-       u8 reserved[8];
-
+       u8 direct_scan_mac_addr[6];
+       u8 reserved[2];
        __le32 home_dwell_time; /* Max duration in the home channel(ms) */
        __le32 force_scan_interval;     /* Time interval between scans (ms)*/
        u8 scan_type;           /* wmi_scan_type */
@@ -332,6 +325,7 @@ struct wmi_probed_ssid_cmd {
        u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
+
 /*
  * WMI_SET_APPIE_CMDID
  * Add Application specified IE to a management frame
@@ -427,7 +421,7 @@ struct wmi_bcon_ctrl_cmd {
        __le16 frag_num;
        __le64 ss_mask;
        u8 network_type;
-       u8 reserved;
+       u8 pcp_max_assoc_sta;
        u8 disable_sec_offload;
        u8 disable_sec;
 } __packed;
@@ -450,7 +444,7 @@ enum wmi_port_role {
 struct wmi_port_allocate_cmd {
        u8 mac[WMI_MAC_LEN];
        u8 port_role;
-       u8 midid;
+       u8 mid;
 } __packed;
 
 /*
@@ -467,6 +461,7 @@ struct wmi_delete_port_cmd {
 enum wmi_discovery_mode {
        WMI_DISCOVERY_MODE_NON_OFFLOAD  = 0,
        WMI_DISCOVERY_MODE_OFFLOAD      = 1,
+       WMI_DISCOVERY_MODE_PEER2PEER    = 2,
 };
 
 struct wmi_p2p_cfg_cmd {
@@ -493,7 +488,8 @@ struct wmi_power_mgmt_cfg_cmd {
  */
 struct wmi_pcp_start_cmd {
        __le16 bcon_interval;
-       u8 reserved0[10];
+       u8 pcp_max_assoc_sta;
+       u8 reserved0[9];
        u8 network_type;
        u8 channel;
        u8 disable_sec_offload;
@@ -857,6 +853,7 @@ enum wmi_event_id {
        WMI_RF_MGMT_STATUS_EVENTID              = 0x1853,
        WMI_BF_SM_MGMT_DONE_EVENTID             = 0x1838,
        WMI_RX_MGMT_PACKET_EVENTID              = 0x1840,
+       WMI_TX_MGMT_PACKET_EVENTID              = 0x1841,
 
        /* Performance monitoring events */
        WMI_DATA_PORT_OPEN_EVENTID              = 0x1860,
@@ -1040,16 +1037,23 @@ enum wmi_disconnect_reason {
 struct wmi_disconnect_event {
        __le16 protocol_reason_status;  /* reason code, see 802.11 spec. */
        u8 bssid[WMI_MAC_LEN];          /* set if known */
-       u8 disconnect_reason;           /* see wmi_disconnect_reason_e */
-       u8 assoc_resp_len;
-       u8 assoc_info[0];
+       u8 disconnect_reason;           /* see wmi_disconnect_reason */
+       u8 assoc_resp_len;              /* not in use */
+       u8 assoc_info[0];               /* not in use */
 } __packed;
 
 /*
  * WMI_SCAN_COMPLETE_EVENTID
  */
+enum scan_status {
+       WMI_SCAN_SUCCESS        = 0,
+       WMI_SCAN_FAILED         = 1,
+       WMI_SCAN_ABORTED        = 2,
+       WMI_SCAN_REJECTED       = 3,
+};
+
 struct wmi_scan_complete_event {
-       __le32 status;
+       __le32 status;  /* scan_status */
 } __packed;
 
 /*
@@ -1256,6 +1260,14 @@ struct wmi_rx_mgmt_info {
        u8 channel;     /* From Radio MNGR */
 } __packed;
 
+
+/*
+ * WMI_TX_MGMT_PACKET_EVENTID
+ */
+struct wmi_tx_mgmt_packet_event {
+       u8 payload[0];
+} __packed;
+
 struct wmi_rx_mgmt_packet_event {
        struct wmi_rx_mgmt_info info;
        u8 payload[0];
index 088d544ec63f940b2a7b2234f1eac9b7458a19ce..e3f67b8d3f8003d546867b51648d25fda81d0f15 100644 (file)
@@ -1,7 +1,8 @@
 config B43
        tristate "Broadcom 43xx wireless support (mac80211 stack)"
-       depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
-       select SSB
+       depends on (BCMA_POSSIBLE || SSB_POSSIBLE) && MAC80211 && HAS_DMA
+       select BCMA if B43_BCMA
+       select SSB if B43_SSB
        select FW_LOADER
        ---help---
          b43 is a driver for the Broadcom 43xx series wireless devices.
@@ -27,14 +28,33 @@ config B43
          If unsure, say M.
 
 config B43_BCMA
-       bool "Support for BCMA bus"
-       depends on B43 && (BCMA = y || BCMA = B43)
-       default y
+       bool
 
 config B43_SSB
        bool
-       depends on B43 && (SSB = y || SSB = B43)
-       default y
+
+choice
+       prompt "Supported bus types"
+       depends on B43
+       default B43_BUSES_BCMA_AND_SSB
+
+config B43_BUSES_BCMA_AND_SSB
+       bool "BCMA and SSB"
+       depends on BCMA_POSSIBLE && SSB_POSSIBLE
+       select B43_BCMA
+       select B43_SSB
+
+config B43_BUSES_BCMA
+       bool "BCMA only"
+       depends on BCMA_POSSIBLE
+       select B43_BCMA
+
+config B43_BUSES_SSB
+       bool "SSB only"
+       depends on SSB_POSSIBLE
+       select B43_SSB
+
+endchoice
 
 # Auto-select SSB PCI-HOST support, if possible
 config B43_PCI_AUTOSELECT
@@ -53,7 +73,7 @@ config B43_PCICORE_AUTOSELECT
 
 config B43_PCMCIA
        bool "Broadcom 43xx PCMCIA device support"
-       depends on B43 && SSB_PCMCIAHOST_POSSIBLE
+       depends on B43 && B43_SSB && SSB_PCMCIAHOST_POSSIBLE
        select SSB_PCMCIAHOST
        ---help---
          Broadcom 43xx PCMCIA device support.
@@ -73,7 +93,7 @@ config B43_PCMCIA
 
 config B43_SDIO
        bool "Broadcom 43xx SDIO device support"
-       depends on B43 && SSB_SDIOHOST_POSSIBLE
+       depends on B43 && B43_SSB && SSB_SDIOHOST_POSSIBLE
        select SSB_SDIOHOST
        ---help---
          Broadcom 43xx device support for Soft-MAC SDIO devices.
@@ -98,7 +118,7 @@ config B43_BCMA_PIO
 
 config B43_PIO
        bool
-       depends on B43
+       depends on B43 && B43_SSB
        select SSB_BLOCKIO
        default y
 
@@ -116,7 +136,7 @@ config B43_PHY_N
 
 config B43_PHY_LP
        bool "Support for low-power (LP-PHY) devices"
-       depends on B43
+       depends on B43 && B43_SSB
        default y
        ---help---
          Support for the LP-PHY.
index 54376fddfaf9f11fef6e4a7a0ae5554d5e427f4c..4113b69347640359aa22020eb0dcfd7efe0b7c14 100644 (file)
@@ -915,10 +915,6 @@ struct b43_wl {
        char rng_name[30 + 1];
 #endif /* CONFIG_B43_HWRNG */
 
-       /* List of all wireless devices on this chip */
-       struct list_head devlist;
-       u8 nr_devs;
-
        bool radiotap_enabled;
        bool radio_enabled;
 
index 184c95659279070f2a64edfa6628f109ded51f22..f3205c6988bc4354a14a5226011f762868253936 100644 (file)
@@ -5,7 +5,9 @@ enum b43_bus_type {
 #ifdef CONFIG_B43_BCMA
        B43_BUS_BCMA,
 #endif
+#ifdef CONFIG_B43_SSB
        B43_BUS_SSB,
+#endif
 };
 
 struct b43_bus_dev {
@@ -52,13 +54,21 @@ struct b43_bus_dev {
 
 static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev)
 {
+#ifdef CONFIG_B43_SSB
        return (dev->bus_type == B43_BUS_SSB &&
                dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA);
+#else
+       return false;
+#endif
 }
 static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev)
 {
+#ifdef CONFIG_B43_SSB
        return (dev->bus_type == B43_BUS_SSB &&
                dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO);
+#else
+       return false;
+#endif
 }
 
 struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core);
index 69fc3d65531a7eeb88c4901f804ab197dbbb939e..3d67e6b08e1cc8c4a1cdbf00bf6b1125ea24ae6a 100644 (file)
@@ -182,7 +182,7 @@ static struct ieee80211_rate __b43_ratetable[] = {
 #define b43_g_ratetable                (__b43_ratetable + 0)
 #define b43_g_ratetable_size   12
 
-#define CHAN4G(_channel, _freq, _flags) {                      \
+#define CHAN2G(_channel, _freq, _flags) {                      \
        .band                   = IEEE80211_BAND_2GHZ,          \
        .center_freq            = (_freq),                      \
        .hw_value               = (_channel),                   \
@@ -191,23 +191,31 @@ static struct ieee80211_rate __b43_ratetable[] = {
        .max_power              = 30,                           \
 }
 static struct ieee80211_channel b43_2ghz_chantable[] = {
-       CHAN4G(1, 2412, 0),
-       CHAN4G(2, 2417, 0),
-       CHAN4G(3, 2422, 0),
-       CHAN4G(4, 2427, 0),
-       CHAN4G(5, 2432, 0),
-       CHAN4G(6, 2437, 0),
-       CHAN4G(7, 2442, 0),
-       CHAN4G(8, 2447, 0),
-       CHAN4G(9, 2452, 0),
-       CHAN4G(10, 2457, 0),
-       CHAN4G(11, 2462, 0),
-       CHAN4G(12, 2467, 0),
-       CHAN4G(13, 2472, 0),
-       CHAN4G(14, 2484, 0),
+       CHAN2G(1, 2412, 0),
+       CHAN2G(2, 2417, 0),
+       CHAN2G(3, 2422, 0),
+       CHAN2G(4, 2427, 0),
+       CHAN2G(5, 2432, 0),
+       CHAN2G(6, 2437, 0),
+       CHAN2G(7, 2442, 0),
+       CHAN2G(8, 2447, 0),
+       CHAN2G(9, 2452, 0),
+       CHAN2G(10, 2457, 0),
+       CHAN2G(11, 2462, 0),
+       CHAN2G(12, 2467, 0),
+       CHAN2G(13, 2472, 0),
+       CHAN2G(14, 2484, 0),
 };
-#undef CHAN4G
+#undef CHAN2G
 
+#define CHAN4G(_channel, _flags) {                             \
+       .band                   = IEEE80211_BAND_5GHZ,          \
+       .center_freq            = 4000 + (5 * (_channel)),      \
+       .hw_value               = (_channel),                   \
+       .flags                  = (_flags),                     \
+       .max_antenna_gain       = 0,                            \
+       .max_power              = 30,                           \
+}
 #define CHAN5G(_channel, _flags) {                             \
        .band                   = IEEE80211_BAND_5GHZ,          \
        .center_freq            = 5000 + (5 * (_channel)),      \
@@ -217,6 +225,18 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
        .max_power              = 30,                           \
 }
 static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
+       CHAN4G(184, 0),         CHAN4G(186, 0),
+       CHAN4G(188, 0),         CHAN4G(190, 0),
+       CHAN4G(192, 0),         CHAN4G(194, 0),
+       CHAN4G(196, 0),         CHAN4G(198, 0),
+       CHAN4G(200, 0),         CHAN4G(202, 0),
+       CHAN4G(204, 0),         CHAN4G(206, 0),
+       CHAN4G(208, 0),         CHAN4G(210, 0),
+       CHAN4G(212, 0),         CHAN4G(214, 0),
+       CHAN4G(216, 0),         CHAN4G(218, 0),
+       CHAN4G(220, 0),         CHAN4G(222, 0),
+       CHAN4G(224, 0),         CHAN4G(226, 0),
+       CHAN4G(228, 0),
        CHAN5G(32, 0),          CHAN5G(34, 0),
        CHAN5G(36, 0),          CHAN5G(38, 0),
        CHAN5G(40, 0),          CHAN5G(42, 0),
@@ -260,18 +280,7 @@ static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
        CHAN5G(170, 0),         CHAN5G(172, 0),
        CHAN5G(174, 0),         CHAN5G(176, 0),
        CHAN5G(178, 0),         CHAN5G(180, 0),
-       CHAN5G(182, 0),         CHAN5G(184, 0),
-       CHAN5G(186, 0),         CHAN5G(188, 0),
-       CHAN5G(190, 0),         CHAN5G(192, 0),
-       CHAN5G(194, 0),         CHAN5G(196, 0),
-       CHAN5G(198, 0),         CHAN5G(200, 0),
-       CHAN5G(202, 0),         CHAN5G(204, 0),
-       CHAN5G(206, 0),         CHAN5G(208, 0),
-       CHAN5G(210, 0),         CHAN5G(212, 0),
-       CHAN5G(214, 0),         CHAN5G(216, 0),
-       CHAN5G(218, 0),         CHAN5G(220, 0),
-       CHAN5G(222, 0),         CHAN5G(224, 0),
-       CHAN5G(226, 0),         CHAN5G(228, 0),
+       CHAN5G(182, 0),
 };
 
 static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
@@ -295,6 +304,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
        CHAN5G(208, 0),         CHAN5G(212, 0),
        CHAN5G(216, 0),
 };
+#undef CHAN4G
 #undef CHAN5G
 
 static struct ieee80211_supported_band b43_band_5GHz_nphy = {
@@ -1175,18 +1185,7 @@ static void b43_bcma_phy_reset(struct b43_wldev *dev)
        bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
        udelay(2);
 
-       /* Take PHY out of reset */
-       flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
-       flags &= ~B43_BCMA_IOCTL_PHY_RESET;
-       flags |= BCMA_IOCTL_FGC;
-       bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
-       udelay(1);
-
-       /* Do not force clock anymore */
-       flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
-       flags &= ~BCMA_IOCTL_FGC;
-       bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
-       udelay(1);
+       b43_phy_take_out_of_reset(dev);
 }
 
 static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
@@ -1195,18 +1194,22 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
                  B43_BCMA_CLKCTLST_PHY_PLL_REQ;
        u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
                     B43_BCMA_CLKCTLST_PHY_PLL_ST;
+       u32 flags;
+
+       flags = B43_BCMA_IOCTL_PHY_CLKEN;
+       if (gmode)
+               flags |= B43_BCMA_IOCTL_GMODE;
+       b43_device_enable(dev, flags);
 
-       b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
        bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
        b43_bcma_phy_reset(dev);
        bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
 }
 #endif
 
+#ifdef CONFIG_B43_SSB
 static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
 {
-       struct ssb_device *sdev = dev->dev->sdev;
-       u32 tmslow;
        u32 flags = 0;
 
        if (gmode)
@@ -1218,18 +1221,9 @@ static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
        b43_device_enable(dev, flags);
        msleep(2);              /* Wait for the PLL to turn on. */
 
-       /* Now take the PHY out of Reset again */
-       tmslow = ssb_read32(sdev, SSB_TMSLOW);
-       tmslow |= SSB_TMSLOW_FGC;
-       tmslow &= ~B43_TMSLOW_PHYRESET;
-       ssb_write32(sdev, SSB_TMSLOW, tmslow);
-       ssb_read32(sdev, SSB_TMSLOW);   /* flush */
-       msleep(1);
-       tmslow &= ~SSB_TMSLOW_FGC;
-       ssb_write32(sdev, SSB_TMSLOW, tmslow);
-       ssb_read32(sdev, SSB_TMSLOW);   /* flush */
-       msleep(1);
+       b43_phy_take_out_of_reset(dev);
 }
+#endif
 
 void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode)
 {
@@ -2704,32 +2698,37 @@ static int b43_upload_initvals(struct b43_wldev *dev)
        struct b43_firmware *fw = &dev->fw;
        const struct b43_iv *ivals;
        size_t count;
-       int err;
 
        hdr = (const struct b43_fw_header *)(fw->initvals.data->data);
        ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len);
        count = be32_to_cpu(hdr->size);
-       err = b43_write_initvals(dev, ivals, count,
+       return b43_write_initvals(dev, ivals, count,
                                 fw->initvals.data->size - hdr_len);
-       if (err)
-               goto out;
-       if (fw->initvals_band.data) {
-               hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
-               ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
-               count = be32_to_cpu(hdr->size);
-               err = b43_write_initvals(dev, ivals, count,
-                                        fw->initvals_band.data->size - hdr_len);
-               if (err)
-                       goto out;
-       }
-out:
+}
 
-       return err;
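+/* Upload the band-specific initvals, if the firmware provides them. */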
+static int b43_upload_initvals_band(struct b43_wldev *dev)
+{
+       const size_t hdr_len = sizeof(struct b43_fw_header);
+       const struct b43_fw_header *hdr;
+       struct b43_firmware *fw = &dev->fw;
+       const struct b43_iv *ivals;
+       size_t count;
+
+       if (!fw->initvals_band.data)
+               return 0;
+
+       hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
+       ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
+       count = be32_to_cpu(hdr->size);
+       return b43_write_initvals(dev, ivals, count,
+                                 fw->initvals_band.data->size - hdr_len);
 }
 
 /* Initialize the GPIOs
  * http://bcm-specs.sipsolutions.net/GPIO
  */
+
+#ifdef CONFIG_B43_SSB
 static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
 {
        struct ssb_bus *bus = dev->dev->sdev->bus;
@@ -2740,10 +2739,13 @@ static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
        return bus->chipco.dev;
 #endif
 }
+#endif
 
 static int b43_gpio_init(struct b43_wldev *dev)
 {
+#ifdef CONFIG_B43_SSB
        struct ssb_device *gpiodev;
+#endif
        u32 mask, set;
 
        b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0);
@@ -2802,7 +2804,9 @@ static int b43_gpio_init(struct b43_wldev *dev)
 /* Turn off all GPIO stuff. Call this on module unload, for example. */
 static void b43_gpio_cleanup(struct b43_wldev *dev)
 {
+#ifdef CONFIG_B43_SSB
        struct ssb_device *gpiodev;
+#endif
 
        switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
@@ -3086,6 +3090,10 @@ static int b43_chip_init(struct b43_wldev *dev)
        if (err)
                goto err_gpio_clean;
 
+       err = b43_upload_initvals_band(dev);
+       if (err)
+               goto err_gpio_clean;
+
        /* Turn the Analog on and initialize the PHY. */
        phy->ops->switch_analog(dev, 1);
        err = b43_phy_init(dev);
@@ -3685,37 +3693,6 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw,
        mutex_unlock(&wl->mutex);
 }
 
-static void b43_put_phy_into_reset(struct b43_wldev *dev)
-{
-       u32 tmp;
-
-       switch (dev->dev->bus_type) {
-#ifdef CONFIG_B43_BCMA
-       case B43_BUS_BCMA:
-               b43err(dev->wl,
-                      "Putting PHY into reset not supported on BCMA\n");
-               break;
-#endif
-#ifdef CONFIG_B43_SSB
-       case B43_BUS_SSB:
-               tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
-               tmp &= ~B43_TMSLOW_GMODE;
-               tmp |= B43_TMSLOW_PHYRESET;
-               tmp |= SSB_TMSLOW_FGC;
-               ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
-               msleep(1);
-
-               tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
-               tmp &= ~SSB_TMSLOW_FGC;
-               tmp |= B43_TMSLOW_PHYRESET;
-               ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
-               msleep(1);
-
-               break;
-#endif
-       }
-}
-
 static const char *band_to_string(enum ieee80211_band band)
 {
        switch (band) {
@@ -3731,94 +3708,73 @@ static const char *band_to_string(enum ieee80211_band band)
 }
 
 /* Expects wl->mutex locked */
-static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
+static int b43_switch_band(struct b43_wldev *dev,
+                          struct ieee80211_channel *chan)
 {
-       struct b43_wldev *up_dev = NULL;
-       struct b43_wldev *down_dev;
-       struct b43_wldev *d;
-       int err;
-       bool uninitialized_var(gmode);
-       int prev_status;
+       struct b43_phy *phy = &dev->phy;
+       bool gmode;
+       u32 tmp;
 
-       /* Find a device and PHY which supports the band. */
-       list_for_each_entry(d, &wl->devlist, list) {
-               switch (chan->band) {
-               case IEEE80211_BAND_5GHZ:
-                       if (d->phy.supports_5ghz) {
-                               up_dev = d;
-                               gmode = false;
-                       }
-                       break;
-               case IEEE80211_BAND_2GHZ:
-                       if (d->phy.supports_2ghz) {
-                               up_dev = d;
-                               gmode = true;
-                       }
-                       break;
-               default:
-                       B43_WARN_ON(1);
-                       return -EINVAL;
-               }
-               if (up_dev)
-                       break;
+       switch (chan->band) {
+       case IEEE80211_BAND_5GHZ:
+               gmode = false;
+               break;
+       case IEEE80211_BAND_2GHZ:
+               gmode = true;
+               break;
+       default:
+               B43_WARN_ON(1);
+               return -EINVAL;
        }
-       if (!up_dev) {
-               b43err(wl, "Could not find a device for %s-GHz band operation\n",
+
+       if (!((gmode && phy->supports_2ghz) ||
+             (!gmode && phy->supports_5ghz))) {
+               b43err(dev->wl, "This device doesn't support %s-GHz band\n",
                       band_to_string(chan->band));
                return -ENODEV;
        }
-       if ((up_dev == wl->current_dev) &&
-           (!!wl->current_dev->phy.gmode == !!gmode)) {
+
+       if (!!phy->gmode == !!gmode) {
                /* This device is already running. */
                return 0;
        }
-       b43dbg(wl, "Switching to %s-GHz band\n",
+
+       b43dbg(dev->wl, "Switching to %s GHz band\n",
               band_to_string(chan->band));
-       down_dev = wl->current_dev;
 
-       prev_status = b43_status(down_dev);
-       /* Shutdown the currently running core. */
-       if (prev_status >= B43_STAT_STARTED)
-               down_dev = b43_wireless_core_stop(down_dev);
-       if (prev_status >= B43_STAT_INITIALIZED)
-               b43_wireless_core_exit(down_dev);
+       b43_software_rfkill(dev, true);
 
-       if (down_dev != up_dev) {
-               /* We switch to a different core, so we put PHY into
-                * RESET on the old core. */
-               b43_put_phy_into_reset(down_dev);
+       phy->gmode = gmode;
+       b43_phy_put_into_reset(dev);
+       switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+       case B43_BUS_BCMA:
+               tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+               if (gmode)
+                       tmp |= B43_BCMA_IOCTL_GMODE;
+               else
+                       tmp &= ~B43_BCMA_IOCTL_GMODE;
+               bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+               break;
+#endif
+#ifdef CONFIG_B43_SSB
+       case B43_BUS_SSB:
+               tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+               if (gmode)
+                       tmp |= B43_TMSLOW_GMODE;
+               else
+                       tmp &= ~B43_TMSLOW_GMODE;
+               ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+               break;
+#endif
        }
+       b43_phy_take_out_of_reset(dev);
 
-       /* Now start the new core. */
-       up_dev->phy.gmode = gmode;
-       if (prev_status >= B43_STAT_INITIALIZED) {
-               err = b43_wireless_core_init(up_dev);
-               if (err) {
-                       b43err(wl, "Fatal: Could not initialize device for "
-                              "selected %s-GHz band\n",
-                              band_to_string(chan->band));
-                       goto init_failure;
-               }
-       }
-       if (prev_status >= B43_STAT_STARTED) {
-               err = b43_wireless_core_start(up_dev);
-               if (err) {
-                       b43err(wl, "Fatal: Could not start device for "
-                              "selected %s-GHz band\n",
-                              band_to_string(chan->band));
-                       b43_wireless_core_exit(up_dev);
-                       goto init_failure;
-               }
-       }
-       B43_WARN_ON(b43_status(up_dev) != prev_status);
+       b43_upload_initvals_band(dev);
 
-       wl->current_dev = up_dev;
+       b43_phy_init(dev);
 
        return 0;
-init_failure:
-       /* Whoops, failed to init the new core. No core is operating now. */
-       wl->current_dev = NULL;
-       return err;
 }
 
 /* Write the short and long frame retry limit values. */
@@ -3851,8 +3807,10 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
 
        dev = wl->current_dev;
 
+       b43_mac_suspend(dev);
+
        /* Switch the band (if necessary). This might change the active core. */
-       err = b43_switch_band(wl, conf->chandef.chan);
+       err = b43_switch_band(dev, conf->chandef.chan);
        if (err)
                goto out_unlock_mutex;
 
@@ -3871,8 +3829,6 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
        else
                phy->is_40mhz = false;
 
-       b43_mac_suspend(dev);
-
        if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
                b43_set_retry_limits(dev, conf->short_frame_max_tx_count,
                                          conf->long_frame_max_tx_count);
@@ -4582,8 +4538,12 @@ static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
        struct ssb_bus *bus;
        u32 tmp;
 
+#ifdef CONFIG_B43_SSB
        if (dev->dev->bus_type != B43_BUS_SSB)
                return;
+#else
+       return;
+#endif
 
        bus = dev->dev->sdev->bus;
 
@@ -4738,7 +4698,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
        }
        if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)
                hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */
-#ifdef CONFIG_SSB_DRIVER_PCICORE
+#if defined(CONFIG_B43_SSB) && defined(CONFIG_SSB_DRIVER_PCICORE)
        if (dev->dev->bus_type == B43_BUS_SSB &&
            dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
            dev->dev->sdev->bus->pcicore.dev->id.revision <= 10)
@@ -5129,10 +5089,81 @@ static void b43_wireless_core_detach(struct b43_wldev *dev)
        b43_phy_free(dev);
 }
 
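+/* Derive 2.4/5 GHz support from the (PCI or SPROM) device ID, falling back
+ * to a guess based on the PHY type.
+ */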
+static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
+                               bool *have_5ghz_phy)
+{
+       u16 dev_id = 0;
+
+#ifdef CONFIG_B43_BCMA
+       if (dev->dev->bus_type == B43_BUS_BCMA &&
+           dev->dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI)
+               dev_id = dev->dev->bdev->bus->host_pci->device;
+#endif
+#ifdef CONFIG_B43_SSB
+       if (dev->dev->bus_type == B43_BUS_SSB &&
+           dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
+               dev_id = dev->dev->sdev->bus->host_pci->device;
+#endif
+       /* Override with SPROM value if available */
+       if (dev->dev->bus_sprom->dev_id)
+               dev_id = dev->dev->bus_sprom->dev_id;
+
+       /* Note: the IDs below can be "virtual" (not matching e.g. a real PCI ID) */
+       switch (dev_id) {
+       case 0x4324: /* BCM4306 */
+       case 0x4312: /* BCM4311 */
+       case 0x4319: /* BCM4318 */
+       case 0x4328: /* BCM4321 */
+       case 0x432b: /* BCM4322 */
+       case 0x4350: /* BCM43222 */
+       case 0x4353: /* BCM43224 */
+       case 0x0576: /* BCM43224 */
+       case 0x435f: /* BCM6362 */
+       case 0x4331: /* BCM4331 */
+       case 0x4359: /* BCM43228 */
+       case 0x43a0: /* BCM4360 */
+       case 0x43b1: /* BCM4352 */
+               /* Dual band devices */
+               *have_2ghz_phy = true;
+               *have_5ghz_phy = true;
+               return;
+       case 0x4321: /* BCM4306 */
+       case 0x4313: /* BCM4311 */
+       case 0x431a: /* BCM4318 */
+       case 0x432a: /* BCM4321 */
+       case 0x432d: /* BCM4322 */
+       case 0x4352: /* BCM43222 */
+       case 0x4333: /* BCM4331 */
+       case 0x43a2: /* BCM4360 */
+       case 0x43b3: /* BCM4352 */
+               /* 5 GHz only devices */
+               *have_2ghz_phy = false;
+               *have_5ghz_phy = true;
+               return;
+       }
+
+       /* As a fallback, try to guess using PHY type */
+       switch (dev->phy.type) {
+       case B43_PHYTYPE_A:
+               *have_2ghz_phy = false;
+               *have_5ghz_phy = true;
+               return;
+       case B43_PHYTYPE_G:
+       case B43_PHYTYPE_N:
+       case B43_PHYTYPE_LP:
+       case B43_PHYTYPE_HT:
+       case B43_PHYTYPE_LCN:
+               *have_2ghz_phy = true;
+               *have_5ghz_phy = false;
+               return;
+       }
+
+       B43_WARN_ON(1);
+}
+
 static int b43_wireless_core_attach(struct b43_wldev *dev)
 {
        struct b43_wl *wl = dev->wl;
-       struct pci_dev *pdev = NULL;
        int err;
        u32 tmp;
        bool have_2ghz_phy = false, have_5ghz_phy = false;
@@ -5144,19 +5175,13 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
         * that in core_init(), too.
         */
 
-#ifdef CONFIG_B43_SSB
-       if (dev->dev->bus_type == B43_BUS_SSB &&
-           dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
-               pdev = dev->dev->sdev->bus->host_pci;
-#endif
-
        err = b43_bus_powerup(dev, 0);
        if (err) {
                b43err(wl, "Bus powerup failed\n");
                goto out;
        }
 
-       /* Get the PHY type. */
+       /* Try to guess supported bands for the first init needs */
        switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
        case B43_BUS_BCMA:
@@ -5178,51 +5203,31 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
        }
 
        dev->phy.gmode = have_2ghz_phy;
-       dev->phy.radio_on = true;
        b43_wireless_core_reset(dev, dev->phy.gmode);
 
+       /* Get the PHY type. */
        err = b43_phy_versioning(dev);
        if (err)
                goto err_powerdown;
-       /* Check if this device supports multiband. */
-       if (!pdev ||
-           (pdev->device != 0x4312 &&
-            pdev->device != 0x4319 && pdev->device != 0x4324)) {
-               /* No multiband support. */
-               have_2ghz_phy = false;
+
+       /* Get real info about supported bands */
+       b43_supported_bands(dev, &have_2ghz_phy, &have_5ghz_phy);
+
+       /* We don't support 5 GHz on some PHYs yet */
+       switch (dev->phy.type) {
+       case B43_PHYTYPE_A:
+       case B43_PHYTYPE_N:
+       case B43_PHYTYPE_LP:
+       case B43_PHYTYPE_HT:
+               b43warn(wl, "5 GHz band is unsupported on this PHY\n");
                have_5ghz_phy = false;
-               switch (dev->phy.type) {
-               case B43_PHYTYPE_A:
-                       have_5ghz_phy = true;
-                       break;
-               case B43_PHYTYPE_LP: //FIXME not always!
-#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
-                       have_5ghz_phy = 1;
-#endif
-               case B43_PHYTYPE_G:
-               case B43_PHYTYPE_N:
-               case B43_PHYTYPE_HT:
-               case B43_PHYTYPE_LCN:
-                       have_2ghz_phy = true;
-                       break;
-               default:
-                       B43_WARN_ON(1);
-               }
        }
-       if (dev->phy.type == B43_PHYTYPE_A) {
-               /* FIXME */
-               b43err(wl, "IEEE 802.11a devices are unsupported\n");
+
+       if (!have_2ghz_phy && !have_5ghz_phy) {
+               b43err(wl, "b43 can't support any band on this device\n");
                err = -EOPNOTSUPP;
                goto err_powerdown;
        }
-       if (1 /* disable A-PHY */) {
-               /* FIXME: For now we disable the A-PHY on multi-PHY devices. */
-               if (dev->phy.type != B43_PHYTYPE_N &&
-                   dev->phy.type != B43_PHYTYPE_LP) {
-                       have_2ghz_phy = true;
-                       have_5ghz_phy = false;
-               }
-       }
 
        err = b43_phy_allocate(dev);
        if (err)
@@ -5270,7 +5275,6 @@ static void b43_one_core_detach(struct b43_bus_dev *dev)
        b43_debugfs_remove_device(wldev);
        b43_wireless_core_detach(wldev);
        list_del(&wldev->list);
-       wl->nr_devs--;
        b43_bus_set_wldev(dev, NULL);
        kfree(wldev);
 }
@@ -5295,8 +5299,6 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
        if (err)
                goto err_kfree_wldev;
 
-       list_add(&wldev->list, &wl->devlist);
-       wl->nr_devs++;
        b43_bus_set_wldev(dev, wldev);
        b43_debugfs_add_device(wldev);
 
@@ -5314,6 +5316,7 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
        (pdev->subsystem_vendor == PCI_VENDOR_ID_##_subvendor) &&       \
        (pdev->subsystem_device == _subdevice)                          )
 
+#ifdef CONFIG_B43_SSB
 static void b43_sprom_fixup(struct ssb_bus *bus)
 {
        struct pci_dev *pdev;
@@ -5345,6 +5348,7 @@ static void b43_wireless_exit(struct b43_bus_dev *dev, struct b43_wl *wl)
        ssb_set_devtypedata(dev->sdev, NULL);
        ieee80211_free_hw(hw);
 }
+#endif
 
 static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
 {
@@ -5386,7 +5390,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
        wl->hw = hw;
        mutex_init(&wl->mutex);
        spin_lock_init(&wl->hardirq_lock);
-       INIT_LIST_HEAD(&wl->devlist);
        INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
        INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
        INIT_WORK(&wl->tx_work, b43_tx_work);
@@ -5486,39 +5489,42 @@ int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
        struct b43_bus_dev *dev;
        struct b43_wl *wl;
        int err;
-       int first = 0;
 
        dev = b43_bus_dev_ssb_init(sdev);
        if (!dev)
                return -ENOMEM;
 
        wl = ssb_get_devtypedata(sdev);
-       if (!wl) {
-               /* Probing the first core. Must setup common struct b43_wl */
-               first = 1;
-               b43_sprom_fixup(sdev->bus);
-               wl = b43_wireless_init(dev);
-               if (IS_ERR(wl)) {
-                       err = PTR_ERR(wl);
-                       goto out;
-               }
-               ssb_set_devtypedata(sdev, wl);
-               B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+       if (wl) {
+               b43err(NULL, "Dual-core devices are not supported\n");
+               err = -ENOTSUPP;
+               goto err_ssb_kfree_dev;
+       }
+
+       b43_sprom_fixup(sdev->bus);
+
+       wl = b43_wireless_init(dev);
+       if (IS_ERR(wl)) {
+               err = PTR_ERR(wl);
+               goto err_ssb_kfree_dev;
        }
+       ssb_set_devtypedata(sdev, wl);
+       B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+
        err = b43_one_core_attach(dev, wl);
        if (err)
-               goto err_wireless_exit;
+               goto err_ssb_wireless_exit;
 
        /* setup and start work to load firmware */
        INIT_WORK(&wl->firmware_load, b43_request_firmware);
        schedule_work(&wl->firmware_load);
 
-      out:
        return err;
 
-      err_wireless_exit:
-       if (first)
-               b43_wireless_exit(dev, wl);
+err_ssb_wireless_exit:
+       b43_wireless_exit(dev, wl);
+err_ssb_kfree_dev:
+       kfree(dev);
        return err;
 }
 
@@ -5546,13 +5552,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
        /* Unregister HW RNG driver */
        b43_rng_exit(wl);
 
-       if (list_empty(&wl->devlist)) {
-               b43_leds_unregister(wl);
-               /* Last core on the chip unregistered.
-                * We can destroy common struct b43_wl.
-                */
-               b43_wireless_exit(dev, wl);
-       }
+       b43_leds_unregister(wl);
+       b43_wireless_exit(dev, wl);
 }
 
 static struct ssb_driver b43_ssb_driver = {
index dbaa51890198945b7552ed506bc3c8bfe4ba2359..fb0ddddde16b06c322a90f687e00c78fc8465d80 100644 (file)
@@ -96,7 +96,8 @@ int b43_phy_init(struct b43_wldev *dev)
 
        phy->channel = ops->get_default_chan(dev);
 
-       ops->software_rfkill(dev, false);
+       phy->ops->switch_analog(dev, true);
+       b43_software_rfkill(dev, false);
        err = ops->init(dev);
        if (err) {
                b43err(dev->wl, "PHY init failed\n");
@@ -116,7 +117,7 @@ err_phy_exit:
        if (ops->exit)
                ops->exit(dev);
 err_block_rf:
-       ops->software_rfkill(dev, true);
+       b43_software_rfkill(dev, true);
 
        return err;
 }
@@ -125,7 +126,7 @@ void b43_phy_exit(struct b43_wldev *dev)
 {
        const struct b43_phy_operations *ops = dev->phy.ops;
 
-       ops->software_rfkill(dev, true);
+       b43_software_rfkill(dev, true);
        if (ops->exit)
                ops->exit(dev);
 }
@@ -312,6 +313,90 @@ void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
        }
 }
 
+void b43_phy_put_into_reset(struct b43_wldev *dev)
+{
+       u32 tmp;
+
+       switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+       case B43_BUS_BCMA:
+               tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+               tmp &= ~B43_BCMA_IOCTL_GMODE;
+               tmp |= B43_BCMA_IOCTL_PHY_RESET;
+               tmp |= BCMA_IOCTL_FGC;
+               bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+               udelay(1);
+
+               tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+               tmp &= ~BCMA_IOCTL_FGC;
+               bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+               udelay(1);
+               break;
+#endif
+#ifdef CONFIG_B43_SSB
+       case B43_BUS_SSB:
+               tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+               tmp &= ~B43_TMSLOW_GMODE;
+               tmp |= B43_TMSLOW_PHYRESET;
+               tmp |= SSB_TMSLOW_FGC;
+               ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+               usleep_range(1000, 2000);
+
+               tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+               tmp &= ~SSB_TMSLOW_FGC;
+               ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+               usleep_range(1000, 2000);
+
+               break;
+#endif
+       }
+}
+
+void b43_phy_take_out_of_reset(struct b43_wldev *dev)
+{
+       u32 tmp;
+
+       switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+       case B43_BUS_BCMA:
+               /* Unset reset bit (with forcing clock) */
+               tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+               tmp &= ~B43_BCMA_IOCTL_PHY_RESET;
+               tmp &= ~B43_BCMA_IOCTL_PHY_CLKEN;
+               tmp |= BCMA_IOCTL_FGC;
+               bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+               udelay(1);
+
+               /* Do not force clock anymore */
+               tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+               tmp &= ~BCMA_IOCTL_FGC;
+               tmp |= B43_BCMA_IOCTL_PHY_CLKEN;
+               bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+               udelay(1);
+               break;
+#endif
+#ifdef CONFIG_B43_SSB
+       case B43_BUS_SSB:
+               /* Unset reset bit (with forcing clock) */
+               tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+               tmp &= ~B43_TMSLOW_PHYRESET;
+               tmp &= ~B43_TMSLOW_PHYCLKEN;
+               tmp |= SSB_TMSLOW_FGC;
+               ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+               ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
+               usleep_range(1000, 2000);
+
+               tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+               tmp &= ~SSB_TMSLOW_FGC;
+               tmp |= B43_TMSLOW_PHYCLKEN;
+               ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+               ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
+               usleep_range(1000, 2000);
+               break;
+#endif
+       }
+}
+
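/*
 * Usage sketch (illustrative only, outside the applied patch): the two
 * helpers above are meant to be used as a bracketed pair, holding the PHY
 * core in reset while it is reconfigured. A hypothetical caller:
 *
 *	b43_phy_put_into_reset(dev);
 *	... reprogram bus/PHY state that requires the PHY to be in reset ...
 *	b43_phy_take_out_of_reset(dev);
 */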
 int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
 {
        struct b43_phy *phy = &(dev->phy);
index f1b999349876bbfc8cad799858434f5e64a14b37..47b55855c37da0363bb213fc92a4491419f7eebf 100644 (file)
@@ -231,7 +231,7 @@ struct b43_phy {
        /* HT info */
        bool is_40mhz;
 
-       /* GMODE bit enabled? */
+       /* Is GMODE (2 GHz mode) bit enabled? */
        bool gmode;
 
        /* Analog Type */
@@ -390,6 +390,9 @@ void b43_phy_lock(struct b43_wldev *dev);
  */
 void b43_phy_unlock(struct b43_wldev *dev);
 
+void b43_phy_put_into_reset(struct b43_wldev *dev);
+void b43_phy_take_out_of_reset(struct b43_wldev *dev);
+
 /**
  * b43_switch_channel - Switch to another channel
  */
index 12f467b8d564f1c5cdca06acbdf65ca8f80b48c8..8f5c14bc10e6fedcab2d7b88a0009470772dad8a 100644 (file)
@@ -1587,6 +1587,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
        b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
 static void b43_phy_initb6(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -1670,7 +1671,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
                b43_radio_write16(dev, 0x50, 0x20);
        }
        if (phy->radio_rev <= 2) {
-               b43_radio_write16(dev, 0x7C, 0x20);
+               b43_radio_write16(dev, 0x50, 0x20);
                b43_radio_write16(dev, 0x5A, 0x70);
                b43_radio_write16(dev, 0x5B, 0x7B);
                b43_radio_write16(dev, 0x5C, 0xB0);
@@ -1686,9 +1687,8 @@ static void b43_phy_initb6(struct b43_wldev *dev)
                b43_phy_write(dev, 0x2A, 0x8AC0);
        b43_phy_write(dev, 0x0038, 0x0668);
        b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
-       if (phy->radio_rev <= 5) {
+       if (phy->radio_rev == 4 || phy->radio_rev == 5)
                b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003);
-       }
        if (phy->radio_rev <= 2)
                b43_radio_write16(dev, 0x005D, 0x000D);
 
index 24ccbe96e0c8a0723bb165b0f32677ab6ba7ded5..41dab89a2942dddbc80388079e5812382f3f6a43 100644 (file)
@@ -257,6 +257,72 @@ static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
        }
 }
 
+static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
+                                              enum n_intc_override intc_override,
+                                              u16 value, u8 core_sel)
+{
+       u16 reg, tmp, tmp2, val;
+       int core;
+
+       for (core = 0; core < 2; core++) {
+               if ((core_sel == 1 && core != 0) ||
+                   (core_sel == 2 && core != 1))
+                       continue;
+
+               reg = (core == 0) ? B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+
+               switch (intc_override) {
+               case N_INTC_OVERRIDE_OFF:
+                       b43_phy_write(dev, reg, 0);
+                       b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+                       break;
+               case N_INTC_OVERRIDE_TRSW:
+                       b43_phy_maskset(dev, reg, ~0xC0, value << 6);
+                       b43_phy_set(dev, reg, 0x400);
+
+                       b43_phy_mask(dev, 0x2ff, ~0xC000 & 0xFFFF);
+                       b43_phy_set(dev, 0x2ff, 0x2000);
+                       b43_phy_set(dev, 0x2ff, 0x0001);
+                       break;
+               case N_INTC_OVERRIDE_PA:
+                       tmp = 0x0030;
+                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+                               val = value << 5;
+                       else
+                               val = value << 4;
+                       b43_phy_maskset(dev, reg, ~tmp, val);
+                       b43_phy_set(dev, reg, 0x1000);
+                       break;
+               case N_INTC_OVERRIDE_EXT_LNA_PU:
+                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                               tmp = 0x0001;
+                               tmp2 = 0x0004;
+                               val = value;
+                       } else {
+                               tmp = 0x0004;
+                               tmp2 = 0x0001;
+                               val = value << 2;
+                       }
+                       b43_phy_maskset(dev, reg, ~tmp, val);
+                       b43_phy_mask(dev, reg, ~tmp2);
+                       break;
+               case N_INTC_OVERRIDE_EXT_LNA_GAIN:
+                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                               tmp = 0x0002;
+                               tmp2 = 0x0008;
+                               val = value << 1;
+                       } else {
+                               tmp = 0x0008;
+                               tmp2 = 0x0002;
+                               val = value << 3;
+                       }
+                       b43_phy_maskset(dev, reg, ~tmp, val);
+                       b43_phy_mask(dev, reg, ~tmp2);
+                       break;
+               }
+       }
+}
+
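/*
 * Reading aid (illustrative, outside the applied patch): in the rev7
 * variant above, core_sel picks which N-PHY core(s) the override is
 * applied to, per the loop filter at the top of the function:
 *
 *	core_sel == 1   ->  core 0 only (B43_NPHY_RFCTL_INTC1)
 *	core_sel == 2   ->  core 1 only (B43_NPHY_RFCTL_INTC2)
 *	any other value ->  both cores
 */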
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
 static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
                                          enum n_intc_override intc_override,
@@ -265,6 +331,12 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
        u8 i, j;
        u16 reg, tmp, val;
 
+       if (dev->phy.rev >= 7) {
+               b43_nphy_rf_ctl_intc_override_rev7(dev, intc_override, value,
+                                                  core);
+               return;
+       }
+
        B43_WARN_ON(dev->phy.rev < 3);
 
        for (i = 0; i < 2; i++) {
@@ -419,7 +491,8 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
                static const u16 clip[] = { 0xFFFF, 0xFFFF };
                if (nphy->deaf_count++ == 0) {
                        nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
-                       b43_nphy_classifier(dev, 0x7, 0);
+                       b43_nphy_classifier(dev, 0x7,
+                                           B43_NPHY_CLASSCTL_WAITEDEN);
                        b43_nphy_read_clip_detection(dev, nphy->clip_state);
                        b43_nphy_write_clip_detection(dev, clip);
                }
@@ -734,9 +807,16 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
        u16 bias, cbias;
        u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
        u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
+       bool is_pkg_fab_smic;
 
        B43_WARN_ON(dev->phy.rev < 3);
 
+       is_pkg_fab_smic =
+               ((dev->dev->chip_id == BCMA_CHIP_ID_BCM43224 ||
+                 dev->dev->chip_id == BCMA_CHIP_ID_BCM43225 ||
+                 dev->dev->chip_id == BCMA_CHIP_ID_BCM43421) &&
+                dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
+
        b43_chantab_radio_2056_upload(dev, e);
        b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
 
@@ -744,7 +824,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
            b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
-               if (dev->dev->chip_id == 0x4716) {
+               if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+                   dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
                        b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
                        b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
                } else {
@@ -752,6 +833,13 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
                }
        }
+       if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
+           b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
+               b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
+       }
        if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
            b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
@@ -767,7 +855,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                                b43_radio_write(dev,
                                        offset | B2056_TX_PADG_IDAC, 0xcc);
 
-                               if (dev->dev->chip_id == 0x4716) {
+                               if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+                                   dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
                                        bias = 0x40;
                                        cbias = 0x45;
                                        pag_boost = 0x5;
@@ -776,6 +865,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                                } else {
                                        bias = 0x25;
                                        cbias = 0x20;
+                                       if (is_pkg_fab_smic) {
+                                               bias = 0x2a;
+                                               cbias = 0x38;
+                                       }
                                        pag_boost = 0x4;
                                        pgag_boost = 0x03;
                                        mixg_boost = 0x65;
@@ -844,6 +937,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        mixa_boost = 0xF;
                }
 
+               cbias = is_pkg_fab_smic ? 0x35 : 0x30;
+
                for (i = 0; i < 2; i++) {
                        offset = i ? B2056_TX1 : B2056_TX0;
 
@@ -862,11 +957,11 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        b43_radio_write(dev,
                                offset | B2056_TX_PADA_CASCBIAS, 0x03);
                        b43_radio_write(dev,
-                               offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+                               offset | B2056_TX_INTPAA_IAUX_STAT, 0x30);
                        b43_radio_write(dev,
-                               offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+                               offset | B2056_TX_INTPAA_IMAIN_STAT, 0x30);
                        b43_radio_write(dev,
-                               offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+                               offset | B2056_TX_INTPAA_CASCBIAS, cbias);
                }
        }
 
@@ -1164,23 +1259,20 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
        u16 seq_mode;
        u32 tmp;
 
-       if (nphy->hang_avoid)
-               b43_nphy_stay_in_carrier_search(dev, true);
+       b43_nphy_stay_in_carrier_search(dev, true);
 
        if ((nphy->bb_mult_save & 0x80000000) == 0) {
                tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
                nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
        }
 
+       /* TODO: add modify_bbmult argument */
        if (!dev->phy.is_40mhz)
                tmp = 0x6464;
        else
                tmp = 0x4747;
        b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
 
-       if (nphy->hang_avoid)
-               b43_nphy_stay_in_carrier_search(dev, false);
-
        b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
 
        if (loops != 0xFFFF)
@@ -1213,6 +1305,8 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
                b43err(dev->wl, "run samples timeout\n");
 
        b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+
+       b43_nphy_stay_in_carrier_search(dev, false);
 }
 
 /**************************************************
@@ -1588,8 +1682,8 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
        struct b43_phy_n *nphy = dev->phy.n;
 
        u16 saved_regs_phy_rfctl[2];
-       u16 saved_regs_phy[13];
-       u16 regs_to_store[] = {
+       u16 saved_regs_phy[22];
+       u16 regs_to_store_rev3[] = {
                B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
                B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
                B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
@@ -1598,6 +1692,20 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
                B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
        };
+       u16 regs_to_store_rev7[] = {
+               B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
+               B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
+               B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
+               0x342, 0x343, 0x346, 0x347,
+               0x2ff,
+               B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
+               B43_NPHY_RFCTL_CMD,
+               B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+               0x340, 0x341, 0x344, 0x345,
+               B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
+       };
+       u16 *regs_to_store;
+       int regs_amount;
 
        u16 class;
 
@@ -1617,6 +1725,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
        u8 rx_core_state;
        int core, i, j, vcm;
 
+       if (dev->phy.rev >= 7) {
+               regs_to_store = regs_to_store_rev7;
+               regs_amount = ARRAY_SIZE(regs_to_store_rev7);
+       } else {
+               regs_to_store = regs_to_store_rev3;
+               regs_amount = ARRAY_SIZE(regs_to_store_rev3);
+       }
+       BUG_ON(regs_amount > ARRAY_SIZE(saved_regs_phy));
+
        class = b43_nphy_classifier(dev, 0, 0);
        b43_nphy_classifier(dev, 7, 4);
        b43_nphy_read_clip_detection(dev, clip_state);
@@ -1624,22 +1741,29 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
        saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
        saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
-       for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+       for (i = 0; i < regs_amount; i++)
                saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
 
        b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7);
        b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
-       b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
-       b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
-       b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
-       b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
-
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-               b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
-               b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+
+       if (dev->phy.rev >= 7) {
+               /* TODO */
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+               } else {
+               }
        } else {
-               b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
-               b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+               b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
+               b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
+               b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
+               b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                       b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
+                       b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+               } else {
+                       b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
+                       b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+               }
        }
 
        rx_core_state = b43_nphy_get_rx_core_state(dev);
@@ -1654,8 +1778,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
                /* Grab RSSI results for every possible VCM */
                for (vcm = 0; vcm < 8; vcm++) {
-                       b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
-                                       vcm << 2);
+                       if (dev->phy.rev >= 7)
+                               ;
+                       else
+                               b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+                                                 0xE3, vcm << 2);
                        b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8);
                }
 
@@ -1682,8 +1809,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                }
 
                /* Select the best VCM */
-               b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
-                                 vcm_final << 2);
+               if (dev->phy.rev >= 7)
+                       ;
+               else
+                       b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+                                         0xE3, vcm_final << 2);
 
                for (i = 0; i < 4; i++) {
                        if (core != i / 2)
@@ -1736,9 +1866,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
        b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
        b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX);
-       b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1);
+       b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
 
-       for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+       for (i = 0; i < regs_amount; i++)
                b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
 
        /* Store for future configuration */
@@ -2494,8 +2624,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
        /* TX to RX */
-       u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
-       u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+       u8 tx2rx_events[7] = { 0x4, 0x3, 0x5, 0x2, 0x1, 0x8, 0x1F };
+       u8 tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1 };
        /* RX to TX */
        u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
                                        0x1F };
@@ -2503,6 +2633,23 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
        u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
 
+       u16 vmids[5][4] = {
+               { 0xa2, 0xb4, 0xb4, 0x89, }, /* 0 */
+               { 0xb4, 0xb4, 0xb4, 0x24, }, /* 1 */
+               { 0xa2, 0xb4, 0xb4, 0x74, }, /* 2 */
+               { 0xa2, 0xb4, 0xb4, 0x270, }, /* 3 */
+               { 0xa2, 0xb4, 0xb4, 0x00, }, /* 4 and 5 */
+       };
+       u16 gains[5][4] = {
+               { 0x02, 0x02, 0x02, 0x00, }, /* 0 */
+               { 0x02, 0x02, 0x02, 0x02, }, /* 1 */
+               { 0x02, 0x02, 0x02, 0x04, }, /* 2 */
+               { 0x02, 0x02, 0x02, 0x00, }, /* 3 */
+               { 0x02, 0x02, 0x02, 0x00, }, /* 4 and 5 */
+       };
+       u16 *vmid, *gain;
+
+       u8 pdet_range;
        u16 tmp16;
        u32 tmp32;
 
@@ -2561,7 +2708,71 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
        b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
 
-       /* TODO */
+       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               pdet_range = sprom->fem.ghz2.pdet_range;
+       else
+               pdet_range = sprom->fem.ghz5.pdet_range;
+       vmid = vmids[min_t(u16, pdet_range, 4)];
+       gain = gains[min_t(u16, pdet_range, 4)];
+       switch (pdet_range) {
+       case 3:
+               if (!(dev->phy.rev >= 4 &&
+                     b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+                       break;
+               /* FALL THROUGH */
+       case 0:
+       case 1:
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+               break;
+       case 2:
+               if (dev->phy.rev >= 6) {
+                       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+                               vmid[3] = 0x94;
+                       else
+                               vmid[3] = 0x8e;
+                       gain[3] = 3;
+               } else if (dev->phy.rev == 5) {
+                       vmid[3] = 0x84;
+                       gain[3] = 2;
+               }
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+               break;
+       case 4:
+       case 5:
+               if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+                       if (pdet_range == 4) {
+                               vmid[3] = 0x8e;
+                               tmp16 = 0x96;
+                               gain[3] = 0x2;
+                       } else {
+                               vmid[3] = 0x89;
+                               tmp16 = 0x89;
+                               gain[3] = 0;
+                       }
+               } else {
+                       if (pdet_range == 4) {
+                               vmid[3] = 0x89;
+                               tmp16 = 0x8b;
+                               gain[3] = 0x2;
+                       } else {
+                               vmid[3] = 0x74;
+                               tmp16 = 0x70;
+                               gain[3] = 0;
+                       }
+               }
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+               vmid[3] = tmp16;
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+               break;
+       }
 
        b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
        b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
@@ -2600,7 +2811,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        /* Dropped probably-always-true condition */
        b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
        b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
-       b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
+       b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH0, 0x0341);
        b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
        b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
        b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
@@ -3211,6 +3422,20 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
        u8 idx, delta;
        u8 i, stf_mode;
 
+       /* Array adj_pwr_tbl corresponds to the hardware table. It consists of
+        * 21 groups, each containing 4 entries.
+        *
+        * First group has entries for CCK modulation.
+        * The rest of the groups have 1 entry per modulation (SISO, CDD, STBC, SDM).
+        *
+        * Group 0 is for CCK
+        * Groups 1..4 use BPSK (group per coding rate)
+        * Groups 5..8 use QPSK (group per coding rate)
+        * Groups 9..12 use 16-QAM (group per coding rate)
+        * Groups 13..16 use 64-QAM (group per coding rate)
+        * Groups 17..20 are unknown
+        */
+
        for (i = 0; i < 4; i++)
                nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
 
@@ -3409,10 +3634,8 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
        }
 
        b43_nphy_tx_prepare_adjusted_power_table(dev);
-       /*
        b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
        b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
-       */
 
        if (nphy->hang_avoid)
                b43_nphy_stay_in_carrier_search(dev, false);
@@ -5124,7 +5347,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
        b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
        if (phy->rev >= 3 && phy->rev <= 6)
-               b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+               b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
        b43_nphy_tx_lp_fbw(dev);
        if (phy->rev >= 3)
                b43_nphy_spur_workaround(dev);
@@ -5441,8 +5664,11 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
 {
        /* Register 1 is a 32-bit register. */
        B43_WARN_ON(reg == 1);
-       /* N-PHY needs 0x100 for read access */
-       reg |= 0x100;
+
+       if (dev->phy.rev >= 7)
+               reg |= 0x200; /* Radio 0x2057 */
+       else
+               reg |= 0x100;
 
        b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
        return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
index b4fd9345d673a3542d94e66260ebc685eed7ef8d..2ce25607c60d389505ee3c9e664ee503adcd5406 100644 (file)
@@ -48,7 +48,7 @@ struct b2056_inittabs_pts {
        unsigned int rx_length;
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -232,7 +232,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -380,7 +380,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
        [B2056_TX_STATUS_TXLPF_RC]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -530,7 +530,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -714,7 +714,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -862,7 +862,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
        [B2056_TX_STATUS_TXLPF_RC]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1012,7 +1012,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1196,7 +1196,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1352,7 +1352,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
        [B2056_TX_GMBB_IDAC7]           = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1502,7 +1502,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1686,7 +1686,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1842,7 +1842,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
        [B2056_TX_GMBB_IDAC7]           = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1992,7 +1992,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2176,7 +2176,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2332,7 +2332,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
        [B2056_TX_GMBB_IDAC7]           = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2482,7 +2482,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2666,7 +2666,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2822,7 +2822,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
        [B2056_TX_GMBB_IDAC7]           = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2972,24 +2972,69 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-#define INITTABSPTS(prefix) \
-       .syn            = prefix##_syn,                 \
-       .syn_length     = ARRAY_SIZE(prefix##_syn),     \
-       .tx             = prefix##_tx,                  \
-       .tx_length      = ARRAY_SIZE(prefix##_tx),      \
-       .rx             = prefix##_rx,                  \
-       .rx_length      = ARRAY_SIZE(prefix##_rx)
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_syn[] = {
+       [B2056_SYN_PLL_PFD]             = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+       [B2056_SYN_PLL_CP2]             = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
+       [B2056_SYN_PLL_LOOPFILTER1]     = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+       [B2056_SYN_PLL_LOOPFILTER2]     = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+       [B2056_SYN_PLL_LOOPFILTER4]     = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
+       [B2056_SYN_PLL_VCO2]            = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+       [B2056_SYN_PLL_VCOCAL12]        = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+       [B2056_SYN_LOGENBUF2]           = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+};
 
-static const struct b2056_inittabs_pts b2056_inittabs[] = {
-       [3] = { INITTABSPTS(b2056_inittab_rev3) },
-       [4] = { INITTABSPTS(b2056_inittab_rev4) },
-       [5] = { INITTABSPTS(b2056_inittab_rev5) },
-       [6] = { INITTABSPTS(b2056_inittab_rev6) },
-       [7] = { INITTABSPTS(b2056_inittab_rev7) },
-       [8] = { INITTABSPTS(b2056_inittab_rev8) },
-       [9] = { INITTABSPTS(b2056_inittab_rev7) },
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_tx[] = {
+       [B2056_TX_PA_SPARE2]            = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
+       [B2056_TX_INTPAA_IAUX_STAT]     = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+       [B2056_TX_INTPAA_IMAIN_STAT]    = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+       [B2056_TX_INTPAA_PASLOPE]       = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+       [B2056_TX_INTPAG_PASLOPE]       = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+       [B2056_TX_PADA_IDAC]            = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+       [B2056_TX_PADA_SLOPE]           = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+       [B2056_TX_PADG_SLOPE]           = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+       [B2056_TX_PGAA_IDAC]            = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+       [B2056_TX_PGAA_SLOPE]           = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+       [B2056_TX_PGAG_SLOPE]           = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+       [B2056_TX_GMBB_IDAC]            = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+       [B2056_TX_TXSPARE1]             = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_rx[] = {
+       [B2056_RX_BIASPOLE_LNAA1_IDAC]  = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+       [B2056_RX_LNAA2_IDAC]           = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+       [B2056_RX_BIASPOLE_LNAG1_IDAC]  = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+       [B2056_RX_LNAG2_IDAC]           = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+       [B2056_RX_MIXA_VCM]             = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+       [B2056_RX_MIXA_LOB_BIAS]        = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+       [B2056_RX_MIXA_BIAS_AUX]        = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+       [B2056_RX_MIXG_VCM]             = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+       [B2056_RX_TIA_IOPAMP]           = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+       [B2056_RX_TIA_QOPAMP]           = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+       [B2056_RX_TIA_IMISC]            = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+       [B2056_RX_TIA_QMISC]            = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+       [B2056_RX_RXLPF_OUTVCM]         = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+       [B2056_RX_VGA_BIAS_DCCANCEL]    = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+       [B2056_RX_RXSPARE3]             = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
 };
 
+#define INITTABSPTS(prefix) \
+       static const struct b2056_inittabs_pts prefix = {       \
+               .syn            = prefix##_syn,                 \
+               .syn_length     = ARRAY_SIZE(prefix##_syn),     \
+               .tx             = prefix##_tx,                  \
+               .tx_length      = ARRAY_SIZE(prefix##_tx),      \
+               .rx             = prefix##_rx,                  \
+               .rx_length      = ARRAY_SIZE(prefix##_rx),      \
+       }
+
+INITTABSPTS(b2056_inittab_phy_rev3);
+INITTABSPTS(b2056_inittab_phy_rev4);
+INITTABSPTS(b2056_inittab_radio_rev5);
+INITTABSPTS(b2056_inittab_radio_rev6);
+INITTABSPTS(b2056_inittab_radio_rev7_9);
+INITTABSPTS(b2056_inittab_radio_rev8);
+INITTABSPTS(b2056_inittab_radio_rev11);
+
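/*
 * For readability (illustrative, outside the applied patch): with the new
 * INITTABSPTS() definition above, an invocation such as
 * INITTABSPTS(b2056_inittab_radio_rev11); expands to roughly:
 *
 *	static const struct b2056_inittabs_pts b2056_inittab_radio_rev11 = {
 *		.syn		= b2056_inittab_radio_rev11_syn,
 *		.syn_length	= ARRAY_SIZE(b2056_inittab_radio_rev11_syn),
 *		.tx		= b2056_inittab_radio_rev11_tx,
 *		.tx_length	= ARRAY_SIZE(b2056_inittab_radio_rev11_tx),
 *		.rx		= b2056_inittab_radio_rev11_rx,
 *		.rx_length	= ARRAY_SIZE(b2056_inittab_radio_rev11_rx),
 *	};
 */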
 #define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
                   r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
                   r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
@@ -3041,7 +3086,7 @@ static const struct b2056_inittabs_pts b2056_inittabs[] = {
        .phy_regs.phy_bw6       = r5
 
 /* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -4036,7 +4081,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] =
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev4[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -5031,7 +5076,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] =
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev5[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -6026,7 +6071,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] =
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev6[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -7021,7 +7066,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] =
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev7_9[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -8016,7 +8061,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[]
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev8[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -9011,6 +9056,1154 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] =
   },
 };
 
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev11[] = {
+       {
+               .freq                   = 4920,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+       },
+       {
+               .freq                   = 4930,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+       },
+       {
+               .freq                   = 4940,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+       },
+       {
+               .freq                   = 4950,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+       },
+       {
+               .freq                   = 4960,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+       },
+       {
+               .freq                   = 4970,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+       },
+       {
+               .freq                   = 4980,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+       },
+       {
+               .freq                   = 4990,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+       },
+       {
+               .freq                   = 5000,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+       },
+       {
+               .freq                   = 5010,
+               RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+       },
+       {
+               .freq                   = 5020,
+               RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+       },
+       {
+               .freq                   = 5030,
+               RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+       },
+       {
+               .freq                   = 5040,
+               RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+       },
+       {
+               .freq                   = 5050,
+               RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+       },
+       {
+               .freq                   = 5060,
+               RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+       },
+       {
+               .freq                   = 5070,
+               RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+       },
+       {
+               .freq                   = 5080,
+               RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+       },
+       {
+               .freq                   = 5090,
+               RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+       },
+       {
+               .freq                   = 5100,
+               RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+       },
+       {
+               .freq                   = 5110,
+               RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+       },
+       {
+               .freq                   = 5120,
+               RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+       },
+       {
+               .freq                   = 5130,
+               RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+       },
+       {
+               .freq                   = 5140,
+               RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+                          0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
+                          0x00, 0x0f, 0x00, 0x6f, 0x00),
+               PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+       },
+       {
+               .freq                   = 5160,
+               RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+                          0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+                          0x00, 0x0e, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+       },
+       {
+               .freq                   = 5170,
+               RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+                          0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+                          0x00, 0x0e, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+       },
+       {
+               .freq                   = 5180,
+               RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
+                          0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+                          0x00, 0x0e, 0x00, 0x6f, 0x00),
+               PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+       },
+       {
+               .freq                   = 5190,
+               RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+       },
+       {
+               .freq                   = 5200,
+               RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+       },
+       {
+               .freq                   = 5210,
+               RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+       },
+       {
+               .freq                   = 5220,
+               RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+                          0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+       },
+       {
+               .freq                   = 5230,
+               RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+                          0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+       },
+       {
+               .freq                   = 5240,
+               RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+                          0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+       },
+       {
+               .freq                   = 5250,
+               RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+                          0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+       },
+       {
+               .freq                   = 5260,
+               RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
+                          0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+       },
+       {
+               .freq                   = 5270,
+               RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
+                          0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
+                          0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+                          0x00, 0x0c, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+       },
+       {
+               .freq                   = 5280,
+               RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+                          0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+                          0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0c, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+       },
+       {
+               .freq                   = 5290,
+               RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+                          0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+                          0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0c, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+       },
+       {
+               .freq                   = 5300,
+               RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+                          0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+                          0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0c, 0x00, 0x6f, 0x00),
+               PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+       },
+       {
+               .freq                   = 5310,
+               RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+                          0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+                          0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0c, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+       },
+       {
+               .freq                   = 5320,
+               RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+                          0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+                          0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0c, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+       },
+       {
+               .freq                   = 5330,
+               RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+                          0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+                          0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0b, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+       },
+       {
+               .freq                   = 5340,
+               RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+                          0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+                          0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0b, 0x00, 0x6f, 0x00),
+               PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+       },
+       {
+               .freq                   = 5350,
+               RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+                          0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+                          0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0b, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+       },
+       {
+               .freq                   = 5360,
+               RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+                          0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+                          0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0a, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+       },
+       {
+               .freq                   = 5370,
+               RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+                          0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+                          0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0a, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+       },
+       {
+               .freq                   = 5380,
+               RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+                          0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+                          0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0a, 0x00, 0x6f, 0x00),
+               PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+       },
+       {
+               .freq                   = 5390,
+               RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+                          0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+                          0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0a, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+       },
+       {
+               .freq                   = 5400,
+               RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+                          0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+                          0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+                          0x00, 0x0a, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+       },
+       {
+               .freq                   = 5410,
+               RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+                          0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+                          0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+                          0x00, 0x0a, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+       },
+       {
+               .freq                   = 5420,
+               RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+                          0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+                          0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+                          0x00, 0x0a, 0x00, 0x6f, 0x00),
+               PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+       },
+       {
+               .freq                   = 5430,
+               RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
+                          0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+                          0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+                          0x00, 0x0a, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+       },
+       {
+               .freq                   = 5440,
+               RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+                          0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+       },
+       {
+               .freq                   = 5450,
+               RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+                          0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+       },
+       {
+               .freq                   = 5460,
+               RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+                          0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+       },
+       {
+               .freq                   = 5470,
+               RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+                          0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+       },
+       {
+               .freq                   = 5480,
+               RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+                          0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+       },
+       {
+               .freq                   = 5490,
+               RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+                          0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+       },
+       {
+               .freq                   = 5500,
+               RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+                          0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+       },
+       {
+               .freq                   = 5510,
+               RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+                          0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+       },
+       {
+               .freq                   = 5520,
+               RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+                          0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+       },
+       {
+               .freq                   = 5530,
+               RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+                          0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+       },
+       {
+               .freq                   = 5540,
+               RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+                          0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+       },
+       {
+               .freq                   = 5550,
+               RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+                          0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+       },
+       {
+               .freq                   = 5560,
+               RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+                          0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+       },
+       {
+               .freq                   = 5570,
+               RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+                          0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+                          0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x09, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+       },
+       {
+               .freq                   = 5580,
+               RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+                          0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+                          0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x08, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+       },
+       {
+               .freq                   = 5590,
+               RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+                          0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+                          0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x08, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+       },
+       {
+               .freq                   = 5600,
+               RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+                          0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+                          0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x08, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+       },
+       {
+               .freq                   = 5610,
+               RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+                          0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+                          0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x08, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+       },
+       {
+               .freq                   = 5620,
+               RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+                          0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+                          0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x07, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+       },
+       {
+               .freq                   = 5630,
+               RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+                          0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+                          0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x07, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+       },
+       {
+               .freq                   = 5640,
+               RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+                          0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+                          0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x07, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+       },
+       {
+               .freq                   = 5650,
+               RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+                          0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+                          0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x07, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+       },
+       {
+               .freq                   = 5660,
+               RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+                          0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+       },
+       {
+               .freq                   = 5670,
+               RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+                          0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+       },
+       {
+               .freq                   = 5680,
+               RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+                          0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+       },
+       {
+               .freq                   = 5690,
+               RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+                          0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6f, 0x00),
+               PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+       },
+       {
+               .freq                   = 5700,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+                          0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6e, 0x00),
+               PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+       },
+       {
+               .freq                   = 5710,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+                          0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6e, 0x00),
+               PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+       },
+       {
+               .freq                   = 5720,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+                          0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6e, 0x00),
+               PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+       },
+       {
+               .freq                   = 5725,
+               RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+                          0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6e, 0x00),
+               PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+       },
+       {
+               .freq                   = 5730,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+                          0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6e, 0x00),
+               PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+       },
+       {
+               .freq                   = 5735,
+               RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+                          0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6d, 0x00),
+               PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+       },
+       {
+               .freq                   = 5740,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+                          0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6d, 0x00),
+               PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+       },
+       {
+               .freq                   = 5745,
+               RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+                          0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6d, 0x00),
+               PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+       },
+       {
+               .freq                   = 5750,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+                          0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6d, 0x00),
+               PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+       },
+       {
+               .freq                   = 5755,
+               RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+                          0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6c, 0x00),
+               PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+       },
+       {
+               .freq                   = 5760,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+                          0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6c, 0x00),
+               PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+       },
+       {
+               .freq                   = 5765,
+               RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+                          0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6c, 0x00),
+               PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+       },
+       {
+               .freq                   = 5770,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+                          0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6b, 0x00),
+               PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+       },
+       {
+               .freq                   = 5775,
+               RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+                          0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6b, 0x00),
+               PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+       },
+       {
+               .freq                   = 5780,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+                          0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6b, 0x00),
+               PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+       },
+       {
+               .freq                   = 5785,
+               RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6b, 0x00),
+               PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+       },
+       {
+               .freq                   = 5790,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6b, 0x00),
+               PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+       },
+       {
+               .freq                   = 5795,
+               RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6b, 0x00),
+               PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+       },
+       {
+               .freq                   = 5800,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6b, 0x00),
+               PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+       },
+       {
+               .freq                   = 5805,
+               RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6a, 0x00),
+               PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+       },
+       {
+               .freq                   = 5810,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6a, 0x00),
+               PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+       },
+       {
+               .freq                   = 5815,
+               RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6a, 0x00),
+               PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+       },
+       {
+               .freq                   = 5820,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6a, 0x00),
+               PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+       },
+       {
+               .freq                   = 5825,
+               RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x69, 0x00),
+               PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+       },
+       {
+               .freq                   = 5830,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x69, 0x00),
+               PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+       },
+       {
+               .freq                   = 5840,
+               RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+                          0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x04, 0x00, 0x69, 0x00),
+               PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+       },
+       {
+               .freq                   = 5850,
+               RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+                          0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x04, 0x00, 0x69, 0x00),
+               PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+       },
+       {
+               .freq                   = 5860,
+               RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+                          0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x04, 0x00, 0x69, 0x00),
+               PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+       },
+       {
+               .freq                   = 5870,
+               RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+                          0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x04, 0x00, 0x68, 0x00),
+               PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+       },
+       {
+               .freq                   = 5880,
+               RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+                          0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x04, 0x00, 0x68, 0x00),
+               PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+       },
+       {
+               .freq                   = 5890,
+               RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+                          0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x04, 0x00, 0x68, 0x00),
+               PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+       },
+       {
+               .freq                   = 5900,
+               RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+                          0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x04, 0x00, 0x68, 0x00),
+               PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+       },
+       {
+               .freq                   = 5910,
+               RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+                          0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x04, 0x00, 0x68, 0x00),
+               PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+       },
+       {
+               .freq                   = 2412,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0b, 0x00, 0x0a),
+               PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+       },
+       {
+               .freq                   = 2417,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0b, 0x00, 0x0a),
+               PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+       },
+       {
+               .freq                   = 2422,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0b, 0x00, 0x0a),
+               PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+       },
+       {
+               .freq                   = 2427,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x0a),
+               PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+       },
+       {
+               .freq                   = 2432,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x0a),
+               PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+       },
+       {
+               .freq                   = 2437,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x0a),
+               PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+       },
+       {
+               .freq                   = 2442,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x0a),
+               PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+       },
+       {
+               .freq                   = 2447,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x09),
+               PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+       },
+       {
+               .freq                   = 2452,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x09),
+               PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+       },
+       {
+               .freq                   = 2457,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x09),
+               PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+       },
+       {
+               .freq                   = 2462,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x09, 0x00, 0x09),
+               PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+       },
+       {
+               .freq                   = 2467,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x09, 0x00, 0x09),
+               PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+       },
+       {
+               .freq                   = 2472,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x09, 0x00, 0x09),
+               PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+       },
+       {
+               .freq                   = 2484,
+               RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x09, 0x00, 0x09),
+               PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+       },
+};
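
The table added above maps each supported channel center frequency (the 4.9-5.9 GHz entries plus the 2.4 GHz entries at the end) to a fixed set of radio register values (RADIOREGS3) and PHY register values (PHYREGS). Lookups are done by exact frequency match: the reworked b43_nphy_get_chantabent_rev3() further down in this diff picks which table to use and then scans it. A rough, self-contained sketch of that lookup idea, using a simplified stand-in struct (not the real b43_nphy_channeltab_entry_rev3 layout):

/* Illustrative only: simplified stand-in for a channel-table entry. */
struct chantab_entry_sketch {
	unsigned short freq;		/* channel center frequency in MHz */
	/* radio/PHY register values omitted in this sketch */
};

/* Linear scan by exact frequency match; returns NULL when the requested
 * channel is not in the table, so callers can warn and bail out. */
static const struct chantab_entry_sketch *
chantab_lookup_sketch(const struct chantab_entry_sketch *tab,
		      unsigned int length, unsigned short freq)
{
	unsigned int i;

	for (i = 0; i < length; i++) {
		if (tab[i].freq == freq)
			return &tab[i];
	}
	return NULL;
}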
+
+static const struct b2056_inittabs_pts
+*b43_nphy_get_inittabs_rev3(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+
+       switch (dev->phy.rev) {
+       case 3:
+               return &b2056_inittab_phy_rev3;
+       case 4:
+               return &b2056_inittab_phy_rev4;
+       default:
+               switch (phy->radio_rev) {
+               case 5:
+                       return &b2056_inittab_radio_rev5;
+               case 6:
+                       return &b2056_inittab_radio_rev6;
+               case 7:
+               case 9:
+                       return &b2056_inittab_radio_rev7_9;
+               case 8:
+                       return &b2056_inittab_radio_rev8;
+               case 11:
+                       return &b2056_inittab_radio_rev11;
+               }
+       }
+
+       return NULL;
+}
+
 static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
                                 bool ignore_uploadflag, u16 routing,
                                 const struct b2056_inittab_entry *e,
@@ -9037,11 +10230,11 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
 {
        const struct b2056_inittabs_pts *pts;
 
-       if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+       pts = b43_nphy_get_inittabs_rev3(dev);
+       if (!pts) {
                B43_WARN_ON(1);
                return;
        }
-       pts = &b2056_inittabs[dev->phy.rev];
 
        b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
                                B2056_SYN, pts->syn, pts->syn_length);
@@ -9060,11 +10253,12 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
        const struct b2056_inittabs_pts *pts;
        const struct b2056_inittab_entry *e;
 
-       if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+       pts = b43_nphy_get_inittabs_rev3(dev);
+       if (!pts) {
                B43_WARN_ON(1);
                return;
        }
-       pts = &b2056_inittabs[dev->phy.rev];
+
        e = &pts->syn[B2056_SYN_PLL_CP2];
 
        b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
@@ -9073,38 +10267,46 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
 const struct b43_nphy_channeltab_entry_rev3 *
 b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
 {
+       struct b43_phy *phy = &dev->phy;
        const struct b43_nphy_channeltab_entry_rev3 *e;
        unsigned int length, i;
 
-       switch (dev->phy.rev) {
+       switch (phy->rev) {
        case 3:
-               e = b43_nphy_channeltab_rev3;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
+               e = b43_nphy_channeltab_phy_rev3;
+               length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev3);
                break;
        case 4:
-               e = b43_nphy_channeltab_rev4;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
-               break;
-       case 5:
-               e = b43_nphy_channeltab_rev5;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
-               break;
-       case 6:
-               e = b43_nphy_channeltab_rev6;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
-               break;
-       case 7:
-       case 9:
-               e = b43_nphy_channeltab_rev7_9;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
-               break;
-       case 8:
-               e = b43_nphy_channeltab_rev8;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
+               e = b43_nphy_channeltab_phy_rev4;
+               length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev4);
                break;
        default:
-               B43_WARN_ON(1);
-               return NULL;
+               switch (phy->radio_rev) {
+               case 5:
+                       e = b43_nphy_channeltab_radio_rev5;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev5);
+                       break;
+               case 6:
+                       e = b43_nphy_channeltab_radio_rev6;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev6);
+                       break;
+               case 7:
+               case 9:
+                       e = b43_nphy_channeltab_radio_rev7_9;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev7_9);
+                       break;
+               case 8:
+                       e = b43_nphy_channeltab_radio_rev8;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev8);
+                       break;
+               case 11:
+                       e = b43_nphy_channeltab_radio_rev11;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev11);
+                       break;
+               default:
+                       B43_WARN_ON(1);
+                       return NULL;
+               }
        }
 
        for (i = 0; i < length; i++, e++) {
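The rev3+ channel-table lookup above now dispatches on the radio revision once the PHY revision exceeds 4; a minimal, hypothetical caller sketch (the bail-out behaviour is an assumption, not taken from this patch):

        const struct b43_nphy_channeltab_entry_rev3 *e;

        /* 2462 MHz is 2.4 GHz channel 11; a NULL return means this
         * PHY/radio revision has no entry for the requested frequency. */
        e = b43_nphy_get_chantabent_rev3(dev, 2462);
        if (!e)
                return;         /* hypothetical caller simply gives up */
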
index 94c755fdda14749eaa36716032283b8cb9fea7d2..50d03ffeac8c57337b090f467899648d5be2f607 100644 (file)
@@ -1627,74 +1627,7 @@ static const u32 b43_ntab_tdtrn_r3[] = {
        0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
 };
 
-static const u32 b43_ntab_noisevar0_r3[] = {
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-};
-
-static const u32 b43_ntab_noisevar1_r3[] = {
+static const u32 b43_ntab_noisevar_r3[] = {
        0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
        0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
        0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
@@ -3114,8 +3047,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
        ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
        ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
        ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
-       ntab_upload(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
-       ntab_upload(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
+       ntab_upload(dev, B43_NTAB_NOISEVAR_R3, b43_ntab_noisevar_r3);
        ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
        ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
        ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
index 9ff33adcff891cad9551bafab507924a59a54afb..3a58aee4c4cf714aa72bc22a8c85670b135eb934 100644 (file)
@@ -143,8 +143,7 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
 #define B43_NTAB_TMAP_R3               B43_NTAB32(12,   0) /* TM AP  */
 #define B43_NTAB_INTLEVEL_R3           B43_NTAB32(13,   0) /* INT LV  */
 #define B43_NTAB_TDTRN_R3              B43_NTAB32(14,   0) /* TD TRN  */
-#define B43_NTAB_NOISEVAR0_R3          B43_NTAB32(16,   0) /* noise variance 0  */
-#define B43_NTAB_NOISEVAR1_R3          B43_NTAB32(16, 128) /* noise variance 1  */
+#define B43_NTAB_NOISEVAR_R3           B43_NTAB32(16,   0) /* noise variance */
 #define B43_NTAB_MCS_R3                        B43_NTAB16(18,   0) /* MCS  */
 #define B43_NTAB_TDI20A0_R3            B43_NTAB32(19, 128) /* TDI 20/0  */
 #define B43_NTAB_TDI20A1_R3            B43_NTAB32(19, 256) /* TDI 20/1  */
index 9b1a038be08b860da91461b82c6bd84dd5000531..c218c08fb2f5b15ad3233b475b5d275d2f4ba0df 100644 (file)
@@ -441,7 +441,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
 
 static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */
 {
-       b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480);
+       b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0x7654);
 }
 
 static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
index 31adb8cf0291fb0bbebf1a6896c8f4ff4d7288b9..4f38f19b8e3d373847778766ec8ad820e5d1d2bd 100644 (file)
@@ -408,7 +408,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
                mac_ctl |= B43_TXH_MAC_HWSEQ;
        if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
                mac_ctl |= B43_TXH_MAC_STMSDU;
-       if (phy->type == B43_PHYTYPE_A)
+       if (!phy->gmode)
                mac_ctl |= B43_TXH_MAC_5GHZ;
 
        /* Overwrite rates[0].count to make the retry calculation
index df130ef53d1c4054f59f2fe10d197a80673000c9..c7c9f15c0fe08170ee2f953e9690a13e605a3291 100644 (file)
@@ -303,10 +303,10 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
 
        ci = core->chip;
 
-       /* if core is already in reset, just return */
+       /* if core is already in reset, skip reset */
        regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
        if ((regdata & BCMA_RESET_CTL_RESET) != 0)
-               return;
+               goto in_reset_configure;
 
        /* configure reset */
        ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
@@ -322,6 +322,7 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
        SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
                 BCMA_RESET_CTL_RESET, 300);
 
+in_reset_configure:
        /* in-reset configure */
        ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
                         reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
index 939d6b13292248563f92bc4287632341c4997072..16f9ab2568a8089c1c38eff8f8998e8fc29ee330 100644 (file)
@@ -186,7 +186,7 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state);
 u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
                      bool success);
 
 /* Sets dongle media info (drv_version, mac address). */
index c4535616064e8389125b238a50fcc74d3c6d47da..c5dcd82e884bf74a8bebb862519219870cc0f38d 100644 (file)
@@ -99,6 +99,7 @@ struct brcmf_bus {
        unsigned long tx_realloc;
        u32 chip;
        u32 chiprev;
+       bool always_use_fws_queue;
 
        struct brcmf_bus_ops *ops;
 };
index 6a8983a1fb9c3451908c1cf28426d78e6600d091..ed3e32ce8c23ee8fd032ad35520c7fee1e3cb18a 100644 (file)
@@ -32,6 +32,9 @@
 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME        40
 #define BRCMF_DEFAULT_PACKET_FILTER    "100 0 0 0 0x01 0x00"
 
+/* boost value for RSSI_DELTA in preferred join selection */
+#define BRCMF_JOIN_PREF_RSSI_BOOST     8
+
 
 bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
                      struct sk_buff *pkt, int prec)
@@ -246,6 +249,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 {
        s8 eventmask[BRCMF_EVENTING_MASK_LEN];
        u8 buf[BRCMF_DCMD_SMLEN];
+       struct brcmf_join_pref_params join_pref_params[2];
        char *ptr;
        s32 err;
 
@@ -298,6 +302,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
                goto done;
        }
 
+       /* Setup join_pref to select target by RSSI (with boost on 5GHz) */
+       join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+       join_pref_params[0].len = 2;
+       join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+       join_pref_params[0].band = WLC_BAND_5G;
+       join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+       join_pref_params[1].len = 2;
+       join_pref_params[1].rssi_gain = 0;
+       join_pref_params[1].band = 0;
+       err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+                                      sizeof(join_pref_params));
+       if (err)
+               brcmf_err("Set join_pref error (%d)\n", err);
+
        /* Setup event_msgs, enable E_IF */
        err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
                                       BRCMF_EVENTING_MASK_LEN);
index 7d28cd3850925a7af0a4b9f34c2888afa83e0d03..4cacc3d85212dcc58c70dc546c910c11a48dc3da 100644 (file)
@@ -190,7 +190,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
        int ret;
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_pub *drvr = ifp->drvr;
-       struct ethhdr *eh;
+       struct ethhdr *eh = (struct ethhdr *)(skb->data);
 
        brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
 
@@ -236,6 +236,9 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
                goto done;
        }
 
+       if (eh->h_proto == htons(ETH_P_PAE))
+               atomic_inc(&ifp->pend_8021x_cnt);
+
        ret = brcmf_fws_process_skb(ifp, skb);
 
 done:
@@ -538,31 +541,26 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
                brcmf_netif_rx(ifp, skb);
 }
 
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
                      bool success)
 {
        struct brcmf_if *ifp;
        struct ethhdr *eh;
-       u8 ifidx;
        u16 type;
-       int res;
-
-       res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
 
        ifp = drvr->iflist[ifidx];
        if (!ifp)
                goto done;
 
-       if (res == 0) {
-               eh = (struct ethhdr *)(txp->data);
-               type = ntohs(eh->h_proto);
+       eh = (struct ethhdr *)(txp->data);
+       type = ntohs(eh->h_proto);
 
-               if (type == ETH_P_PAE) {
-                       atomic_dec(&ifp->pend_8021x_cnt);
-                       if (waitqueue_active(&ifp->pend_8021x_wait))
-                               wake_up(&ifp->pend_8021x_wait);
-               }
+       if (type == ETH_P_PAE) {
+               atomic_dec(&ifp->pend_8021x_cnt);
+               if (waitqueue_active(&ifp->pend_8021x_wait))
+                       wake_up(&ifp->pend_8021x_wait);
        }
+
        if (!success)
                ifp->stats.tx_errors++;
 done:
@@ -573,13 +571,17 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
 {
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
+       u8 ifidx;
 
        /* await txstatus signal for firmware if active */
        if (brcmf_fws_fc_active(drvr->fws)) {
                if (!success)
                        brcmf_fws_bustxfail(drvr->fws, txp);
        } else {
-               brcmf_txfinalize(drvr, txp, success);
+               if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
+                       brcmu_pkt_buf_free_skb(txp);
+               else
+                       brcmf_txfinalize(drvr, txp, ifidx, success);
        }
 }
 
index 614e4888504fae4f517389bfb49c4d08a5519924..2bc68a2137fccb535f5a34fb34e6394db85ec645 100644 (file)
 #define BRCMF_OBSS_COEX_OFF            0
 #define BRCMF_OBSS_COEX_ON             1
 
+/* join preference types for join_pref iovar */
+enum brcmf_join_pref_types {
+       BRCMF_JOIN_PREF_RSSI = 1,
+       BRCMF_JOIN_PREF_WPA,
+       BRCMF_JOIN_PREF_BAND,
+       BRCMF_JOIN_PREF_RSSI_DELTA,
+};
+
 enum brcmf_fil_p2p_if_types {
        BRCMF_FIL_P2P_IF_CLIENT,
        BRCMF_FIL_P2P_IF_GO,
@@ -282,6 +290,22 @@ struct brcmf_assoc_params_le {
        __le16 chanspec_list[1];
 };
 
+/**
+ * struct brcmf_join_pref_params - parameters for preferred join selection.
+ *
+ * @type: preference type (see enum brcmf_join_pref_types).
+ * @len: length of bytes following (currently always 2).
+ * @rssi_gain: signal gain for selection (only when @type is RSSI_DELTA).
+ * @band: band to which selection preference applies.
+ *     This is used if @type is BAND or RSSI_DELTA.
+ */
+struct brcmf_join_pref_params {
+       u8 type;
+       u8 len;
+       u8 rssi_gain;
+       u8 band;
+};
+
 /* used for join with or without a specific bssid and channel list */
 struct brcmf_join_params {
        struct brcmf_ssid_le ssid_le;
index c3e7d76dbf35f508e33a1e1d88164ede54b1967a..699908de314a94ff3f382f62536756b3f4a5eed2 100644 (file)
@@ -476,6 +476,7 @@ struct brcmf_fws_info {
        bool bus_flow_blocked;
        bool creditmap_received;
        u8 mode;
+       bool avoid_queueing;
 };
 
 /*
@@ -1369,13 +1370,12 @@ done:
 }
 
 static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
-                                        struct sk_buff *skb, u32 genbit,
-                                        u16 seq)
+                                        struct sk_buff *skb, u8 ifidx,
+                                        u32 genbit, u16 seq)
 {
        struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
        u32 hslot;
        int ret;
-       u8 ifidx;
 
        hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
 
@@ -1389,29 +1389,21 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
 
        entry->generation = genbit;
 
-       ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
-       if (ret == 0) {
-               brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
-               brcmf_skbcb(skb)->htod_seq = seq;
-               if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
-                       brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
-                       brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
-               } else {
-                       brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
-               }
-               ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
-                                   skb);
+       brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
+       brcmf_skbcb(skb)->htod_seq = seq;
+       if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
+               brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
+               brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
+       } else {
+               brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
        }
+       ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
 
        if (ret != 0) {
-               /* suppress q is full or hdrpull failed, drop this packet */
-               brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
-                                       true);
+               /* suppress q is full drop this packet */
+               brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
        } else {
-               /*
-                * Mark suppressed to avoid a double free during
-                * wlfc cleanup
-                */
+               /* Mark suppressed to avoid a double free during wlfc cleanup */
                brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
        }
 
@@ -1428,6 +1420,7 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
        struct sk_buff *skb;
        struct brcmf_skbuff_cb *skcb;
        struct brcmf_fws_mac_descriptor *entry = NULL;
+       u8 ifidx;
 
        brcmf_dbg(DATA, "flags %d\n", flags);
 
@@ -1476,12 +1469,15 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
        }
        brcmf_fws_macdesc_return_req_credit(skb);
 
+       if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) {
+               brcmu_pkt_buf_free_skb(skb);
+               return -EINVAL;
+       }
        if (!remove_from_hanger)
-               ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit,
-                                                   seq);
-
+               ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx,
+                                                   genbit, seq);
        if (remove_from_hanger || ret)
-               brcmf_txfinalize(fws->drvr, skb, true);
+               brcmf_txfinalize(fws->drvr, skb, ifidx, true);
 
        return 0;
 }
@@ -1868,7 +1864,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
        struct ethhdr *eh = (struct ethhdr *)(skb->data);
        int fifo = BRCMF_FWS_FIFO_BCMC;
        bool multicast = is_multicast_ether_addr(eh->h_dest);
-       bool pae = eh->h_proto == htons(ETH_P_PAE);
+       int rc = 0;
 
        brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
        /* determine the priority */
@@ -1876,8 +1872,13 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
                skb->priority = cfg80211_classify8021d(skb, NULL);
 
        drvr->tx_multicast += !!multicast;
-       if (pae)
-               atomic_inc(&ifp->pend_8021x_cnt);
+
+       if (fws->avoid_queueing) {
+               rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
+               if (rc < 0)
+                       brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
+               return rc;
+       }
 
        /* set control buffer information */
        skcb->if_flags = 0;
@@ -1899,15 +1900,12 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
                brcmf_fws_schedule_deq(fws);
        } else {
                brcmf_err("drop skb: no hanger slot\n");
-               if (pae) {
-                       atomic_dec(&ifp->pend_8021x_cnt);
-                       if (waitqueue_active(&ifp->pend_8021x_wait))
-                               wake_up(&ifp->pend_8021x_wait);
-               }
-               brcmu_pkt_buf_free_skb(skb);
+               brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
+               rc = -ENOMEM;
        }
        brcmf_fws_unlock(fws);
-       return 0;
+
+       return rc;
 }
 
 void brcmf_fws_reset_interface(struct brcmf_if *ifp)
@@ -1982,7 +1980,8 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
                                ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
                                brcmf_fws_lock(fws);
                                if (ret < 0)
-                                       brcmf_txfinalize(drvr, skb, false);
+                                       brcmf_txfinalize(drvr, skb, ifidx,
+                                                        false);
                                if (fws->bus_flow_blocked)
                                        break;
                        }
@@ -2039,6 +2038,13 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
        fws->drvr = drvr;
        fws->fcmode = fcmode;
 
+       if (!drvr->bus_if->always_use_fws_queue &&
+           (fcmode == BRCMF_FWS_FCMODE_NONE)) {
+               fws->avoid_queueing = true;
+               brcmf_dbg(INFO, "FWS queueing will be avoided\n");
+               return 0;
+       }
+
        fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
        if (fws->fws_wq == NULL) {
                brcmf_err("workqueue creation failed\n");
index d5ef86db631b96bb7f6b8b2b5ad6484c4486c2c8..5c450d11dbc917fb3bf0ed806d0067647d15a8e6 100644 (file)
 #include <linux/slab.h>
 #include <linux/firmware.h>
 
+#include "dhd_dbg.h"
 #include "nvram.h"
 
-/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a file
+enum nvram_parser_state {
+       IDLE,
+       KEY,
+       VALUE,
+       COMMENT,
+       END
+};
+
+/**
+ * struct nvram_parser - internal info for parser.
+ *
+ * @state: current parser state.
+ * @fwnv: input buffer being parsed.
+ * @nvram: output buffer with parse result.
+ * @nvram_len: length of parse result.
+ * @line: current line.
+ * @column: current column in line.
+ * @pos: byte offset in input buffer.
+ * @entry: start position of key,value entry.
+ */
+struct nvram_parser {
+       enum nvram_parser_state state;
+       const struct firmware *fwnv;
+       u8 *nvram;
+       u32 nvram_len;
+       u32 line;
+       u32 column;
+       u32 pos;
+       u32 entry;
+};
+
+static bool is_nvram_char(char c)
+{
+       /* comment marker excluded */
+       if (c == '#')
+               return false;
+
+       /* key and value may have any other readable character */
+       return (c > 0x20 && c < 0x7f);
+}
+
+static bool is_whitespace(char c)
+{
+       return (c == ' ' || c == '\r' || c == '\n' || c == '\t');
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
+{
+       char c;
+
+       c = nvp->fwnv->data[nvp->pos];
+       if (c == '\n')
+               return COMMENT;
+       if (is_whitespace(c))
+               goto proceed;
+       if (c == '#')
+               return COMMENT;
+       if (is_nvram_char(c)) {
+               nvp->entry = nvp->pos;
+               return KEY;
+       }
+       brcmf_dbg(INFO, "warning: ln=%d:col=%d: ignoring invalid character\n",
+                 nvp->line, nvp->column);
+proceed:
+       nvp->column++;
+       nvp->pos++;
+       return IDLE;
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
+{
+       enum nvram_parser_state st = nvp->state;
+       char c;
+
+       c = nvp->fwnv->data[nvp->pos];
+       if (c == '=') {
+               st = VALUE;
+       } else if (!is_nvram_char(c)) {
+               brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
+                         nvp->line, nvp->column);
+               return COMMENT;
+       }
+
+       nvp->column++;
+       nvp->pos++;
+       return st;
+}
+
+static enum nvram_parser_state
+brcmf_nvram_handle_value(struct nvram_parser *nvp)
+{
+       char c;
+       char *skv;
+       char *ekv;
+       u32 cplen;
+
+       c = nvp->fwnv->data[nvp->pos];
+       if (!is_nvram_char(c)) {
+               /* key,value pair complete */
+               ekv = (char *)&nvp->fwnv->data[nvp->pos];
+               skv = (char *)&nvp->fwnv->data[nvp->entry];
+               cplen = ekv - skv;
+               /* copy to output buffer */
+               memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
+               nvp->nvram_len += cplen;
+               nvp->nvram[nvp->nvram_len] = '\0';
+               nvp->nvram_len++;
+               return IDLE;
+       }
+       nvp->pos++;
+       nvp->column++;
+       return VALUE;
+}
+
+static enum nvram_parser_state
+brcmf_nvram_handle_comment(struct nvram_parser *nvp)
+{
+       char *eol, *sol;
+
+       sol = (char *)&nvp->fwnv->data[nvp->pos];
+       eol = strchr(sol, '\n');
+       if (eol == NULL)
+               return END;
+
+       /* consume the rest of the line, moving to the next one */
+       nvp->line++;
+       nvp->column = 1;
+       nvp->pos += (eol - sol) + 1;
+       return IDLE;
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_end(struct nvram_parser *nvp)
+{
+       /* final state */
+       return END;
+}
+
+static enum nvram_parser_state
+(*nv_parser_states[])(struct nvram_parser *nvp) = {
+       brcmf_nvram_handle_idle,
+       brcmf_nvram_handle_key,
+       brcmf_nvram_handle_value,
+       brcmf_nvram_handle_comment,
+       brcmf_nvram_handle_end
+};
+
+static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
+                                  const struct firmware *nv)
+{
+       memset(nvp, 0, sizeof(*nvp));
+       nvp->fwnv = nv;
+       /* Alloc for extra 0 byte + roundup by 4 + length field */
+       nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
+       if (!nvp->nvram)
+               return -ENOMEM;
+
+       nvp->line = 1;
+       nvp->column = 1;
+       return 0;
+}
+
+/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a file
  * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
  * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
  * End of buffer is completed with token identifying length of buffer.
  */
 void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
 {
-       u8 *nvram;
-       u32 i;
-       u32 len;
-       u32 column;
-       u8 val;
-       bool comment;
+       struct nvram_parser nvp;
+       u32 pad;
        u32 token;
        __le32 token_le;
 
-       /* Alloc for extra 0 byte + roundup by 4 + length field */
-       nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
-       if (!nvram)
+       if (brcmf_init_nvram_parser(&nvp, nv) < 0)
                return NULL;
 
-       len = 0;
-       column = 0;
-       comment = false;
-       for (i = 0; i < nv->size; i++) {
-               val = nv->data[i];
-               if (val == 0)
+       while (nvp.pos < nv->size) {
+               nvp.state = nv_parser_states[nvp.state](&nvp);
+               if (nvp.state == END)
                        break;
-               if (val == '\r')
-                       continue;
-               if (comment && (val != '\n'))
-                       continue;
-               comment = false;
-               if (val == '#') {
-                       comment = true;
-                       continue;
-               }
-               if (val == '\n') {
-                       if (column == 0)
-                               continue;
-                       nvram[len] = 0;
-                       len++;
-                       column = 0;
-                       continue;
-               }
-               nvram[len] = val;
-               len++;
-               column++;
        }
-       column = len;
-       *new_length = roundup(len + 1, 4);
-       while (column != *new_length) {
-               nvram[column] = 0;
-               column++;
+       pad = nvp.nvram_len;
+       *new_length = roundup(nvp.nvram_len + 1, 4);
+       while (pad != *new_length) {
+               nvp.nvram[pad] = 0;
+               pad++;
        }
 
        token = *new_length / 4;
        token = (~token << 16) | (token & 0x0000FFFF);
        token_le = cpu_to_le32(token);
 
-       memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
+       memcpy(&nvp.nvram[*new_length], &token_le, sizeof(token_le));
        *new_length += sizeof(token_le);
 
-       return nvram;
+       return nvp.nvram;
 }
 
 void brcmf_nvram_free(void *nvram)
@@ -91,4 +224,3 @@ void brcmf_nvram_free(void *nvram)
        kfree(nvram);
 }
 
-
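A usage sketch for the reworked NVRAM entry point (hypothetical caller; the firmware name and device pointer are assumptions): brcmf_nvram_strip() returns a blob with comments and blank lines removed, each key=value pair NUL-terminated, padded to a 4-byte boundary and followed by the length token.

        const struct firmware *nv;
        void *nvram;
        u32 nvram_len;

        if (request_firmware(&nv, "brcm/brcmfmac-sdio.txt", dev))  /* name assumed */
                return -ENOENT;
        nvram = brcmf_nvram_strip(nv, &nvram_len);
        release_firmware(nv);
        if (!nvram)
                return -ENOMEM;
        /* ... download nvram_len bytes to the dongle ... */
        brcmf_nvram_free(nvram);
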
index 24f65cd538595a84e6c66b62b8698bbc97456288..3ce0e7cfd0271513a13fa8897ecb27e42fcbb8d7 100644 (file)
@@ -1254,6 +1254,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
        bus->chip = bus_pub->devid;
        bus->chiprev = bus_pub->chiprev;
        bus->proto_type = BRCMF_PROTO_BCDC;
+       bus->always_use_fws_queue = true;
 
        /* Attach to the common driver interface */
        ret = brcmf_attach(dev);
index afb3d15e38ff0379a99c5e2c534be23c57b94e38..92cb29a2003fe1c7c27d59c99ccc44c6b4eae91d 100644 (file)
@@ -221,9 +221,9 @@ static const struct ieee80211_regdomain brcmf_regdom = {
                 */
                REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
                /* IEEE 802.11a, channel 36..64 */
-               REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+               REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
                /* IEEE 802.11a, channel 100..165 */
-               REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+               REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
 };
 
 static const u32 __wl_cipher_suites[] = {
@@ -341,6 +341,60 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
        return qdbm;
 }
 
+u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
+                       struct cfg80211_chan_def *ch)
+{
+       struct brcmu_chan ch_inf;
+       s32 primary_offset;
+
+       brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
+                 ch->chan->center_freq, ch->center_freq1, ch->width);
+       ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
+       primary_offset = ch->center_freq1 - ch->chan->center_freq;
+       switch (ch->width) {
+       case NL80211_CHAN_WIDTH_20:
+               ch_inf.bw = BRCMU_CHAN_BW_20;
+               WARN_ON(primary_offset != 0);
+               break;
+       case NL80211_CHAN_WIDTH_40:
+               ch_inf.bw = BRCMU_CHAN_BW_40;
+               if (primary_offset < 0)
+                       ch_inf.sb = BRCMU_CHAN_SB_U;
+               else
+                       ch_inf.sb = BRCMU_CHAN_SB_L;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               ch_inf.bw = BRCMU_CHAN_BW_80;
+               if (primary_offset < 0) {
+                       if (primary_offset < -CH_10MHZ_APART)
+                               ch_inf.sb = BRCMU_CHAN_SB_UU;
+                       else
+                               ch_inf.sb = BRCMU_CHAN_SB_UL;
+               } else {
+                       if (primary_offset > CH_10MHZ_APART)
+                               ch_inf.sb = BRCMU_CHAN_SB_LL;
+                       else
+                               ch_inf.sb = BRCMU_CHAN_SB_LU;
+               }
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
+       switch (ch->chan->band) {
+       case IEEE80211_BAND_2GHZ:
+               ch_inf.band = BRCMU_CHAN_BAND_2G;
+               break;
+       case IEEE80211_BAND_5GHZ:
+               ch_inf.band = BRCMU_CHAN_BAND_5G;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
+       d11inf->encchspec(&ch_inf);
+
+       return ch_inf.chspec;
+}
+
 u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
                        struct ieee80211_channel *ch)
 {
@@ -1236,8 +1290,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                                params->chandef.chan->center_freq);
                if (params->channel_fixed) {
                        /* adding chanspec */
-                       chanspec = channel_to_chanspec(&cfg->d11inf,
-                                                      params->chandef.chan);
+                       chanspec = chandef_to_chanspec(&cfg->d11inf,
+                                                      &params->chandef);
                        join_params.params_le.chanspec_list[0] =
                                cpu_to_le16(chanspec);
                        join_params.params_le.chanspec_num = cpu_to_le32(1);
@@ -2182,7 +2236,7 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
 
 static s32
 brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
-                          u8 *mac, struct station_info *sinfo)
+                          const u8 *mac, struct station_info *sinfo)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
@@ -3733,23 +3787,6 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
        return err;
 }
 
-static s32
-brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg,
-                          struct brcmf_if *ifp,
-                          struct ieee80211_channel *channel)
-{
-       u16 chanspec;
-       s32 err;
-
-       brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band,
-                 channel->center_freq);
-
-       chanspec = channel_to_chanspec(&cfg->d11inf, channel);
-       err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
-
-       return err;
-}
-
 static s32
 brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
                        struct cfg80211_ap_settings *settings)
@@ -3765,11 +3802,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
        struct brcmf_join_params join_params;
        enum nl80211_iftype dev_role;
        struct brcmf_fil_bss_enable_le bss_enable;
+       u16 chanspec;
 
-       brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
-                 cfg80211_get_chandef_type(&settings->chandef),
-                 settings->beacon_interval,
-                 settings->dtim_period);
+       brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
+                 settings->chandef.chan->hw_value,
+                 settings->chandef.center_freq1, settings->chandef.width,
+                 settings->beacon_interval, settings->dtim_period);
        brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
                  settings->ssid, settings->ssid_len, settings->auth_type,
                  settings->inactivity_timeout);
@@ -3826,9 +3864,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 
        brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
 
-       err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan);
+       chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
+       err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
        if (err < 0) {
-               brcmf_err("Set Channel failed, %d\n", err);
+               brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err);
                goto exit;
        }
 
@@ -3975,7 +4014,7 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
 
 static int
 brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
-                          u8 *mac)
+                          const u8 *mac)
 {
        struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_scb_val_le scbval;
@@ -4203,7 +4242,7 @@ static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
 }
 
 static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
-                                   struct net_device *ndev, u8 *peer,
+                                   struct net_device *ndev, const u8 *peer,
                                    enum nl80211_tdls_operation oper)
 {
        struct brcmf_if *ifp;
@@ -4364,6 +4403,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
                        WIPHY_FLAG_OFFCHAN_TX |
                        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
                        WIPHY_FLAG_SUPPORTS_TDLS;
+       if (!brcmf_roamoff)
+               wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
        wiphy->mgmt_stypes = brcmf_txrx_stypes;
        wiphy->max_remain_on_channel_duration = 5000;
        brcmf_wiphy_pno_params(wiphy);
@@ -4685,7 +4726,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
        struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
        struct ieee80211_channel *chan;
        s32 err = 0;
-       u16 reason;
 
        if (brcmf_is_apmode(ifp->vif)) {
                err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
@@ -4706,16 +4746,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
                brcmf_dbg(CONN, "Linkdown\n");
                if (!brcmf_is_ibssmode(ifp->vif)) {
                        brcmf_bss_connect_done(cfg, ndev, e, false);
-                       if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
-                                              &ifp->vif->sme_state)) {
-                               reason = 0;
-                               if (((e->event_code == BRCMF_E_DEAUTH_IND) ||
-                                    (e->event_code == BRCMF_E_DISASSOC_IND)) &&
-                                   (e->reason != WLAN_REASON_UNSPECIFIED))
-                                       reason = e->reason;
-                               cfg80211_disconnected(ndev, reason, NULL, 0,
-                                                     GFP_KERNEL);
-                       }
                }
                brcmf_link_down(ifp->vif);
                brcmf_init_prof(ndev_to_prof(ndev));
@@ -4948,7 +4978,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
        if (!err) {
                /* only set 2G bandwidth using bw_cap command */
                band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
                err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
                                               sizeof(band_bwcap));
        } else {
@@ -5215,6 +5245,9 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
                if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
                    ch.bw == BRCMU_CHAN_BW_40)
                        continue;
+               if (!(bw_cap[band] & WLC_BW_80MHZ_BIT) &&
+                   ch.bw == BRCMU_CHAN_BW_80)
+                       continue;
                update = false;
                for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
                        if (band_chan_arr[j].hw_value == ch.chnum) {
@@ -5231,10 +5264,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
                                ieee80211_channel_to_frequency(ch.chnum, band);
                        band_chan_arr[index].hw_value = ch.chnum;
 
-                       if (ch.bw == BRCMU_CHAN_BW_40) {
-                               /* assuming the order is HT20, HT40 Upper,
-                                * HT40 lower from chanspecs
-                                */
+                       /* assuming the chanspecs order is HT20,
+                        * HT40 upper, HT40 lower, and VHT80.
+                        */
+                       if (ch.bw == BRCMU_CHAN_BW_80) {
+                               band_chan_arr[index].flags &=
+                                       ~IEEE80211_CHAN_NO_80MHZ;
+                       } else if (ch.bw == BRCMU_CHAN_BW_40) {
                                ht40_flag = band_chan_arr[index].flags &
                                            IEEE80211_CHAN_NO_HT40;
                                if (ch.sb == BRCMU_CHAN_SB_U) {
@@ -5255,8 +5291,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
                                                    IEEE80211_CHAN_NO_HT40MINUS;
                                }
                        } else {
+                               /* disable other bandwidths for now, as the
+                                * order mentioned above assures they get
+                                * enabled by subsequent chanspecs.
+                                */
                                band_chan_arr[index].flags =
-                                                       IEEE80211_CHAN_NO_HT40;
+                                               IEEE80211_CHAN_NO_HT40 |
+                                               IEEE80211_CHAN_NO_80MHZ;
                                ch.bw = BRCMU_CHAN_BW_20;
                                cfg->d11inf.encchspec(&ch);
                                channel = ch.chspec;
@@ -5323,13 +5364,63 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
        }
 }
 
+static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
+                               u32 bw_cap[2], u32 nchain)
+{
+       band->ht_cap.ht_supported = true;
+       if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
+               band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+               band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+       }
+       band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+       band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+       band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+       band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+       memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
+       band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+}
+
+static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp)
+{
+       u16 mcs_map;
+       int i;
+
+       for (i = 0, mcs_map = 0xFFFF; i < nchain; i++)
+               mcs_map = (mcs_map << 2) | supp;
+
+       return cpu_to_le16(mcs_map);
+}
+
+static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
+                                u32 bw_cap[2], u32 nchain)
+{
+       __le16 mcs_map;
+
+       /* not allowed in 2.4G band */
+       if (band->band == IEEE80211_BAND_2GHZ)
+               return;
+
+       band->vht_cap.vht_supported = true;
+       /* 80MHz is mandatory */
+       band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
+       if (bw_cap[band->band] & WLC_BW_160MHZ_BIT) {
+               band->vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+               band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
+       }
+       /* all support 256-QAM */
+       mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9);
+       band->vht_cap.vht_mcs.rx_mcs_map = mcs_map;
+       band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
+}
+
 static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
 {
        struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
        struct wiphy *wiphy;
        s32 phy_list;
        u32 band_list[3];
-       u32 nmode;
+       u32 nmode = 0;
+       u32 vhtmode = 0;
        u32 bw_cap[2] = { 0, 0 };
        u32 rxchain;
        u32 nchain;
@@ -5360,14 +5451,16 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
        brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
                  band_list[0], band_list[1], band_list[2]);
 
+       (void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
        err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
        if (err) {
                brcmf_err("nmode error (%d)\n", err);
        } else {
                brcmf_get_bwcap(ifp, bw_cap);
        }
-       brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
-                 bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
+       brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
+                 nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
+                 bw_cap[IEEE80211_BAND_5GHZ]);
 
        err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
        if (err) {
@@ -5398,17 +5491,10 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
                else
                        continue;
 
-               if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
-                       band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
-                       band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-               }
-               band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
-               band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
-               band->ht_cap.ht_supported = true;
-               band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-               band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
-               memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
-               band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+               if (nmode)
+                       brcmf_update_ht_cap(band, bw_cap, nchain);
+               if (vhtmode)
+                       brcmf_update_vht_cap(band, bw_cap, nchain);
                bands[band->band] = band;
        }
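Worked example for the brcmf_get_mcs_map() helper added above, assuming nchain = 3 and supp = IEEE80211_VHT_MCS_SUPPORT_0_9 (value 2): each iteration shifts the map left by two bits and ORs in the support value for one more spatial stream, so the untouched high bits keep reading 0x3 ("stream not supported").

        /* 0xFFFF -> 0xFFFE -> 0xFFFA -> 0xFFEA: streams 1-3 advertise
         * MCS 0-9, streams 4-8 remain not supported.
         */
        __le16 map = brcmf_get_mcs_map(3, IEEE80211_VHT_MCS_SUPPORT_0_9);
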
 
index 8c5fa4e581392d73d28ba29b790151be8f37f569..43c71bfaa4744fe4094069b28ff4d9084d6c679f 100644 (file)
@@ -897,7 +897,8 @@ static bool brcms_tx_flush_completed(struct brcms_info *wl)
        return result;
 }
 
-static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                           u32 queues, bool drop)
 {
        struct brcms_info *wl = hw->priv;
        int ret;
index 9417cb5a2553f70fc2d2733f80c65d61477dc185..af8ba64ace39286e3214d69bfc23f776e66b6336 100644 (file)
@@ -4870,14 +4870,11 @@ static void brcms_c_detach_module(struct brcms_c_info *wlc)
 /*
  * low level detach
  */
-static int brcms_b_detach(struct brcms_c_info *wlc)
+static void brcms_b_detach(struct brcms_c_info *wlc)
 {
        uint i;
        struct brcms_hw_band *band;
        struct brcms_hardware *wlc_hw = wlc->hw;
-       int callbacks;
-
-       callbacks = 0;
 
        brcms_b_detach_dmapio(wlc_hw);
 
@@ -4900,9 +4897,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
                ai_detach(wlc_hw->sih);
                wlc_hw->sih = NULL;
        }
-
-       return callbacks;
-
 }
 
 /*
@@ -4917,14 +4911,15 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
  */
 uint brcms_c_detach(struct brcms_c_info *wlc)
 {
-       uint callbacks = 0;
+       uint callbacks;
 
        if (wlc == NULL)
                return 0;
 
-       callbacks += brcms_b_detach(wlc);
+       brcms_b_detach(wlc);
 
        /* delete software timers */
+       callbacks = 0;
        if (!brcms_c_radio_monitor_stop(wlc))
                callbacks++;
 
index 30e54e2c6c9b6f87ccfe618553c94cf1b2cedee4..6cbc33d0fc19b4257ee66a4725f480feda51d78e 100644 (file)
 #include <brcmu_wifi.h>
 #include <brcmu_d11.h>
 
-static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
+static u16 d11n_sb(enum brcmu_chan_sb sb)
 {
-       ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
+       switch (sb) {
+       case BRCMU_CHAN_SB_NONE:
+               return BRCMU_CHSPEC_D11N_SB_N;
+       case BRCMU_CHAN_SB_L:
+               return BRCMU_CHSPEC_D11N_SB_L;
+       case BRCMU_CHAN_SB_U:
+               return BRCMU_CHSPEC_D11N_SB_U;
+       default:
+               WARN_ON(1);
+       }
+       return 0;
+}
 
-       switch (ch->bw) {
+static u16 d11n_bw(enum brcmu_chan_bw bw)
+{
+       switch (bw) {
        case BRCMU_CHAN_BW_20:
-               ch->chspec |= BRCMU_CHSPEC_D11N_BW_20 | BRCMU_CHSPEC_D11N_SB_N;
-               break;
+               return BRCMU_CHSPEC_D11N_BW_20;
        case BRCMU_CHAN_BW_40:
+               return BRCMU_CHSPEC_D11N_BW_40;
        default:
-               WARN_ON_ONCE(1);
-               break;
+               WARN_ON(1);
        }
+       return 0;
+}
 
+static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
+{
+       if (ch->bw == BRCMU_CHAN_BW_20)
+               ch->sb = BRCMU_CHAN_SB_NONE;
+
+       brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
+                       BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
+       brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_SB_MASK,
+                       0, d11n_sb(ch->sb));
+       brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_BW_MASK,
+                       0, d11n_bw(ch->bw));
+
+       ch->chspec &= ~BRCMU_CHSPEC_D11N_BND_MASK;
        if (ch->chnum <= CH_MAX_2G_CHANNEL)
                ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G;
        else
                ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G;
 }
 
-static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
+static u16 d11ac_bw(enum brcmu_chan_bw bw)
 {
-       ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
-
-       switch (ch->bw) {
+       switch (bw) {
        case BRCMU_CHAN_BW_20:
-               ch->chspec |= BRCMU_CHSPEC_D11AC_BW_20;
-               break;
+               return BRCMU_CHSPEC_D11AC_BW_20;
        case BRCMU_CHAN_BW_40:
+               return BRCMU_CHSPEC_D11AC_BW_40;
        case BRCMU_CHAN_BW_80:
-       case BRCMU_CHAN_BW_80P80:
-       case BRCMU_CHAN_BW_160:
+               return BRCMU_CHSPEC_D11AC_BW_80;
        default:
-               WARN_ON_ONCE(1);
-               break;
+               WARN_ON(1);
        }
+       return 0;
+}
 
+static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
+{
+       if (ch->bw == BRCMU_CHAN_BW_20 || ch->sb == BRCMU_CHAN_SB_NONE)
+               ch->sb = BRCMU_CHAN_SB_L;
+
+       brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
+                       BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
+       brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
+                       BRCMU_CHSPEC_D11AC_SB_SHIFT, ch->sb);
+       brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_BW_MASK,
+                       0, d11ac_bw(ch->bw));
+
+       ch->chspec &= ~BRCMU_CHSPEC_D11AC_BND_MASK;
        if (ch->chnum <= CH_MAX_2G_CHANNEL)
                ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
        else
@@ -73,6 +111,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
        switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
        case BRCMU_CHSPEC_D11N_BW_20:
                ch->bw = BRCMU_CHAN_BW_20;
+               ch->sb = BRCMU_CHAN_SB_NONE;
                break;
        case BRCMU_CHSPEC_D11N_BW_40:
                ch->bw = BRCMU_CHAN_BW_40;
@@ -112,6 +151,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
        switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
        case BRCMU_CHSPEC_D11AC_BW_20:
                ch->bw = BRCMU_CHAN_BW_20;
+               ch->sb = BRCMU_CHAN_SB_NONE;
                break;
        case BRCMU_CHSPEC_D11AC_BW_40:
                ch->bw = BRCMU_CHAN_BW_40;
@@ -128,6 +168,25 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
                break;
        case BRCMU_CHSPEC_D11AC_BW_80:
                ch->bw = BRCMU_CHAN_BW_80;
+               ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
+                                        BRCMU_CHSPEC_D11AC_SB_SHIFT);
+               switch (ch->sb) {
+               case BRCMU_CHAN_SB_LL:
+                       ch->chnum -= CH_30MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_LU:
+                       ch->chnum -= CH_10MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_UL:
+                       ch->chnum += CH_10MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_UU:
+                       ch->chnum += CH_30MHZ_APART;
+                       break;
+               default:
+                       WARN_ON_ONCE(1);
+                       break;
+               }
                break;
        case BRCMU_CHSPEC_D11AC_BW_8080:
        case BRCMU_CHSPEC_D11AC_BW_160:
index 8660a2cba09810428f127967c996a02cfbc174a1..f9745ea8b3e042d4cec222518d7b2fb76439eedf 100644 (file)
@@ -108,13 +108,7 @@ enum brcmu_chan_bw {
 };
 
 enum brcmu_chan_sb {
-       BRCMU_CHAN_SB_NONE = 0,
-       BRCMU_CHAN_SB_L,
-       BRCMU_CHAN_SB_U,
-       BRCMU_CHAN_SB_LL,
-       BRCMU_CHAN_SB_LU,
-       BRCMU_CHAN_SB_UL,
-       BRCMU_CHAN_SB_UU,
+       BRCMU_CHAN_SB_NONE = -1,
        BRCMU_CHAN_SB_LLL,
        BRCMU_CHAN_SB_LLU,
        BRCMU_CHAN_SB_LUL,
@@ -123,6 +117,12 @@ enum brcmu_chan_sb {
        BRCMU_CHAN_SB_ULU,
        BRCMU_CHAN_SB_UUL,
        BRCMU_CHAN_SB_UUU,
+       BRCMU_CHAN_SB_L = BRCMU_CHAN_SB_LLL,
+       BRCMU_CHAN_SB_U = BRCMU_CHAN_SB_LLU,
+       BRCMU_CHAN_SB_LL = BRCMU_CHAN_SB_LLL,
+       BRCMU_CHAN_SB_LU = BRCMU_CHAN_SB_LLU,
+       BRCMU_CHAN_SB_UL = BRCMU_CHAN_SB_LUL,
+       BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
 };
 
 struct brcmu_chan {
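A sketch of encoding an 802.11ac 80 MHz chanspec with the reworked sideband handling (the values are illustrative assumptions: center channel 42 with the control channel in the lowest 20 MHz sub-band, hence BRCMU_CHAN_SB_LL; d11inf is a struct brcmu_d11inf pointer initialised elsewhere). This mirrors what chandef_to_chanspec() feeds to the "chanspec" iovar.

        struct brcmu_chan ch = {
                .chnum = 42,                    /* 80 MHz center channel */
                .band  = BRCMU_CHAN_BAND_5G,
                .bw    = BRCMU_CHAN_BW_80,
                .sb    = BRCMU_CHAN_SB_LL,      /* control = lowest 20 MHz */
        };

        d11inf->encchspec(&ch);                 /* d11ac encoder on 802.11ac parts */
        /* ch.chspec now carries the channel, band, bandwidth and sideband bits */
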
index 74419d4bd123772f94e222326c61ee4e93f0f9d3..76b5d3a8629481df004c333a7bcd1990bec8e40c 100644 (file)
@@ -29,6 +29,7 @@
 #define CH_UPPER_SB                    0x01
 #define CH_LOWER_SB                    0x02
 #define CH_EWA_VALID                   0x04
+#define CH_30MHZ_APART                 6
 #define CH_20MHZ_APART                 4
 #define CH_10MHZ_APART                 2
 #define CH_5MHZ_APART                  1 /* 2G band channels are 5 Mhz apart */
index 103f7bce893208c30eb692cca9aa9adf51b8d8b9..cd0cad7f775993661af9e8f4577c89976b15b811 100644 (file)
@@ -936,7 +936,8 @@ static int __cw1200_flush(struct cw1200_common *priv, bool drop)
        return ret;
 }
 
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 u32 queues, bool drop)
 {
        struct cw1200_common *priv = hw->priv;
 
index 35babb62cc6a8b5a952f0a989c15ad4edf802f17..b7e386b7662b668b8299a9ab52f3c22f2633dfe5 100644 (file)
@@ -40,7 +40,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
 
 int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
 
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 u32 queues, bool drop);
 
 u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
                             struct netdev_hw_addr_list *mc_list);
index 67db34e56d7eb0765c5e75025e02470cae01da64..52919ad4272622aeb92d8b9d3d74daf1715a3d40 100644 (file)
@@ -882,7 +882,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
        dev->mtu = local->mtu;
 
 
-       SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops);
+       dev->ethtool_ops = &prism2_ethtool_ops;
 
 }
 
index d37a6fd90d400a6dfd90d20cdf02d73e7e536d42..b598e2803500ec7b109111f5118dde1414547a22 100644 (file)
@@ -573,7 +573,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
                rx_status.flag |= RX_FLAG_SHORTPRE;
 
        if ((unlikely(rx_stats->phy_count > 20))) {
-               D_DROP("dsp size out of range [0,20]: %d/n",
+               D_DROP("dsp size out of range [0,20]: %d\n",
                       rx_stats->phy_count);
                return;
        }
index 888ad5c74639e351a3727c8b934a68f7969849e5..c159c05db6ef212b8b684c5726e61a8a97c62c39 100644 (file)
@@ -670,7 +670,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
        }
 
        if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-               D_DROP("dsp size out of range [0,20]: %d/n",
+               D_DROP("dsp size out of range [0,20]: %d\n",
                       phy_res->cfg_phy_cnt);
                return;
        }
index 4f42174d999412102e273744fc39ff692b9a9234..ecc674627e6e10b30a5a7b11ab3150c1ad42b37e 100644 (file)
@@ -4755,7 +4755,8 @@ out:
 }
 EXPORT_SYMBOL(il_mac_change_interface);
 
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 u32 queues, bool drop)
 {
        struct il_priv *il = hw->priv;
        unsigned long timeout = jiffies + msecs_to_jiffies(500);
index dfb13c70efe83ea8415f93ef2dad9c97a0cb6891..ea5c0f863c4ee35b2cf6738569a5cb3253553c12 100644 (file)
@@ -1723,7 +1723,8 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif);
 int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                            enum nl80211_iftype newtype, bool newp2p);
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 u32 queues, bool drop);
 int il_alloc_txq_mem(struct il_priv *il);
 void il_free_txq_mem(struct il_priv *il);
 
index 74b3b4de7bb7de57ef42fabfe36866eef3b05287..7fd50428b93494416db2d4c46a3862c18c5bc2e1 100644 (file)
@@ -2,10 +2,6 @@ config IWLWIFI
        tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
        depends on PCI && MAC80211 && HAS_IOMEM
        select FW_LOADER
-       select NEW_LEDS
-       select LEDS_CLASS
-       select LEDS_TRIGGERS
-       select MAC80211_LEDS
        ---help---
          Select to build the driver supporting the:
 
@@ -43,6 +39,14 @@ config IWLWIFI
          say M here and read <file:Documentation/kbuild/modules.txt>.  The
          module will be called iwlwifi.
 
+config IWLWIFI_LEDS
+       bool
+       depends on IWLWIFI
+       depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+       select LEDS_TRIGGERS
+       select MAC80211_LEDS
+       default y
+
 config IWLDVM
        tristate "Intel Wireless WiFi DVM Firmware support"
        depends on IWLWIFI
@@ -124,7 +128,6 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
          Enable use of experimental ucode for testing and debugging.
 
 config IWLWIFI_DEVICE_TRACING
-
        bool "iwlwifi device access tracing"
        depends on IWLWIFI
        depends on EVENT_TRACING
index dce7ab2e0c4bfdda46c7ed5f7b1429c7633e7ce8..4d19685f31c3ad97fc48ad4a88ffb6b491abdf2c 100644 (file)
@@ -4,9 +4,10 @@ iwldvm-objs            += main.o rs.o mac80211.o ucode.o tx.o
 iwldvm-objs            += lib.o calib.o tt.o sta.o rx.o
 
 iwldvm-objs            += power.o
-iwldvm-objs            += scan.o led.o
+iwldvm-objs            += scan.o
 iwldvm-objs            += rxon.o devices.o
 
+iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
 iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
index be1086c87157c158834d622546e8bfc2b25b7dff..20e6aa9107009ccc33246190534cbc7674c54d03 100644 (file)
@@ -94,7 +94,6 @@ int iwl_send_calib_results(struct iwl_priv *priv)
 {
        struct iwl_host_cmd hcmd = {
                .id = REPLY_PHY_CALIBRATION_CMD,
-               .flags = CMD_SYNC,
        };
        struct iwl_calib_result *res;
 
index d2fe2596d54ec4525a7661a9b381f3e5352bae61..0ffb6ff1a255f8ac609ba3799c02489ab853eea1 100644 (file)
@@ -1481,7 +1481,7 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
 
        /* make request to uCode to retrieve statistics information */
        mutex_lock(&priv->mutex);
-       ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
+       ret = iwl_send_statistics_request(priv, 0, false);
        mutex_unlock(&priv->mutex);
 
        if (ret)
@@ -1868,7 +1868,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
 
        /* make request to uCode to retrieve statistics information */
        mutex_lock(&priv->mutex);
-       iwl_send_statistics_request(priv, CMD_SYNC, true);
+       iwl_send_statistics_request(priv, 0, true);
        mutex_unlock(&priv->mutex);
 
        return count;
@@ -2188,7 +2188,6 @@ static int iwl_cmd_echo_test(struct iwl_priv *priv)
        struct iwl_host_cmd cmd = {
                .id = REPLY_ECHO,
                .len = { 0 },
-               .flags = CMD_SYNC,
        };
 
        ret = iwl_dvm_send_cmd(priv, &cmd);
@@ -2320,7 +2319,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
        mutex_lock(&priv->mutex);
 
        /* take the return value to make compiler happy - it will fail anyway */
-       ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, CMD_SYNC, 0, NULL);
+       ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, 0, 0, NULL);
 
        mutex_unlock(&priv->mutex);
 
index 3441f70d0ff911594dfd4066e782f0ca042e02d6..a6f22c32a27994000f578c2baff08a2f62042aa0 100644 (file)
@@ -888,9 +888,11 @@ struct iwl_priv {
 
        struct iwl_event_log event_log;
 
+#ifdef CONFIG_IWLWIFI_LEDS
        struct led_classdev led;
        unsigned long blink_on, blink_off;
        bool led_registered;
+#endif
 
        /* WoWLAN GTK rekey data */
        u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
index 758c54eeb206718f8077aa2686304ea1a1e4c9bb..34b41e5f7cfccc34af9fdf63d405222138c18b37 100644 (file)
@@ -417,7 +417,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
        struct iwl_host_cmd hcmd = {
                .id = REPLY_CHANNEL_SWITCH,
                .len = { sizeof(cmd), },
-               .flags = CMD_SYNC,
                .data = { &cmd, },
        };
 
@@ -579,7 +578,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
        struct iwl_host_cmd hcmd = {
                .id = REPLY_CHANNEL_SWITCH,
                .len = { sizeof(*cmd), },
-               .flags = CMD_SYNC,
                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
        };
        int err;
index 6a0817d9c4fa05f15e1ceffd4272925451994f80..1c6b2252d0f24cb8ee2b94d0f62040361304b58e 100644 (file)
@@ -36,8 +36,20 @@ struct iwl_priv;
 #define IWL_LED_ACTIVITY       (0<<1)
 #define IWL_LED_LINK           (1<<1)
 
+#ifdef CONFIG_IWLWIFI_LEDS
 void iwlagn_led_enable(struct iwl_priv *priv);
 void iwl_leds_init(struct iwl_priv *priv);
 void iwl_leds_exit(struct iwl_priv *priv);
+#else
+static inline void iwlagn_led_enable(struct iwl_priv *priv)
+{
+}
+static inline void iwl_leds_init(struct iwl_priv *priv)
+{
+}
+static inline void iwl_leds_exit(struct iwl_priv *priv)
+{
+}
+#endif
 
 #endif /* __iwl_leds_h__ */
index 576f7ee38ca5894150cba0e54eff4bcc5ff2f079..2191621d69c1618375482d3e3a1dc7d8f2b7f192 100644 (file)
@@ -81,7 +81,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
        else
                tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
 
-       return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
+       return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0,
                        sizeof(tx_power_cmd), &tx_power_cmd);
 }
 
@@ -141,7 +141,6 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
        struct iwl_host_cmd cmd = {
                .id = REPLY_TXFIFO_FLUSH,
                .len = { sizeof(struct iwl_txfifo_flush_cmd), },
-               .flags = CMD_SYNC,
                .data = { &flush_cmd, },
        };
 
@@ -180,7 +179,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
                goto done;
        }
        IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
-       iwl_trans_wait_tx_queue_empty(priv->trans);
+       iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
 done:
        ieee80211_wake_queues(priv->hw);
        mutex_unlock(&priv->mutex);
@@ -333,12 +332,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
                memcpy(&bt_cmd_v2.basic, &basic,
                        sizeof(basic));
                ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
-                       CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2);
+                       0, sizeof(bt_cmd_v2), &bt_cmd_v2);
        } else {
                memcpy(&bt_cmd_v1.basic, &basic,
                        sizeof(basic));
                ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
-                       CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1);
+                       0, sizeof(bt_cmd_v1), &bt_cmd_v1);
        }
        if (ret)
                IWL_ERR(priv, "failed to send BT Coex Config\n");
@@ -1044,7 +1043,6 @@ int iwlagn_send_patterns(struct iwl_priv *priv,
        struct iwl_host_cmd cmd = {
                .id = REPLY_WOWLAN_PATTERNS,
                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-               .flags = CMD_SYNC,
        };
        int i, err;
 
@@ -1201,7 +1199,6 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
                if (key_data.use_rsc_tsc) {
                        struct iwl_host_cmd rsc_tsc_cmd = {
                                .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
-                               .flags = CMD_SYNC,
                                .data[0] = key_data.rsc_tsc,
                                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
                                .len[0] = sizeof(*key_data.rsc_tsc),
@@ -1215,7 +1212,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
                if (key_data.use_tkip) {
                        ret = iwl_dvm_send_cmd_pdu(priv,
                                                 REPLY_WOWLAN_TKIP_PARAMS,
-                                                CMD_SYNC, sizeof(tkip_cmd),
+                                                0, sizeof(tkip_cmd),
                                                 &tkip_cmd);
                        if (ret)
                                goto out;
@@ -1231,20 +1228,20 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
 
                        ret = iwl_dvm_send_cmd_pdu(priv,
                                                 REPLY_WOWLAN_KEK_KCK_MATERIAL,
-                                                CMD_SYNC, sizeof(kek_kck_cmd),
+                                                0, sizeof(kek_kck_cmd),
                                                 &kek_kck_cmd);
                        if (ret)
                                goto out;
                }
        }
 
-       ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, CMD_SYNC,
+       ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0,
                                     sizeof(d3_cfg_cmd), &d3_cfg_cmd);
        if (ret)
                goto out;
 
        ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
-                                CMD_SYNC, sizeof(wakeup_filter_cmd),
+                                0, sizeof(wakeup_filter_cmd),
                                 &wakeup_filter_cmd);
        if (ret)
                goto out;
index dd55c9cf7ba80376ef3ae507b434e79d6dd1cca4..29af7b51e3708788d02f4a1651205a348a5102dd 100644 (file)
@@ -1091,7 +1091,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
                        FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                            u32 queues, bool drop)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
@@ -1119,7 +1120,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
                }
        }
        IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
-       iwl_trans_wait_tx_queue_empty(priv->trans);
+       iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
 done:
        mutex_unlock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "leave\n");
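
The flush paths above pass a 32-bit queue bitmask, with 0xffffffff meaning "all queues". As a standalone illustration of walking such a mask -- not the transport layer's actual implementation -- a small sketch using a GCC/Clang builtin:

	#include <stdint.h>
	#include <stdio.h>

	/* Visit every queue selected by a 32-bit bitmask (illustrative helper). */
	static void for_each_queue(uint32_t queues, void (*fn)(unsigned int q))
	{
		while (queues) {
			unsigned int q = (unsigned int)__builtin_ctz(queues); /* lowest set bit */

			fn(q);
			queues &= queues - 1;	/* clear that bit */
		}
	}

	static void drain(unsigned int q)
	{
		printf("waiting for queue %u to empty\n", q);
	}

	int main(void)
	{
		for_each_queue(0xffffffffu, drain);	/* "all queues" */
		return 0;
	}
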
index 6a6df71af1d7ba6e4b4dfec16a2042c6cc357de4..0b7f46f0b079a442b327ecf80184fd00294c4377 100644 (file)
@@ -128,7 +128,6 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
        struct iwl_tx_beacon_cmd *tx_beacon_cmd;
        struct iwl_host_cmd cmd = {
                .id = REPLY_TX_BEACON,
-               .flags = CMD_SYNC,
        };
        struct ieee80211_tx_info *info;
        u32 frame_size;
@@ -311,8 +310,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
                                        sizeof(struct iwl_statistics_cmd),
                                        &statistics_cmd);
        else
-               return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
-                                       CMD_SYNC,
+               return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0,
                                        sizeof(struct iwl_statistics_cmd),
                                        &statistics_cmd);
 }
@@ -622,7 +620,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
 
                ret = iwl_dvm_send_cmd_pdu(priv,
                                       REPLY_CT_KILL_CONFIG_CMD,
-                                      CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
+                                      0, sizeof(adv_cmd), &adv_cmd);
                if (ret)
                        IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
                else
@@ -637,7 +635,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
 
                ret = iwl_dvm_send_cmd_pdu(priv,
                                       REPLY_CT_KILL_CONFIG_CMD,
-                                      CMD_SYNC, sizeof(cmd), &cmd);
+                                      0, sizeof(cmd), &cmd);
                if (ret)
                        IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
                else
@@ -673,9 +671,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
 
        if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
                IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
-               return iwl_dvm_send_cmd_pdu(priv,
-                                       TX_ANT_CONFIGURATION_CMD,
-                                       CMD_SYNC,
+               return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0,
                                        sizeof(struct iwl_tx_ant_config_cmd),
                                        &tx_ant_cmd);
        } else {
@@ -703,7 +699,7 @@ static void iwl_send_bt_config(struct iwl_priv *priv)
                (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
 
        if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
-                            CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
+                            0, sizeof(struct iwl_bt_cmd), &bt_cmd))
                IWL_ERR(priv, "failed to send BT Coex Config\n");
 }
 
@@ -987,7 +983,7 @@ static void iwl_bg_restart(struct work_struct *data)
                        ieee80211_restart_hw(priv->hw);
                else
                        IWL_ERR(priv,
-                               "Cannot request restart before registrating with mac80211");
+                               "Cannot request restart before registrating with mac80211\n");
        } else {
                WARN_ON(1);
        }
@@ -1127,7 +1123,6 @@ static void iwl_option_config(struct iwl_priv *priv)
 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 {
        struct iwl_nvm_data *data = priv->nvm_data;
-       char *debug_msg;
 
        if (data->sku_cap_11n_enable &&
            !priv->cfg->ht_params) {
@@ -1141,8 +1136,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
                return -EINVAL;
        }
 
-       debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
-       IWL_DEBUG_INFO(priv, debug_msg,
+       IWL_DEBUG_INFO(priv,
+                      "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n",
                       data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
                       data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
                       data->sku_cap_11n_enable ? "" : "NOT", "enabled");
@@ -1350,7 +1345,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
        iwl_set_hw_params(priv);
 
        if (!(priv->nvm_data->sku_cap_ipan_enable)) {
-               IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
+               IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n");
                ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
                /*
                 * if not PAN, then don't support P2P -- might be a uCode
@@ -2019,10 +2014,10 @@ void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
 
        for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
                if (!test_bit(mq, &priv->transport_queue_stop)) {
-                       IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq);
+                       IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq);
                        ieee80211_wake_queue(priv->hw, mq);
                } else {
-                       IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq);
+                       IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq);
                }
        }
 
@@ -2053,6 +2048,17 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
        return false;
 }
 
+static void iwl_napi_add(struct iwl_op_mode *op_mode,
+                        struct napi_struct *napi,
+                        struct net_device *napi_dev,
+                        int (*poll)(struct napi_struct *, int),
+                        int weight)
+{
+       struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+       ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
+}
+
 static const struct iwl_op_mode_ops iwl_dvm_ops = {
        .start = iwl_op_mode_dvm_start,
        .stop = iwl_op_mode_dvm_stop,
@@ -2065,6 +2071,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
        .cmd_queue_full = iwl_cmd_queue_full,
        .nic_config = iwl_nic_config,
        .wimax_active = iwl_wimax_active,
+       .napi_add = iwl_napi_add,
 };
 
 /*****************************************************************************
index b4e61417013aff585bf8dcfa33b8fa9e3c5c84c6..f2c1439566b5ffa7814c02c041bf34bb3b3391d6 100644 (file)
@@ -278,7 +278,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
                        le32_to_cpu(cmd->sleep_interval[3]),
                        le32_to_cpu(cmd->sleep_interval[4]));
 
-       return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC,
+       return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, 0,
                                sizeof(struct iwl_powertable_cmd), cmd);
 }
 
@@ -361,7 +361,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
 
                memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
        } else
-               IWL_ERR(priv, "set power fail, ret = %d", ret);
+               IWL_ERR(priv, "set power fail, ret = %d\n", ret);
 
        return ret;
 }
index aa773a2da4ab877f6b5876a28024a6c2e23ffa9b..32b78a66536db90bbb1e67f10e210df49f6b2d03 100644 (file)
@@ -1453,7 +1453,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
                        tbl->action = IWL_LEGACY_SWITCH_SISO;
                break;
        default:
-               IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+               IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
                break;
        }
 
@@ -1628,7 +1628,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
                        tbl->action = IWL_SISO_SWITCH_ANTENNA1;
                break;
        default:
-               IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+               IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
                break;
        }
 
@@ -1799,7 +1799,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
                        tbl->action = IWL_MIMO2_SWITCH_SISO_A;
                break;
        default:
-               IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+               IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
                break;
        }
 
@@ -1969,7 +1969,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
                        tbl->action = IWL_MIMO3_SWITCH_SISO_A;
                break;
        default:
-               IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+               IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
                break;
        }
 
@@ -2709,7 +2709,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
        rs_set_expected_tpt_table(lq_sta, tbl);
        rs_fill_link_cmd(NULL, lq_sta, rate);
        priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
-       iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
+       iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, 0, true);
 }
 
 static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
index cd8377346aff0c2936a6f0ee773ae8fd33042025..debec963c610d4693fb40ca22a27b9cd3b6310ac 100644 (file)
@@ -786,7 +786,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-       ieee80211_rx_ni(priv->hw, skb);
+       ieee80211_rx(priv->hw, skb);
 }
 
 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
index 503a81e581855729fde646408a8ae33b0195539f..ed50de6362ed1d5dcd56b45243ff0b140dcbafe5 100644 (file)
@@ -104,7 +104,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
 
        send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
        ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
-                               CMD_SYNC, sizeof(*send), send);
+                               0, sizeof(*send), send);
 
        send->filter_flags = old_filter;
 
@@ -134,7 +134,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
        send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
        send->dev_type = RXON_DEV_TYPE_P2P;
        ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
-                               CMD_SYNC, sizeof(*send), send);
+                               0, sizeof(*send), send);
 
        send->filter_flags = old_filter;
        send->dev_type = old_dev_type;
@@ -160,7 +160,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
        int ret;
 
        send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
+       ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
                                sizeof(*send), send);
 
        send->filter_flags = old_filter;
@@ -189,7 +189,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
                      ctx->qos_data.qos_active,
                      ctx->qos_data.def_qos_parm.qos_flags);
 
-       ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC,
+       ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
                               sizeof(struct iwl_qosparam_cmd),
                               &ctx->qos_data.def_qos_parm);
        if (ret)
@@ -353,7 +353,7 @@ static int iwl_send_rxon_timing(struct iwl_priv *priv,
                        le16_to_cpu(ctx->timing.atim_window));
 
        return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
-                               CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
+                               0, sizeof(ctx->timing), &ctx->timing);
 }
 
 static int iwlagn_rxon_disconn(struct iwl_priv *priv,
@@ -495,7 +495,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
         * Associated RXON doesn't clear the station table in uCode,
         * so we don't need to restore stations etc. after this.
         */
-       ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
+       ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
                      sizeof(struct iwl_rxon_cmd), &ctx->staging);
        if (ret) {
                IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -610,7 +610,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
        cmd.slots[0].width = cpu_to_le16(slot0);
        cmd.slots[1].width = cpu_to_le16(slot1);
 
-       ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC,
+       ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
                        sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -823,7 +823,7 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
 
        if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
                        == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
-               IWL_WARN(priv, "CCK and auto detect");
+               IWL_WARN(priv, "CCK and auto detect\n");
                errors |= BIT(8);
        }
 
@@ -1395,7 +1395,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
                        priv->phy_calib_chain_noise_reset_cmd);
                ret = iwl_dvm_send_cmd_pdu(priv,
                                        REPLY_PHY_CALIBRATION_CMD,
-                                       CMD_SYNC, sizeof(cmd), &cmd);
+                                       0, sizeof(cmd), &cmd);
                if (ret)
                        IWL_ERR(priv,
                                "Could not send REPLY_PHY_CALIBRATION_CMD\n");
index be98b913ed582046d6598d0cf7454cf3bd8278cc..43bef901e8f9a80a7c3a56f63a7d2da93fda7076 100644 (file)
@@ -59,7 +59,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
        int ret;
        struct iwl_host_cmd cmd = {
                .id = REPLY_SCAN_ABORT_CMD,
-               .flags = CMD_SYNC | CMD_WANT_SKB,
+               .flags = CMD_WANT_SKB,
        };
        __le32 *status;
 
@@ -639,7 +639,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        struct iwl_host_cmd cmd = {
                .id = REPLY_SCAN_CMD,
                .len = { sizeof(struct iwl_scan_cmd), },
-               .flags = CMD_SYNC,
        };
        struct iwl_scan_cmd *scan;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
index 9cdd91cdf661825604e9ae8c89649213667b0b0e..6ec86adbe4a1fcc9df9aaf0d8b73ca4b48ac2e9b 100644 (file)
@@ -39,7 +39,7 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
        lockdep_assert_held(&priv->sta_lock);
 
        if (sta_id >= IWLAGN_STATION_COUNT) {
-               IWL_ERR(priv, "invalid sta_id %u", sta_id);
+               IWL_ERR(priv, "invalid sta_id %u\n", sta_id);
                return -EINVAL;
        }
        if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
@@ -165,7 +165,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
        iwl_free_resp(&cmd);
 
        if (cmd.handler_status)
-               IWL_ERR(priv, "%s - error in the CMD response %d", __func__,
+               IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
                        cmd.handler_status);
 
        return cmd.handler_status;
@@ -261,7 +261,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        cmd.station_flags = flags;
        cmd.sta.sta_id = sta_id;
 
-       return iwl_send_add_sta(priv, &cmd, CMD_SYNC);
+       return iwl_send_add_sta(priv, &cmd, 0);
 }
 
 static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
@@ -413,7 +413,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        spin_unlock_bh(&priv->sta_lock);
 
        /* Add station to device's station table */
-       ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+       ret = iwl_send_add_sta(priv, &sta_cmd, 0);
        if (ret) {
                spin_lock_bh(&priv->sta_lock);
                IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -456,7 +456,6 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
        struct iwl_host_cmd cmd = {
                .id = REPLY_REMOVE_STA,
                .len = { sizeof(struct iwl_rem_sta_cmd), },
-               .flags = CMD_SYNC,
                .data = { &rm_sta_cmd, },
        };
 
@@ -740,7 +739,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                                        send_lq = true;
                        }
                        spin_unlock_bh(&priv->sta_lock);
-                       ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+                       ret = iwl_send_add_sta(priv, &sta_cmd, 0);
                        if (ret) {
                                spin_lock_bh(&priv->sta_lock);
                                IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -756,8 +755,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                         * current LQ command
                         */
                        if (send_lq)
-                               iwl_send_lq_cmd(priv, ctx, &lq,
-                                               CMD_SYNC, true);
+                               iwl_send_lq_cmd(priv, ctx, &lq, 0, true);
                        spin_lock_bh(&priv->sta_lock);
                        priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
                }
@@ -968,7 +966,7 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv,
                return -ENOMEM;
        }
 
-       ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
+       ret = iwl_send_lq_cmd(priv, ctx, link_cmd, 0, true);
        if (ret)
                IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
 
@@ -999,7 +997,6 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
        struct iwl_host_cmd cmd = {
                .id = ctx->wep_key_cmd,
                .data = { wep_cmd, },
-               .flags = CMD_SYNC,
        };
 
        might_sleep();
@@ -1248,7 +1245,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
        sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
        sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
 
-       return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+       return iwl_send_add_sta(priv, &sta_cmd, 0);
 }
 
 int iwl_set_dynamic_key(struct iwl_priv *priv,
@@ -1284,13 +1281,13 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
                ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
-                                         seq.tkip.iv32, p1k, CMD_SYNC);
+                                         seq.tkip.iv32, p1k, 0);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
                ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
-                                         0, NULL, CMD_SYNC);
+                                         0, NULL, 0);
                break;
        default:
                IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
@@ -1409,7 +1406,7 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
        memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
        spin_unlock_bh(&priv->sta_lock);
 
-       return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+       return iwl_send_add_sta(priv, &sta_cmd, 0);
 }
 
 int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1433,7 +1430,7 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
        memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
        spin_unlock_bh(&priv->sta_lock);
 
-       return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+       return iwl_send_add_sta(priv, &sta_cmd, 0);
 }
 
 int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1458,7 +1455,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
        memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
        spin_unlock_bh(&priv->sta_lock);
 
-       return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+       return iwl_send_add_sta(priv, &sta_cmd, 0);
 }
 
 
index 058c5892c427afdf3df48038bf8e715017d379c6..acb981a0a0aaa0bb31f99e7f5306e5c03f6cdd47 100644 (file)
@@ -236,7 +236,7 @@ static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
 {
        IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
        /* make request to retrieve statistics information */
-       iwl_send_statistics_request(priv, CMD_SYNC, false);
+       iwl_send_statistics_request(priv, 0, false);
        /* Reschedule the ct_kill wait timer */
        mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
                 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
index 398dd096674cf17bd8112e8e7d06ad4ce57427f0..3255a1723d176f59ee5547e7981a9d3bbe7d9583 100644 (file)
@@ -402,10 +402,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU &&
                    tid_data->agg.state != IWL_AGG_ON) {
-                       IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
-                               " Tx flags = 0x%08x, agg.state = %d",
+                       IWL_ERR(priv,
+                               "TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
                                info->flags, tid_data->agg.state);
-                       IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
+                       IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
                                sta_id, tid,
                                IEEE80211_SEQ_TO_SN(tid_data->seq_number));
                        goto drop_unlock_sta;
@@ -416,7 +416,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
                 */
                if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
                              tid_data->agg.state != IWL_AGG_OFF,
-                   "Tx while agg.state = %d", tid_data->agg.state))
+                             "Tx while agg.state = %d\n", tid_data->agg.state))
                        goto drop_unlock_sta;
 
                seq_number = tid_data->seq_number;
@@ -778,8 +778,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
                /* There are no packets for this RA / TID in the HW any more */
                if (tid_data->agg.ssn == tid_data->next_reclaimed) {
                        IWL_DEBUG_TX_QUEUES(priv,
-                               "Can continue DELBA flow ssn = next_recl ="
-                               " %d", tid_data->next_reclaimed);
+                               "Can continue DELBA flow ssn = next_recl = %d\n",
+                               tid_data->next_reclaimed);
                        iwl_trans_txq_disable(priv->trans,
                                              tid_data->agg.txq_id);
                        iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
@@ -791,8 +791,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
                /* There are no packets for this RA / TID in the HW any more */
                if (tid_data->agg.ssn == tid_data->next_reclaimed) {
                        IWL_DEBUG_TX_QUEUES(priv,
-                               "Can continue ADDBA flow ssn = next_recl ="
-                               " %d", tid_data->next_reclaimed);
+                               "Can continue ADDBA flow ssn = next_recl = %d\n",
+                               tid_data->next_reclaimed);
                        tid_data->agg.state = IWL_AGG_STARTING;
                        ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
                }
@@ -1216,8 +1216,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                            ctx->vif->type == NL80211_IFTYPE_STATION) {
                                /* block and stop all queues */
                                priv->passive_no_rx = true;
-                               IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
-                                                   "passive channel");
+                               IWL_DEBUG_TX_QUEUES(priv,
+                                       "stop all queues: passive channel\n");
                                ieee80211_stop_queues(priv->hw);
 
                                IWL_DEBUG_TX_REPLY(priv,
@@ -1271,7 +1271,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 
        while (!skb_queue_empty(&skbs)) {
                skb = __skb_dequeue(&skbs);
-               ieee80211_tx_status_ni(priv->hw, skb);
+               ieee80211_tx_status(priv->hw, skb);
        }
 
        return 0;
@@ -1411,7 +1411,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
 
        while (!skb_queue_empty(&reclaimed_skbs)) {
                skb = __skb_dequeue(&reclaimed_skbs);
-               ieee80211_tx_status_ni(priv->hw, skb);
+               ieee80211_tx_status(priv->hw, skb);
        }
 
        return 0;
index cf03ef5619d9fea602151cf573162848ffd12366..d5cee1530597b8cf2de444e98aebffd89d636715 100644 (file)
@@ -172,7 +172,7 @@ static int iwl_send_wimax_coex(struct iwl_priv *priv)
        memset(&coex_cmd, 0, sizeof(coex_cmd));
 
        return iwl_dvm_send_cmd_pdu(priv,
-                               COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
+                               COEX_PRIORITY_TABLE_CMD, 0,
                                sizeof(coex_cmd), &coex_cmd);
 }
 
@@ -205,7 +205,7 @@ void iwl_send_prio_tbl(struct iwl_priv *priv)
        memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
                sizeof(iwl_bt_prio_tbl));
        if (iwl_dvm_send_cmd_pdu(priv,
-                               REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
+                               REPLY_BT_COEX_PRIO_TABLE, 0,
                                sizeof(prio_tbl_cmd), &prio_tbl_cmd))
                IWL_ERR(priv, "failed to send BT prio tbl command\n");
 }
@@ -218,7 +218,7 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
        env_cmd.action = action;
        env_cmd.type = type;
        ret = iwl_dvm_send_cmd_pdu(priv,
-                              REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
+                              REPLY_BT_COEX_PROT_ENV, 0,
                               sizeof(env_cmd), &env_cmd);
        if (ret)
                IWL_ERR(priv, "failed to send BT env command\n");
index 854ba84ccb730995f0d786d26a85a1c2fc14c0dc..c3817fae16c04207136e5d45e8cc65bd3a125429 100644 (file)
@@ -62,6 +62,7 @@ static const struct iwl_base_params iwl1000_base_params = {
        .led_compensation = 51,
        .wd_timeout = IWL_WATCHDOG_DISABLED,
        .max_event_log_size = 128,
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_ht_params iwl1000_ht_params = {
index 3e63323637f3f593dd895528dac4e5fb794fdb4c..21e5d0843a62a84a0f21ff337d1b674750fa3999 100644 (file)
@@ -75,6 +75,7 @@ static const struct iwl_base_params iwl2000_base_params = {
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 
@@ -88,6 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_ht_params iwl2000_ht_params = {
index 6674f2c4541c183fbae6c2a1bb0861d9eb3a966f..332bbede39e5b0fc6bb25b7ab30bbbd22c929b8b 100644 (file)
@@ -61,6 +61,7 @@ static const struct iwl_base_params iwl5000_base_params = {
        .led_compensation = 51,
        .wd_timeout = IWL_WATCHDOG_DISABLED,
        .max_event_log_size = 512,
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_ht_params iwl5000_ht_params = {
index 8048de90233fa038545e9d752eaaffbfc3968c72..8f2c3c8c6b843f78f346225d371ee3ad3df54f23 100644 (file)
@@ -85,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_base_params iwl6050_base_params = {
@@ -97,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 1024,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -109,6 +111,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_ht_params iwl6000_ht_params = {
index 4c2d4ef28b220c719ac49f9f9726b931c2d35442..48730064da73f5e1e058f756d9473a2a0a5bc376 100644 (file)
 #define IWL3160_UCODE_API_MAX  9
 
 /* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK   8
-#define IWL3160_UCODE_API_OK   8
+#define IWL7260_UCODE_API_OK   9
+#define IWL3160_UCODE_API_OK   9
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN  7
-#define IWL3160_UCODE_API_MIN  7
+#define IWL7260_UCODE_API_MIN  8
+#define IWL3160_UCODE_API_MIN  8
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
@@ -98,7 +98,7 @@
 #define NVM_HW_SECTION_NUM_FAMILY_7000         0
 
 static const struct iwl_base_params iwl7000_base_params = {
-       .eeprom_size = OTP_LOW_IMAGE_SIZE,
+       .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
        .num_of_queues = IWLAGN_NUM_QUEUES,
        .pll_cfg_val = 0,
        .shadow_ram_support = true,
@@ -107,6 +107,7 @@ static const struct iwl_base_params iwl7000_base_params = {
        .max_event_log_size = 512,
        .shadow_reg_enable = true,
        .pcie_l1_allowed = true,
+       .apmg_wake_up_wa = true,
 };
 
 static const struct iwl_ht_params iwl7000_ht_params = {
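
The API bounds above follow the pattern spelled out by this file's comments: a minimum below which the firmware is not supported, and an "OK" level below which the driver only warns. A hypothetical, self-contained sketch of that kind of check; the constant values and the function name are made up for the example:

	#include <stdio.h>

	/* Illustrative thresholds only -- the real ones are per-device, as above. */
	#define UCODE_API_MIN	8	/* lowest firmware API version supported */
	#define UCODE_API_OK	9	/* oldest version we won't warn about    */
	#define UCODE_API_MAX	9	/* newest version this sketch understands */

	static int check_ucode_api(int api)
	{
		if (api < UCODE_API_MIN || api > UCODE_API_MAX) {
			fprintf(stderr, "firmware API %d not supported\n", api);
			return -1;
		}
		if (api < UCODE_API_OK)
			fprintf(stderr, "firmware API %d is old, consider updating\n", api);
		return 0;
	}

	int main(void)
	{
		return check_ucode_api(8) ? 1 : 0;
	}
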
index f5bd82b885929a8c1188dc44ffdd27892dc2739b..51c41531d81d7f5af8354aced5da7e6b4e647f78 100644 (file)
 #define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_8000         10
+#define DEFAULT_NVM_FILE_FAMILY_8000           "iwl_nvm_8000.bin"
 
 static const struct iwl_base_params iwl8000_base_params = {
-       .eeprom_size = OTP_LOW_IMAGE_SIZE,
+       .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
        .num_of_queues = IWLAGN_NUM_QUEUES,
        .pll_cfg_val = 0,
        .shadow_ram_support = true,
@@ -118,6 +119,7 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
        .ht_params = &iwl8000_ht_params,
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+       .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
 };
 
 const struct iwl_cfg iwl8260_n_cfg = {
@@ -127,6 +129,7 @@ const struct iwl_cfg iwl8260_n_cfg = {
        .ht_params = &iwl8000_ht_params,
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+       .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
 };
 
 MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
index 7f37fb86837b7a46569ba74f90df0a7bdb8d0271..04a483d386592c4cf359d741328844b2c0f02e51 100644 (file)
 
 /* EEPROM */
 #define IWLAGN_EEPROM_IMG_SIZE         2048
-/* OTP */
-/* lower blocks contain EEPROM image and calibration data */
-#define OTP_LOW_IMAGE_SIZE             (2 * 512 * sizeof(u16)) /* 2 KB */
+
 /* high blocks contain PAPD data */
 #define OTP_HIGH_IMAGE_SIZE_6x00        (6 * 512 * sizeof(u16)) /* 6 KB */
 #define OTP_HIGH_IMAGE_SIZE_1000        (0x200 * sizeof(u16)) /* 1024 bytes */
index 3f17dc3f2c8a9fdde83bddb254efb8cc8d33502f..b7047905f41a38ce6a88f27b93ad75384306958a 100644 (file)
@@ -146,6 +146,9 @@ static inline u8 num_of_ant(u8 mask)
  * @wd_timeout: TX queues watchdog timeout
  * @max_event_log_size: size of event log buffer size for ucode event logging
  * @shadow_reg_enable: HW shadow register support
+ * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
+ *     is in flight. This is due to a HW bug in 7260, 3160 and 7265.
+ * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
  */
 struct iwl_base_params {
        int eeprom_size;
@@ -160,6 +163,8 @@ struct iwl_base_params {
        u32 max_event_log_size;
        const bool shadow_reg_enable;
        const bool pcie_l1_allowed;
+       const bool apmg_wake_up_wa;
+       const bool scd_chain_ext_wa;
 };
 
 /*
@@ -188,6 +193,11 @@ struct iwl_ht_params {
 #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS  0x80
 #define EEPROM_REGULATORY_BAND_NO_HT40         0
 
+/* lower blocks contain EEPROM image and calibration data */
+#define OTP_LOW_IMAGE_SIZE             (2 * 512 * sizeof(u16)) /* 2 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
+
 struct iwl_eeprom_params {
        const u8 regulatory_bands[7];
        bool enhanced_txpower;
@@ -264,6 +274,8 @@ struct iwl_cfg {
        u8   nvm_hw_section_num;
        bool lp_xtal_workaround;
        const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
+       bool no_power_up_nic_in_init;
+       const char *default_nvm_file;
 };
 
 /*
index 8a44f594528df6c4f503face66300c93d79f1f44..09feff4fa226e781b1b8350ade568e9001709335 100644 (file)
@@ -61,8 +61,6 @@
  *
  *****************************************************************************/
 
-#define DEBUG
-
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/export.h>
@@ -128,8 +126,8 @@ void __iwl_dbg(struct device *dev,
 #ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(level) &&
            (!limit || net_ratelimit()))
-               dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
-                       function, &vaf);
+               dev_printk(KERN_DEBUG, dev, "%c %s %pV",
+                          in_interrupt() ? 'I' : 'U', function, &vaf);
 #endif
        trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
        va_end(args);
index c8cbdbe15924a61123d76828c0d1f6c7686dcfe4..295083510e729a76f13347a568c661acbdfdda52 100644 (file)
@@ -47,12 +47,32 @@ void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
 void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
 void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
 
+/* not all compilers can evaluate strlen() at compile time, so use sizeof() */
+#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n')
+
 /* No matter what is m (priv, bus, trans), this will work */
-#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
-#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
-#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
-#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
-#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
+#define IWL_ERR_DEV(d, f, a...)                                                \
+       do {                                                            \
+               CHECK_FOR_NEWLINE(f);                                   \
+               __iwl_err((d), false, false, f, ## a);                  \
+       } while (0)
+#define IWL_ERR(m, f, a...)                                            \
+       IWL_ERR_DEV((m)->dev, f, ## a)
+#define IWL_WARN(m, f, a...)                                           \
+       do {                                                            \
+               CHECK_FOR_NEWLINE(f);                                   \
+               __iwl_warn((m)->dev, f, ## a);                          \
+       } while (0)
+#define IWL_INFO(m, f, a...)                                           \
+       do {                                                            \
+               CHECK_FOR_NEWLINE(f);                                   \
+               __iwl_info((m)->dev, f, ## a);                          \
+       } while (0)
+#define IWL_CRIT(m, f, a...)                                           \
+       do {                                                            \
+               CHECK_FOR_NEWLINE(f);                                   \
+               __iwl_crit((m)->dev, f, ## a);                          \
+       } while (0)
 
 #if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
 void __iwl_dbg(struct device *dev,
@@ -72,12 +92,17 @@ do {                                                                        \
                       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);           \
 } while (0)
 
+#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...)               \
+       do {                                                            \
+               CHECK_FOR_NEWLINE(fmt);                                 \
+               __iwl_dbg(dev, level, limit, __func__, fmt, ##args);    \
+       } while (0)
 #define IWL_DEBUG(m, level, fmt, args...)                              \
-       __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
+       __IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
 #define IWL_DEBUG_DEV(dev, level, fmt, args...)                                \
-       __iwl_dbg((dev), level, false, __func__, fmt, ##args)
+       __IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
 #define IWL_DEBUG_LIMIT(m, level, fmt, args...)                                \
-       __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
+       __IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 #define iwl_print_hex_dump(m, level, p, len)                           \
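
A standalone sketch of the trailing-newline check introduced above. It keeps the sizeof() indexing trick on the format string literal but uses a plain runtime assert() so the example compiles and runs anywhere, whereas the driver does the check at build time via BUILD_BUG_ON(); the LOG macro name is invented for the example:

	#include <assert.h>
	#include <stdio.h>

	/* f must be a string literal; sizeof(f) - 2 indexes its last character. */
	#define CHECK_FOR_NEWLINE(f)	assert((f)[sizeof(f) - 2] == '\n')

	#define LOG(f, ...)						\
		do {							\
			CHECK_FOR_NEWLINE(f);				\
			printf(f, ##__VA_ARGS__);			\
		} while (0)

	int main(void)
	{
		LOG("value = %d\n", 42);	/* fine: the format ends in a newline */
		return 0;
	}
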
index 0a3e841b44a9ebe81cbd40b0313e8cafcc355f3a..f2a5c12269a3ed7de811399580c9ea908bc89173 100644 (file)
@@ -1243,6 +1243,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
        .bt_coex_active = true,
        .power_level = IWL_POWER_INDEX_1,
        .wd_disable = true,
+       .uapsd_disable = false,
        /* the rest are 0 by default */
 };
 IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1356,6 +1357,10 @@ MODULE_PARM_DESC(wd_disable,
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
 MODULE_PARM_DESC(nvm_file, "NVM file name");
 
+module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
+                  bool, S_IRUGO);
+MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
+
 /*
  * set bt_coex_active to true, uCode will do kill/defer
  * every time the priority line is asserted (BT is sending signals on the
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
new file mode 100644 (file)
index 0000000..2953ffc
--- /dev/null
@@ -0,0 +1,134 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_error_dump_h__
+#define __fw_error_dump_h__
+
+#include <linux/types.h>
+
+#define IWL_FW_ERROR_DUMP_BARKER       0x14789632
+
+/**
+ * enum iwl_fw_error_dump_type - types of data in the dump file
+ * @IWL_FW_ERROR_DUMP_SRAM:
+ * @IWL_FW_ERROR_DUMP_REG:
+ * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
+ *     &struct iwl_fw_error_dump_txcmd packets
+ */
+enum iwl_fw_error_dump_type {
+       IWL_FW_ERROR_DUMP_SRAM = 0,
+       IWL_FW_ERROR_DUMP_REG = 1,
+       IWL_FW_ERROR_DUMP_RXF = 2,
+       IWL_FW_ERROR_DUMP_TXCMD = 3,
+
+       IWL_FW_ERROR_DUMP_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_data - data for one type
+ * @type: %enum iwl_fw_error_dump_type
+ * @len: the length starting from %data - must be a multiple of 4.
+ * @data: the data itself, padded to a multiple of 4.
+ */
+struct iwl_fw_error_dump_data {
+       __le32 type;
+       __le32 len;
+       __u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_file - the layout of the header of the file
+ * @barker: must be %IWL_FW_ERROR_DUMP_BARKER
+ * @file_len: the length of the whole file, starting from %barker
+ * @data: array of %struct iwl_fw_error_dump_data
+ */
+struct iwl_fw_error_dump_file {
+       __le32 barker;
+       __le32 file_len;
+       u8 data[0];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_txcmd - TX command data
+ * @cmdlen: original length of command
+ * @caplen: captured length of command (may be less)
+ * @data: captured command data, @caplen bytes
+ */
+struct iwl_fw_error_dump_txcmd {
+       __le32 cmdlen;
+       __le32 caplen;
+       u8 data[];
+} __packed;
+
+/**
+ * iwl_mvm_fw_error_next_data - advance fw error dump data pointer
+ * @data: previous data block
+ * Returns: next data block
+ */
+static inline struct iwl_fw_error_dump_data *
+iwl_mvm_fw_error_next_data(struct iwl_fw_error_dump_data *data)
+{
+       return (void *)(data->data + le32_to_cpu(data->len));
+}
+
+#endif /* __fw_error_dump_h__ */
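
The header above defines a simple TLV-style container: a file header (barker plus
total length) followed by back-to-back data blocks that are walked with
iwl_mvm_fw_error_next_data(). Below is a minimal userspace sketch of a reader for
that layout; the structures are mirrored locally for illustration and walk_dump()
is a hypothetical helper, not part of the driver. It assumes a little-endian host
so the __le32 fields can be read directly.

#include <stdint.h>
#include <stdio.h>

#define DUMP_BARKER 0x14789632		/* mirrors IWL_FW_ERROR_DUMP_BARKER */

struct dump_data {			/* mirrors struct iwl_fw_error_dump_data */
	uint32_t type;			/* enum iwl_fw_error_dump_type value */
	uint32_t len;			/* length of data[], a multiple of 4 */
	uint8_t data[];
} __attribute__((packed));

struct dump_file {			/* mirrors struct iwl_fw_error_dump_file */
	uint32_t barker;		/* must be DUMP_BARKER */
	uint32_t file_len;		/* length of the whole file */
	uint8_t data[];
} __attribute__((packed));

static void walk_dump(const uint8_t *buf, size_t buflen)
{
	const struct dump_file *file = (const void *)buf;
	const uint8_t *pos, *end;

	if (buflen < sizeof(*file) || file->barker != DUMP_BARKER)
		return;

	end = buf + (file->file_len < buflen ? file->file_len : buflen);
	pos = file->data;

	while (pos + sizeof(struct dump_data) <= end) {
		const struct dump_data *block = (const void *)pos;

		printf("block type %u, %u bytes\n",
		       (unsigned)block->type, (unsigned)block->len);
		/* same arithmetic as iwl_mvm_fw_error_next_data() */
		pos = block->data + block->len;
	}
}

Such a reader can be pointed at a buffer read from the debugfs fw_error_dump file
handled later in this series.
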
index d14f19339d6140607c99d1b6660b039f9ac4aa66..0aa7c0085c9fd04554b1a3314a2212638f920a78 100644 (file)
  * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
  * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
  * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
- * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
  * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
  *     offload profile config command.
- * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
- * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
  * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
  *     (rather than two) IPv6 addresses
- * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
  * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
  *     from the probe request template.
- * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
- *     connection when going back to D0
  * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
  * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
- * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
- * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
- *     containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand-alone MAC
  * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
  *     P2P client interfaces simultaneously if they are in different bindings.
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
+ *     P2P client interfaces simultaneously if they are in the same binding.
  * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
  * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
  * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
  */
 enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_PAN                 = BIT(0),
@@ -104,22 +99,16 @@ enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_MFP                 = BIT(2),
        IWL_UCODE_TLV_FLAGS_P2P                 = BIT(3),
        IWL_UCODE_TLV_FLAGS_DW_BC_TABLE         = BIT(4),
-       IWL_UCODE_TLV_FLAGS_NEWBT_COEX          = BIT(5),
-       IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT      = BIT(6),
        IWL_UCODE_TLV_FLAGS_SHORT_BL            = BIT(7),
-       IWL_UCODE_TLV_FLAGS_RX_ENERGY_API       = BIT(8),
-       IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2   = BIT(9),
        IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS     = BIT(10),
-       IWL_UCODE_TLV_FLAGS_BF_UPDATED          = BIT(11),
        IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID       = BIT(12),
-       IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API   = BIT(14),
        IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL    = BIT(15),
        IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE    = BIT(16),
-       IWL_UCODE_TLV_FLAGS_SCHED_SCAN          = BIT(17),
-       IWL_UCODE_TLV_FLAGS_STA_KEY_CMD         = BIT(19),
-       IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD       = BIT(20),
+       IWL_UCODE_TLV_FLAGS_P2P_PM              = BIT(21),
        IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM      = BIT(22),
+       IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM      = BIT(23),
        IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT       = BIT(24),
+       IWL_UCODE_TLV_FLAGS_EBS_SUPPORT         = BIT(25),
        IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD        = BIT(26),
        IWL_UCODE_TLV_FLAGS_BCAST_FILTERING     = BIT(29),
        IWL_UCODE_TLV_FLAGS_GO_UAPSD            = BIT(30),
@@ -128,9 +117,11 @@ enum iwl_ucode_tlv_flag {
 /**
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID     = BIT(0),
+       IWL_UCODE_TLV_API_CSA_FLOW              = BIT(4),
 };
 
 /**
@@ -183,6 +174,7 @@ enum iwl_ucode_sec {
 #define IWL_UCODE_SECTION_MAX 12
 #define IWL_API_ARRAY_SIZE     1
 #define IWL_CAPABILITIES_ARRAY_SIZE    1
+#define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
 
 struct iwl_ucode_capabilities {
        u32 max_probe_length;
@@ -205,6 +197,11 @@ struct fw_img {
        bool is_dual_cpus;
 };
 
+struct iwl_sf_region {
+       u32 addr;
+       u32 size;
+};
+
 /* uCode version contains 4 values: Major/Minor/API/Serial */
 #define IWL_UCODE_MAJOR(ver)   (((ver) & 0xFF000000) >> 24)
 #define IWL_UCODE_MINOR(ver)   (((ver) & 0x00FF0000) >> 16)
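
As a small stand-alone illustration of the packed version word, the two macros
shown here can be exercised directly (the example value is made up):

#include <stdint.h>
#include <stdio.h>

#define IWL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
#define IWL_UCODE_MINOR(ver)	(((ver) & 0x00FF0000) >> 16)

int main(void)
{
	uint32_t ver = 0x0a010000;	/* made-up version word */

	/* prints "uCode 10.1" */
	printf("uCode %u.%u\n",
	       (unsigned)IWL_UCODE_MAJOR(ver), (unsigned)IWL_UCODE_MINOR(ver));
	return 0;
}
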
index 44cc3cf45762d1465e47627b70eea990f257e3a3..5eef4ae7333bad13527327cc851f022f23e3f991 100644 (file)
@@ -33,6 +33,7 @@
 #include "iwl-io.h"
 #include "iwl-csr.h"
 #include "iwl-debug.h"
+#include "iwl-prph.h"
 #include "iwl-fh.h"
 
 #define IWL_POLL_INTERVAL 10   /* microseconds */
@@ -183,6 +184,23 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
 }
 IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
 
+void iwl_force_nmi(struct iwl_trans *trans)
+{
+       /*
+        * On hardware prior to the 8000 family, and on 8000 family hardware
+        * with revision step 0, DEVICE_SET_NMI_REG is used to force an NMI.
+        * Otherwise, a different register - DEVICE_SET_NMI_8000B_REG - is
+        * used.
+        */
+       if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
+           ((trans->hw_rev & 0xc) == 0x0))
+               iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL);
+       else
+               iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
+                              DEVICE_SET_NMI_8000B_VAL);
+}
+IWL_EXPORT_SYMBOL(iwl_force_nmi);
+
 static const char *get_fh_string(int cmd)
 {
 #define IWL_CMD(x) case x: return #x
index 665ddd9dbbc48ff5dec47f246b8efb0639fb5c86..705d12c079e8b2ef2492ca82a56aa92d64cfe09b 100644 (file)
@@ -80,6 +80,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
 void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
                            u32 bits, u32 mask);
 void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+void iwl_force_nmi(struct iwl_trans *trans);
 
 /* Error handling */
 int iwl_dump_fh(struct iwl_trans *trans, char **buf);
index d994317db85b72cbfc11f602e2d414efc54233b1..d051857729ab8e2f238115165c482d09033147db 100644 (file)
@@ -119,6 +119,7 @@ struct iwl_mod_params {
 #endif
        int ant_coupling;
        char *nvm_file;
+       bool uapsd_disable;
 };
 
 #endif /* #__iwl_modparams_h__ */
index 6be30c69850619f81c2468febb3634fd5cf390fd..85eee79c495c8f1080d9844a3f71ffad1b0d023d 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/etherdevice.h>
 #include "iwl-drv.h"
 #include "iwl-modparams.h"
 #include "iwl-nvm-parse.h"
@@ -127,19 +128,20 @@ static const u8 iwl_nvm_channels[] = {
 
 static const u8 iwl_nvm_channels_family_8000[] = {
        /* 2.4 GHz */
-       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
        96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165, 169, 173, 177, 181
 };
 
-#define IWL_NUM_CHANNELS       ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NUM_CHANNELS               ARRAY_SIZE(iwl_nvm_channels)
 #define IWL_NUM_CHANNELS_FAMILY_8000   ARRAY_SIZE(iwl_nvm_channels_family_8000)
-#define NUM_2GHZ_CHANNELS      14
-#define FIRST_2GHZ_HT_MINUS    5
-#define LAST_2GHZ_HT_PLUS      9
-#define LAST_5GHZ_HT           161
+#define NUM_2GHZ_CHANNELS              14
+#define NUM_2GHZ_CHANNELS_FAMILY_8000  14
+#define FIRST_2GHZ_HT_MINUS            5
+#define LAST_2GHZ_HT_PLUS              9
+#define LAST_5GHZ_HT                   161
 
 #define DEFAULT_MAX_TX_POWER 16
 
@@ -202,21 +204,23 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
        struct ieee80211_channel *channel;
        u16 ch_flags;
        bool is_5ghz;
-       int num_of_ch;
+       int num_of_ch, num_2ghz_channels;
        const u8 *nvm_chan;
 
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
                num_of_ch = IWL_NUM_CHANNELS;
                nvm_chan = &iwl_nvm_channels[0];
+               num_2ghz_channels = NUM_2GHZ_CHANNELS;
        } else {
                num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
                nvm_chan = &iwl_nvm_channels_family_8000[0];
+               num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
        }
 
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
 
-               if (ch_idx >= NUM_2GHZ_CHANNELS &&
+               if (ch_idx >= num_2ghz_channels &&
                    !data->sku_cap_band_52GHz_enable)
                        ch_flags &= ~NVM_CHANNEL_VALID;
 
@@ -225,7 +229,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                                         "Ch. %d Flags %x [%sGHz] - No traffic\n",
                                         nvm_chan[ch_idx],
                                         ch_flags,
-                                        (ch_idx >= NUM_2GHZ_CHANNELS) ?
+                                        (ch_idx >= num_2ghz_channels) ?
                                         "5.2" : "2.4");
                        continue;
                }
@@ -234,7 +238,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                n_channels++;
 
                channel->hw_value = nvm_chan[ch_idx];
-               channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+               channel->band = (ch_idx < num_2ghz_channels) ?
                                IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
                channel->center_freq =
                        ieee80211_channel_to_frequency(
@@ -242,7 +246,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
                /* TODO: Needs to depend on the NVM */
                channel->flags = IEEE80211_CHAN_NO_HT40;
-               if (ch_idx < NUM_2GHZ_CHANNELS &&
+               if (ch_idx < num_2ghz_channels &&
                    (ch_flags & NVM_CHANNEL_40MHZ)) {
                        if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -250,7 +254,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
                } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
                           (ch_flags & NVM_CHANNEL_40MHZ)) {
-                       if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+                       if ((ch_idx - num_2ghz_channels) % 2 == 0)
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
                        else
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
@@ -447,13 +451,7 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
                               struct iwl_nvm_data *data,
                               const __le16 *nvm_sec)
 {
-       u8 hw_addr[ETH_ALEN];
-
-       if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
-               memcpy(hw_addr, nvm_sec + HW_ADDR, ETH_ALEN);
-       else
-               memcpy(hw_addr, nvm_sec + MAC_ADDRESS_OVERRIDE_FAMILY_8000,
-                      ETH_ALEN);
+       const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR);
 
        /* The byte order is little endian 16 bit, meaning 214365 */
        data->hw_addr[0] = hw_addr[1];
@@ -464,6 +462,41 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
        data->hw_addr[5] = hw_addr[4];
 }
 
+static void iwl_set_hw_address_family_8000(const struct iwl_cfg *cfg,
+                                          struct iwl_nvm_data *data,
+                                          const __le16 *mac_override,
+                                          const __le16 *nvm_hw)
+{
+       const u8 *hw_addr;
+
+       if (mac_override) {
+               hw_addr = (const u8 *)(mac_override +
+                                MAC_ADDRESS_OVERRIDE_FAMILY_8000);
+
+               /* The byte order is little endian 16 bit, meaning 214365 */
+               data->hw_addr[0] = hw_addr[1];
+               data->hw_addr[1] = hw_addr[0];
+               data->hw_addr[2] = hw_addr[3];
+               data->hw_addr[3] = hw_addr[2];
+               data->hw_addr[4] = hw_addr[5];
+               data->hw_addr[5] = hw_addr[4];
+
+               if (is_valid_ether_addr(hw_addr))
+                       return;
+       }
+
+       /* take the MAC address from the OTP */
+       hw_addr = (const u8 *)(nvm_hw + HW_ADDR0_FAMILY_8000);
+       data->hw_addr[0] = hw_addr[3];
+       data->hw_addr[1] = hw_addr[2];
+       data->hw_addr[2] = hw_addr[1];
+       data->hw_addr[3] = hw_addr[0];
+
+       hw_addr = (const u8 *)(nvm_hw + HW_ADDR1_FAMILY_8000);
+       data->hw_addr[4] = hw_addr[1];
+       data->hw_addr[5] = hw_addr[0];
+}
+
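
The 2-1-4-3-6-5 shuffle above follows from the NVM storing the address as
little-endian 16-bit words; a stand-alone sketch of the same swap (nvm_to_mac()
is an illustrative name, not a driver function):

#include <stdint.h>
#include <stdio.h>

/* bytes arrive in 2-1-4-3-6-5 order, so adjacent bytes are swapped pairwise */
static void nvm_to_mac(const uint8_t nvm[6], uint8_t mac[6])
{
	mac[0] = nvm[1];
	mac[1] = nvm[0];
	mac[2] = nvm[3];
	mac[3] = nvm[2];
	mac[4] = nvm[5];
	mac[5] = nvm[4];
}

int main(void)
{
	const uint8_t nvm[6] = { 0x22, 0x11, 0x44, 0x33, 0x66, 0x55 };
	uint8_t mac[6];

	nvm_to_mac(nvm, mac);
	/* prints 11:22:33:44:55:66 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
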
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
@@ -523,7 +556,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                                rx_chains);
        } else {
                /* MAC address in family 8000 */
-               iwl_set_hw_address(cfg, data, mac_override);
+               iwl_set_hw_address_family_8000(cfg, data, mac_override, nvm_hw);
 
                iwl_init_sbands(dev, cfg, data, regulatory,
                                sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
index ea29504ac61704c39c24a117dec0a5d92aa58376..99785c892f963c7435b0048c4a097f6cae9e7808 100644 (file)
@@ -63,6 +63,7 @@
 #ifndef __iwl_op_mode_h__
 #define __iwl_op_mode_h__
 
+#include <linux/netdevice.h>
 #include <linux/debugfs.h>
 
 struct iwl_op_mode;
@@ -112,8 +113,11 @@ struct iwl_cfg;
  * @stop: stop the op_mode. Must free all the memory allocated.
  *     May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- *     HCMD this Rx responds to.
- *     This callback may sleep, it is called from a threaded IRQ handler.
+ *     HCMD this Rx responds to. Can't sleep.
+ * @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
+ *     but the higher layers need to know about it (in particular mac80211 to
+ *     be able to call the right NAPI RX functions); this function is needed
+ *     to eventually call netif_napi_add() with higher layer involvement.
  * @queue_full: notifies that a HW queue is full.
  *     Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -143,6 +147,11 @@ struct iwl_op_mode_ops {
        void (*stop)(struct iwl_op_mode *op_mode);
        int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
                  struct iwl_device_cmd *cmd);
+       void (*napi_add)(struct iwl_op_mode *op_mode,
+                        struct napi_struct *napi,
+                        struct net_device *napi_dev,
+                        int (*poll)(struct napi_struct *, int),
+                        int weight);
        void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
        void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
        bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -180,7 +189,6 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
                                  struct iwl_rx_cmd_buffer *rxb,
                                  struct iwl_device_cmd *cmd)
 {
-       might_sleep();
        return op_mode->ops->rx(op_mode, rxb, cmd);
 }
 
@@ -249,4 +257,15 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
        return op_mode->ops->exit_d0i3(op_mode);
 }
 
+static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
+                                       struct napi_struct *napi,
+                                       struct net_device *napi_dev,
+                                       int (*poll)(struct napi_struct *, int),
+                                       int weight)
+{
+       if (!op_mode->ops->napi_add)
+               return;
+       op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
+}
+
 #endif /* __iwl_op_mode_h__ */
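
A hedged sketch of what an op_mode's @napi_add hook could look like, simply
forwarding to netif_napi_add() as the kernel-doc above describes; the function
name is illustrative, this is not the actual mvm implementation, and it builds
only against the driver/kernel headers:

static void example_op_mode_napi_add(struct iwl_op_mode *op_mode,
				     struct napi_struct *napi,
				     struct net_device *napi_dev,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	/* let the net core know about the NAPI context the transport drives */
	netif_napi_add(napi_dev, napi, poll, weight);
}
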
index b761ac4822a35b1e6a8b59f952655d39f1f0cb82..d4fb5cad07ea1d36c508c25ec83450a2e80e3807 100644 (file)
@@ -345,7 +345,6 @@ static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
        struct iwl_phy_db_cmd phy_db_cmd;
        struct iwl_host_cmd cmd = {
                .id = PHY_DB_CMD,
-               .flags = CMD_SYNC,
        };
 
        IWL_DEBUG_INFO(phy_db->trans,
@@ -393,13 +392,13 @@ static int iwl_phy_db_send_all_channel_groups(
                                          entry->data);
                if (err) {
                        IWL_ERR(phy_db->trans,
-                               "Can't SEND phy_db section %d (%d), err %d",
+                               "Can't SEND phy_db section %d (%d), err %d\n",
                                type, i, err);
                        return err;
                }
 
                IWL_DEBUG_INFO(phy_db->trans,
-                              "Sent PHY_DB HCMD, type = %d num = %d",
+                              "Sent PHY_DB HCMD, type = %d num = %d\n",
                               type, i);
        }
 
@@ -451,7 +450,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
                                                 IWL_NUM_PAPD_CH_GROUPS);
        if (err) {
                IWL_ERR(phy_db->trans,
-                       "Cannot send channel specific PAPD groups");
+                       "Cannot send channel specific PAPD groups\n");
                return err;
        }
 
@@ -461,7 +460,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
                                                 IWL_NUM_TXP_CH_GROUPS);
        if (err) {
                IWL_ERR(phy_db->trans,
-                       "Cannot send channel specific TX power groups");
+                       "Cannot send channel specific TX power groups\n");
                return err;
        }
 
index 5f657c501406cc995f7f8c065f26d9983ba43ffe..4997e27672b3ae22b4176df391a5bbb3ae9e1f4d 100644 (file)
 
 /* Device NMI register */
 #define DEVICE_SET_NMI_REG 0x00a01c30
+#define DEVICE_SET_NMI_VAL 0x1
+#define DEVICE_SET_NMI_8000B_REG 0x00a01c24
+#define DEVICE_SET_NMI_8000B_VAL 0x1000000
 
 /* Shared registers (0x0..0x3ff), via target indirect or periphery */
 #define SHR_BASE       0x00a10000
@@ -348,4 +351,12 @@ enum secure_load_status_reg {
 
 #define LMPM_SECURE_TIME_OUT   (100)
 
+/* Rx FIFO */
+#define RXF_SIZE_ADDR                  (0xa00c88)
+#define RXF_SIZE_BYTE_CND_POS          (7)
+#define RXF_SIZE_BYTE_CNT_MSK          (0x3ff << RXF_SIZE_BYTE_CND_POS)
+
+#define RXF_LD_FENCE_OFFSET_ADDR       (0xa00c10)
+#define RXF_FIFO_RD_FENCE_ADDR         (0xa00c0c)
+
 #endif                         /* __iwl_prph_h__ */
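
A stand-alone sketch of how the 10-bit count field would be extracted from a
value read at RXF_SIZE_ADDR using the mask/shift pair above (the register value
below is made up):

#include <stdint.h>
#include <stdio.h>

#define RXF_SIZE_BYTE_CND_POS	(7)
#define RXF_SIZE_BYTE_CNT_MSK	(0x3ff << RXF_SIZE_BYTE_CND_POS)

int main(void)
{
	/* pretend this was read from RXF_SIZE_ADDR */
	uint32_t reg = (300u << RXF_SIZE_BYTE_CND_POS) | 0x7f;
	uint32_t count = (reg & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;

	printf("count field: %u\n", (unsigned)count);	/* prints 300 */
	return 0;
}
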
index 8cdb0dd618a6fdfcc8d57095e41974e6e22984ab..34d49e171fb4dae1f10928c931c26dc93a414e67 100644 (file)
@@ -189,10 +189,9 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
 /**
  * enum CMD_MODE - how to send the host commands?
  *
- * @CMD_SYNC: The caller will be stalled until the fw responds to the command
  * @CMD_ASYNC: Return right away and don't wait for the response
- * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- *     response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
+ *     the response. The caller needs to call iwl_free_resp when done.
  * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
  *     command queue, but after other high priority commands. valid only
  *     with CMD_ASYNC.
@@ -202,7 +201,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  *     (i.e. mark it as non-idle).
  */
 enum CMD_MODE {
-       CMD_SYNC                = 0,
        CMD_ASYNC               = BIT(0),
        CMD_WANT_SKB            = BIT(1),
        CMD_SEND_IN_RFKILL      = BIT(2),
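
With CMD_SYNC gone, a zero .flags field now means a synchronous command; a
driver-context sketch of the common pattern (not stand-alone, and
example_sync_query() is an illustrative name):

static int example_sync_query(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_WANT_SKB,	/* still synchronous, but keep the response */
	};
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	/* ... inspect cmd.resp_pkt here ... */

	iwl_free_resp(&cmd);	/* required whenever CMD_WANT_SKB is set */
	return 0;
}
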
@@ -427,7 +425,7 @@ struct iwl_trans;
  * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
  *     If RFkill is asserted in the middle of a SYNC host command, it must
  *     return -ERFKILL straight away.
- *     May sleep only if CMD_SYNC is set
+ *     May sleep only if CMD_ASYNC is not set
  * @tx: send an skb
  *     Must be atomic
  * @reclaim: free packet until ssn. Returns a list of freed packets.
@@ -437,8 +435,7 @@ struct iwl_trans;
  *     this one. The op_mode must not configure the HCMD queue. May sleep.
  * @txq_disable: de-configure a Tx queue to send AMPDUs
  *     Must be atomic
- * @wait_tx_queue_empty: wait until all tx queues are empty
- *     May sleep
+ * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
  * @dbgfs_register: add the dbgfs files under this directory. Files will be
  *     automatically deleted.
  * @write8: write a u8 to a register at offset ofs from the BAR
@@ -464,6 +461,11 @@ struct iwl_trans;
  * @unref: release a reference previously taken with @ref. Note that
  *     initially the reference count is 1, making an initial @unref
  *     necessary to allow low power states.
+ * @dump_data: fill a data dump with debug data, maybe containing last
+ *     TX'ed commands and similar. When called with a NULL buffer and
+ *     zero buffer length, provide only the (estimated) required buffer
+ *     length. Return the used buffer length.
+ *     Note that the transport must fill in the proper file headers.
  */
 struct iwl_trans_ops {
 
@@ -471,6 +473,8 @@ struct iwl_trans_ops {
        void (*op_mode_leave)(struct iwl_trans *iwl_trans);
        int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
                        bool run_in_rfkill);
+       int (*update_sf)(struct iwl_trans *trans,
+                        struct iwl_sf_region *st_fwrd_space);
        void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
        void (*stop_device)(struct iwl_trans *trans);
 
@@ -490,7 +494,7 @@ struct iwl_trans_ops {
        void (*txq_disable)(struct iwl_trans *trans, int queue);
 
        int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
-       int (*wait_tx_queue_empty)(struct iwl_trans *trans);
+       int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
 
        void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
        void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -512,6 +516,10 @@ struct iwl_trans_ops {
                              u32 value);
        void (*ref)(struct iwl_trans *trans);
        void (*unref)(struct iwl_trans *trans);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       u32 (*dump_data)(struct iwl_trans *trans, void *buf, u32 buflen);
+#endif
 };
 
 /**
@@ -630,6 +638,17 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
        return trans->ops->start_fw(trans, fw, run_in_rfkill);
 }
 
+static inline int iwl_trans_update_sf(struct iwl_trans *trans,
+                                     struct iwl_sf_region *st_fwrd_space)
+{
+       might_sleep();
+
+       if (trans->ops->update_sf)
+               return trans->ops->update_sf(trans, st_fwrd_space);
+
+       return 0;
+}
+
 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 {
        might_sleep();
@@ -665,6 +684,16 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
                trans->ops->unref(trans);
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline u32 iwl_trans_dump_data(struct iwl_trans *trans,
+                                     void *buf, u32 buflen)
+{
+       if (!trans->ops->dump_data)
+               return 0;
+       return trans->ops->dump_data(trans, buf, buflen);
+}
+#endif
+
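
A driver-context sketch of the two-pass pattern the @dump_data kernel-doc
describes - size the dump with a NULL buffer first, then fill it.
example_grab_dump() is an illustrative name and, like the accessor above, this
only exists under CONFIG_IWLWIFI_DEBUGFS:

static void *example_grab_dump(struct iwl_trans *trans, u32 *used)
{
	u32 needed = iwl_trans_dump_data(trans, NULL, 0);
	void *buf;

	if (!needed)
		return NULL;

	buf = kzalloc(needed, GFP_KERNEL);
	if (!buf)
		return NULL;

	/* second pass fills the buffer, including the file headers */
	*used = iwl_trans_dump_data(trans, buf, needed);
	return buf;	/* caller frees with kfree() when done */
}
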
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
                                     struct iwl_host_cmd *cmd)
 {
@@ -678,7 +707,7 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
                return -EIO;
 
        if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
-               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+               IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
                return -EIO;
        }
 
@@ -720,7 +749,7 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
                return -EIO;
 
        if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
-               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+               IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
        return trans->ops->tx(trans, skb, dev_cmd, queue);
 }
@@ -729,7 +758,7 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
                                     int ssn, struct sk_buff_head *skbs)
 {
        if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
-               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+               IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
        trans->ops->reclaim(trans, queue, ssn, skbs);
 }
@@ -746,7 +775,7 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
        might_sleep();
 
        if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
-               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+               IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
        trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
                                 frame_limit, ssn);
@@ -759,12 +788,13 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
                             IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
 }
 
-static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
+static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
+                                               u32 txq_bm)
 {
        if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
-               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+               IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
-       return trans->ops->wait_tx_queue_empty(trans);
+       return trans->ops->wait_tx_queue_empty(trans, txq_bm);
 }
 
 static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
index ccdd3b7c4cce38fb10caf66326e7c439f0c079ee..c30d7f64ec1e4e1c47a635e091c4e503c2ceaa83 100644 (file)
@@ -3,8 +3,9 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
 iwlmvm-y += scan.o time-event.o rs.o
 iwlmvm-y += power.o coex.o
-iwlmvm-y += led.o tt.o offloading.o
+iwlmvm-y += tt.o offloading.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
+iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
index fa858d548d13c0bd794b98dc4da2053893b460dc..c8c3b38228f02f9768b780a7bdd31273f49e9541 100644 (file)
@@ -104,12 +104,9 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
 #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD   (-65)
 #define BT_ANTENNA_COUPLING_THRESHOLD          (30)
 
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
 {
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-               return 0;
-
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
                                    sizeof(struct iwl_bt_coex_prio_tbl_cmd),
                                    &iwl_bt_prio_tbl);
 }
@@ -127,10 +124,10 @@ const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
 };
 
 static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
-       cpu_to_le32(0xf0f0f0f0),
-       cpu_to_le32(0xc0c0c0c0),
-       cpu_to_le32(0xfcfcfcfc),
-       cpu_to_le32(0xff00ff00),
+       cpu_to_le32(0xf0f0f0f0), /* 50% */
+       cpu_to_le32(0xc0c0c0c0), /* 25% */
+       cpu_to_le32(0xfcfcfcfc), /* 75% */
+       cpu_to_le32(0xfefefefe), /* 87.5% */
 };
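
The percentages added in the comments are simply the fraction of bits set in
each 32-bit boost pattern, which a stand-alone check confirms:

#include <stdint.h>
#include <stdio.h>

static unsigned popcount32(uint32_t v)
{
	unsigned n = 0;

	for (; v; v &= v - 1)	/* clear the lowest set bit each iteration */
		n++;
	return n;
}

int main(void)
{
	const uint32_t boost[] = { 0xf0f0f0f0, 0xc0c0c0c0, 0xfcfcfcfc, 0xfefefefe };

	for (unsigned i = 0; i < 4; i++)
		printf("0x%08x -> %.1f%%\n", boost[i],
		       popcount32(boost[i]) * 100.0 / 32);
	return 0;
}
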
 
 static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
@@ -303,8 +300,8 @@ static const __le64 iwl_ci_mask[][3] = {
 };
 
 static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
-       cpu_to_le32(0x22002200),
-       cpu_to_le32(0x33113311),
+       cpu_to_le32(0x28412201),
+       cpu_to_le32(0x11118451),
 };
 
 struct corunning_block_luts {
@@ -568,13 +565,13 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                .id = BT_CONFIG,
                .len = { sizeof(*bt_cmd), },
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-               .flags = CMD_SYNC,
        };
        int ret;
        u32 flags;
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-               return 0;
+       ret = iwl_send_bt_prio_tbl(mvm);
+       if (ret)
+               return ret;
 
        bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
        if (!bt_cmd)
@@ -582,10 +579,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        cmd.data[0] = bt_cmd;
 
        bt_cmd->max_kill = 5;
-       bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
-       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
-       bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
-       bt_cmd->bt4_tx_rx_max_freq0 = 15,
+       bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+       bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+       bt_cmd->bt4_tx_rx_max_freq0 = 15;
+       bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
+       bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
 
        flags = iwlwifi_mod_params.bt_coex_active ?
                        BT_COEX_NW : BT_COEX_DISABLE;
@@ -611,14 +610,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
        if (IWL_MVM_BT_COEX_CORUNNING) {
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-                                                   BT_VALID_CORUN_LUT_40);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+                                                    BT_VALID_CORUN_LUT_40);
                bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
        }
 
        if (IWL_MVM_BT_COEX_MPLUT) {
                bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
        }
 
        if (mvm->cfg->bt_shared_single_ant)
@@ -663,7 +662,6 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
                .data[0] = &bt_cmd,
                .len = { sizeof(*bt_cmd), },
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-               .flags = CMD_SYNC,
        };
        int ret = 0;
 
@@ -717,7 +715,8 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
        return ret;
 }
 
-int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
+static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
+                                      bool enable)
 {
        struct iwl_bt_coex_cmd *bt_cmd;
        /* Send ASYNC since this can be sent from an atomic context */
@@ -735,8 +734,7 @@ int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
                return 0;
 
        /* nothing to do */
-       if (mvmsta->bt_reduced_txpower_dbg ||
-           mvmsta->bt_reduced_txpower == enable)
+       if (mvmsta->bt_reduced_txpower == enable)
                return 0;
 
        bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -803,23 +801,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
+               /* count BSS vifs */
+               data->num_bss_ifaces++;
                /* default smps_mode for BSS / P2P client is AUTOMATIC */
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
-               data->num_bss_ifaces++;
-
-               /*
-                * Count unassoc BSSes, relax SMSP constraints
-                * and disable reduced Tx Power
-                */
-               if (!vif->bss_conf.assoc) {
-                       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-                                           smps_mode);
-                       if (iwl_mvm_bt_coex_reduced_txp(mvm,
-                                                       mvmvif->ap_sta_id,
-                                                       false))
-                               IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-                       return;
-               }
                break;
        case NL80211_IFTYPE_AP:
                /* default smps_mode for AP / GO is OFF */
@@ -845,8 +830,12 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
                /* ... relax constraints and disable rssi events */
                iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
                                    smps_mode);
-               if (vif->type == NL80211_IFTYPE_STATION)
+               data->reduced_tx_power = false;
+               if (vif->type == NL80211_IFTYPE_STATION) {
+                       iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+                                                   false);
                        iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+               }
                return;
        }
 
@@ -857,6 +846,11 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
                smps_mode = vif->type == NL80211_IFTYPE_AP ?
                                IEEE80211_SMPS_OFF :
                                IEEE80211_SMPS_DYNAMIC;
+
+       /* relax SMPS constraints for the next association */
+       if (!vif->bss_conf.assoc)
+               smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
        IWL_DEBUG_COEX(data->mvm,
                       "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
                       mvmvif->id, data->notif->bt_status, bt_activity_grading,
@@ -903,22 +897,18 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
                /* if secondary is not NULL, it might be a GO */
                data->secondary = chanctx_conf;
 
-       /* don't reduce the Tx power if in loose scheme */
+       /*
+        * don't reduce the Tx power if any of these is true:
+        *  we are in the LOOSE coex scheme
+        *  single shared antenna product
+        *  BT is not active
+        *  we are not associated
+        */
        if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
-           mvm->cfg->bt_shared_single_ant) {
-               data->reduced_tx_power = false;
-               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-               return;
-       }
-
-       /* reduced Txpower only if BT is on, so ...*/
-       if (!data->notif->bt_status) {
-               /* ... cancel reduced Tx power ... */
-               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
-                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+           mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
+           !data->notif->bt_status) {
                data->reduced_tx_power = false;
-
-               /* ... and there is no need to get reports on RSSI any more. */
+               iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
                iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
                return;
        }
@@ -1022,9 +1012,9 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
 
        /* Don't spam the fw with the same command over and over */
        if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
-               if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+               if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
                                         sizeof(cmd), &cmd))
-                       IWL_ERR(mvm, "Failed to send BT_CI cmd");
+                       IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
                memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
        }
 
@@ -1039,7 +1029,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
                IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
-/* upon association, the fw will send in BT Coex notification */
 int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb,
                             struct iwl_device_cmd *dev_cmd)
@@ -1215,6 +1204,17 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
        return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
 }
 
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+                                   enum ieee80211_band band)
+{
+       u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+
+       if (band != IEEE80211_BAND_2GHZ)
+               return false;
+
+       return bt_activity >= BT_LOW_TRAFFIC;
+}
+
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac)
 {
@@ -1249,9 +1249,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-               return;
-
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
@@ -1270,7 +1267,6 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
                .id = BT_CONFIG,
                .len = { sizeof(*bt_cmd), },
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-               .flags = CMD_SYNC,
        };
 
        if (!IWL_MVM_BT_COEX_CORUNNING)
index e56f5a0edf855331a1411e76406a143176b5e9d5..645b3cfc29a5e5c0bcb10f6c9eaded6a1827e5ed 100644 (file)
@@ -193,8 +193,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                        wkc.wep_key.key_offset = data->wep_key_idx;
                }
 
-               ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
-                                          sizeof(wkc), &wkc);
+               ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
                data->error = ret != 0;
 
                mvm->ptk_ivlen = key->iv_len;
@@ -341,7 +340,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
        struct iwl_host_cmd cmd = {
                .id = WOWLAN_PATTERNS,
                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-               .flags = CMD_SYNC,
        };
        int i, err;
 
@@ -518,7 +516,6 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
                .id = REMOTE_WAKE_CONFIG_CMD,
                .len = { sizeof(*cfg), },
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-               .flags = CMD_SYNC,
        };
        int ret;
 
@@ -666,10 +663,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        if (WARN_ON(!vif->bss_conf.assoc))
                return -EINVAL;
-       /* hack */
-       vif->bss_conf.assoc = false;
+
        ret = iwl_mvm_mac_ctxt_add(mvm, vif);
-       vif->bss_conf.assoc = true;
        if (ret)
                return ret;
 
@@ -705,7 +700,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return ret;
        rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
 
-       ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+       ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
        if (ret)
                return ret;
 
@@ -719,7 +714,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        for (i = 1; i < MAX_BINDINGS; i++)
                quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
                                   sizeof(quota_cmd), &quota_cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
@@ -739,15 +734,13 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
        };
        struct iwl_host_cmd cmd = {
                .id = NON_QOS_TX_COUNTER_CMD,
-               .flags = CMD_SYNC | CMD_WANT_SKB,
+               .flags = CMD_WANT_SKB,
        };
        int err;
        u32 size;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
-               cmd.data[0] = &query_cmd;
-               cmd.len[0] = sizeof(query_cmd);
-       }
+       cmd.data[0] = &query_cmd;
+       cmd.len[0] = sizeof(query_cmd);
 
        err = iwl_mvm_send_cmd(mvm, &cmd);
        if (err)
@@ -758,10 +751,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
                err = -EINVAL;
        } else {
                err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
-               /* new API returns next, not last-used seqno */
-               if (mvm->fw->ucode_capa.flags &
-                               IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
-                       err = (u16) (err - 0x10);
+               /* firmware returns next, not last-used seqno */
+               err = (u16) (err - 0x10);
        }
 
        iwl_free_resp(&cmd);
@@ -785,11 +776,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        mvmvif->seqno_valid = false;
 
-       if (!(mvm->fw->ucode_capa.flags &
-                       IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
-               return;
-
-       if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
+       if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
                                 sizeof(query_cmd), &query_cmd))
                IWL_ERR(mvm, "failed to set non-QoS seqno\n");
 }
@@ -804,7 +791,7 @@ iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
        if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
                cmd_len = sizeof(*cmd);
 
-       return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
                                    cmd_len, cmd);
 }
 
@@ -833,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        };
        struct iwl_host_cmd d3_cfg_cmd = {
                .id = D3_CONFIG_CMD,
-               .flags = CMD_SYNC | CMD_WANT_SKB,
+               .flags = CMD_WANT_SKB,
                .data[0] = &d3_cfg_cmd_data,
                .len[0] = sizeof(d3_cfg_cmd_data),
        };
@@ -983,7 +970,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
                if (key_data.use_rsc_tsc) {
                        struct iwl_host_cmd rsc_tsc_cmd = {
                                .id = WOWLAN_TSC_RSC_PARAM,
-                               .flags = CMD_SYNC,
                                .data[0] = key_data.rsc_tsc,
                                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
                                .len[0] = sizeof(*key_data.rsc_tsc),
@@ -997,7 +983,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
                if (key_data.use_tkip) {
                        ret = iwl_mvm_send_cmd_pdu(mvm,
                                                   WOWLAN_TKIP_PARAM,
-                                                  CMD_SYNC, sizeof(tkip_cmd),
+                                                  0, sizeof(tkip_cmd),
                                                   &tkip_cmd);
                        if (ret)
                                goto out;
@@ -1014,8 +1000,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
                        kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
 
                        ret = iwl_mvm_send_cmd_pdu(mvm,
-                                                  WOWLAN_KEK_KCK_MATERIAL,
-                                                  CMD_SYNC,
+                                                  WOWLAN_KEK_KCK_MATERIAL, 0,
                                                   sizeof(kek_kck_cmd),
                                                   &kek_kck_cmd);
                        if (ret)
@@ -1031,7 +1016,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        if (ret)
                goto out;
 
-       ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC);
+       ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0);
        if (ret)
                goto out;
 
@@ -1043,7 +1028,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        if (ret)
                goto out;
 
-       ret = iwl_mvm_power_update_mac(mvm, vif);
+       ret = iwl_mvm_power_update_mac(mvm);
        if (ret)
                goto out;
 
@@ -1082,6 +1067,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       if (iwl_mvm_is_d0i3_supported(mvm)) {
+               mutex_lock(&mvm->d0i3_suspend_mutex);
+               __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+               mutex_unlock(&mvm->d0i3_suspend_mutex);
+               return 0;
+       }
+
        return __iwl_mvm_suspend(hw, wowlan, false);
 }
 
@@ -1277,7 +1271,7 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
 }
 
 static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
-                                  struct iwl_wowlan_status_v6 *status)
+                                  struct iwl_wowlan_status *status)
 {
        union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
 
@@ -1294,7 +1288,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
 }
 
 struct iwl_mvm_d3_gtk_iter_data {
-       struct iwl_wowlan_status_v6 *status;
+       struct iwl_wowlan_status *status;
        void *last_gtk;
        u32 cipher;
        bool find_phase, unhandled_cipher;
@@ -1370,7 +1364,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
 
 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif,
-                                         struct iwl_wowlan_status_v6 *status)
+                                         struct iwl_wowlan_status *status)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_d3_gtk_iter_data gtkdata = {
@@ -1465,10 +1459,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
        } err_info;
        struct iwl_host_cmd cmd = {
                .id = WOWLAN_GET_STATUSES,
-               .flags = CMD_SYNC | CMD_WANT_SKB,
+               .flags = CMD_WANT_SKB,
        };
        struct iwl_wowlan_status_data status;
-       struct iwl_wowlan_status_v6 *status_v6;
+       struct iwl_wowlan_status *fw_status;
        int ret, len, status_size, i;
        bool keep;
        struct ieee80211_sta *ap_sta;
@@ -1491,7 +1485,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
        }
 
        /* only for tracing for now */
-       ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+       ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
        if (ret)
                IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
 
@@ -1505,10 +1499,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
        if (!cmd.resp_pkt)
                goto out_unlock;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
-               status_size = sizeof(struct iwl_wowlan_status_v6);
-       else
-               status_size = sizeof(struct iwl_wowlan_status_v4);
+       status_size = sizeof(*fw_status);
 
        len = iwl_rx_packet_payload_len(cmd.resp_pkt);
        if (len < status_size) {
@@ -1516,35 +1507,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                goto out_free_resp;
        }
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
-               status_v6 = (void *)cmd.resp_pkt->data;
-
-               status.pattern_number = le16_to_cpu(status_v6->pattern_number);
-               for (i = 0; i < 8; i++)
-                       status.qos_seq_ctr[i] =
-                               le16_to_cpu(status_v6->qos_seq_ctr[i]);
-               status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
-               status.wake_packet_length =
-                       le32_to_cpu(status_v6->wake_packet_length);
-               status.wake_packet_bufsize =
-                       le32_to_cpu(status_v6->wake_packet_bufsize);
-               status.wake_packet = status_v6->wake_packet;
-       } else {
-               struct iwl_wowlan_status_v4 *status_v4;
-               status_v6 = NULL;
-               status_v4 = (void *)cmd.resp_pkt->data;
-
-               status.pattern_number = le16_to_cpu(status_v4->pattern_number);
-               for (i = 0; i < 8; i++)
-                       status.qos_seq_ctr[i] =
-                               le16_to_cpu(status_v4->qos_seq_ctr[i]);
-               status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
-               status.wake_packet_length =
-                       le32_to_cpu(status_v4->wake_packet_length);
-               status.wake_packet_bufsize =
-                       le32_to_cpu(status_v4->wake_packet_bufsize);
-               status.wake_packet = status_v4->wake_packet;
-       }
+       fw_status = (void *)cmd.resp_pkt->data;
+
+       status.pattern_number = le16_to_cpu(fw_status->pattern_number);
+       for (i = 0; i < 8; i++)
+               status.qos_seq_ctr[i] =
+                       le16_to_cpu(fw_status->qos_seq_ctr[i]);
+       status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
+       status.wake_packet_length =
+               le32_to_cpu(fw_status->wake_packet_length);
+       status.wake_packet_bufsize =
+               le32_to_cpu(fw_status->wake_packet_bufsize);
+       status.wake_packet = fw_status->wake_packet;
 
        if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
                IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@@ -1571,7 +1545,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 
        iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
 
-       keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+       keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
 
        iwl_free_resp(&cmd);
        return keep;
@@ -1674,6 +1648,19 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
+       if (iwl_mvm_is_d0i3_supported(mvm)) {
+               bool exit_now;
+
+               mutex_lock(&mvm->d0i3_suspend_mutex);
+               __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+               exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+                                               &mvm->d0i3_suspend_flags);
+               mutex_unlock(&mvm->d0i3_suspend_mutex);
+               if (exit_now)
+                       _iwl_mvm_exit_d0i3(mvm);
+               return 0;
+       }
+
        return __iwl_mvm_resume(mvm, false);
 }
 
index 9b59e1d7ae71ea888973992cb88363ba2371fdad..2e90ff795c13212d6d8ca7be35df935a907d6b3c 100644 (file)
@@ -103,10 +103,6 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
                IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
                dbgfs_pm->tx_data_timeout = val;
                break;
-       case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
-               IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
-               dbgfs_pm->disable_power_off = val;
-               break;
        case MVM_DEBUGFS_PM_LPRX_ENA:
                IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
                dbgfs_pm->lprx_ena = val;
@@ -154,12 +150,6 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
                if (sscanf(buf + 16, "%d", &val) != 1)
                        return -EINVAL;
                param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
-       } else if (!strncmp("disable_power_off=", buf, 18) &&
-                  !(mvm->fw->ucode_capa.flags &
-                    IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
-               if (sscanf(buf + 18, "%d", &val) != 1)
-                       return -EINVAL;
-               param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
        } else if (!strncmp("lprx=", buf, 5)) {
                if (sscanf(buf + 5, "%d", &val) != 1)
                        return -EINVAL;
@@ -185,7 +175,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
 
        mutex_lock(&mvm->mutex);
        iwl_dbgfs_update_pm(mvm, vif, param, val);
-       ret = iwl_mvm_power_update_mac(mvm, vif);
+       ret = iwl_mvm_power_update_mac(mvm);
        mutex_unlock(&mvm->mutex);
 
        return ret ?: count;
@@ -272,10 +262,9 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
                        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
 
                        pos += scnprintf(buf+pos, bufsz-pos,
-                                        "ap_sta_id %d - reduced Tx power %d force %d\n",
+                                        "ap_sta_id %d - reduced Tx power %d\n",
                                         ap_sta_id,
-                                        mvm_sta->bt_reduced_txpower,
-                                        mvm_sta->bt_reduced_txpower_dbg);
+                                        mvm_sta->bt_reduced_txpower);
                }
        }
 
@@ -293,41 +282,6 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
-                                          char *buf, size_t count,
-                                          loff_t *ppos)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_mvm *mvm = mvmvif->mvm;
-       struct iwl_mvm_sta *mvmsta;
-       bool reduced_tx_power;
-       int ret;
-
-       if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
-               return -ENOTCONN;
-
-       if (strtobool(buf, &reduced_tx_power) != 0)
-               return -EINVAL;
-
-       mutex_lock(&mvm->mutex);
-
-       mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
-       if (IS_ERR_OR_NULL(mvmsta)) {
-               mutex_unlock(&mvm->mutex);
-               return -ENOTCONN;
-       }
-
-       mvmsta->bt_reduced_txpower_dbg = false;
-       ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
-                                         reduced_tx_power);
-       if (!ret)
-               mvmsta->bt_reduced_txpower_dbg = true;
-
-       mutex_unlock(&mvm->mutex);
-
-       return ret ? : count;
-}
-
 static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
                                enum iwl_dbgfs_bf_mask param, int value)
 {
@@ -462,9 +416,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
        mutex_lock(&mvm->mutex);
        iwl_dbgfs_update_bf(vif, param, value);
        if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
-               ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+               ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
        else
-               ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
+               ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
        mutex_unlock(&mvm->mutex);
 
        return ret ?: count;
@@ -568,7 +522,6 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -592,8 +545,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
        }
 
-       if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
-           iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+       if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
            ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
             (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
              mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
@@ -601,7 +553,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                         S_IRUSR);
 
        MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
-       MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
                                 S_IRUSR | S_IWUSR);
 
index 1b52deea60812e4f6ac42c94412b1ecf518f44f7..29ca72695eaa60e0f53121dd45f1d080cdefba1d 100644 (file)
@@ -65,9 +65,8 @@
 #include "mvm.h"
 #include "sta.h"
 #include "iwl-io.h"
-#include "iwl-prph.h"
 #include "debugfs.h"
-#include "fw-error-dump.h"
+#include "iwl-fw-error-dump.h"
 
 static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
                                        size_t count, loff_t *ppos)
@@ -136,9 +135,6 @@ static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
 
        file->private_data = mvm->fw_error_dump;
        mvm->fw_error_dump = NULL;
-       kfree(mvm->fw_error_sram);
-       mvm->fw_error_sram = NULL;
-       mvm->fw_error_sram_len = 0;
        ret = 0;
 
 out:
@@ -684,7 +680,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
                mvm->restart_fw++;
 
        /* take the return value to make compiler happy - it will fail anyway */
-       ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
+       ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
 
        mutex_unlock(&mvm->mutex);
 
@@ -694,7 +690,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
 static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
                                      size_t count, loff_t *ppos)
 {
-       iwl_write_prph(mvm->trans, DEVICE_SET_NMI_REG, 1);
+       iwl_force_nmi(mvm->trans);
 
        return count;
 }
@@ -841,7 +837,7 @@ static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
        /* send updated bcast filtering configuration */
        if (mvm->dbgfs_bcast_filtering.override &&
            iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
-               err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+               err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
                                           sizeof(cmd), &cmd);
        mutex_unlock(&mvm->mutex);
 
@@ -913,7 +909,7 @@ static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
        /* send updated bcast filtering configuration */
        if (mvm->dbgfs_bcast_filtering.override &&
            iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
-               err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+               err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
                                           sizeof(cmd), &cmd);
        mutex_unlock(&mvm->mutex);
 
@@ -1004,6 +1000,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
        PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
        PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
        PRINT_MVM_REF(IWL_MVM_REF_USER);
+       PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
@@ -1108,9 +1105,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
 
 static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
-        .open = iwl_dbgfs_fw_error_dump_open,
-        .read = iwl_dbgfs_fw_error_dump_read,
-        .release = iwl_dbgfs_fw_error_dump_release,
+       .open = iwl_dbgfs_fw_error_dump_open,
+       .read = iwl_dbgfs_fw_error_dump_read,
+       .release = iwl_dbgfs_fw_error_dump_release,
 };
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
@@ -1138,9 +1135,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
-               MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
-                                    S_IRUSR | S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+                            S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
index 21877e5966a8093d6780d3297944c1a214dc55e3..5fe82c29c8ad07bcb7bab43726a4e09b136d8b53 100644 (file)
@@ -141,7 +141,8 @@ enum iwl_bt_coex_lut_type {
        BT_COEX_TX_DIS_LUT,
 
        BT_COEX_MAX_LUT,
-};
+       BT_COEX_INVALID_LUT = 0xff,
+}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
 
 #define BT_COEX_LUT_SIZE (12)
 #define BT_COEX_CORUN_LUT_SIZE (32)
@@ -154,19 +155,23 @@ enum iwl_bt_coex_lut_type {
  * @flags:&enum iwl_bt_coex_flags
  * @max_kill:
  * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @bt4_antenna_isolation:
- * @bt4_antenna_isolation_thr:
- * @bt4_tx_tx_delta_freq_thr:
- * @bt4_tx_rx_max_freq0:
- * @bt_prio_boost:
+ * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ *     should be set by default
+ * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ *     should be set by default
+ * @bt4_antenna_isolation: antenna isolation
+ * @bt4_antenna_isolation_thr: antenna threshold value
+ * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
+ * @bt4_tx_rx_max_freq0: TxRx max frequency
+ * @bt_prio_boost: BT priority boost registers
  * @wifi_tx_prio_boost: SW boost of wifi tx priority
  * @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @decision_lut:
- * @bt4_multiprio_lut:
- * @bt4_corun_lut20:
- * @bt4_corun_lut40:
+ * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
+ * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
+ * @decision_lut: PTA decision LUT, per Prio-Ch
+ * @bt4_multiprio_lut: multi priority LUT configuration
+ * @bt4_corun_lut20: co-running 20 MHz LUT configuration
+ * @bt4_corun_lut40: co-running 40 MHz LUT configuration
  * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
  *
  * The structure is used for the BT_COEX command.
@@ -175,7 +180,8 @@ struct iwl_bt_coex_cmd {
        __le32 flags;
        u8 max_kill;
        u8 bt_reduced_tx_power;
-       u8 reserved[2];
+       u8 override_primary_lut;
+       u8 override_secondary_lut;
 
        u8 bt4_antenna_isolation;
        u8 bt4_antenna_isolation_thr;
@@ -194,7 +200,7 @@ struct iwl_bt_coex_cmd {
        __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
 
        __le32 valid_bit_msk;
-} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+} __packed; /* BT_COEX_CMD_API_S_VER_5 */
 
 /**
  * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
@@ -282,7 +288,7 @@ enum iwl_bt_activity_grading {
        BT_ON_NO_CONNECTION     = 1,
        BT_LOW_TRAFFIC          = 2,
        BT_HIGH_TRAFFIC         = 3,
-};
+}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
 
 /**
  * struct iwl_bt_coex_profile_notif - notification about BT coex
@@ -310,7 +316,7 @@ struct iwl_bt_coex_profile_notif {
        __le32 primary_ch_lut;
        __le32 secondary_ch_lut;
        __le32 bt_activity_grading;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
 
 enum iwl_bt_coex_prio_table_event {
        BT_COEX_PRIO_TBL_EVT_INIT_CALIB1                = 0,
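The kill_ack_msk/kill_cts_msk documentation added above spells out the bit convention: a set bit lets the corresponding ACK/CTS be transmitted, a cleared bit kills its Tx. A minimal sketch of reading one bit under that convention (helper name and usage are illustrative only, not driver code; the usual kernel headers such as linux/types.h and linux/bitops.h are assumed):

static bool iwl_ack_tx_allowed(__le32 kill_ack_msk, unsigned int bit)
{
	/* Per the documented convention: 1 - Tx ACK, 0 - kill Tx of ACK */
	return le32_to_cpu(kill_ack_msk) & BIT(bit);
}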
index 10fcc1a79ebddf3087d7de7c2c29389849a425fa..13696fe419b778c68c9d72d7a289a3dd3c453b39 100644 (file)
@@ -345,21 +345,6 @@ enum iwl_wowlan_wakeup_reason {
        IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET             = BIT(12),
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
-struct iwl_wowlan_status_v4 {
-       __le64 replay_ctr;
-       __le16 pattern_number;
-       __le16 non_qos_seq_ctr;
-       __le16 qos_seq_ctr[8];
-       __le32 wakeup_reasons;
-       __le32 rekey_status;
-       __le32 num_of_gtk_rekeys;
-       __le32 transmitted_ndps;
-       __le32 received_beacons;
-       __le32 wake_packet_length;
-       __le32 wake_packet_bufsize;
-       u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
-
 struct iwl_wowlan_gtk_status {
        u8 key_index;
        u8 reserved[3];
@@ -368,7 +353,7 @@ struct iwl_wowlan_gtk_status {
        struct iwl_wowlan_rsc_tsc_params_cmd rsc;
 } __packed;
 
-struct iwl_wowlan_status_v6 {
+struct iwl_wowlan_status {
        struct iwl_wowlan_gtk_status gtk;
        __le64 replay_ctr;
        __le16 pattern_number;
index 39148b5bb33262596e1dea348c1ae32c9a2c9166..8bb5b94bf9639689fa6445cd046f97eccbcec834 100644 (file)
@@ -334,7 +334,7 @@ enum {
  */
 struct iwl_lq_cmd {
        u8 sta_id;
-       u8 reserved1;
+       u8 reduced_tpc;
        u16 control;
        /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
        u8 flags;
index 9426905de6b283dc0230cf51d5a694478da7797a..6959fda3fe09d09e34d5fe19c7ded403fba79c37 100644 (file)
@@ -169,8 +169,12 @@ enum iwl_scan_type {
        SCAN_TYPE_DISCOVERY_FORCED      = 6,
 }; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
 
-/* Maximal number of channels to scan */
-#define MAX_NUM_SCAN_CHANNELS 0x24
+/**
+ * Maximal number of channels to scan.
+ * It should be equal to:
+ * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
+ */
+#define MAX_NUM_SCAN_CHANNELS 50
 
 /**
  * struct iwl_scan_cmd - scan request command
@@ -183,9 +187,9 @@ enum iwl_scan_type {
  *     this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *     bits 0-19: beacon interal in usecs (suspend before executing)
+ *     bits 0-19: beacon interval in TUs (suspend before executing)
  *     bits 20-23: reserved
  *     bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +387,8 @@ enum scan_framework_client {
  * @quiet_plcp_th:     quiet channel num of packets threshold
  * @good_CRC_th:       passive to active promotion threshold
  * @rx_chain:          RXON rx chain.
- * @max_out_time:      max uSec to be out of assoceated channel
- * @suspend_time:      pause scan this long when returning to service channel
+ * @max_out_time:      max TUs to be out of the associated channel
+ * @suspend_time:      pause scan for this many TUs when returning to service channel
  * @flags:             RXON flags
  * @filter_flags:      RXONfilter
  * @tx_cmd:            tx command for active scan; for 2GHz and for 5GHz.
@@ -534,13 +538,16 @@ struct iwl_scan_offload_schedule {
  *
  * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
  * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
- *     on A band.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100 msec - a typical
+ *     beacon period. Finding channel activity in this mode is not guaranteed.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200 msec.
+ *     Assuming a 100 msec beacon period, finding channel activity is guaranteed.
  */
 enum iwl_scan_offload_flags {
        IWL_SCAN_OFFLOAD_FLAG_PASS_ALL          = BIT(0),
        IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL    = BIT(2),
-       IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN       = BIT(3),
+       IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE    = BIT(5),
+       IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
 };
 
 /**
@@ -563,17 +570,24 @@ enum iwl_scan_offload_compleate_status {
        IWL_SCAN_OFFLOAD_ABORTED        = 2,
 };
 
+enum iwl_scan_ebs_status {
+       IWL_SCAN_EBS_SUCCESS,
+       IWL_SCAN_EBS_FAILED,
+       IWL_SCAN_EBS_CHAN_NOT_FOUND,
+};
+
 /**
  * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
  * @last_schedule_line:                last schedule line executed (fast or regular)
  * @last_schedule_iteration:   last scan iteration executed before scan abort
  * @status:                    enum iwl_scan_offload_compleate_status
+ * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
  */
 struct iwl_scan_offload_complete {
        u8 last_schedule_line;
        u8 last_schedule_iteration;
        u8 status;
-       u8 reserved;
+       u8 ebs_status;
 } __packed;
 
 /**
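Note that this file's scan timing fields (max_out_time, suspend_time) are now expressed in TUs rather than microseconds. A minimal conversion sketch, assuming the standard 802.11 definition of 1 TU = 1024 usec (helper names are illustrative, not part of the driver):

static inline u32 iwl_tu_to_usec(u32 tu)
{
	return tu * 1024;	/* 1 TU = 1024 usec per IEEE 802.11 */
}

static inline u32 iwl_usec_to_tu(u32 usec)
{
	return usec / 1024;
}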
index d636478672626e9436c12dc2313d3288d94303ed..39cebee8016feaab62f005e5e843447784594429 100644 (file)
@@ -255,22 +255,19 @@ struct iwl_mvm_keyinfo {
 } __packed;
 
 /**
- * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
+ * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
  * ( REPLY_ADD_STA = 0x18 )
  * @add_modify: 1: modify existing, 0: add new station
- * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
- * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
- *     sent
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ *     AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
  * @mac_id_n_color: the Mac context this station belongs to
  * @addr[ETH_ALEN]: station's MAC address
  * @sta_id: index of station in uCode's station table
  * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
  *     alone. 1 - modify, 0 - don't change.
- * @key: look at %iwl_mvm_keyinfo
  * @station_flags: look at %iwl_sta_flags
  * @station_flags_msk: what of %station_flags have changed
- * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
- *     AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
  * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
  *     Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
  *     add_immediate_ba_ssn.
@@ -294,40 +291,7 @@ struct iwl_mvm_keyinfo {
  * ADD_STA sets up the table entry for one station, either creating a new
  * entry, or modifying a pre-existing one.
  */
-struct iwl_mvm_add_sta_cmd_v5 {
-       u8 add_modify;
-       u8 unicast_tx_key_id;
-       u8 multicast_tx_key_id;
-       u8 reserved1;
-       __le32 mac_id_n_color;
-       u8 addr[ETH_ALEN];
-       __le16 reserved2;
-       u8 sta_id;
-       u8 modify_mask;
-       __le16 reserved3;
-       struct iwl_mvm_keyinfo key;
-       __le32 station_flags;
-       __le32 station_flags_msk;
-       __le16 tid_disable_tx;
-       __le16 reserved4;
-       u8 add_immediate_ba_tid;
-       u8 remove_immediate_ba_tid;
-       __le16 add_immediate_ba_ssn;
-       __le16 sleep_tx_count;
-       __le16 sleep_state_flags;
-       __le16 assoc_id;
-       __le16 beamform_flags;
-       __le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_5 */
-
-/**
- * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
- * VER_7 of this command is quite similar to VER_5 except
- * exclusion of all fields related to the security key installation.
- * It only differs from VER_6 by the "awake_acs" field that is
- * reserved and ignored in VER_6.
- */
-struct iwl_mvm_add_sta_cmd_v7 {
+struct iwl_mvm_add_sta_cmd {
        u8 add_modify;
        u8 awake_acs;
        __le16 tid_disable_tx;
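The consolidated iwl_mvm_add_sta_cmd keeps the tid_disable_tx semantics documented above: bit x set disables Tx for TID x, so clearing a bit (together with STA_MODIFY_TID_DISABLE_TX) enables AMPDU on that TID. A hedged sketch of the bitmask handling only (hypothetical helper, not driver code):

static void set_tid_tx_disabled(__le16 *tid_disable_tx, u8 tid, bool disable)
{
	u16 mask = le16_to_cpu(*tid_disable_tx);

	if (disable)
		mask |= BIT(tid);	/* set BIT(tid): Tx/AMPDU disabled for tid */
	else
		mask &= ~BIT(tid);	/* clear BIT(tid): AMPDU allowed for tid */

	*tid_disable_tx = cpu_to_le16(mask);
}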
index 8e122f3a7a74e8a97914a13d9821a7229bc7c2bc..6cc5f52b807f1bc343ea632674e215423c3abb1d 100644 (file)
@@ -482,7 +482,8 @@ struct iwl_mvm_tx_resp {
        u8 pa_integ_res_b[3];
        u8 pa_integ_res_c[3];
        __le16 measurement_req_id;
-       __le16 reserved;
+       u8 reduced_tpc;
+       u8 reserved;
 
        __le32 tfd_info;
        __le16 seq_ctl;
index 6e75b52588de3ca68a44c41ca339df1e57eae37f..309a9b9a94fecc26918f967e7b9e7a01374d43b3 100644 (file)
@@ -71,6 +71,7 @@
 #include "fw-api-power.h"
 #include "fw-api-d3.h"
 #include "fw-api-coex.h"
+#include "fw-api-scan.h"
 
 /* maximal number of Tx queues in any platform */
 #define IWL_MVM_MAX_QUEUES     20
@@ -604,52 +605,7 @@ enum {
        TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
 }; /* MAC_EVENT_ACTION_API_E_VER_2 */
 
-
-/**
- * struct iwl_time_event_cmd_api_v1 - configuring Time Events
- * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
- * with version 2. determined by IWL_UCODE_TLV_FLAGS)
- * ( TIME_EVENT_CMD = 0x29 )
- * @id_and_color: ID and color of the relevant MAC
- * @action: action to perform, one of FW_CTXT_ACTION_*
- * @id: this field has two meanings, depending on the action:
- *     If the action is ADD, then it means the type of event to add.
- *     For all other actions it is the unique event ID assigned when the
- *     event was added by the FW.
- * @apply_time: When to start the Time Event (in GP2)
- * @max_delay: maximum delay to event's start (apply time), in TU
- * @depends_on: the unique ID of the event we depend on (if any)
- * @interval: interval between repetitions, in TU
- * @interval_reciprocal: 2^32 / interval
- * @duration: duration of event in TU
- * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
- * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
- *     and TE_V1_EVENT_SOCIOPATHIC
- * @is_present: 0 or 1, are we present or absent during the Time Event
- * @max_frags: maximal number of fragments the Time Event can be divided to
- * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
- */
-struct iwl_time_event_cmd_v1 {
-       /* COMMON_INDEX_HDR_API_S_VER_1 */
-       __le32 id_and_color;
-       __le32 action;
-       __le32 id;
-       /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
-       __le32 apply_time;
-       __le32 max_delay;
-       __le32 dep_policy;
-       __le32 depends_on;
-       __le32 is_present;
-       __le32 max_frags;
-       __le32 interval;
-       __le32 interval_reciprocal;
-       __le32 duration;
-       __le32 repeat;
-       __le32 notify;
-} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
-
-
-/* Time event - defines for command API v2 */
+/* Time event - defines for command API */
 
 /*
  * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
@@ -680,7 +636,7 @@ enum {
 #define TE_V2_PLACEMENT_POS    12
 #define TE_V2_ABSENCE_POS      15
 
-/* Time event policy values (for time event cmd api v2)
+/* Time event policy values
  * A notification (both event and fragment) includes a status indicating weather
  * the FW was able to schedule the event or not. For fragment start/end
  * notification the status is always success. There is no start/end fragment
@@ -727,7 +683,7 @@ enum {
 };
 
 /**
- * struct iwl_time_event_cmd_api_v2 - configuring Time Events
+ * struct iwl_time_event_cmd_api - configuring Time Events
  * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
  * with version 1. determined by IWL_UCODE_TLV_FLAGS)
  * ( TIME_EVENT_CMD = 0x29 )
@@ -750,7 +706,7 @@ enum {
  *     TE_EVENT_SOCIOPATHIC
  *     using TE_ABSENCE and using TE_NOTIF_*
  */
-struct iwl_time_event_cmd_v2 {
+struct iwl_time_event_cmd {
        /* COMMON_INDEX_HDR_API_S_VER_1 */
        __le32 id_and_color;
        __le32 action;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
deleted file mode 100644 (file)
index 58c8941..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __fw_error_dump_h__
-#define __fw_error_dump_h__
-
-#include <linux/types.h>
-
-#define IWL_FW_ERROR_DUMP_BARKER       0x14789632
-
-/**
- * enum iwl_fw_error_dump_type - types of data in the dump file
- * @IWL_FW_ERROR_DUMP_SRAM:
- * @IWL_FW_ERROR_DUMP_REG:
- */
-enum iwl_fw_error_dump_type {
-       IWL_FW_ERROR_DUMP_SRAM = 0,
-       IWL_FW_ERROR_DUMP_REG = 1,
-
-       IWL_FW_ERROR_DUMP_MAX,
-};
-
-/**
- * struct iwl_fw_error_dump_data - data for one type
- * @type: %enum iwl_fw_error_dump_type
- * @len: the length starting from %data - must be a multiplier of 4.
- * @data: the data itself padded to be a multiplier of 4.
- */
-struct iwl_fw_error_dump_data {
-       __le32 type;
-       __le32 len;
-       __u8 data[];
-} __packed __aligned(4);
-
-/**
- * struct iwl_fw_error_dump_file - the layout of the header of the file
- * @barker: must be %IWL_FW_ERROR_DUMP_BARKER
- * @file_len: the length of all the file starting from %barker
- * @data: array of %struct iwl_fw_error_dump_data
- */
-struct iwl_fw_error_dump_file {
-       __le32 barker;
-       __le32 file_len;
-       u8 data[0];
-} __packed __aligned(4);
-
-#endif /* __fw_error_dump_h__ */
index 7ce20062f32d443be34fe87865d91afd71a0a014..883e702152d5289163f48f7464cf630f6795ec2f 100644 (file)
@@ -99,7 +99,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
        };
 
        IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
-       return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
@@ -137,6 +137,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
                mvm->umac_error_event_table =
                        le32_to_cpu(palive2->error_info_addr);
+               mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
+               mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
 
                alive_data->valid = le16_to_cpu(palive2->status) ==
                                    IWL_ALIVE_STATUS_OK;
@@ -180,6 +182,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        int ret, i;
        enum iwl_ucode_type old_type = mvm->cur_ucode;
        static const u8 alive_cmd[] = { MVM_ALIVE };
+       struct iwl_sf_region st_fwrd_space;
 
        fw = iwl_get_ucode_image(mvm, ucode_type);
        if (WARN_ON(!fw))
@@ -215,6 +218,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                return -EIO;
        }
 
+       /*
+        * update the sdio allocation according to the pointer we get in the
+        * alive notification.
+        */
+       st_fwrd_space.addr = mvm->sf_space.addr;
+       st_fwrd_space.size = mvm->sf_space.size;
+       ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
+
        iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
        /*
@@ -256,7 +267,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
        IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
                       phy_cfg_cmd.phy_cfg);
 
-       return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, CMD_SYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
                                    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
 }
 
@@ -288,14 +299,14 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                goto error;
        }
 
-       ret = iwl_send_bt_prio_tbl(mvm);
+       ret = iwl_send_bt_init_conf(mvm);
        if (ret)
                goto error;
 
        /* Read the NVM only at driver load time, no need to do this twice */
        if (read_nvm) {
                /* Read nvm */
-               ret = iwl_nvm_init(mvm);
+               ret = iwl_nvm_init(mvm, true);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        goto error;
@@ -303,7 +314,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        }
 
        /* In case we read the NVM from external file, load it to the NIC */
-       if (iwlwifi_mod_params.nvm_file)
+       if (mvm->nvm_file_name)
                iwl_mvm_load_nvm_to_nic(mvm);
 
        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
@@ -424,10 +435,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
-       ret = iwl_send_bt_prio_tbl(mvm);
-       if (ret)
-               goto error;
-
        ret = iwl_send_bt_init_conf(mvm);
        if (ret)
                goto error;
@@ -468,12 +475,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        /* Initialize tx backoffs to the minimal possible */
        iwl_mvm_tt_tx_backoff(mvm, 0);
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
-               ret = iwl_power_legacy_set_cam_mode(mvm);
-               if (ret)
-                       goto error;
-       }
-
        ret = iwl_mvm_power_update_device(mvm);
        if (ret)
                goto error;
index 9ccec10bba166299cc91cf1706992937127d277a..8b530277763258551cf09292298f8f14be074174 100644 (file)
@@ -667,12 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
        if (vif->bss_conf.qos)
                cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
-       /* Don't use cts to self as the fw doesn't support it currently. */
        if (vif->bss_conf.use_cts_prot) {
                cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
-               if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
-                       cmd->protection_flags |=
-                               cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+               cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
        }
        IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
                       vif->bss_conf.use_cts_prot,
@@ -688,7 +685,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
 static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
                                     struct iwl_mac_ctx_cmd *cmd)
 {
-       int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+       int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
                                       sizeof(*cmd), cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
@@ -696,19 +693,39 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
        return ret;
 }
 
-/*
- * Fill the specific data for mac context of type station or p2p client
- */
-static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
-                                         struct ieee80211_vif *vif,
-                                         struct iwl_mac_data_sta *ctxt_sta,
-                                         bool force_assoc_off)
+static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif,
+                                   u32 action, bool force_assoc_off)
 {
+       struct iwl_mac_ctx_cmd cmd = {};
+       struct iwl_mac_data_sta *ctxt_sta;
+
+       WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+       /* Fill the common data for all mac context types */
+       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+       if (vif->p2p) {
+               struct ieee80211_p2p_noa_attr *noa =
+                       &vif->bss_conf.p2p_noa_attr;
+
+               cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
+                                       IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+               ctxt_sta = &cmd.p2p_sta.sta;
+       } else {
+               ctxt_sta = &cmd.sta;
+       }
+
        /* We need the dtim_period to set the MAC as associated */
        if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
            !force_assoc_off) {
                u32 dtim_offs;
 
+               /* Allow beacons to pass through as long as we are not
+                * associated, or we do not have dtim period information.
+                */
+               cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+
                /*
                 * The DTIM count counts down, so when it is N that means N
                 * more beacon intervals happen until the DTIM TBTT. Therefore
@@ -755,51 +772,6 @@ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
 
        ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
        ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
-}
-
-static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
-                                       struct ieee80211_vif *vif,
-                                       u32 action)
-{
-       struct iwl_mac_ctx_cmd cmd = {};
-
-       WARN_ON(vif->type != NL80211_IFTYPE_STATION || vif->p2p);
-
-       /* Fill the common data for all mac context types */
-       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
-
-       /* Allow beacons to pass through as long as we are not associated,or we
-        * do not have dtim period information */
-       if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period)
-               cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
-       else
-               cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
-
-       /* Fill the data specific for station mode */
-       iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta,
-                                     action == FW_CTXT_ACTION_ADD);
-
-       return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
-}
-
-static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
-                                          struct ieee80211_vif *vif,
-                                          u32 action)
-{
-       struct iwl_mac_ctx_cmd cmd = {};
-       struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
-
-       WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p);
-
-       /* Fill the common data for all mac context types */
-       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
-
-       /* Fill the data specific for station mode */
-       iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta,
-                                     action == FW_CTXT_ACTION_ADD);
-
-       cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
-                                       IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
 
        return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
@@ -1137,16 +1109,12 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
 }
 
 static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                               u32 action)
+                               u32 action, bool force_assoc_off)
 {
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
-               if (!vif->p2p)
-                       return iwl_mvm_mac_ctxt_cmd_station(mvm, vif,
-                                                           action);
-               else
-                       return iwl_mvm_mac_ctxt_cmd_p2p_client(mvm, vif,
-                                                              action);
+               return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
+                                               force_assoc_off);
                break;
        case NL80211_IFTYPE_AP:
                if (!vif->p2p)
@@ -1176,7 +1144,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                      vif->addr, ieee80211_vif_type_p2p(vif)))
                return -EIO;
 
-       ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD);
+       ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
+                                  true);
        if (ret)
                return ret;
 
@@ -1187,7 +1156,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        return 0;
 }
 
-int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                            bool force_assoc_off)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
@@ -1195,7 +1165,8 @@ int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                      vif->addr, ieee80211_vif_type_p2p(vif)))
                return -EIO;
 
-       return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY);
+       return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
+                                   force_assoc_off);
 }
 
 int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1214,7 +1185,7 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                                           mvmvif->color));
        cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+       ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
                                   sizeof(cmd), &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
@@ -1240,11 +1211,23 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
        u32 rate __maybe_unused =
                le32_to_cpu(beacon->beacon_notify_hdr.initial_rate);
 
+       lockdep_assert_held(&mvm->mutex);
+
        IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX rate:%d\n",
                     status & TX_STATUS_MSK,
                     beacon->beacon_notify_hdr.failure_frame,
                     le64_to_cpu(beacon->tsf),
                     rate);
+
+       if (unlikely(mvm->csa_vif && mvm->csa_vif->csa_active)) {
+               if (!ieee80211_csa_is_complete(mvm->csa_vif)) {
+                       iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm->csa_vif);
+               } else {
+                       ieee80211_csa_finish(mvm->csa_vif);
+                       mvm->csa_vif = NULL;
+               }
+       }
+
        return 0;
 }
 
index f0cebf12c7b8415a3c787d0cc77a9b2b1c2a15ef..7215f59801863d3b7d72398de8c96c7b73c3902b 100644 (file)
@@ -295,7 +295,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
            !iwlwifi_mod_params.sw_crypto)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
-       if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
+           IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 &&
+           !iwlwifi_mod_params.uapsd_disable) {
                hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
                hw->uapsd_queues = IWL_UAPSD_AC_INFO;
                hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -309,11 +311,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_P2P_GO) |
-               BIT(NL80211_IFTYPE_P2P_DEVICE);
-
-       /* IBSS has bugs in older versions */
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
-               hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+               BIT(NL80211_IFTYPE_P2P_DEVICE) |
+               BIT(NL80211_IFTYPE_ADHOC);
 
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
        hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
@@ -322,6 +321,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
                hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
 
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW)
+               hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
        hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
        hw->wiphy->n_iface_combinations =
                ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -365,14 +367,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        else
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
-               hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-               hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
-               hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
-               /* we create the 802.11 header and zero length SSID IE. */
-               hw->wiphy->max_sched_scan_ie_len =
-                                       SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
-       }
+       hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+       hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+       hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+       /* we create the 802.11 header and zero length SSID IE. */
+       hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
 
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_P2P_GO_OPPPS;
@@ -386,7 +385,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        }
 
 #ifdef CONFIG_PM_SLEEP
-       if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+       if (iwl_mvm_is_d0i3_supported(mvm) &&
+           device_can_wakeup(mvm->trans->dev)) {
+               mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
+               hw->wiphy->wowlan = &mvm->wowlan;
+       } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
            mvm->trans->ops->d3_suspend &&
            mvm->trans->ops->d3_resume &&
            device_can_wakeup(mvm->trans->dev)) {
@@ -540,13 +543,22 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                return -EACCES;
 
        /* return from D0i3 before starting a new Tx aggregation */
-       if (action == IEEE80211_AMPDU_TX_START) {
+       switch (action) {
+       case IEEE80211_AMPDU_TX_START:
+       case IEEE80211_AMPDU_TX_STOP_CONT:
+       case IEEE80211_AMPDU_TX_STOP_FLUSH:
+       case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
                iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
                tx_agg_ref = true;
 
                /*
-                * wait synchronously until D0i3 exit to get the correct
-                * sequence number for the tid
+                * for tx start, wait synchronously until D0i3 exit to
+                * get the correct sequence number for the tid.
+                * additionally, some other ampdu actions use direct
+                * target access, which is not handled automatically
+                * by the trans layer (unlike commands), so wait for
+                * d0i3 exit in these cases as well.
                 */
                if (!wait_event_timeout(mvm->d0i3_exit_waitq,
                          !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
@@ -554,6 +566,9 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                        iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
                        return -EIO;
                }
+               break;
+       default:
+               break;
        }
 
        mutex_lock(&mvm->mutex);
@@ -758,7 +773,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                .pwr_restriction = cpu_to_le16(tx_power),
        };
 
-       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
                                    sizeof(reduce_txpwr_cmd),
                                    &reduce_txpwr_cmd);
 }
@@ -817,18 +832,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        if (ret)
                goto out_release;
 
-       ret = iwl_mvm_power_update_mac(mvm, vif);
+       ret = iwl_mvm_power_update_mac(mvm);
        if (ret)
                goto out_release;
 
        /* beacon filtering */
-       ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+       ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
        if (ret)
                goto out_remove_mac;
 
        if (!mvm->bf_allowed_vif &&
-           vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
-           mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
+           vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
                mvm->bf_allowed_vif = mvmvif;
                vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
                                     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -969,7 +983,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
        if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
                mvm->vif_count--;
 
-       iwl_mvm_power_update_mac(mvm, vif);
+       iwl_mvm_power_update_mac(mvm);
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
 out_release:
@@ -1007,7 +1021,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
        memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
        len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
        if (ret)
                IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1037,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
        if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
                return;
 
-       ieee80211_iterate_active_interfaces(
+       ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1223,10 +1237,14 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
        if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
                return 0;
 
+       /* bcast filtering isn't supported for P2P client */
+       if (vif->p2p)
+               return 0;
+
        if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
                return 0;
 
-       return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
                                    sizeof(cmd), &cmd);
 }
 #else
@@ -1253,7 +1271,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
        if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
                iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
 
-       ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+       ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
        if (ret)
                IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
 
@@ -1333,10 +1351,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
                iwl_mvm_sf_update(mvm, vif, false);
-               WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
+               WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
        } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
                              BSS_CHANGED_QOS)) {
-               ret = iwl_mvm_power_update_mac(mvm, vif);
+               ret = iwl_mvm_power_update_mac(mvm);
                if (ret)
                        IWL_ERR(mvm, "failed to update power mode\n");
        }
@@ -1347,16 +1365,19 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
        }
 
        if (changes & BSS_CHANGED_CQM) {
-               IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
+               IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
                /* reset cqm events tracking */
                mvmvif->bf_data.last_cqm_event = 0;
-               ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
-               if (ret)
-                       IWL_ERR(mvm, "failed to update CQM thresholds\n");
+               if (mvmvif->bf_data.bf_enabled) {
+                       ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+                       if (ret)
+                               IWL_ERR(mvm,
+                                       "failed to update CQM thresholds\n");
+               }
        }
 
        if (changes & BSS_CHANGED_ARP_FILTER) {
-               IWL_DEBUG_MAC80211(mvm, "arp filter changed");
+               IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
                iwl_mvm_configure_bcast_filter(mvm, vif);
        }
 }
@@ -1402,7 +1423,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        mvmvif->ap_ibss_active = true;
 
        /* power updated needs to be done before quotas */
-       iwl_mvm_power_update_mac(mvm, vif);
+       iwl_mvm_power_update_mac(mvm);
 
        ret = iwl_mvm_update_quotas(mvm, vif);
        if (ret)
@@ -1410,7 +1431,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 
        /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
        if (vif->p2p && mvm->p2p_device_vif)
-               iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+               iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
 
        iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
 
@@ -1420,7 +1441,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        return 0;
 
 out_quota_failed:
-       iwl_mvm_power_update_mac(mvm, vif);
+       iwl_mvm_power_update_mac(mvm);
        mvmvif->ap_ibss_active = false;
        iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
 out_unbind:
@@ -1450,13 +1471,13 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 
        /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
        if (vif->p2p && mvm->p2p_device_vif)
-               iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+               iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
 
        iwl_mvm_update_quotas(mvm, NULL);
        iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
        iwl_mvm_binding_remove_vif(mvm, vif);
 
-       iwl_mvm_power_update_mac(mvm, vif);
+       iwl_mvm_power_update_mac(mvm);
 
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
@@ -1477,7 +1498,7 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
 
        if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
                       BSS_CHANGED_BANDWIDTH) &&
-           iwl_mvm_mac_ctxt_changed(mvm, vif))
+           iwl_mvm_mac_ctxt_changed(mvm, vif, false))
                IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
 
        /* Need to send a new beacon template to the FW */
@@ -1495,6 +1516,9 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
+               iwl_mvm_sched_scan_stop(mvm, true);
+
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
@@ -1525,7 +1549,7 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
 
        switch (mvm->scan_status) {
        case IWL_MVM_SCAN_SCHED:
-               ret = iwl_mvm_sched_scan_stop(mvm);
+               ret = iwl_mvm_sched_scan_stop(mvm, true);
                if (ret) {
                        ret = -EBUSY;
                        goto out;
@@ -1697,6 +1721,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                ret = iwl_mvm_add_sta(mvm, vif, sta);
        } else if (old_state == IEEE80211_STA_NONE &&
                   new_state == IEEE80211_STA_AUTH) {
+               /*
+                * EBS may be disabled due to previous failures reported by FW.
+                * Reset EBS status here assuming environment has been changed.
+                */
+               mvm->last_ebs_successful = true;
                ret = 0;
        } else if (old_state == IEEE80211_STA_AUTH &&
                   new_state == IEEE80211_STA_ASSOC) {
@@ -1708,14 +1737,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
                /* enable beacon filtering */
-               if (vif->bss_conf.dtim_period)
-                       WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
-                                                            CMD_SYNC));
+               WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
                ret = 0;
        } else if (old_state == IEEE80211_STA_AUTHORIZED &&
                   new_state == IEEE80211_STA_ASSOC) {
                /* disable beacon filtering */
-               WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
+               WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
                ret = 0;
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTH) {
@@ -1772,7 +1799,7 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
                int ret;
 
                mutex_lock(&mvm->mutex);
-               ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+               ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
                mutex_unlock(&mvm->mutex);
                return ret;
        }
@@ -1807,6 +1834,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (!iwl_mvm_is_idle(mvm)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        switch (mvm->scan_status) {
        case IWL_MVM_SCAN_OS:
                IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
@@ -1860,7 +1892,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
        int ret;
 
        mutex_lock(&mvm->mutex);
-       ret = iwl_mvm_sched_scan_stop(mvm);
+       ret = iwl_mvm_sched_scan_stop(mvm, false);
        mutex_unlock(&mvm->mutex);
        iwl_mvm_wait_for_async_handlers(mvm);
 
@@ -2156,10 +2188,10 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
                return;
 
        mutex_lock(&mvm->mutex);
+       iwl_mvm_bt_coex_vif_change(mvm);
        iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
                                 ctx->rx_chains_static,
                                 ctx->rx_chains_dynamic);
-       iwl_mvm_bt_coex_vif_change(mvm);
        mutex_unlock(&mvm->mutex);
 }
 
@@ -2179,6 +2211,11 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
 
        switch (vif->type) {
        case NL80211_IFTYPE_AP:
+               /* Unless it's a CSA flow we have nothing to do here */
+               if (vif->csa_active) {
+                       mvmvif->ap_ibss_active = true;
+                       break;
+               }
        case NL80211_IFTYPE_ADHOC:
                /*
                 * The AP binding flow is handled as part of the start_ap flow
@@ -2202,7 +2239,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
         * Power state must be updated before quotas,
         * otherwise fw will complain.
         */
-       iwl_mvm_power_update_mac(mvm, vif);
+       iwl_mvm_power_update_mac(mvm);
 
        /* Setting the quota at this stage is only required for monitor
         * interfaces. For the other types, the bss_info changed flow
@@ -2215,11 +2252,17 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
                        goto out_remove_binding;
        }
 
+       /* Handle binding during CSA */
+       if (vif->type == NL80211_IFTYPE_AP) {
+               iwl_mvm_update_quotas(mvm, vif);
+               iwl_mvm_mac_ctxt_changed(mvm, vif, false);
+       }
+
        goto out_unlock;
 
  out_remove_binding:
        iwl_mvm_binding_remove_vif(mvm, vif);
-       iwl_mvm_power_update_mac(mvm, vif);
+       iwl_mvm_power_update_mac(mvm);
  out_unlock:
        mutex_unlock(&mvm->mutex);
        if (ret)
@@ -2239,22 +2282,29 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
        iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
 
        switch (vif->type) {
-       case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_ADHOC:
                goto out_unlock;
        case NL80211_IFTYPE_MONITOR:
                mvmvif->monitor_active = false;
                iwl_mvm_update_quotas(mvm, NULL);
                break;
+       case NL80211_IFTYPE_AP:
+               /* This part is triggered only during CSA */
+               if (!vif->csa_active || !mvmvif->ap_ibss_active)
+                       goto out_unlock;
+
+               mvmvif->ap_ibss_active = false;
+               iwl_mvm_update_quotas(mvm, NULL);
+               /*TODO: bt_coex notification here? */
        default:
                break;
        }
 
        iwl_mvm_binding_remove_vif(mvm, vif);
-       iwl_mvm_power_update_mac(mvm, vif);
 
 out_unlock:
        mvmvif->phy_ctxt = NULL;
+       iwl_mvm_power_update_mac(mvm);
        mutex_unlock(&mvm->mutex);
 }
 
@@ -2318,9 +2368,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
                        return -EINVAL;
 
                if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
-                       return iwl_mvm_enable_beacon_filter(mvm, vif,
-                                                           CMD_SYNC);
-               return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+                       return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+               return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
        }
 
        return -EOPNOTSUPP;
@@ -2341,6 +2390,53 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
 }
 #endif
 
+static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw,
+                                         struct ieee80211_vif *vif,
+                                         struct cfg80211_chan_def *chandef)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       mutex_lock(&mvm->mutex);
+       if (WARN(mvm->csa_vif && mvm->csa_vif->csa_active,
+                "Another CSA is already in progress"))
+               goto out_unlock;
+
+       IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n",
+                          chandef->center_freq1);
+       mvm->csa_vif = vif;
+
+out_unlock:
+       mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif, u32 queues, bool drop)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif;
+       struct iwl_mvm_sta *mvmsta;
+
+       if (!vif || vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       mutex_lock(&mvm->mutex);
+       mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
+
+       if (WARN_ON_ONCE(!mvmsta))
+               goto done;
+
+       if (drop) {
+               if (iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true))
+                       IWL_ERR(mvm, "flush request fail\n");
+       } else {
+               iwl_trans_wait_tx_queue_empty(mvm->trans,
+                                             mvmsta->tfd_queue_msk);
+       }
+done:
+       mutex_unlock(&mvm->mutex);
+}
+
 const struct ieee80211_ops iwl_mvm_hw_ops = {
        .tx = iwl_mvm_mac_tx,
        .ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -2364,6 +2460,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
        .sta_rc_update = iwl_mvm_sta_rc_update,
        .conf_tx = iwl_mvm_mac_conf_tx,
        .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+       .flush = iwl_mvm_mac_flush,
        .sched_scan_start = iwl_mvm_mac_sched_scan_start,
        .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
        .set_key = iwl_mvm_mac_set_key,
@@ -2383,6 +2480,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
 
        .set_tim = iwl_mvm_set_tim,
 
+       .channel_switch_beacon = iwl_mvm_channel_switch_beacon,
+
        CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
 
 #ifdef CONFIG_PM_SLEEP
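
For illustration, a minimal user-space sketch of the drop-vs-drain choice made by the new .flush callback above; flush_queues() and wait_queues_empty() are hypothetical stand-ins for iwl_mvm_flush_tx_path() and iwl_trans_wait_tx_queue_empty():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's flush/drain primitives. */
static int flush_queues(unsigned int queue_mask)
{
	printf("dropping frames on queues 0x%x\n", queue_mask);
	return 0;
}

static void wait_queues_empty(unsigned int queue_mask)
{
	printf("waiting for queues 0x%x to drain\n", queue_mask);
}

/* Mirrors the drop-vs-drain decision made for the station's TFD queues. */
static void sta_flush(unsigned int queue_mask, bool drop)
{
	if (drop) {
		if (flush_queues(queue_mask))
			fprintf(stderr, "flush request fail\n");
	} else {
		wait_queues_empty(queue_mask);
	}
}

int main(void)
{
	sta_flush(0x0f, true);   /* drop pending frames */
	sta_flush(0x0f, false);  /* let them drain */
	return 0;
}
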
index d564233a65da6157c1aaf16a099ddf94b3be933e..fcc6c29482d0ef516bba48459b09230b9ead4007 100644 (file)
@@ -164,7 +164,6 @@ enum iwl_dbgfs_pm_mask {
        MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
        MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
        MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
-       MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
        MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
        MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
        MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@ -177,7 +176,6 @@ struct iwl_dbgfs_pm {
        u32 tx_data_timeout;
        bool skip_over_dtim;
        u8 skip_dtim_periods;
-       bool disable_power_off;
        bool lprx_ena;
        u32 lprx_rssi_threshold;
        bool snooze_ena;
@@ -232,6 +230,7 @@ enum iwl_mvm_ref_type {
        IWL_MVM_REF_USER,
        IWL_MVM_REF_TX,
        IWL_MVM_REF_TX_AGG,
+       IWL_MVM_REF_EXIT_WORK,
 
        IWL_MVM_REF_COUNT,
 };
@@ -265,6 +264,7 @@ struct iwl_mvm_vif_bf_data {
  * @uploaded: indicates the MAC context has been added to the device
  * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
  *     should get quota etc.
+ * @pm_enabled: indicates if MAC power management is allowed
  * @monitor_active: indicates that monitor context is configured, and that the
  *     interface should get quota etc.
  * @low_latency: indicates that this interface is in low-latency mode
@@ -283,6 +283,7 @@ struct iwl_mvm_vif {
 
        bool uploaded;
        bool ap_ibss_active;
+       bool pm_enabled;
        bool monitor_active;
        bool low_latency;
        struct iwl_mvm_vif_bf_data bf_data;
@@ -451,6 +452,11 @@ struct iwl_mvm_frame_stats {
        int last_frame_idx;
 };
 
+enum {
+       D0I3_DEFER_WAKEUP,
+       D0I3_PENDING_WAKEUP,
+};
+
 struct iwl_mvm {
        /* for logger access */
        struct device *dev;
@@ -484,6 +490,7 @@ struct iwl_mvm {
        u32 log_event_table;
        u32 umac_error_event_table;
        bool support_umac_log;
+       struct iwl_sf_region sf_space;
 
        u32 ampdu_ref;
 
@@ -495,6 +502,7 @@ struct iwl_mvm {
        u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
        atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
 
+       const char *nvm_file_name;
        struct iwl_nvm_data *nvm_data;
        /* NVM sections */
        struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
@@ -535,6 +543,8 @@ struct iwl_mvm {
        /* Internal station */
        struct iwl_mvm_int_sta aux_sta;
 
+       bool last_ebs_successful;
+
        u8 scan_last_antenna_idx; /* to toggle TX between antennas */
        u8 mgmt_last_antenna_idx;
 
@@ -578,8 +588,12 @@ struct iwl_mvm {
        void *fw_error_dump;
        void *fw_error_sram;
        u32 fw_error_sram_len;
+       u32 *fw_error_rxf;
+       u32 fw_error_rxf_len;
 
+#ifdef CONFIG_IWLWIFI_LEDS
        struct led_classdev led;
+#endif
 
        struct ieee80211_vif *p2p_device_vif;
 
@@ -601,6 +615,9 @@ struct iwl_mvm {
        bool d0i3_offloading;
        struct work_struct d0i3_exit_work;
        struct sk_buff_head d0i3_tx;
+       /* protect d0i3_suspend_flags */
+       struct mutex d0i3_suspend_mutex;
+       unsigned long d0i3_suspend_flags;
        /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
        spinlock_t d0i3_tx_lock;
        wait_queue_head_t d0i3_exit_waitq;
@@ -629,8 +646,8 @@ struct iwl_mvm {
 
        /* Indicate if device power save is allowed */
        bool ps_disabled;
-       /* Indicate if device power management is allowed */
-       bool pm_disabled;
+
+       struct ieee80211_vif *csa_vif;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -705,6 +722,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
 void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
 #endif
 u8 first_antenna(u8 mask);
 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -745,7 +763,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
                          struct iwl_device_cmd *cmd);
 
 /* NVM */
-int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
 int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
 
 int iwl_mvm_up(struct iwl_mvm *mvm);
@@ -796,7 +814,8 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
 int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                            bool force_assoc_off);
 int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
                                struct ieee80211_vif *vif);
@@ -840,7 +859,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
                                       struct cfg80211_sched_scan_request *req);
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                             struct cfg80211_sched_scan_request *req);
-int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify);
 int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
                                  struct iwl_rx_cmd_buffer *rxb,
                                  struct iwl_device_cmd *cmd);
@@ -874,10 +893,8 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
 int rs_pretty_print_rate(char *buf, const u32 rate);
 
 /* power management */
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
-
 int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
-int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
 int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                 char *buf, int bufsz);
 
@@ -886,8 +903,18 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
                                             struct iwl_rx_cmd_buffer *rxb,
                                             struct iwl_device_cmd *cmd);
 
+#ifdef CONFIG_IWLWIFI_LEDS
 int iwl_mvm_leds_init(struct iwl_mvm *mvm);
 void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
+#else
+static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+       return 0;
+}
+static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+}
+#endif
 
 /* D3 (WoWLAN, NetDetect) */
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
@@ -922,9 +949,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
 
 /* BT Coex */
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
 int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb,
@@ -936,9 +963,10 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+                                   enum ieee80211_band band);
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac);
-int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
 
 enum iwl_bt_kill_msk {
        BT_KILL_MSK_DEFAULT,
@@ -969,17 +997,11 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
 int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
                                  struct ieee80211_vif *vif,
                                  u32 flags);
-int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
-                               struct ieee80211_vif *vif, bool enable);
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
-                                struct ieee80211_vif *vif,
-                                bool force,
-                                u32 flags);
-
 /* SMPS */
 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                enum iwl_mvm_smps_type_request req_type,
                                enum ieee80211_smps_mode smps_request);
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
 
 /* Low latency */
 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -1003,6 +1025,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
        return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
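
The LED declarations above use the usual compile-out idiom: when the Kconfig option is disabled, callers still compile against static inline no-ops, so no call site needs its own #ifdef. A generic sketch of the same idiom, with a made-up HAVE_LEDS build switch:

#include <stdio.h>

/* #define HAVE_LEDS 1 */	/* build-time switch, analogous to CONFIG_IWLWIFI_LEDS */

#ifdef HAVE_LEDS
int leds_init(void)  { printf("registering LED\n"); return 0; }
void leds_exit(void) { printf("unregistering LED\n"); }
#else
/* Stubs keep every call site free of #ifdefs. */
static inline int leds_init(void)  { return 0; }
static inline void leds_exit(void) { }
#endif

int main(void)
{
	if (leds_init())
		return 1;
	leds_exit();
	return 0;
}
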
index cf2d09f53782b0227f9a47d9a8761af8b5ea7ffe..808f78f6fbf9fe478553bd54264f784d98c01597 100644 (file)
 #define NVM_WRITE_OPCODE 1
 #define NVM_READ_OPCODE 0
 
+/* load nvm chunk response */
+enum {
+       READ_NVM_CHUNK_SUCCEED = 0,
+       READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
+};
+
 /*
  * prepare the NVM host command w/ the pointers to the nvm buffer
  * and send it to fw
@@ -90,7 +96,7 @@ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
        struct iwl_host_cmd cmd = {
                .id = NVM_ACCESS_CMD,
                .len = { sizeof(struct iwl_nvm_access_cmd), length },
-               .flags = CMD_SYNC | CMD_SEND_IN_RFKILL,
+               .flags = CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, data },
                /* data may come from vmalloc, so use _DUP */
                .dataflags = { 0, IWL_HCMD_DFL_DUP },
@@ -112,7 +118,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
        struct iwl_rx_packet *pkt;
        struct iwl_host_cmd cmd = {
                .id = NVM_ACCESS_CMD,
-               .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+               .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
@@ -139,10 +145,26 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
        offset_read = le16_to_cpu(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
-               IWL_ERR(mvm,
-                       "NVM access command failed with status %d (device: %s)\n",
-                       ret, mvm->cfg->name);
-               ret = -EINVAL;
+               if ((offset != 0) &&
+                   (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
+                       /*
+                        * meaning of NOT_VALID_ADDRESS:
+                        * the driver tried to read a chunk from an address
+                        * that is a multiple of 2K and got an error because
+                        * the address is empty.
+                        * meaning of (offset != 0): the driver already read
+                        * valid data from another chunk, so this case is not
+                        * an error.
+                        */
+                       IWL_DEBUG_EEPROM(mvm->trans->dev,
+                                        "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
+                                        offset);
+                       ret = 0;
+               } else {
+                       IWL_DEBUG_EEPROM(mvm->trans->dev,
+                                        "NVM access command failed with status %d (device: %s)\n",
+                                        ret, mvm->cfg->name);
+                       ret = -EIO;
+               }
                goto exit;
        }
 
@@ -211,9 +233,9 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
        while (ret == length) {
                ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
                if (ret < 0) {
-                       IWL_ERR(mvm,
-                               "Cannot read NVM from section %d offset %d, length %d\n",
-                               section, offset, length);
+                       IWL_DEBUG_EEPROM(mvm->trans->dev,
+                                        "Cannot read NVM from section %d offset %d, length %d\n",
+                                        section, offset, length);
                        return ret;
                }
                offset += ret;
@@ -238,13 +260,20 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                        return NULL;
                }
        } else {
+               /* SW and REGULATORY sections are mandatory */
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
-                   !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data ||
                    !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
                        IWL_ERR(mvm,
                                "Can't parse empty family 8000 NVM sections\n");
                        return NULL;
                }
+               /* MAC_OVERRIDE or at least HW section must exist */
+               if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
+                   !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
+                       IWL_ERR(mvm,
+                               "Can't parse mac_address, empty sections\n");
+                       return NULL;
+               }
        }
 
        if (WARN_ON(!mvm->cfg))
@@ -311,16 +340,16 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
         * get here after that we assume the NVM request can be satisfied
         * synchronously.
         */
-       ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file,
+       ret = request_firmware(&fw_entry, mvm->nvm_file_name,
                               mvm->trans->dev);
        if (ret) {
                IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
-                       iwlwifi_mod_params.nvm_file, ret);
+                       mvm->nvm_file_name, ret);
                return ret;
        }
 
        IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
-                iwlwifi_mod_params.nvm_file, fw_entry->size);
+                mvm->nvm_file_name, fw_entry->size);
 
        if (fw_entry->size < sizeof(*file_sec)) {
                IWL_ERR(mvm, "NVM file too small\n");
@@ -427,53 +456,28 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
        return ret;
 }
 
-int iwl_nvm_init(struct iwl_mvm *mvm)
+int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
 {
-       int ret, i, section;
+       int ret, section;
        u8 *nvm_buffer, *temp;
-       int nvm_to_read[NVM_MAX_NUM_SECTIONS];
-       int num_of_sections_to_read;
 
        if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
                return -EINVAL;
 
-       /* load external NVM if configured */
-       if (iwlwifi_mod_params.nvm_file) {
-               /* move to External NVM flow */
-               ret = iwl_mvm_read_external_nvm(mvm);
-               if (ret)
-                       return ret;
-       } else {
-               /* list of NVM sections we are allowed/need to read */
-               if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
-                       nvm_to_read[0] = mvm->cfg->nvm_hw_section_num;
-                       nvm_to_read[1] = NVM_SECTION_TYPE_SW;
-                       nvm_to_read[2] = NVM_SECTION_TYPE_CALIBRATION;
-                       nvm_to_read[3] = NVM_SECTION_TYPE_PRODUCTION;
-                       num_of_sections_to_read = 4;
-               } else {
-                       nvm_to_read[0] = NVM_SECTION_TYPE_SW;
-                       nvm_to_read[1] = NVM_SECTION_TYPE_CALIBRATION;
-                       nvm_to_read[2] = NVM_SECTION_TYPE_PRODUCTION;
-                       nvm_to_read[3] = NVM_SECTION_TYPE_REGULATORY;
-                       nvm_to_read[4] = NVM_SECTION_TYPE_MAC_OVERRIDE;
-                       num_of_sections_to_read = 5;
-               }
-
+       /* load NVM values from nic */
+       if (read_nvm_from_nic) {
                /* Read From FW NVM */
                IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
 
-               /* TODO: find correct NVM max size for a section */
                nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
                                     GFP_KERNEL);
                if (!nvm_buffer)
                        return -ENOMEM;
-               for (i = 0; i < num_of_sections_to_read; i++) {
-                       section = nvm_to_read[i];
+               for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
                        /* we override the constness for initial read */
                        ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
                        if (ret < 0)
-                               break;
+                               continue;
                        temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
                        if (!temp) {
                                ret = -ENOMEM;
@@ -502,15 +506,21 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
                                        mvm->nvm_hw_blob.size = ret;
                                        break;
                                }
-                               WARN(1, "section: %d", section);
                        }
 #endif
                }
                kfree(nvm_buffer);
-               if (ret < 0)
+       }
+
+       /* load external NVM if configured */
+       if (mvm->nvm_file_name) {
+               /* move to External NVM flow */
+               ret = iwl_mvm_read_external_nvm(mvm);
+               if (ret)
                        return ret;
        }
 
+       /* parse the relevant nvm sections */
        mvm->nvm_data = iwl_parse_nvm_sections(mvm);
        if (!mvm->nvm_data)
                return -ENODATA;
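
The chunked NVM read above keeps requesting 2K chunks until a short read, and treats an invalid-address error at a non-zero offset as the end of the section rather than a failure. A user-space sketch of that loop under those assumptions; read_chunk() is a hypothetical stand-in for iwl_nvm_read_chunk():

#include <stdio.h>
#include <string.h>

#define CHUNK		2048
#define SECTION_SIZE	4096	/* pretend the OTP section is exactly 2 chunks */
#define ERR_NOT_VALID	(-2)	/* analogous to READ_NVM_CHUNK_NOT_VALID_ADDRESS */

/* Hypothetical stand-in: reading past the end of the section hits an empty
 * address and returns an error instead of a graceful short read. */
static int read_chunk(int offset, int length, unsigned char *buf)
{
	if (offset >= SECTION_SIZE)
		return ERR_NOT_VALID;
	int n = SECTION_SIZE - offset < length ? SECTION_SIZE - offset : length;
	memset(buf + offset, 0xab, n);
	return n;
}

/* Keep reading full chunks; an invalid-address error at a non-zero offset
 * simply marks the end of the section. */
static int read_section(unsigned char *buf)
{
	int offset = 0, ret = CHUNK;

	while (ret == CHUNK) {
		ret = read_chunk(offset, CHUNK, buf);
		if (ret == ERR_NOT_VALID && offset != 0)
			return offset;	/* already got valid data: not an error */
		if (ret < 0)
			return ret;	/* real failure */
		offset += ret;
	}
	return offset;
}

int main(void)
{
	unsigned char buf[SECTION_SIZE];
	printf("read %d bytes\n", read_section(buf));
	return 0;
}
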
index 9545d7fdd4bfc69dfb1fb8c4e07de097d58b6ea7..cc2f7de396deb396d2b20e261b4c2877137dfa3e 100644 (file)
@@ -79,8 +79,8 @@
 #include "iwl-prph.h"
 #include "rs.h"
 #include "fw-api-scan.h"
-#include "fw-error-dump.h"
 #include "time-event.h"
+#include "iwl-fw-error-dump.h"
 
 /*
  * module name, copyright, version, etc.
@@ -220,7 +220,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
 
        RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
-       RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
+       RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
        RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
        RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
                   iwl_mvm_rx_ant_coupling_notif, true),
@@ -402,6 +402,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mvm->sf_state = SF_UNINIT;
 
        mutex_init(&mvm->mutex);
+       mutex_init(&mvm->d0i3_suspend_mutex);
        spin_lock_init(&mvm->async_handlers_lock);
        INIT_LIST_HEAD(&mvm->time_event_list);
        INIT_LIST_HEAD(&mvm->async_handlers_list);
@@ -465,13 +466,24 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        min_backoff = calc_min_backoff(trans, cfg);
        iwl_mvm_tt_initialize(mvm, min_backoff);
+       /* set the nvm_file_name according to priority */
+       if (iwlwifi_mod_params.nvm_file)
+               mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
+       else
+               mvm->nvm_file_name = mvm->cfg->default_nvm_file;
+
+       if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
+                "not allowing power-up and not having nvm_file\n"))
+               goto out_free;
 
        /*
-        * If the NVM exists in an external file,
-        * there is no need to unnecessarily power up the NIC at driver load
+        * Even if the NVM exists in the nvm_file, the driver should read
+        * the NVM again from the NIC because there might be entries that
+        * exist in the OTP and not in the file.
+        * For NICs with no_power_up_nic_in_init: rely completely on nvm_file.
         */
-       if (iwlwifi_mod_params.nvm_file) {
-               err = iwl_nvm_init(mvm);
+       if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
+               err = iwl_nvm_init(mvm, false);
                if (err)
                        goto out_free;
        } else {
@@ -518,7 +530,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
  out_free:
        iwl_phy_db_free(mvm->phy_db);
        kfree(mvm->scan_cmd);
-       if (!iwlwifi_mod_params.nvm_file)
+       if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
                iwl_trans_op_mode_leave(trans);
        ieee80211_free_hw(mvm->hw);
        return NULL;
@@ -538,6 +550,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        kfree(mvm->scan_cmd);
        vfree(mvm->fw_error_dump);
        kfree(mvm->fw_error_sram);
+       kfree(mvm->fw_error_rxf);
        kfree(mvm->mcast_filter_cmd);
        mvm->mcast_filter_cmd = NULL;
 
@@ -814,6 +827,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        struct iwl_fw_error_dump_file *dump_file;
        struct iwl_fw_error_dump_data *dump_data;
        u32 file_len;
+       u32 trans_len;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -821,8 +835,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                return;
 
        file_len = mvm->fw_error_sram_len +
+                  mvm->fw_error_rxf_len +
                   sizeof(*dump_file) +
-                  sizeof(*dump_data);
+                  sizeof(*dump_data) * 2;
+
+       trans_len = iwl_trans_dump_data(mvm->trans, NULL, 0);
+       if (trans_len)
+               file_len += trans_len;
 
        dump_file = vmalloc(file_len);
        if (!dump_file)
@@ -833,7 +852,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
        dump_file->file_len = cpu_to_le32(file_len);
        dump_data = (void *)dump_file->data;
-       dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
+       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+       dump_data->len = cpu_to_le32(mvm->fw_error_rxf_len);
+       memcpy(dump_data->data, mvm->fw_error_rxf, mvm->fw_error_rxf_len);
+
+       dump_data = iwl_mvm_fw_error_next_data(dump_data);
+       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
        dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
 
        /*
@@ -842,6 +866,23 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
         * mvm->fw_error_sram right now.
         */
        memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
+
+       kfree(mvm->fw_error_rxf);
+       mvm->fw_error_rxf = NULL;
+       mvm->fw_error_rxf_len = 0;
+
+       kfree(mvm->fw_error_sram);
+       mvm->fw_error_sram = NULL;
+       mvm->fw_error_sram_len = 0;
+
+       if (trans_len) {
+               void *buf = iwl_mvm_fw_error_next_data(dump_data);
+               u32 real_trans_len = iwl_trans_dump_data(mvm->trans, buf,
+                                                        trans_len);
+               dump_data = (void *)((u8 *)buf + real_trans_len);
+               dump_file->file_len =
+                       cpu_to_le32(file_len - trans_len + real_trans_len);
+       }
 }
 #endif
 
@@ -853,6 +894,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        iwl_mvm_fw_error_sram_dump(mvm);
+       iwl_mvm_fw_error_rxf_dump(mvm);
 #endif
 
        iwl_mvm_nic_restart(mvm);
@@ -1126,9 +1168,9 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
        struct iwl_host_cmd get_status_cmd = {
                .id = WOWLAN_GET_STATUSES,
-               .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
+               .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
        };
-       struct iwl_wowlan_status_v6 *status;
+       struct iwl_wowlan_status *status;
        int ret;
        u32 disconnection_reasons, wakeup_reasons;
        __le16 *qos_seq = NULL;
@@ -1158,18 +1200,27 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
        iwl_free_resp(&get_status_cmd);
 out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+       iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
        mutex_unlock(&mvm->mutex);
 }
 
-static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
 {
-       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
                    CMD_WAKE_UP_TRANS;
        int ret;
 
        IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
 
+       mutex_lock(&mvm->d0i3_suspend_mutex);
+       if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
+               IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
+               __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
+               mutex_unlock(&mvm->d0i3_suspend_mutex);
+               return 0;
+       }
+       mutex_unlock(&mvm->d0i3_suspend_mutex);
+
        ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
        if (ret)
                goto out;
@@ -1183,6 +1234,25 @@ out:
        return ret;
 }
 
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
+       return _iwl_mvm_exit_d0i3(mvm);
+}
+
+static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
+                            struct napi_struct *napi,
+                            struct net_device *napi_dev,
+                            int (*poll)(struct napi_struct *, int),
+                            int weight)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
+}
+
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .start = iwl_op_mode_mvm_start,
        .stop = iwl_op_mode_mvm_stop,
@@ -1196,4 +1266,5 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .nic_config = iwl_mvm_nic_config,
        .enter_d0i3 = iwl_mvm_enter_d0i3,
        .exit_d0i3 = iwl_mvm_exit_d0i3,
+       .napi_add = iwl_mvm_napi_add,
 };
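
The reworked error dump above lays the RXF and SRAM snapshots out as consecutive (type, len, data[]) records behind a small file header and advances with iwl_mvm_fw_error_next_data(). A compact sketch of that layout; the record types and barker value here are placeholders, not the firmware API ones:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dump_data {			/* one (type, len, data[]) record */
	uint32_t type;
	uint32_t len;
	uint8_t  data[];
};

struct dump_file {			/* file header followed by records */
	uint32_t barker;
	uint32_t file_len;
	uint8_t  data[];
};

/* Step past the current record, like iwl_mvm_fw_error_next_data(). */
static struct dump_data *next_data(struct dump_data *d)
{
	return (struct dump_data *)(d->data + d->len);
}

int main(void)
{
	uint8_t rxf[16] = { 0xaa }, sram[32] = { 0x55 };
	uint32_t file_len = sizeof(struct dump_file) +
			    2 * sizeof(struct dump_data) +
			    sizeof(rxf) + sizeof(sram);

	struct dump_file *file = calloc(1, file_len);
	if (!file)
		return 1;
	file->barker = 0x12345678;	/* placeholder, not the real barker */
	file->file_len = file_len;

	struct dump_data *d = (struct dump_data *)file->data;
	d->type = 1;			/* hypothetical "RXF" record type */
	d->len = sizeof(rxf);
	memcpy(d->data, rxf, sizeof(rxf));

	d = next_data(d);
	d->type = 2;			/* hypothetical "SRAM" record type */
	d->len = sizeof(sram);
	memcpy(d->data, sram, sizeof(sram));

	printf("dump file of %u bytes with 2 records\n", (unsigned)file->file_len);
	free(file);
	return 0;
}
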
index 237efe0ac1c44dab52d375ced3cb84912aabe082..539f3a942d437565ab6ba9accd06f71874985af4 100644 (file)
@@ -156,6 +156,18 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
        idle_cnt = chains_static;
        active_cnt = chains_dynamic;
 
+       /* In scenarios where we only ever use single-stream rates,
+        * i.e. legacy 11b/g/a associations, single-stream APs or even
+        * static SMPS, enable both chains to get diversity, improving
+        * the case where we're far enough from the AP that attenuation
+        * between the two antennas is sufficiently different to impact
+        * performance.
+        */
+       if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+               idle_cnt = 2;
+               active_cnt = 2;
+       }
+
        cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
                                        PHY_RX_CHAIN_VALID_POS);
        cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
@@ -187,7 +199,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
        iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
                                  chains_static, chains_dynamic);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
+       ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
                                   sizeof(struct iwl_phy_context_cmd),
                                   &cmd);
        if (ret)
@@ -202,18 +214,15 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
                         struct cfg80211_chan_def *chandef,
                         u8 chains_static, u8 chains_dynamic)
 {
-       int ret;
-
        WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
                ctxt->ref);
        lockdep_assert_held(&mvm->mutex);
 
        ctxt->channel = chandef->chan;
-       ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
-                                    chains_static, chains_dynamic,
-                                    FW_CTXT_ACTION_ADD, 0);
 
-       return ret;
+       return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+                                     chains_static, chains_dynamic,
+                                     FW_CTXT_ACTION_ADD, 0);
 }
 
 /*
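
The phy context change above widens single-stream configurations to two RX chains when diversity is allowed, then packs the chain counts into rxchain_info. A sketch of that packing; the bit positions below are made up, the real ones live in the firmware API headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up field positions standing in for the PHY_RX_CHAIN_* definitions. */
#define RX_CHAIN_VALID_POS	1
#define RX_CHAIN_CNT_POS	10
#define RX_CHAIN_MIMO_CNT_POS	12

static uint32_t build_rxchain_info(uint8_t valid_ant_mask,
				   uint8_t idle_cnt, uint8_t active_cnt,
				   bool diversity_allowed)
{
	/* Single-stream use: enable both chains to gain RX diversity. */
	if (active_cnt == 1 && diversity_allowed) {
		idle_cnt = 2;
		active_cnt = 2;
	}

	return (uint32_t)valid_ant_mask << RX_CHAIN_VALID_POS |
	       (uint32_t)idle_cnt << RX_CHAIN_CNT_POS |
	       (uint32_t)active_cnt << RX_CHAIN_MIMO_CNT_POS;
}

int main(void)
{
	printf("rxchain_info = 0x%08x\n", build_rxchain_info(0x3, 1, 1, true));
	return 0;
}
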
index 6b636eab33391cbec4957180efe2e74d2ad07388..c182a8baf685857d3c2857443d53ae978e8646a8 100644 (file)
@@ -123,28 +123,6 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
        cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
 }
 
-int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
-                               struct ieee80211_vif *vif, bool enable)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_beacon_filter_cmd cmd = {
-               IWL_BF_CMD_CONFIG_DEFAULTS,
-               .bf_enable_beacon_filter = cpu_to_le32(1),
-               .ba_enable_beacon_abort = cpu_to_le32(enable),
-       };
-
-       if (!mvmvif->bf_data.bf_enabled)
-               return 0;
-
-       if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
-               cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
-
-       mvmvif->bf_data.ba_enabled = enable;
-       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
-       iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
-       return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
-}
-
 static void iwl_mvm_power_log(struct iwl_mvm *mvm,
                              struct iwl_mac_power_cmd *cmd)
 {
@@ -268,6 +246,57 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
                IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
 }
 
+static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
+                                     struct ieee80211_vif *vif)
+{
+       unsigned long *data = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (!mvmvif->phy_ctxt)
+               return;
+
+       if (vif->type == NL80211_IFTYPE_STATION ||
+           vif->type == NL80211_IFTYPE_AP)
+               __set_bit(mvmvif->phy_ctxt->id, data);
+}
+
+static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       unsigned long phy_ctxt_counter = 0;
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mvm_binding_iterator,
+                                                  &phy_ctxt_counter);
+
+       if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+                   ETH_ALEN))
+               return false;
+
+       if (vif->p2p &&
+           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
+               return false;
+       /*
+        * Avoid using uAPSD if P2P client is associated to GO that uses
+        * opportunistic power save. This is due to current FW limitation.
+        */
+       if (vif->p2p &&
+           (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+           IEEE80211_P2P_OPPPS_ENABLE_BIT))
+               return false;
+
+       /*
+        * Avoid using uAPSD if client is in DCM -
+        * low latency issue in Miracast
+        */
+       if (hweight8(phy_ctxt_counter) >= 2)
+               return false;
+
+       return true;
+}
+
 static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif,
                                    struct iwl_mac_power_cmd *cmd)
@@ -280,7 +309,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
        bool radar_detect = false;
        struct iwl_mvm_vif *mvmvif __maybe_unused =
                iwl_mvm_vif_from_mac80211(vif);
-       bool allow_uapsd = true;
 
        cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                            mvmvif->color));
@@ -303,13 +331,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
-           mvmvif->dbgfs_pm.disable_power_off)
-               cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
        if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
-           mvm->pm_disabled)
+           !mvmvif->pm_enabled)
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -351,23 +374,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                        cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
        }
 
-       if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
-                   ETH_ALEN))
-               allow_uapsd = false;
-
-       if (vif->p2p &&
-           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
-               allow_uapsd = false;
-       /*
-        * Avoid using uAPSD if P2P client is associated to GO that uses
-        * opportunistic power save. This is due to current FW limitation.
-        */
-       if (vif->p2p &&
-           vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
-           IEEE80211_P2P_OPPPS_ENABLE_BIT)
-               allow_uapsd = false;
-
-       if (allow_uapsd)
+       if (iwl_mvm_power_allow_uapsd(mvm, vif))
                iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -421,20 +428,13 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
 {
        struct iwl_mac_power_cmd cmd = {};
 
-       if (vif->type != NL80211_IFTYPE_STATION)
-               return 0;
-
-       if (vif->p2p &&
-           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
-               return 0;
-
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
        iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
 #endif
 
-       return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
                                    sizeof(cmd), &cmd);
 }
 
@@ -444,12 +444,6 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
                .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
        };
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
-               return 0;
-
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
-               return 0;
-
        if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
                mvm->ps_disabled = true;
 
@@ -466,7 +460,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
                        "Sending device power command with flags = 0x%X\n",
                        cmd.flags);
 
-       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd),
+       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
                                    &cmd);
 }
 
@@ -508,86 +502,69 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
        return 0;
 }
 
-struct iwl_power_constraint {
+struct iwl_power_vifs {
        struct ieee80211_vif *bf_vif;
        struct ieee80211_vif *bss_vif;
        struct ieee80211_vif *p2p_vif;
-       u16 bss_phyctx_id;
-       u16 p2p_phyctx_id;
-       bool pm_disabled;
-       bool ps_disabled;
-       struct iwl_mvm *mvm;
+       struct ieee80211_vif *ap_vif;
+       struct ieee80211_vif *monitor_vif;
+       bool p2p_active;
+       bool bss_active;
+       bool ap_active;
+       bool monitor_active;
 };
 
 static void iwl_mvm_power_iterator(void *_data, u8 *mac,
                                   struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_power_constraint *power_iterator = _data;
-       struct iwl_mvm *mvm = power_iterator->mvm;
+       struct iwl_power_vifs *power_iterator = _data;
 
+       mvmvif->pm_enabled = false;
        switch (ieee80211_vif_type_p2p(vif)) {
        case NL80211_IFTYPE_P2P_DEVICE:
                break;
 
        case NL80211_IFTYPE_P2P_GO:
        case NL80211_IFTYPE_AP:
-               /* no BSS power mgmt if we have an active AP */
-               if (mvmvif->ap_ibss_active)
-                       power_iterator->pm_disabled = true;
+               /* only a single MAC of the same type */
+               WARN_ON(power_iterator->ap_vif);
+               power_iterator->ap_vif = vif;
+               if (mvmvif->phy_ctxt)
+                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
+                               power_iterator->ap_active = true;
                break;
 
        case NL80211_IFTYPE_MONITOR:
-               /* no BSS power mgmt and no device power save */
-               power_iterator->pm_disabled = true;
-               power_iterator->ps_disabled = true;
+               /* only a single MAC of the same type */
+               WARN_ON(power_iterator->monitor_vif);
+               power_iterator->monitor_vif = vif;
+               if (mvmvif->phy_ctxt)
+                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
+                               power_iterator->monitor_active = true;
                break;
 
        case NL80211_IFTYPE_P2P_CLIENT:
-               if (mvmvif->phy_ctxt)
-                       power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
-
-               /* we should have only one P2P vif */
+               /* only a single MAC of the same type */
                WARN_ON(power_iterator->p2p_vif);
                power_iterator->p2p_vif = vif;
-
-               IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
-                               power_iterator->p2p_phyctx_id,
-                               power_iterator->bss_phyctx_id);
-               if (!(mvm->fw->ucode_capa.flags &
-                     IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
-                       /* no BSS power mgmt if we have a P2P client*/
-                       power_iterator->pm_disabled = true;
-               } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
-                          power_iterator->bss_phyctx_id < MAX_PHYS &&
-                          power_iterator->p2p_phyctx_id ==
-                          power_iterator->bss_phyctx_id) {
-                       power_iterator->pm_disabled = true;
-               }
+               if (mvmvif->phy_ctxt)
+                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
+                               power_iterator->p2p_active = true;
                break;
 
        case NL80211_IFTYPE_STATION:
-               if (mvmvif->phy_ctxt)
-                       power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
-
-               /* we should have only one BSS vif */
+               /* only a single MAC of the same type */
                WARN_ON(power_iterator->bss_vif);
                power_iterator->bss_vif = vif;
+               if (mvmvif->phy_ctxt)
+                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
+                               power_iterator->bss_active = true;
 
                if (mvmvif->bf_data.bf_enabled &&
                    !WARN_ON(power_iterator->bf_vif))
                        power_iterator->bf_vif = vif;
 
-               IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
-                               power_iterator->p2p_phyctx_id,
-                               power_iterator->bss_phyctx_id);
-               if (mvm->fw->ucode_capa.flags &
-                   IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
-                       (power_iterator->p2p_phyctx_id < MAX_PHYS &&
-                        power_iterator->bss_phyctx_id < MAX_PHYS &&
-                        power_iterator->p2p_phyctx_id ==
-                        power_iterator->bss_phyctx_id))
-                       power_iterator->pm_disabled = true;
                break;
 
        default:
@@ -596,70 +573,73 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
 }
 
 static void
-iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
-                                   struct iwl_power_constraint *constraint)
+iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
+                     struct iwl_power_vifs *vifs)
 {
-       lockdep_assert_held(&mvm->mutex);
+       struct iwl_mvm_vif *bss_mvmvif = NULL;
+       struct iwl_mvm_vif *p2p_mvmvif = NULL;
+       struct iwl_mvm_vif *ap_mvmvif = NULL;
+       bool client_same_channel = false;
+       bool ap_same_channel = false;
 
-       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
-               constraint->pm_disabled = true;
-               constraint->ps_disabled = true;
-       }
+       lockdep_assert_held(&mvm->mutex);
 
+       /* get vifs info + set pm_enabled to false */
        ieee80211_iterate_active_interfaces_atomic(mvm->hw,
                                            IEEE80211_IFACE_ITER_NORMAL,
-                                           iwl_mvm_power_iterator, constraint);
-}
-
-int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_power_constraint constraint = {
-                   .p2p_phyctx_id = MAX_PHYS,
-                   .bss_phyctx_id = MAX_PHYS,
-                   .mvm = mvm,
-       };
-       bool ba_enable;
-       int ret;
+                                           iwl_mvm_power_iterator, vifs);
 
-       lockdep_assert_held(&mvm->mutex);
+       if (vifs->bss_vif)
+               bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
-               return 0;
+       if (vifs->p2p_vif)
+               p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
 
-       iwl_mvm_power_get_global_constraint(mvm, &constraint);
-       mvm->ps_disabled = constraint.ps_disabled;
-       mvm->pm_disabled = constraint.pm_disabled;
+       if (vifs->ap_vif)
+               ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
 
-       /* don't update device power state unless we add / remove monitor */
-       if (vif->type == NL80211_IFTYPE_MONITOR) {
-               ret = iwl_mvm_power_update_device(mvm);
-               if (ret)
-                       return ret;
+       /* enable PM on bss if bss stand alone */
+       if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+               bss_mvmvif->pm_enabled = true;
+               return;
        }
 
-       if (constraint.bss_vif) {
-               ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
-               if (ret)
-                       return ret;
+       /* enable PM on p2p if p2p stand alone */
+       if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
+               if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+                       p2p_mvmvif->pm_enabled = true;
+               return;
        }
 
-       if (constraint.p2p_vif) {
-               ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
-               if (ret)
-                       return ret;
+       if (vifs->bss_active && vifs->p2p_active)
+               client_same_channel = (bss_mvmvif->phy_ctxt->id ==
+                                      p2p_mvmvif->phy_ctxt->id);
+       if (vifs->bss_active && vifs->ap_active)
+               ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
+                                  ap_mvmvif->phy_ctxt->id);
+
+       /* clients are not stand alone: enable PM if DCM */
+       if (!(client_same_channel || ap_same_channel) &&
+           (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+               if (vifs->bss_active)
+                       bss_mvmvif->pm_enabled = true;
+               if (vifs->p2p_active &&
+                   (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
+                       p2p_mvmvif->pm_enabled = true;
+               return;
        }
 
-       if (!constraint.bf_vif)
-               return 0;
-
-       vif = constraint.bf_vif;
-       mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-       ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
-                     !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
-
-       return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
+       /*
+        * There is only one channel in the system and there are only
+        * bss and p2p clients that share it
+        */
+       if (client_same_channel && !vifs->ap_active &&
+           (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
+               /* share same channel*/
+               bss_mvmvif->pm_enabled = true;
+               if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+                       p2p_mvmvif->pm_enabled = true;
+       }
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -671,19 +651,10 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
        struct iwl_mac_power_cmd cmd = {};
        int pos = 0;
 
-       if (WARN_ON(!(mvm->fw->ucode_capa.flags &
-                     IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
-               return 0;
-
        mutex_lock(&mvm->mutex);
        memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
        mutex_unlock(&mvm->mutex);
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
-               pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
-                                (cmd.flags &
-                                cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
-                                0 : 1);
        pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
                         iwlmvm_mod_params.power_scheme);
        pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -790,7 +761,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
-       if (mvmvif != mvm->bf_allowed_vif ||
+       if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
            vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
@@ -818,6 +789,26 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
        return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
 }
 
+static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif,
+                                      bool enable)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_beacon_filter_cmd cmd = {
+               IWL_BF_CMD_CONFIG_DEFAULTS,
+               .bf_enable_beacon_filter = cpu_to_le32(1),
+       };
+
+       if (!mvmvif->bf_data.bf_enabled)
+               return 0;
+
+       if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
+               cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+       mvmvif->bf_data.ba_enabled = enable;
+       return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
+}
+
 int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
                                  struct ieee80211_vif *vif,
                                  u32 flags)
@@ -826,8 +817,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
-           vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
        ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
@@ -838,6 +828,55 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
        return ret;
 }
 
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_vif *mvmvif;
+       struct iwl_power_vifs vifs = {};
+       bool ba_enable;
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       iwl_mvm_power_set_pm(mvm, &vifs);
+
+       /* disable PS if CAM */
+       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+               mvm->ps_disabled = true;
+       } else {
+       /* don't update device power state unless we add / remove monitor */
+               if (vifs.monitor_vif) {
+                       if (vifs.monitor_active)
+                               mvm->ps_disabled = true;
+                       ret = iwl_mvm_power_update_device(mvm);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       if (vifs.bss_vif) {
+               ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
+               if (ret)
+                       return ret;
+       }
+
+       if (vifs.p2p_vif) {
+               ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
+               if (ret)
+                       return ret;
+       }
+
+       if (!vifs.bf_vif)
+               return 0;
+
+       mvmvif = iwl_mvm_vif_from_mac80211(vifs.bf_vif);
+
+       ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
+                     !vifs.bf_vif->bss_conf.ps ||
+                     iwl_mvm_vif_low_latency(mvmvif));
+
+       return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable);
+}
+
 int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
                                   struct ieee80211_vif *vif,
                                   bool enable, u32 flags)
@@ -861,9 +900,10 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
                if (WARN_ON(!dtimper_msec))
                        return 0;
 
-               cmd.flags |=
-                       cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
                cmd.skip_dtim_periods = 300 / dtimper_msec;
+               if (cmd.skip_dtim_periods)
+                       cmd.flags |=
+                               cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
        }
        iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -894,33 +934,3 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
 
        return ret;
 }
-
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
-                                struct ieee80211_vif *vif,
-                                bool force,
-                                u32 flags)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-       if (mvmvif != mvm->bf_allowed_vif)
-               return 0;
-
-       if (!mvmvif->bf_data.bf_enabled) {
-               /* disable beacon filtering explicitly if force is true */
-               if (force)
-                       return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
-               return 0;
-       }
-
-       return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
-}
-
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
-{
-       struct iwl_powertable_cmd cmd = {
-               .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
-       };
-
-       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
-                                   sizeof(cmd), &cmd);
-}
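
iwl_mvm_power_set_pm() above replaces the global pm_disabled flag with a per-interface decision: a stand-alone client gets power management, while BSS/P2P/AP combinations only get it when the firmware advertises the matching DCM or SCM capability. A condensed sketch of that decision table with simplified capability flags (not the real TLV flag names):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the ucode capability flags checked by the driver. */
#define CAP_P2P_PM	(1 << 0)
#define CAP_DCM_PS	(1 << 1)	/* clients on different channels */
#define CAP_SCM_PS	(1 << 2)	/* clients sharing one channel */

struct vifs_state {
	bool bss_active, p2p_active, ap_active;
	int  bss_chan, p2p_chan, ap_chan;	/* phy context ids */
};

static void set_pm(const struct vifs_state *v, unsigned int caps,
		   bool *bss_pm, bool *p2p_pm)
{
	*bss_pm = *p2p_pm = false;

	/* stand-alone BSS client: always allowed */
	if (v->bss_active && !v->p2p_active && !v->ap_active) {
		*bss_pm = true;
		return;
	}
	/* stand-alone P2P client: needs the P2P PM capability */
	if (v->p2p_active && !v->bss_active && !v->ap_active) {
		*p2p_pm = caps & CAP_P2P_PM;
		return;
	}

	bool client_same_chan = v->bss_active && v->p2p_active &&
				v->bss_chan == v->p2p_chan;
	bool ap_same_chan = v->bss_active && v->ap_active &&
			    v->bss_chan == v->ap_chan;

	/* clients on different channels (DCM) */
	if (!client_same_chan && !ap_same_chan && (caps & CAP_DCM_PS)) {
		*bss_pm = v->bss_active;
		*p2p_pm = v->p2p_active && (caps & CAP_P2P_PM);
		return;
	}
	/* BSS and P2P client sharing a single channel (SCM) */
	if (client_same_chan && !v->ap_active && (caps & CAP_SCM_PS)) {
		*bss_pm = true;
		*p2p_pm = caps & CAP_P2P_PM;
	}
}

int main(void)
{
	struct vifs_state v = { .bss_active = true, .p2p_active = true,
				.bss_chan = 1, .p2p_chan = 2 };
	bool bss_pm, p2p_pm;

	set_pm(&v, CAP_P2P_PM | CAP_DCM_PS, &bss_pm, &p2p_pm);
	printf("bss PM: %d, p2p PM: %d\n", bss_pm, p2p_pm);
	return 0;
}
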
index 35e86e06dffda924f9f4ba75d36717ac311d3602..ba68d7b8450508d9c7b123500b654d2195b5613a 100644 (file)
@@ -285,7 +285,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 
        iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
                                   sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
index 9f52c5b3f0ec0e9b2da5949f2af88bbcd13d89ce..306a6caa486889b4dc27ea53fc350f246a7433f8 100644 (file)
@@ -211,7 +211,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
                .next_columns = {
                        RS_COLUMN_LEGACY_ANT_B,
                        RS_COLUMN_SISO_ANT_A,
-                       RS_COLUMN_SISO_ANT_B,
+                       RS_COLUMN_MIMO2,
                        RS_COLUMN_INVALID,
                        RS_COLUMN_INVALID,
                        RS_COLUMN_INVALID,
@@ -223,8 +223,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
                .ant = ANT_B,
                .next_columns = {
                        RS_COLUMN_LEGACY_ANT_A,
-                       RS_COLUMN_SISO_ANT_A,
                        RS_COLUMN_SISO_ANT_B,
+                       RS_COLUMN_MIMO2,
                        RS_COLUMN_INVALID,
                        RS_COLUMN_INVALID,
                        RS_COLUMN_INVALID,
@@ -238,10 +238,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
                        RS_COLUMN_SISO_ANT_B,
                        RS_COLUMN_MIMO2,
                        RS_COLUMN_SISO_ANT_A_SGI,
-                       RS_COLUMN_SISO_ANT_B_SGI,
                        RS_COLUMN_LEGACY_ANT_A,
                        RS_COLUMN_LEGACY_ANT_B,
                        RS_COLUMN_INVALID,
+                       RS_COLUMN_INVALID,
                },
                .checks = {
                        rs_siso_allow,
@@ -254,10 +254,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
                        RS_COLUMN_SISO_ANT_A,
                        RS_COLUMN_MIMO2,
                        RS_COLUMN_SISO_ANT_B_SGI,
-                       RS_COLUMN_SISO_ANT_A_SGI,
                        RS_COLUMN_LEGACY_ANT_A,
                        RS_COLUMN_LEGACY_ANT_B,
                        RS_COLUMN_INVALID,
+                       RS_COLUMN_INVALID,
                },
                .checks = {
                        rs_siso_allow,
@@ -271,10 +271,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
                        RS_COLUMN_SISO_ANT_B_SGI,
                        RS_COLUMN_MIMO2_SGI,
                        RS_COLUMN_SISO_ANT_A,
-                       RS_COLUMN_SISO_ANT_B,
-                       RS_COLUMN_MIMO2,
                        RS_COLUMN_LEGACY_ANT_A,
                        RS_COLUMN_LEGACY_ANT_B,
+                       RS_COLUMN_INVALID,
+                       RS_COLUMN_INVALID,
                },
                .checks = {
                        rs_siso_allow,
@@ -289,10 +289,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
                        RS_COLUMN_SISO_ANT_A_SGI,
                        RS_COLUMN_MIMO2_SGI,
                        RS_COLUMN_SISO_ANT_B,
-                       RS_COLUMN_SISO_ANT_A,
-                       RS_COLUMN_MIMO2,
                        RS_COLUMN_LEGACY_ANT_A,
                        RS_COLUMN_LEGACY_ANT_B,
+                       RS_COLUMN_INVALID,
+                       RS_COLUMN_INVALID,
                },
                .checks = {
                        rs_siso_allow,
@@ -304,12 +304,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
                .ant = ANT_AB,
                .next_columns = {
                        RS_COLUMN_SISO_ANT_A,
-                       RS_COLUMN_SISO_ANT_B,
-                       RS_COLUMN_SISO_ANT_A_SGI,
-                       RS_COLUMN_SISO_ANT_B_SGI,
                        RS_COLUMN_MIMO2_SGI,
                        RS_COLUMN_LEGACY_ANT_A,
                        RS_COLUMN_LEGACY_ANT_B,
+                       RS_COLUMN_INVALID,
+                       RS_COLUMN_INVALID,
+                       RS_COLUMN_INVALID,
                },
                .checks = {
                        rs_mimo_allow,
@@ -321,12 +321,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
                .sgi = true,
                .next_columns = {
                        RS_COLUMN_SISO_ANT_A_SGI,
-                       RS_COLUMN_SISO_ANT_B_SGI,
-                       RS_COLUMN_SISO_ANT_A,
-                       RS_COLUMN_SISO_ANT_B,
                        RS_COLUMN_MIMO2,
                        RS_COLUMN_LEGACY_ANT_A,
                        RS_COLUMN_LEGACY_ANT_B,
+                       RS_COLUMN_INVALID,
+                       RS_COLUMN_INVALID,
+                       RS_COLUMN_INVALID,
                },
                .checks = {
                        rs_mimo_allow,
@@ -527,6 +527,9 @@ static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
        IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
        for (i = 0; i < IWL_RATE_COUNT; i++)
                rs_rate_scale_clear_window(&tbl->win[i]);
+
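+       /* also reset the per tx-power reduction histories */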
+       for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
+               rs_rate_scale_clear_window(&tbl->tpc_win[i]);
 }
 
 static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -656,17 +659,34 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
        return 0;
 }
 
-static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
-                             int scale_index, int attempts, int successes)
+static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
+                             struct iwl_scale_tbl_info *tbl,
+                             int scale_index, int attempts, int successes,
+                             u8 reduced_txp)
 {
        struct iwl_rate_scale_data *window = NULL;
+       int ret;
 
        if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
                return -EINVAL;
 
+       if (tbl->column != RS_COLUMN_INVALID) {
+               lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
+               lq_sta->tx_stats[tbl->column][scale_index].success += successes;
+       }
+
        /* Select window for current tx bit rate */
        window = &(tbl->win[scale_index]);
 
+       ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+                                 window);
+       if (ret)
+               return ret;
+
+       if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+               return -EINVAL;
+
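+       /* feed the same attempts/successes into the window tracking the
+        * current tx-power reduction level */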
+       window = &tbl->tpc_win[reduced_txp];
        return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
                                   window);
 }
@@ -1000,6 +1020,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
        u32 ucode_rate;
        struct rs_rate rate;
        struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+       u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1010,7 +1031,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
                return;
        }
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
        /* Disable last tx check if we are debugging with fixed rate */
        if (lq_sta->dbg_fixed_rate) {
                IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
@@ -1141,9 +1162,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
        if (info->flags & IEEE80211_TX_STAT_AMPDU) {
                ucode_rate = le32_to_cpu(table->rs_table[0]);
                rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
-               rs_collect_tx_data(curr_tbl, rate.index,
+               rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
                                   info->status.ampdu_len,
-                                  info->status.ampdu_ack_len);
+                                  info->status.ampdu_ack_len,
+                                  reduced_txp);
 
                /* Update success/fail counts if not searching for new mode */
                if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1176,8 +1198,9 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
                        else
                                continue;
 
-                       rs_collect_tx_data(tmp_tbl, rate.index, 1,
-                                          i < retries ? 0 : legacy_success);
+                       rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
+                                          i < retries ? 0 : legacy_success,
+                                          reduced_txp);
                }
 
                /* Update success/fail counts if not searching for new mode */
@@ -1188,6 +1211,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
        }
        /* The last TX rate is cached in lq_sta; it's set in if/else above */
        lq_sta->last_rate_n_flags = ucode_rate;
+       IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
 done:
        /* See if there's a better rate or modulation mode to try. */
        if (sta && sta->supp_rates[sband->band])
@@ -1311,105 +1335,50 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
        tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
 }
 
-/*
- * Find starting rate for new "search" high-throughput mode of modulation.
- * Goal is to find lowest expected rate (under perfect conditions) that is
- * above the current measured throughput of "active" mode, to give new mode
- * a fair chance to prove itself without too many challenges.
- *
- * This gets called when transitioning to more aggressive modulation
- * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
- * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
- * to decrease to match "active" throughput.  When moving from MIMO to SISO,
- * bit rate will typically need to increase, but not if performance was bad.
- */
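+/*
+ * Find a starting rate for the new "search" column: pick a throughput target
+ * from the "active" column (expected throughput when the success ratio is
+ * high, measured throughput otherwise) and return the lowest rate whose
+ * expected throughput exceeds it, so the new column gets a fair chance.
+ */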
 static s32 rs_get_best_rate(struct iwl_mvm *mvm,
                            struct iwl_lq_sta *lq_sta,
                            struct iwl_scale_tbl_info *tbl,     /* "search" */
-                           u16 rate_mask, s8 index)
+                           unsigned long rate_mask, s8 index)
 {
-       /* "active" values */
        struct iwl_scale_tbl_info *active_tbl =
            &(lq_sta->lq_info[lq_sta->active_tbl]);
-       s32 active_sr = active_tbl->win[index].success_ratio;
-       s32 active_tpt = active_tbl->expected_tpt[index];
-       /* expected "search" throughput */
+       s32 success_ratio = active_tbl->win[index].success_ratio;
+       u16 expected_current_tpt = active_tbl->expected_tpt[index];
        const u16 *tpt_tbl = tbl->expected_tpt;
-
-       s32 new_rate, high, low, start_hi;
        u16 high_low;
-       s8 rate = index;
-
-       new_rate = high = low = start_hi = IWL_RATE_INVALID;
-
-       while (1) {
-               high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
-                                               tbl->rate.type);
+       u32 target_tpt;
+       int rate_idx;
 
-               low = high_low & 0xff;
-               high = (high_low >> 8) & 0xff;
-
-               /*
-                * Lower the "search" bit rate, to give new "search" mode
-                * approximately the same throughput as "active" if:
-                *
-                * 1) "Active" mode has been working modestly well (but not
-                *    great), and expected "search" throughput (under perfect
-                *    conditions) at candidate rate is above the actual
-                *    measured "active" throughput (but less than expected
-                *    "active" throughput under perfect conditions).
-                * OR
-                * 2) "Active" mode has been working perfectly or very well
-                *    and expected "search" throughput (under perfect
-                *    conditions) at candidate rate is above expected
-                *    "active" throughput (under perfect conditions).
-                */
-               if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
-                    ((active_sr > RS_SR_FORCE_DECREASE) &&
-                     (active_sr <= IWL_RATE_HIGH_TH) &&
-                     (tpt_tbl[rate] <= active_tpt))) ||
-                   ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
-                    (tpt_tbl[rate] > active_tpt))) {
-                       /* (2nd or later pass)
-                        * If we've already tried to raise the rate, and are
-                        * now trying to lower it, use the higher rate. */
-                       if (start_hi != IWL_RATE_INVALID) {
-                               new_rate = start_hi;
-                               break;
-                       }
-
-                       new_rate = rate;
+       if (success_ratio > RS_SR_NO_DECREASE) {
+               target_tpt = 100 * expected_current_tpt;
+               IWL_DEBUG_RATE(mvm,
+                              "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
+                              success_ratio, target_tpt);
+       } else {
+               target_tpt = lq_sta->last_tpt;
+               IWL_DEBUG_RATE(mvm,
+                              "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
+                              success_ratio, target_tpt);
+       }
 
-                       /* Loop again with lower rate */
-                       if (low != IWL_RATE_INVALID)
-                               rate = low;
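+       /* walk up from the lowest allowed rate to the first one whose
+        * expected throughput beats the target chosen above */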
+       rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
 
-                       /* Lower rate not available, use the original */
-                       else
-                               break;
-
-               /* Else try to raise the "search" rate to match "active" */
-               } else {
-                       /* (2nd or later pass)
-                        * If we've already tried to lower the rate, and are
-                        * now trying to raise it, use the lower rate. */
-                       if (new_rate != IWL_RATE_INVALID)
-                               break;
+       while (rate_idx != IWL_RATE_INVALID) {
+               if (target_tpt < (100 * tpt_tbl[rate_idx]))
+                       break;
 
-                       /* Loop again with higher rate */
-                       else if (high != IWL_RATE_INVALID) {
-                               start_hi = high;
-                               rate = high;
+               high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
+                                               tbl->rate.type);
 
-                       /* Higher rate not available, use the original */
-                       } else {
-                               new_rate = rate;
-                               break;
-                       }
-               }
+               rate_idx = (high_low >> 8) & 0xff;
        }
 
-       return new_rate;
+       IWL_DEBUG_RATE(mvm, "Best rate found %d target_tpt %d expected_new %d\n",
+                      rate_idx, target_tpt,
+                      rate_idx != IWL_RATE_INVALID ?
+                      100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
+
+       return rate_idx;
 }
 
 static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
@@ -1584,7 +1553,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 
                tpt = lq_sta->last_tpt / 100;
                expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
-                                                            tbl->rate.bw);
+                                                    rs_bw_from_sta_bw(sta));
                if (WARN_ON_ONCE(!expected_tpt_tbl))
                        continue;
 
@@ -1625,7 +1594,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
        const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
        u32 sz = (sizeof(struct iwl_scale_tbl_info) -
                  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-       u16 rate_mask = 0;
+       unsigned long rate_mask = 0;
        u32 rate_idx = 0;
 
        memcpy(search_tbl, tbl, sz);
@@ -1667,7 +1636,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
                    !(BIT(rate_idx) & rate_mask)) {
                        IWL_DEBUG_RATE(mvm,
                                       "can not switch with index %d"
-                                      " rate mask %x\n",
+                                      " rate mask %lx\n",
                                       rate_idx, rate_mask);
 
                        goto err;
@@ -1769,6 +1738,203 @@ out:
        return action;
 }
 
+static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
+                               int *weaker, int *stronger)
+{
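+       /* a larger reduction index means lower tx power, i.e. "weaker" */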
+       *weaker = index + TPC_TX_POWER_STEP;
+       if (*weaker > TPC_MAX_REDUCTION)
+               *weaker = TPC_INVALID;
+
+       *stronger = index - TPC_TX_POWER_STEP;
+       if (*stronger < 0)
+               *stronger = TPC_INVALID;
+}
+
+static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                          struct rs_rate *rate, enum ieee80211_band band)
+{
+       int index = rate->index;
+       bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
+       bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
+                               !vif->bss_conf.ps);
+
+       IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
+                      cam, sta_ps_disabled);
+       /*
+        * Allow TPC only if power management is enabled, or if BT coex
+        * activity grade allows it and we are on 2.4 GHz.
+        */
+       if ((cam || sta_ps_disabled) &&
+           !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
+               return false;
+
+       IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
+       if (is_legacy(rate))
+               return index == IWL_RATE_54M_INDEX;
+       if (is_ht(rate))
+               return index == IWL_RATE_MCS_7_INDEX;
+       if (is_vht(rate))
+               return index == IWL_RATE_MCS_7_INDEX ||
+                      index == IWL_RATE_MCS_8_INDEX ||
+                      index == IWL_RATE_MCS_9_INDEX;
+
+       WARN_ON_ONCE(1);
+       return false;
+}
+
+enum tpc_action {
+       TPC_ACTION_STAY,
+       TPC_ACTION_DECREASE,
+       TPC_ACTION_INCREASE,
+       TPC_ACTION_NO_RESTRICTION,
+};
+
+static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
+                                        s32 sr, int weak, int strong,
+                                        int current_tpt,
+                                        int weak_tpt, int strong_tpt)
+{
+       /* stay until we have valid tpt */
+       if (current_tpt == IWL_INVALID_VALUE) {
+               IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
+               return TPC_ACTION_STAY;
+       }
+
+       /* Too many failures, increase txp */
+       if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
+               IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
+               return TPC_ACTION_NO_RESTRICTION;
+       }
+
+       /* try decreasing first if applicable */
+       if (weak != TPC_INVALID) {
+               if (weak_tpt == IWL_INVALID_VALUE &&
+                   (strong_tpt == IWL_INVALID_VALUE ||
+                    current_tpt >= strong_tpt)) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "no weak txp measurement. decrease txp\n");
+                       return TPC_ACTION_DECREASE;
+               }
+
+               if (weak_tpt > current_tpt) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "lower txp has better tpt. decrease txp\n");
+                       return TPC_ACTION_DECREASE;
+               }
+       }
+
+       /* next, increase if needed */
+       if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
+               if (weak_tpt == IWL_INVALID_VALUE &&
+                   strong_tpt != IWL_INVALID_VALUE &&
+                   current_tpt < strong_tpt) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "higher txp has better tpt. increase txp\n");
+                       return TPC_ACTION_INCREASE;
+               }
+
+               if (weak_tpt < current_tpt &&
+                   (strong_tpt == IWL_INVALID_VALUE ||
+                    strong_tpt > current_tpt)) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "lower txp has worse tpt. increase txp\n");
+                       return TPC_ACTION_INCREASE;
+               }
+       }
+
+       IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
+       return TPC_ACTION_STAY;
+}
+
+static bool rs_tpc_perform(struct iwl_mvm *mvm,
+                          struct ieee80211_sta *sta,
+                          struct iwl_lq_sta *lq_sta,
+                          struct iwl_scale_tbl_info *tbl)
+{
+       struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+       struct ieee80211_vif *vif = mvm_sta->vif;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       enum ieee80211_band band;
+       struct iwl_rate_scale_data *window;
+       struct rs_rate *rate = &tbl->rate;
+       enum tpc_action action;
+       s32 sr;
+       u8 cur = lq_sta->lq.reduced_tpc;
+       int current_tpt;
+       int weak, strong;
+       int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+       if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+               IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
+                              lq_sta->dbg_fixed_txp_reduction);
+               lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
+               return cur != lq_sta->dbg_fixed_txp_reduction;
+       }
+#endif
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       if (WARN_ON(!chanctx_conf))
+               band = IEEE80211_NUM_BANDS;
+       else
+               band = chanctx_conf->def.chan->band;
+       rcu_read_unlock();
+
+       if (!rs_tpc_allowed(mvm, vif, rate, band)) {
+               IWL_DEBUG_RATE(mvm,
+                              "tpc is not allowed. remove txp restrictions\n");
+               lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+               return cur != TPC_NO_REDUCTION;
+       }
+
+       rs_get_adjacent_txp(mvm, cur, &weak, &strong);
+
+       /* Collect measured throughputs for current and adjacent rates */
+       window = tbl->tpc_win;
+       sr = window[cur].success_ratio;
+       current_tpt = window[cur].average_tpt;
+       if (weak != TPC_INVALID)
+               weak_tpt = window[weak].average_tpt;
+       if (strong != TPC_INVALID)
+               strong_tpt = window[strong].average_tpt;
+
+       IWL_DEBUG_RATE(mvm,
+                      "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
+                      cur, current_tpt, sr, weak, strong,
+                      weak_tpt, strong_tpt);
+
+       action = rs_get_tpc_action(mvm, sr, weak, strong,
+                                  current_tpt, weak_tpt, strong_tpt);
+
+       /* override actions if we are on the edge */
+       if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
+               IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
+               action = TPC_ACTION_STAY;
+       } else if (strong == TPC_INVALID &&
+                  (action == TPC_ACTION_INCREASE ||
+                   action == TPC_ACTION_NO_RESTRICTION)) {
+               IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
+               action = TPC_ACTION_STAY;
+       }
+
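+       /* a true return tells the caller to send an updated LQ command */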
+       switch (action) {
+       case TPC_ACTION_DECREASE:
+               lq_sta->lq.reduced_tpc = weak;
+               return true;
+       case TPC_ACTION_INCREASE:
+               lq_sta->lq.reduced_tpc = strong;
+               return true;
+       case TPC_ACTION_NO_RESTRICTION:
+               lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+               return true;
+       case TPC_ACTION_STAY:
+               /* do nothing */
+               break;
+       }
+       return false;
+}
+
 /*
  * Do rate scaling and search for new modulation mode.
  */
@@ -2019,6 +2185,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                break;
        case RS_ACTION_STAY:
                /* No change */
+               if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
+                       update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
+               break;
        default:
                break;
        }
@@ -2271,10 +2440,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
                        if (i == IWL_RATE_9M_INDEX)
                                continue;
 
-                       /* Disable MCS9 as a workaround */
-                       if (i == IWL_RATE_MCS_9_INDEX)
-                               continue;
-
                        /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
                        if (i == IWL_RATE_MCS_9_INDEX &&
                            sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2293,10 +2458,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
                        if (i == IWL_RATE_9M_INDEX)
                                continue;
 
-                       /* Disable MCS9 as a workaround */
-                       if (i == IWL_RATE_MCS_9_INDEX)
-                               continue;
-
                        /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
                        if (i == IWL_RATE_MCS_9_INDEX &&
                            sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2478,6 +2639,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        lq_sta->is_agg = 0;
 #ifdef CONFIG_MAC80211_DEBUGFS
        lq_sta->dbg_fixed_rate = 0;
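+       /* TPC_INVALID is above TPC_MAX_REDUCTION, so no fixed tx power
+        * reduction is applied until one is set via debugfs */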
+       lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
 #endif
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@ -2653,6 +2815,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
                rs_build_rates_table_from_fixed(mvm, lq_cmd,
                                                lq_sta->band,
                                                lq_sta->dbg_fixed_rate);
+               lq_cmd->reduced_tpc = 0;
                ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
                        RATE_MCS_ANT_POS;
        } else
@@ -2783,7 +2946,6 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
        size_t buf_size;
        u32 parsed_rate;
 
-
        mvm = lq_sta->drv;
        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) -  1);
@@ -2856,6 +3018,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
                        lq_sta->lq.agg_disable_start_th,
                        lq_sta->lq.agg_frame_cnt_limit);
 
+       desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
        desc += sprintf(buff+desc,
                        "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
                        lq_sta->lq.initial_rate_index[0],
@@ -2928,6 +3091,94 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
        .llseek = default_llseek,
 };
 
+static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
+                                             char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
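+       /* dump a CSV table: one row per RS column with success/total tx
+        * counters for every rate index */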
+       static const char * const column_name[] = {
+               [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
+               [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
+               [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
+               [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
+               [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
+               [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
+               [RS_COLUMN_MIMO2] = "MIMO2",
+               [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
+       };
+
+       static const char * const rate_name[] = {
+               [IWL_RATE_1M_INDEX] = "1M",
+               [IWL_RATE_2M_INDEX] = "2M",
+               [IWL_RATE_5M_INDEX] = "5.5M",
+               [IWL_RATE_11M_INDEX] = "11M",
+               [IWL_RATE_6M_INDEX] = "6M|MCS0",
+               [IWL_RATE_9M_INDEX] = "9M",
+               [IWL_RATE_12M_INDEX] = "12M|MCS1",
+               [IWL_RATE_18M_INDEX] = "18M|MCS2",
+               [IWL_RATE_24M_INDEX] = "24M|MCS3",
+               [IWL_RATE_36M_INDEX] = "36M|MCS4",
+               [IWL_RATE_48M_INDEX] = "48M|MCS5",
+               [IWL_RATE_54M_INDEX] = "54M|MCS6",
+               [IWL_RATE_MCS_7_INDEX] = "MCS7",
+               [IWL_RATE_MCS_8_INDEX] = "MCS8",
+               [IWL_RATE_MCS_9_INDEX] = "MCS9",
+       };
+
+       char *buff, *pos, *endpos;
+       int col, rate;
+       ssize_t ret;
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct rs_rate_stats *stats;
+       static const size_t bufsz = 1024;
+
+       buff = kmalloc(bufsz, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       pos = buff;
+       endpos = pos + bufsz;
+
+       pos += scnprintf(pos, endpos - pos, "COLUMN,");
+       for (rate = 0; rate < IWL_RATE_COUNT; rate++)
+               pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
+       pos += scnprintf(pos, endpos - pos, "\n");
+
+       for (col = 0; col < RS_COLUMN_COUNT; col++) {
+               pos += scnprintf(pos, endpos - pos,
+                                "%s,", column_name[col]);
+
+               for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
+                       stats = &(lq_sta->tx_stats[col][rate]);
+                       pos += scnprintf(pos, endpos - pos,
+                                        "%llu/%llu,",
+                                        stats->success,
+                                        stats->total);
+               }
+               pos += scnprintf(pos, endpos - pos, "\n");
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+       kfree(buff);
+       return ret;
+}
+
+static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
+                                              const char __user *user_buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct iwl_lq_sta *lq_sta = file->private_data;
+
+       memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
+
+       return count;
+}
+
+static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
+       .read = rs_sta_dbgfs_drv_tx_stats_read,
+       .write = rs_sta_dbgfs_drv_tx_stats_write,
+       .open = simple_open,
+       .llseek = default_llseek,
+};
+
 static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
 {
        struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -2937,9 +3188,15 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
        lq_sta->rs_sta_dbgfs_stats_table_file =
                debugfs_create_file("rate_stats_table", S_IRUSR, dir,
                                    lq_sta, &rs_sta_dbgfs_stats_table_ops);
+       lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
+               debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
+                                   lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
        lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
                debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
                                  &lq_sta->tx_agg_tid_en);
+       lq_sta->rs_sta_dbgfs_reduced_txp_file =
+               debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
+                                 &lq_sta->dbg_fixed_txp_reduction);
 }
 
 static void rs_remove_debugfs(void *mvm, void *mvm_sta)
@@ -2947,7 +3204,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
        struct iwl_lq_sta *lq_sta = mvm_sta;
        debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
        debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
        debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
 }
 #endif
 
index 0acfac96a56c6dca2d2799812231404921abd15b..374a83d7db25a98dd76da34d3fdff9557e48664f 100644 (file)
@@ -158,6 +158,13 @@ enum {
 #define RS_SR_FORCE_DECREASE           1920    /*  15% */
 #define RS_SR_NO_DECREASE              10880   /*  85% */
 
+#define TPC_SR_FORCE_INCREASE          9600    /* 75% */
+#define TPC_SR_NO_INCREASE             10880   /* 85% */
+#define TPC_TX_POWER_STEP              3
+#define TPC_MAX_REDUCTION              15
+#define TPC_NO_REDUCTION               0
+#define TPC_INVALID                    0xff
+
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000) /* 4 milliseconds */
 #define LINK_QUAL_AGG_TIME_LIMIT_MAX   (8000)
 #define LINK_QUAL_AGG_TIME_LIMIT_MIN   (100)
@@ -266,9 +273,16 @@ enum rs_column {
        RS_COLUMN_MIMO2_SGI,
 
        RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+       RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
        RS_COLUMN_INVALID,
 };
 
+/* Packet stats per rate */
+struct rs_rate_stats {
+       u64 success;
+       u64 total;
+};
+
 /**
  * struct iwl_scale_tbl_info -- tx params and success history for all rates
  *
@@ -280,6 +294,8 @@ struct iwl_scale_tbl_info {
        enum rs_column column;
        const u16 *expected_tpt;        /* throughput metrics; expected_tpt_G, etc. */
        struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+       /* per txpower-reduction history */
+       struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
 };
 
 enum {
@@ -315,6 +331,8 @@ struct iwl_lq_sta {
        bool is_vht;
        enum ieee80211_band band;
 
+       struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
+
        /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
        unsigned long active_legacy_rate;
        unsigned long active_siso_rate;
@@ -334,8 +352,11 @@ struct iwl_lq_sta {
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct dentry *rs_sta_dbgfs_scale_table_file;
        struct dentry *rs_sta_dbgfs_stats_table_file;
+       struct dentry *rs_sta_dbgfs_drv_tx_stats_file;
        struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+       struct dentry *rs_sta_dbgfs_reduced_txp_file;
        u32 dbg_fixed_rate;
+       u8 dbg_fixed_txp_reduction;
 #endif
        struct iwl_mvm *drv;
 
@@ -345,6 +366,9 @@ struct iwl_lq_sta {
        u32 last_rate_n_flags;
        /* packets destined for this STA are aggregated */
        u8 is_agg;
+
+       /* tx power reduce for this sta */
+       int tpc_reduce;
 };
 
 /* Initialize station's rate scaling information after adding station */
index 6061553a5e444956c7b5d626695a2950fb1f3fd1..cf7276967acdec6439392c82e44e94de52d453c8 100644 (file)
@@ -60,7 +60,6 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
 #include "iwl-trans.h"
-
 #include "mvm.h"
 #include "fw-api.h"
 
@@ -130,42 +129,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-       ieee80211_rx_ni(mvm->hw, skb);
-}
-
-static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
-                             struct iwl_rx_phy_info *phy_info,
-                             struct ieee80211_rx_status *rx_status)
-{
-       int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
-       u32 agc_a, agc_b;
-       u32 val;
-
-       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
-       agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
-       agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
-
-       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
-       rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
-       rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
-
-       /*
-        * dBm = rssi dB - agc dB - constant.
-        * Higher AGC (higher radio gain) means lower signal.
-        */
-       rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
-       rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
-       max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
-
-       IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
-                       rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
-
-       rx_status->signal = max_rssi_dbm;
-       rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
-                               RX_RES_PHY_FLAGS_ANTENNA)
-                                       >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-       rx_status->chain_signal[0] = rssi_a_dbm;
-       rx_status->chain_signal[1] = rssi_b_dbm;
+       ieee80211_rx(mvm->hw, skb);
 }
 
 /*
@@ -337,10 +301,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
         */
        /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
-               iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
-       else
-               iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
+       iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
 
        IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
                              (unsigned long long)rx_status.mactime);
@@ -394,6 +355,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
                rx_status.flag |= RX_FLAG_VHT;
                rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
+               if (rate_n_flags & RATE_MCS_BF_MSK)
+                       rx_status.vht_flag |= RX_VHT_FLAG_BF;
        } else {
                rx_status.rate_idx =
                        iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
index c91dc8498852c46653cc43fddb57c382d3d7f3f0..4b6c7d4bd199ef4dbc20defd15f966e59545d6e3 100644 (file)
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_bound);
-       /*
-        * Under low latency traffic passive scan is fragmented meaning
-        * that dwell on a particular channel will be fragmented. Each fragment
-        * dwell time is 20ms and fragments period is 105ms. Skipping to next
-        * channel will be delayed by the same period - 105ms. So suspend_time
-        * parameter describing both fragments and channels skipping periods is
-        * set to 105ms. This value is chosen so that overall passive scan
-        * duration will not be too long. Max_out_time in this case is set to
-        * 70ms, so for active scanning operating channel will be left for 70ms
-        * while for passive still for 20ms (fragment dwell).
-        */
-       if (global_bound) {
-               if (!iwl_mvm_low_latency(mvm)) {
-                       params->suspend_time = ieee80211_tu_to_usec(100);
-                       params->max_out_time = ieee80211_tu_to_usec(600);
-               } else {
-                       params->suspend_time = ieee80211_tu_to_usec(105);
-                       /* P2P doesn't support fragmented passive scan, so
-                        * configure max_out_time to be at least longest dwell
-                        * time for passive scan.
-                        */
-                       if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-                               params->max_out_time = ieee80211_tu_to_usec(70);
-                               params->passive_fragmented = true;
-                       } else {
-                               u32 passive_dwell;
 
-                               /*
-                                * Use band G so that passive channel dwell time
-                                * will be assigned with maximum value.
-                                */
-                               band = IEEE80211_BAND_2GHZ;
-                               passive_dwell = iwl_mvm_get_passive_dwell(band);
-                               params->max_out_time =
-                                       ieee80211_tu_to_usec(passive_dwell);
-                       }
-               }
+       if (!global_bound)
+               goto not_bound;
+
+       params->suspend_time = 100;
+       params->max_out_time = 600;
+
+       if (iwl_mvm_low_latency(mvm)) {
+               params->suspend_time = 250;
+               params->max_out_time = 250;
        }
 
+not_bound:
+
        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].passive = 20;
-               else
-                       params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(band);
+               params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
                params->dwell[band].active = iwl_mvm_get_active_dwell(band,
                                                                      n_ssids);
        }
@@ -335,7 +306,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
                .id = SCAN_REQUEST_CMD,
                .len = { 0, },
                .data = { mvm->scan_cmd, },
-               .flags = CMD_SYNC,
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
        };
        struct iwl_scan_cmd *cmd = mvm->scan_cmd;
@@ -348,7 +318,10 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
        struct iwl_mvm_scan_params params = {};
 
        lockdep_assert_held(&mvm->mutex);
-       BUG_ON(mvm->scan_cmd == NULL);
+
+       /* we should have failed registration if scan_cmd was NULL */
+       if (WARN_ON(mvm->scan_cmd == NULL))
+               return -ENOMEM;
 
        IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
        mvm->scan_status = IWL_MVM_SCAN_OS;
@@ -543,7 +516,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
                                   ARRAY_SIZE(scan_abort_notif),
                                   iwl_mvm_scan_abort_notif, NULL);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
                /* mac80211's state will be cleaned in the nic_restart flow */
@@ -567,9 +540,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
        /* scan status must be locked for proper checking */
        lockdep_assert_held(&mvm->mutex);
 
-       IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+       IWL_DEBUG_SCAN(mvm,
+                      "Scheduled scan completed, status %s EBS status %s:%d\n",
                       scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
-                      "completed" : "aborted");
+                      "completed" : "aborted", scan_notif->ebs_status ==
+                      IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
+                      scan_notif->ebs_status);
 
        /* only call mac80211 completion if the stop was initiated by FW */
        if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
@@ -577,6 +554,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
                ieee80211_sched_scan_stopped(mvm->hw);
        }
 
+       mvm->last_ebs_successful = !scan_notif->ebs_status;
+
        return 0;
 }
 
@@ -761,7 +740,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
        int head = 0;
-       int tail = band_2ghz + band_5ghz;
+       int tail = band_2ghz + band_5ghz - 1;
        u32 ssid_bitmap;
        int cmd_len;
        int ret;
@@ -769,7 +748,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        struct iwl_scan_offload_cfg *scan_cfg;
        struct iwl_host_cmd cmd = {
                .id = SCAN_OFFLOAD_CONFIG_CMD,
-               .flags = CMD_SYNC,
        };
        struct iwl_mvm_scan_params params = {};
 
@@ -827,7 +805,6 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
        struct iwl_scan_offload_blacklist *blacklist;
        struct iwl_host_cmd cmd = {
                .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
-               .flags = CMD_SYNC,
                .len[1] = sizeof(*profile_cfg),
                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
                .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
@@ -913,7 +890,12 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
        }
 
-       return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
+       if (mvm->last_ebs_successful &&
+           mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
+               scan_req.flags |=
+                       cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
+
+       return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
                                    sizeof(scan_req), &scan_req);
 }
 
@@ -922,7 +904,6 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
        int ret;
        struct iwl_host_cmd cmd = {
                .id = SCAN_OFFLOAD_ABORT_CMD,
-               .flags = CMD_SYNC,
        };
        u32 status;
 
@@ -951,7 +932,7 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
        return ret;
 }
 
-int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
 {
        int ret;
        struct iwl_notification_wait wait_scan_done;
@@ -989,5 +970,8 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
         */
        mvm->scan_status = IWL_MVM_SCAN_NONE;
 
+       if (notify)
+               ieee80211_sched_scan_stopped(mvm->hw);
+
        return 0;
 }
index 88809b2d165445fcf9188c8f91bcf755a9e6704f..7edfd15efc9d001f227ea2c35b046c0f47cb55af 100644 (file)
@@ -237,9 +237,6 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
                .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
        };
 
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
-               return 0;
-
        /*
         * Ignore the call if we are in HW Restart flow, or if the handled
         * vif is a p2p device.
index f339ef8842508774e2ff7d51c9bdce069e014a1b..1fb01ea2e7047201324faefdc1f181408a77d070 100644 (file)
 #include "sta.h"
 #include "rs.h"
 
-static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
-                                        struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
-{
-       memset(cmd_v5, 0, sizeof(*cmd_v5));
-
-       cmd_v5->add_modify = cmd_v7->add_modify;
-       cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
-       cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
-       memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
-       cmd_v5->sta_id = cmd_v7->sta_id;
-       cmd_v5->modify_mask = cmd_v7->modify_mask;
-       cmd_v5->station_flags = cmd_v7->station_flags;
-       cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
-       cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
-       cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
-       cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
-       cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
-       cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
-       cmd_v5->assoc_id = cmd_v7->assoc_id;
-       cmd_v5->beamform_flags = cmd_v7->beamform_flags;
-       cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
-}
-
-static void
-iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
-                                     struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
-                                     u32 mac_id_n_color)
-{
-       memset(sta_cmd, 0, sizeof(*sta_cmd));
-
-       sta_cmd->sta_id = key_cmd->sta_id;
-       sta_cmd->add_modify = STA_MODE_MODIFY;
-       sta_cmd->modify_mask = STA_MODIFY_KEY;
-       sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
-
-       sta_cmd->key.key_offset = key_cmd->key_offset;
-       sta_cmd->key.key_flags = key_cmd->key_flags;
-       memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
-       sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
-       memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
-              sizeof(sta_cmd->key.tkip_rx_ttak));
-}
-
-static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
-                                          struct iwl_mvm_add_sta_cmd_v7 *cmd,
-                                          int *status)
-{
-       struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
-               return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
-                                                  cmd, status);
-
-       iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
-       return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
-                                          &cmd_v5, status);
-}
-
-static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
-                                   struct iwl_mvm_add_sta_cmd_v7 *cmd)
-{
-       struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
-               return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
-                                           sizeof(*cmd), cmd);
-
-       iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
-       return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
-                                   &cmd_v5);
-}
-
-static int
-iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
-                                   struct iwl_mvm_add_sta_key_cmd *cmd,
-                                   u32 mac_id_n_color,
-                                   int *status)
-{
-       struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
-               return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
-                                                  sizeof(*cmd), cmd, status);
-
-       iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
-       return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
-                                          &sta_cmd, status);
-}
-
-static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
-                                       u32 flags,
-                                       struct iwl_mvm_add_sta_key_cmd *cmd,
-                                       u32 mac_id_n_color)
-{
-       struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
-               return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
-                                           sizeof(*cmd), cmd);
-
-       iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
-       return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
-                                   &sta_cmd);
-}
-
 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
 {
@@ -207,7 +98,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
+       struct iwl_mvm_add_sta_cmd add_sta_cmd;
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;
@@ -295,7 +186,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+                                         &add_sta_cmd, &status);
        if (ret)
                return ret;
 
@@ -380,7 +272,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain)
 {
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+       struct iwl_mvm_add_sta_cmd cmd = {};
        int ret;
        u32 status;
 
@@ -393,7 +285,8 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
        cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
        if (ret)
                return ret;
 
@@ -434,7 +327,7 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
                return -EINVAL;
        }
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, CMD_SYNC,
+       ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
                                   sizeof(rm_sta_cmd), &rm_sta_cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
@@ -498,7 +391,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
                                sta_id);
                        continue;
                }
-               rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+               RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
                clear_bit(sta_id, mvm->sta_drained);
        }
 
@@ -520,14 +413,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                /* flush its queues here since we are freeing mvm_sta */
                ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
 
-               /*
-                * Put a non-NULL since the fw station isn't removed.
-                * It will be removed after the MAC will be set as
-                * unassoc.
-                */
-               rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
-                                  ERR_PTR(-EINVAL));
-
                /* if we are associated - we can't remove the AP STA now */
                if (vif->bss_conf.assoc)
                        return ret;
@@ -557,7 +442,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
        } else {
                spin_unlock_bh(&mvm_sta->lock);
                ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
-               rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+               RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
        }
 
        return ret;
@@ -571,7 +456,7 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvm->mutex);
 
-       rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+       RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
        return ret;
 }
 
@@ -593,7 +478,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
 
 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
 {
-       rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+       RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
        memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
        sta->sta_id = IWL_MVM_STATION_COUNT;
 }
@@ -603,13 +488,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
                                      const u8 *addr,
                                      u16 mac_id, u16 color)
 {
-       struct iwl_mvm_add_sta_cmd_v7 cmd;
+       struct iwl_mvm_add_sta_cmd cmd;
        int ret;
        u32 status;
 
        lockdep_assert_held(&mvm->mutex);
 
-       memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
+       memset(&cmd, 0, sizeof(cmd));
        cmd.sta_id = sta->sta_id;
        cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
                                                             color));
@@ -619,7 +504,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
        if (addr)
                memcpy(cmd.addr, addr, ETH_ALEN);
 
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
        if (ret)
                return ret;
 
@@ -753,7 +639,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       int tid, u16 ssn, bool start)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+       struct iwl_mvm_add_sta_cmd cmd = {};
        int ret;
        u32 status;
 
@@ -777,7 +663,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                                  STA_MODIFY_REMOVE_BA_TID;
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
        if (ret)
                return ret;
 
@@ -812,7 +699,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                              int tid, u8 queue, bool start)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+       struct iwl_mvm_add_sta_cmd cmd = {};
        int ret;
        u32 status;
 
@@ -834,7 +721,8 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
        if (ret)
                return ret;
 
@@ -1129,12 +1017,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
                                u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
                                u32 cmd_flags)
 {
-       __le16 key_flags;
        struct iwl_mvm_add_sta_key_cmd cmd = {};
+       __le16 key_flags;
        int ret, status;
        u16 keyidx;
        int i;
-       u32 mac_id_n_color = mvm_sta->mac_id_n_color;
 
        keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
                 STA_KEY_FLG_KEYID_MSK;
@@ -1166,13 +1053,12 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
        cmd.sta_id = sta_id;
 
        status = ADD_STA_SUCCESS;
-       if (cmd_flags == CMD_SYNC)
-               ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
-                                                         mac_id_n_color,
-                                                         &status);
+       if (cmd_flags & CMD_ASYNC)
+               ret =  iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
+                                           sizeof(cmd), &cmd);
        else
-               ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
-                                                  mac_id_n_color);
+               ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+                                                 &cmd, &status);
 
        switch (status) {
        case ADD_STA_SUCCESS:
@@ -1225,7 +1111,7 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
                       remove_key ? "removing" : "installing",
                       igtk_cmd.sta_id);
 
-       return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, CMD_SYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
                                    sizeof(igtk_cmd), &igtk_cmd);
 }
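
Throughout this series CMD_SYNC disappears: synchronous submission is now just a flags value of 0, and the asynchronous case is detected with a bit test (cmd_flags & CMD_ASYNC) rather than an equality comparison. A standalone sketch of why the bit test is the robust form; the flag values below are assumptions for illustration, not the driver's definitions:

#include <stdio.h>

#define CMD_ASYNC       0x01    /* assumed value, illustration only */
#define CMD_WANT_SKB    0x02    /* assumed value, illustration only */

int main(void)
{
        unsigned int flags = CMD_ASYNC | CMD_WANT_SKB;

        /* equality stops matching as soon as any other flag is set */
        printf("flags == CMD_ASYNC -> %d\n", flags == CMD_ASYNC);

        /* the bit test keeps working regardless of the other flags */
        printf("flags &  CMD_ASYNC -> %d\n", !!(flags & CMD_ASYNC));
        return 0;
}
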
 
@@ -1312,15 +1198,15 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
-                                          seq.tkip.iv32, p1k, CMD_SYNC);
+                                          seq.tkip.iv32, p1k, 0);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
-                                          0, NULL, CMD_SYNC);
+                                          0, NULL, 0);
                break;
        default:
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
-                                          sta_id, 0, NULL, CMD_SYNC);
+                                          sta_id, 0, NULL, 0);
        }
 
        if (ret)
@@ -1399,9 +1285,8 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
        cmd.sta_id = sta_id;
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
-                                                 mvm_sta->mac_id_n_color,
-                                                 &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+                                         &cmd, &status);
 
        switch (status) {
        case ADD_STA_SUCCESS:
@@ -1448,7 +1333,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {
+       struct iwl_mvm_add_sta_cmd cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1456,7 +1341,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
        };
        int ret;
 
-       ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
@@ -1468,7 +1353,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       bool agg)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {
+       struct iwl_mvm_add_sta_cmd cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1538,7 +1423,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
        }
 
-       ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
index 2ed84c421481c79dfa1e847b1940ac78da8a9d9c..d98e8a2142b8c6b1e3b9e0568956e0bcaa7a357c 100644 (file)
@@ -253,6 +253,8 @@ enum iwl_mvm_agg_state {
  *     This is basically (last acked packet++).
  * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
  *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @reduced_tpc: Reduced tx power. Holds the data between the
+ *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
  * @state: state of the BA agreement establishment / tear down.
  * @txq_id: Tx queue used by the BA session
  * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -265,6 +267,7 @@ struct iwl_mvm_tid_data {
        u16 next_reclaimed;
        /* The rest is Tx AGG related */
        u32 rate_n_flags;
+       u8 reduced_tpc;
        enum iwl_mvm_agg_state state;
        u16 txq_id;
        u16 ssn;
@@ -284,8 +287,6 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
  * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
  *     tid.
  * @max_agg_bufsize: the maximal size of the AGG buffer for this station
- * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
- *     by debugfs.
  * @bt_reduced_txpower: is reduced tx power enabled for this station
  * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
  *     we need to signal the EOSP
@@ -306,7 +307,6 @@ struct iwl_mvm_sta {
        u32 mac_id_n_color;
        u16 tid_disable_agg;
        u8 max_agg_bufsize;
-       bool bt_reduced_txpower_dbg;
        bool bt_reduced_txpower;
        bool next_status_eosp;
        spinlock_t lock;
index 61331245ad9324f29ec5a86f12a3239725619673..80100f6cc12a85a79fc71bd83cfe2f6962f8f1c0 100644 (file)
@@ -273,67 +273,10 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
        return true;
 }
 
-/* used to convert from time event API v2 to v1 */
-#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
-                            TE_V2_EVENT_SOCIOPATHIC)
-static inline u16 te_v2_get_notify(__le16 policy)
-{
-       return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
-}
-
-static inline u16 te_v2_get_dep_policy(__le16 policy)
-{
-       return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
-               TE_V2_PLACEMENT_POS;
-}
-
-static inline u16 te_v2_get_absence(__le16 policy)
-{
-       return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
-}
-
-static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
-                               struct iwl_time_event_cmd_v1 *cmd_v1)
-{
-       cmd_v1->id_and_color = cmd_v2->id_and_color;
-       cmd_v1->action = cmd_v2->action;
-       cmd_v1->id = cmd_v2->id;
-       cmd_v1->apply_time = cmd_v2->apply_time;
-       cmd_v1->max_delay = cmd_v2->max_delay;
-       cmd_v1->depends_on = cmd_v2->depends_on;
-       cmd_v1->interval = cmd_v2->interval;
-       cmd_v1->duration = cmd_v2->duration;
-       if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
-               cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
-       else
-               cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
-       cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
-       cmd_v1->interval_reciprocal = 0; /* unused */
-
-       cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
-       cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
-       cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
-}
-
-static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
-                                      const struct iwl_time_event_cmd_v2 *cmd)
-{
-       struct iwl_time_event_cmd_v1 cmd_v1;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
-               return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-                                           sizeof(*cmd), cmd);
-
-       iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
-       return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-                                   sizeof(cmd_v1), &cmd_v1);
-}
-
-
 static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct iwl_mvm_time_event_data *te_data,
-                                      struct iwl_time_event_cmd_v2 *te_cmd)
+                                      struct iwl_time_event_cmd *te_cmd)
 {
        static const u8 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
@@ -369,7 +312,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                   ARRAY_SIZE(time_event_response),
                                   iwl_mvm_time_event_response, te_data);
 
-       ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+                                           sizeof(*te_cmd), te_cmd);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
                iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -397,7 +341,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       struct iwl_time_event_cmd_v2 time_cmd = {};
+       struct iwl_time_event_cmd time_cmd = {};
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -453,7 +397,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                               struct iwl_mvm_vif *mvmvif,
                               struct iwl_mvm_time_event_data *te_data)
 {
-       struct iwl_time_event_cmd_v2 time_cmd = {};
+       struct iwl_time_event_cmd time_cmd = {};
        u32 id, uid;
        int ret;
 
@@ -490,7 +434,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 
        IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
-       ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+                                  sizeof(time_cmd), &time_cmd);
        if (WARN_ON(ret))
                return;
 }
@@ -510,7 +455,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       struct iwl_time_event_cmd_v2 time_cmd = {};
+       struct iwl_time_event_cmd time_cmd = {};
 
        lockdep_assert_held(&mvm->mutex);
        if (te_data->running) {
index 7a99fa361954e0bc1d5e9e82bf94130b0692ac6f..868561512783956617f5cae55d294f7c5207918d 100644 (file)
@@ -409,7 +409,6 @@ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
                .id = REPLY_THERMAL_MNG_BACKOFF,
                .len = { sizeof(u32), },
                .data = { &backoff, },
-               .flags = CMD_SYNC,
        };
 
        backoff = max(backoff, mvm->thermal_throttle.min_backoff);
@@ -468,13 +467,14 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
        }
 
        if (params->support_tx_backoff) {
-               tx_backoff = 0;
+               tx_backoff = tt->min_backoff;
                for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
                        if (temperature < params->tx_backoff[i].temperature)
                                break;
-                       tx_backoff = params->tx_backoff[i].backoff;
+                       tx_backoff = max(tt->min_backoff,
+                                        params->tx_backoff[i].backoff);
                }
-               if (tx_backoff != 0)
+               if (tx_backoff != tt->min_backoff)
                        throttle_enable = true;
                if (tt->tx_backoff != tx_backoff)
                        iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
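
The loop above now treats tt->min_backoff as the floor instead of zero: the selection starts from the minimum, each chosen table entry is clamped up to it, and (as the next hunk shows) throttling counts as active only while the result exceeds that floor. A standalone sketch with made-up table values:

#include <stdio.h>

struct backoff_entry { int temperature; unsigned int backoff; };

static const struct backoff_entry tx_backoff[] = {
        { 112, 200 }, { 113, 600 }, { 114, 1200 },      /* illustrative only */
};

int main(void)
{
        const unsigned int min_backoff = 400;
        int temperature = 113;
        unsigned int sel = min_backoff;
        unsigned int i;

        for (i = 0; i < sizeof(tx_backoff) / sizeof(tx_backoff[0]); i++) {
                if (temperature < tx_backoff[i].temperature)
                        break;
                sel = tx_backoff[i].backoff > min_backoff ?
                      tx_backoff[i].backoff : min_backoff;
        }
        printf("backoff at %d: %u, throttling %s\n", temperature, sel,
               sel != min_backoff ? "on" : "off");
        return 0;
}
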
@@ -484,7 +484,8 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
                IWL_WARN(mvm,
                         "Due to high temperature thermal throttling initiated\n");
                tt->throttle = true;
-       } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
+       } else if (tt->throttle && !tt->dynamic_smps &&
+                  tt->tx_backoff == tt->min_backoff &&
                   temperature <= params->tx_protection_exit) {
                IWL_WARN(mvm,
                         "Temperature is back to normal thermal throttling stopped\n");
index 879aeac46cc103112fef914bcc2b38df9f028b06..3846a6c41eb165ffbb8ede0ff102547f36911e65 100644 (file)
@@ -636,7 +636,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        seq_ctl = le16_to_cpu(hdr->seq_ctrl);
                }
 
-               ieee80211_tx_status_ni(mvm->hw, skb);
+               BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+               info->status.status_driver_data[0] =
+                               (void *)(uintptr_t)tx_resp->reduced_tpc;
+
+               ieee80211_tx_status(mvm->hw, skb);
        }
 
        if (txq_id >= mvm->first_agg_queue) {
@@ -815,6 +819,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
                mvmsta->tid_data[tid].rate_n_flags =
                        le32_to_cpu(tx_resp->initial_rate);
+               mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
        }
 
        rcu_read_unlock();
@@ -928,6 +933,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                        info->status.ampdu_len = ba_notif->txed;
                        iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
                                                    info);
+                       info->status.status_driver_data[0] =
+                               (void *)(uintptr_t)tid_data->reduced_tpc;
                }
        }
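
Both TX-status paths above stash the firmware's reduced_tpc byte in the void * slot status_driver_data[0] by casting through uintptr_t. A standalone sketch of that store/load round trip:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t reduced_tpc = 3;
        void *slot;

        /* widen through uintptr_t so the integer-to-pointer cast is well
         * defined, as the driver does for status_driver_data[0] */
        slot = (void *)(uintptr_t)reduced_tpc;

        /* the consumer narrows it back the same way */
        uint8_t back = (uint8_t)(uintptr_t)slot;

        printf("round-tripped reduced_tpc = %u\n", back);
        return 0;
}
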
 
@@ -937,7 +944,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 
        while (!skb_queue_empty(&reclaimed_skbs)) {
                skb = __skb_dequeue(&reclaimed_skbs);
-               ieee80211_tx_status_ni(mvm->hw, skb);
+               ieee80211_tx_status(mvm->hw, skb);
        }
 
        return 0;
@@ -951,7 +958,7 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
                .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
        };
 
-       u32 flags = sync ? CMD_SYNC : CMD_ASYNC;
+       u32 flags = sync ? 0 : CMD_ASYNC;
 
        ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
                                   sizeof(flush_cmd), &flush_cmd);
index d619851745a19ba6d3bf605555fcdbd5a09f8341..aa9fc77e8413b607861e370169e0b55ba4b697d1 100644 (file)
@@ -64,6 +64,7 @@
 
 #include "iwl-debug.h"
 #include "iwl-io.h"
+#include "iwl-prph.h"
 
 #include "mvm.h"
 #include "fw-api-rs.h"
@@ -143,7 +144,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
                      "cmd flags %x", cmd->flags))
                return -EINVAL;
 
-       cmd->flags |= CMD_SYNC | CMD_WANT_SKB;
+       cmd->flags |= CMD_WANT_SKB;
 
        ret = iwl_trans_send_cmd(mvm->trans, cmd);
        if (ret == -ERFKILL) {
@@ -469,6 +470,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                        mvm->status, table.valid);
        }
 
+       /* Do not change this output - scripts rely on it */
+
        IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
 
        trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@ -516,13 +519,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                iwl_mvm_dump_umac_error_log(mvm);
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
 void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
 {
        const struct fw_img *img;
        u32 ofs, sram_len;
        void *sram;
 
-       if (!mvm->ucode_loaded || mvm->fw_error_sram)
+       if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
                return;
 
        img = &mvm->fw->img[mvm->cur_ucode];
@@ -538,6 +542,48 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
        mvm->fw_error_sram_len = sram_len;
 }
 
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
+{
+       int i, reg_val;
+       unsigned long flags;
+
+       if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
+               return;
+
+       /* reading buffer size */
+       reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
+       mvm->fw_error_rxf_len =
+               (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+
+       /* the register holds the value divided by 128 */
+       mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
+
+       if (!mvm->fw_error_rxf_len)
+               return;
+
+       mvm->fw_error_rxf =  kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
+       if (!mvm->fw_error_rxf) {
+               mvm->fw_error_rxf_len = 0;
+               return;
+       }
+
+       if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
+               kfree(mvm->fw_error_rxf);
+               mvm->fw_error_rxf = NULL;
+               mvm->fw_error_rxf_len = 0;
+               return;
+       }
+
+       for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
+               iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
+                                    i * sizeof(u32));
+               mvm->fw_error_rxf[i] =
+                       iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
+       }
+       iwl_trans_release_nic_access(mvm->trans, &flags);
+}
+#endif
+
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
  * @init: This command is sent as part of station initialization right
@@ -553,7 +599,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
        struct iwl_host_cmd cmd = {
                .id = LQ_CMD,
                .len = { sizeof(struct iwl_lq_cmd), },
-               .flags = init ? CMD_SYNC : CMD_ASYNC,
+               .flags = init ? 0 : CMD_ASYNC,
                .data = { lq, },
        };
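
In the RX-FIFO capture added just above, the size register holds the buffer length divided by 128, so the masked and shifted field is scaled back with a left shift by 7 before the dump buffer is allocated. A standalone sketch of that arithmetic; the mask and shift below are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t size_msk = 0x0000fff0u;  /* assumed field layout */
        const unsigned int size_pos = 4;        /* assumed field layout */
        uint32_t reg_val = 0x00000500;          /* pretend register read */
        uint32_t len;

        len = (reg_val & size_msk) >> size_pos;
        len <<= 7;      /* the register holds the size divided by 128 */

        printf("RX FIFO length: %u bytes\n", len);
        return 0;
}
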
 
@@ -604,6 +650,39 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        ieee80211_request_smps(vif, smps_mode);
 }
 
+static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
+                                  struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       bool *result = _data;
+       int i;
+
+       for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+               if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
+                   mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+                       *result = false;
+       }
+}
+
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
+{
+       bool result = true;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
+               return false;
+
+       if (!mvm->cfg->rx_with_siso_diversity)
+               return false;
+
+       ieee80211_iterate_active_interfaces_atomic(
+                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_diversity_iter, &result);
+
+       return result;
+}
+
 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                               bool value)
 {
@@ -623,7 +702,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        iwl_mvm_bt_coex_vif_change(mvm);
 
-       return iwl_mvm_power_update_mac(mvm, vif);
+       return iwl_mvm_power_update_mac(mvm);
 }
 
 static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
@@ -644,3 +723,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
        return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+       bool *idle = _data;
+
+       if (!vif->bss_conf.idle)
+               *idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+       bool idle = true;
+
+       ieee80211_iterate_active_interfaces_atomic(
+                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_idle_iter, &idle);
+
+       return idle;
+}
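
iwl_mvm_diversity_iter() and iwl_mvm_idle_iter() both follow the mac80211 interface-iterator shape: the callback folds its verdict into a caller-owned flag handed over through a void * cookie. A standalone sketch of the pattern with a simplified stand-in iterator:

#include <stdbool.h>
#include <stdio.h>

struct vif { bool idle; };

static void idle_iter(void *data, const struct vif *vif)
{
        bool *idle = data;

        if (!vif->idle)
                *idle = false;  /* any busy interface vetoes "idle" */
}

static void for_each_vif(const struct vif *vifs, int n,
                         void (*fn)(void *, const struct vif *), void *data)
{
        int i;

        for (i = 0; i < n; i++)
                fn(data, &vifs[i]);
}

int main(void)
{
        struct vif vifs[] = { { .idle = true }, { .idle = false } };
        bool idle = true;

        for_each_vif(vifs, 2, idle_iter, &idle);
        printf("all interfaces idle: %s\n", idle ? "yes" : "no");
        return 0;
}
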
index 3d1d57f9f5bc539d7350f518f5c13566917b6860..7091a18d5a72f9880f7a47f7a949fefd9de3b9f1 100644 (file)
@@ -417,7 +417,7 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
            splx->package.count != 2 ||
            splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
            splx->package.elements[0].integer.value != 0) {
-               IWL_ERR(trans, "Unsupported splx structure");
+               IWL_ERR(trans, "Unsupported splx structure\n");
                return 0;
        }
 
@@ -426,14 +426,14 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
            limits->package.count < 2 ||
            limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
            limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
-               IWL_ERR(trans, "Invalid limits element");
+               IWL_ERR(trans, "Invalid limits element\n");
                return 0;
        }
 
        domain_type = &limits->package.elements[0];
        power_limit = &limits->package.elements[1];
        if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
-               IWL_DEBUG_INFO(trans, "WiFi power is not limited");
+               IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
                return 0;
        }
 
@@ -450,26 +450,26 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
        pxsx_handle = ACPI_HANDLE(&pdev->dev);
        if (!pxsx_handle) {
                IWL_DEBUG_INFO(trans,
-                              "Could not retrieve root port ACPI handle");
+                              "Could not retrieve root port ACPI handle\n");
                return;
        }
 
        /* Get the method's handle */
        status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
        if (ACPI_FAILURE(status)) {
-               IWL_DEBUG_INFO(trans, "SPL method not found");
+               IWL_DEBUG_INFO(trans, "SPL method not found\n");
                return;
        }
 
        /* Call SPLC with no arguments */
        status = acpi_evaluate_object(handle, NULL, NULL, &splx);
        if (ACPI_FAILURE(status)) {
-               IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
+               IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
                return;
        }
 
        trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
-       IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
+       IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
                       trans->dflt_pwr_limit);
        kfree(splx.pointer);
 }
index 9091513ea7388ce11f2294fbb609b3581073e2a0..6c22b23a2845723c33df6757a1ced2c545747e94 100644 (file)
@@ -102,7 +102,7 @@ struct iwl_rxq {
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
-       int need_update;
+       bool need_update;
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
@@ -117,21 +117,19 @@ struct iwl_dma_ptr {
 /**
  * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
  * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
  */
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
+static inline int iwl_queue_inc_wrap(int index)
 {
-       return ++index & (n_bd - 1);
+       return ++index & (TFD_QUEUE_SIZE_MAX - 1);
 }
 
 /**
  * iwl_queue_dec_wrap - decrement queue index, wrap back to end
  * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
  */
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
+static inline int iwl_queue_dec_wrap(int index)
 {
-       return --index & (n_bd - 1);
+       return --index & (TFD_QUEUE_SIZE_MAX - 1);
 }
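
With every queue now fixed at TFD_QUEUE_SIZE_MAX entries, the wrap helpers above lose their n_bd argument and reduce to masking with a power of two minus one, which also covers the decrement-below-zero case. A standalone sketch of the arithmetic:

#include <stdio.h>

#define TFD_QUEUE_SIZE_MAX 256  /* must stay a power of two for the mask */

static int inc_wrap(int index) { return ++index & (TFD_QUEUE_SIZE_MAX - 1); }
static int dec_wrap(int index) { return --index & (TFD_QUEUE_SIZE_MAX - 1); }

int main(void)
{
        printf("inc_wrap(255) = %d\n", inc_wrap(255));  /* wraps to 0 */
        printf("dec_wrap(0)   = %d\n", dec_wrap(0));    /* wraps to 255 */
        return 0;
}
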
 
 struct iwl_cmd_meta {
@@ -145,13 +143,13 @@ struct iwl_cmd_meta {
  *
  * Contains common data for Rx and Tx queues.
  *
- * Note the difference between n_bd and n_window: the hardware
- * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
  * there might be HW changes in the future). For the normal TX
  * queues, n_window, which is the size of the software queue data
  * is also 256; however, for the command queue, n_window is only
  * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
  * the software buffers (in the variables @meta, @txb in struct
  * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
  * the same struct) have 256.
@@ -162,7 +160,6 @@ struct iwl_cmd_meta {
  * data is a window overlayed over the HW queue.
  */
 struct iwl_queue {
-       int n_bd;              /* number of BDs in this queue */
        int write_ptr;       /* 1-st empty entry (index) host_w*/
        int read_ptr;         /* last used entry (index) host_r*/
        /* use for monitoring and recovering the stuck queue */
@@ -231,7 +228,7 @@ struct iwl_txq {
        spinlock_t lock;
        struct timer_list stuck_timer;
        struct iwl_trans_pcie *trans_pcie;
-       u8 need_update;
+       bool need_update;
        u8 active;
        bool ampdu;
 };
@@ -270,6 +267,9 @@ struct iwl_trans_pcie {
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
+       struct net_device napi_dev;
+       struct napi_struct napi;
+
        /* INT ICT Table */
        __le32 *ict_tbl;
        dma_addr_t ict_tbl_dma;
@@ -362,7 +362,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_cmd *dev_cmd, int txq_id);
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                            struct iwl_rx_cmd_buffer *rxb, int handler_status);
@@ -370,6 +370,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
 
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+       return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
 /*****************************************************
 * Error handling
 ******************************************************/
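
Moving iwl_pcie_tfd_tb_get_len() into the header lets the new dump code reuse it; the helper unpacks the 12-bit length that shares a 16-bit word with the high address bits. A standalone sketch of that unpacking, assuming the length sits in bits 4-15 and the address-high nibble in bits 0-3:

#include <stdio.h>

int main(void)
{
        unsigned int hi_n_len = (300u << 4) | 0x2u;     /* len 300, addr_hi 2 */

        printf("len = %u, addr_hi = %u\n", hi_n_len >> 4, hi_n_len & 0xfu);
        return 0;
}
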
index fdfa3969cac986c1824bd65c41512a9ac4ba7b39..a2698e5e062c990524d12e1d112818977aeccc2b 100644 (file)
@@ -145,15 +145,13 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
 /*
  * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
  */
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
-                                   struct iwl_rxq *rxq)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
        u32 reg;
 
-       spin_lock(&rxq->lock);
-
-       if (rxq->need_update == 0)
-               goto exit_unlock;
+       lockdep_assert_held(&rxq->lock);
 
        /*
         * explicitly wake up the NIC if:
@@ -169,13 +167,27 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
                                       reg);
                        iwl_set_bit(trans, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                       goto exit_unlock;
+                       rxq->need_update = true;
+                       return;
                }
        }
 
        rxq->write_actual = round_down(rxq->write, 8);
        iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
-       rxq->need_update = 0;
+}
+
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+       spin_lock(&rxq->lock);
+
+       if (!rxq->need_update)
+               goto exit_unlock;
+
+       iwl_pcie_rxq_inc_wr_ptr(trans);
+       rxq->need_update = false;
 
  exit_unlock:
        spin_unlock(&rxq->lock);
@@ -236,9 +248,8 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock(&rxq->lock);
-               rxq->need_update = 1;
+               iwl_pcie_rxq_inc_wr_ptr(trans);
                spin_unlock(&rxq->lock);
-               iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        }
 }
 
@@ -362,20 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
  * This is called as a scheduled work item (except for during initialization)
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-       iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
-       spin_lock(&trans_pcie->irq_lock);
-       iwl_pcie_rxq_restock(trans);
-       spin_unlock(&trans_pcie->irq_lock);
-}
-
-static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
 {
-       iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+       iwl_pcie_rxq_alloc_rbs(trans, gfp);
 
        iwl_pcie_rxq_restock(trans);
 }
@@ -385,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
        struct iwl_trans_pcie *trans_pcie =
            container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-       iwl_pcie_rx_replenish(trans_pcie->trans);
+       iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -521,14 +521,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock(&rxq->lock);
 
-       iwl_pcie_rx_replenish(trans);
+       iwl_pcie_rx_replenish(trans, GFP_KERNEL);
 
        iwl_pcie_rx_hw_init(trans, rxq);
 
-       spin_lock(&trans_pcie->irq_lock);
-       rxq->need_update = 1;
-       iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_lock(&rxq->lock);
+       iwl_pcie_rxq_inc_wr_ptr(trans);
+       spin_unlock(&rxq->lock);
 
        return 0;
 }
@@ -673,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
        /* Reuse the page if possible. For notification packets and
         * SKBs that fail to Rx correctly, add them back into the
         * rx_free list for reuse later. */
-       spin_lock(&rxq->lock);
        if (rxb->page != NULL) {
                rxb->page_dma =
                        dma_map_page(trans->dev, rxb->page, 0,
@@ -694,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                }
        } else
                list_add_tail(&rxb->list, &rxq->rx_used);
-       spin_unlock(&rxq->lock);
 }
 
 /*
@@ -709,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
        u32 count = 8;
        int total_empty;
 
+restart:
+       spin_lock(&rxq->lock);
        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode). */
        r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -743,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
-                               iwl_pcie_rx_replenish_now(trans);
+                               spin_unlock(&rxq->lock);
+                               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
                                count = 0;
+                               goto restart;
                        }
                }
        }
 
        /* Backtrack one entry */
        rxq->read = i;
+       spin_unlock(&rxq->lock);
+
        if (fill_rx)
-               iwl_pcie_rx_replenish_now(trans);
+               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
        else
                iwl_pcie_rxq_restock(trans);
+
+       if (trans_pcie->napi.poll)
+               napi_gro_flush(&trans_pcie->napi, false);
 }
 
 /*
@@ -844,7 +850,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
                                trans_pcie->ict_index, read);
                trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
                trans_pcie->ict_index =
-                       iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+                       ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
 
                read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
                trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
@@ -876,7 +882,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;
-       u32 i;
 
        lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
@@ -1028,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-               iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
-               for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
-                       iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
+               iwl_pcie_rxq_check_wrptr(trans);
+               iwl_pcie_txq_check_wrptrs(trans);
 
                isr_stats->wakeup++;
 
@@ -1068,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                            CSR_INT_PERIODIC_DIS);
 
-               iwl_pcie_rx_handle(trans);
-
                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
@@ -1082,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                                   CSR_INT_PERIODIC_ENA);
 
                isr_stats->rx++;
+
+               local_bh_disable();
+               iwl_pcie_rx_handle(trans);
+               local_bh_enable();
        }
 
        /* This "Tx" DMA channel is used only for loading uCode */
index dcfd6d866d095081d7001795c4ec802c3044926f..788085bc65d78e3382c7fa76ca74de30abd4cd84 100644 (file)
@@ -73,6 +73,7 @@
 #include "iwl-csr.h"
 #include "iwl-prph.h"
 #include "iwl-agn-hw.h"
+#include "iwl-fw-error-dump.h"
 #include "internal.h"
 
 static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
@@ -103,7 +104,6 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT  0x041
-#define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
 
 static void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
@@ -454,6 +454,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 {
        int ret;
        int t = 0;
+       int iter;
 
        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
 
@@ -462,18 +463,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
        if (ret >= 0)
                return 0;
 
-       /* If HW is not ready, prepare the conditions to check again */
-       iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
-                   CSR_HW_IF_CONFIG_REG_PREPARE);
+       for (iter = 0; iter < 10; iter++) {
+               /* If HW is not ready, prepare the conditions to check again */
+               iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                           CSR_HW_IF_CONFIG_REG_PREPARE);
+
+               do {
+                       ret = iwl_pcie_set_hw_ready(trans);
+                       if (ret >= 0)
+                               return 0;
 
-       do {
-               ret = iwl_pcie_set_hw_ready(trans);
-               if (ret >= 0)
-                       return 0;
+                       usleep_range(200, 1000);
+                       t += 200;
+               } while (t < 150000);
+               msleep(25);
+       }
 
-               usleep_range(200, 1000);
-               t += 200;
-       } while (t < 150000);
+       IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
 
        return ret;
 }
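
The prepare-card change wraps the old polling loop in up to ten attempts, each polling for roughly 150 ms before sleeping 25 ms and trying again. A standalone sketch of that nested-retry shape with a faked ready check:

#include <stdio.h>

static int polls_until_ready = 7;       /* stand-in for the hardware */

static int set_hw_ready(void)
{
        return --polls_until_ready <= 0 ? 0 : -1;
}

int main(void)
{
        int iter, t, ret = -1;

        for (iter = 0; iter < 10 && ret < 0; iter++) {
                for (t = 0; t < 150000 && ret < 0; t += 200)
                        ret = set_hw_ready();   /* real code sleeps ~200 us */
                /* real code msleep()s 25 ms here before the next attempt */
        }
        printf("ret = %d after %d outer iteration(s)\n", ret, iter);
        return 0;
}
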
@@ -1053,6 +1059,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
 }
 
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+       WARN_ON(1);
+       return 0;
+}
+
 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
                                     const struct iwl_trans_config *trans_cfg)
 {
@@ -1079,6 +1091,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 
        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+
+       /* Initialize NAPI here - it should be before registering to mac80211
+        * in the opmode but after the HW struct is allocated.
+        * As this function may be called again in some corner cases don't
+        * do anything if NAPI was already initialized.
+        */
+       if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+               init_dummy_netdev(&trans_pcie->napi_dev);
+               iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+                                    &trans_pcie->napi_dev,
+                                    iwl_pcie_dummy_napi_poll, 64);
+       }
 }
 
 void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1099,6 +1123,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        pci_disable_device(trans_pcie->pci_dev);
        kmem_cache_destroy(trans->dev_cmd_pool);
 
+       if (trans_pcie->napi.poll)
+               netif_napi_del(&trans_pcie->napi);
+
        kfree(trans);
 }
 
@@ -1237,7 +1264,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
 
 #define IWL_FLUSH_WAIT_MS      2000
 
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq;
@@ -1250,13 +1277,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
 
        /* waiting for all the tx frames complete might take a while */
        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+               u8 wr_ptr;
+
                if (cnt == trans_pcie->cmd_queue)
                        continue;
+               if (!test_bit(cnt, trans_pcie->queue_used))
+                       continue;
+               if (!(BIT(cnt) & txq_bm))
+                       continue;
+
+               IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
-               while (q->read_ptr != q->write_ptr && !time_after(jiffies,
-                      now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+               wr_ptr = ACCESS_ONCE(q->write_ptr);
+
+               while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+                      !time_after(jiffies,
+                                  now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+                       u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+
+                       if (WARN_ONCE(wr_ptr != write_ptr,
+                                     "WR pointer moved while flushing %d -> %d\n",
+                                     wr_ptr, write_ptr))
+                               return -ETIMEDOUT;
                        msleep(1);
+               }
 
                if (q->read_ptr != q->write_ptr) {
                        IWL_ERR(trans,
@@ -1264,6 +1309,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
                        ret = -ETIMEDOUT;
                        break;
                }
+               IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
        }
 
        if (!ret)
@@ -1298,8 +1344,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
                IWL_ERR(trans,
                        "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
                        cnt, active ? "" : "in", fifo, tbl_dw,
-                       iwl_read_prph(trans,
-                                     SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
+                       iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
+                               (TFD_QUEUE_SIZE_MAX - 1),
                        iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
        }
 
@@ -1630,6 +1676,61 @@ err:
        IWL_ERR(trans, "failed to create the trans debugfs entry\n");
        return -ENOMEM;
 }
+
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+{
+       u32 cmdlen = 0;
+       int i;
+
+       for (i = 0; i < IWL_NUM_OF_TBS; i++)
+               cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+
+       return cmdlen;
+}
+
+static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+                                   void *buf, u32 buflen)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_fw_error_dump_data *data;
+       struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+       struct iwl_fw_error_dump_txcmd *txcmd;
+       u32 len;
+       int i, ptr;
+
+       if (!buf)
+               return sizeof(*data) +
+                      cmdq->q.n_window * (sizeof(*txcmd) +
+                                          TFD_MAX_PAYLOAD_SIZE);
+
+       len = 0;
+       data = buf;
+       data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+       txcmd = (void *)data->data;
+       spin_lock_bh(&cmdq->lock);
+       ptr = cmdq->q.write_ptr;
+       for (i = 0; i < cmdq->q.n_window; i++) {
+               u8 idx = get_cmd_index(&cmdq->q, ptr);
+               u32 caplen, cmdlen;
+
+               cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+               caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+               if (cmdlen) {
+                       len += sizeof(*txcmd) + caplen;
+                       txcmd->cmdlen = cpu_to_le32(cmdlen);
+                       txcmd->caplen = cpu_to_le32(caplen);
+                       memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
+                       txcmd = (void *)((u8 *)txcmd->data + caplen);
+               }
+
+               ptr = iwl_queue_dec_wrap(ptr);
+       }
+       spin_unlock_bh(&cmdq->lock);
+
+       data->len = cpu_to_le32(len);
+       return sizeof(*data) + len;
+}
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                         struct dentry *dir)
@@ -1672,6 +1773,10 @@ static const struct iwl_trans_ops trans_ops_pcie = {
        .grab_nic_access = iwl_trans_pcie_grab_nic_access,
        .release_nic_access = iwl_trans_pcie_release_nic_access,
        .set_bits_mask = iwl_trans_pcie_set_bits_mask,
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       .dump_data = iwl_trans_pcie_dump_data,
+#endif
 };
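
The dump_data op wired up above follows a two-pass convention: called with a NULL buffer it only reports the worst-case size, and the caller then allocates that much and calls again to have the TX command snapshots copied in. A standalone sketch of the calling convention with an invented payload:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int dump_data(void *buf, unsigned int buflen)
{
        static const char payload[] = "txcmd snapshot";

        if (!buf)
                return sizeof(payload); /* pass 1: just report the size */

        if (buflen < sizeof(payload))
                return 0;
        memcpy(buf, payload, sizeof(payload));
        return sizeof(payload);         /* pass 2: bytes actually written */
}

int main(void)
{
        unsigned int len = dump_data(NULL, 0);
        char *buf = malloc(len);

        if (!buf)
                return 1;
        printf("copied %u bytes: \"%s\"\n", dump_data(buf, len), buf);
        free(buf);
        return 0;
}
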
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1749,6 +1854,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         * PCI Tx retries from interfering with C3 CPU state */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+       trans->dev = &pdev->dev;
+       trans_pcie->pci_dev = pdev;
+       iwl_disable_interrupts(trans);
+
        err = pci_enable_msi(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1869,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        }
 
-       trans->dev = &pdev->dev;
-       trans_pcie->pci_dev = pdev;
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1894,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_pci_disable_msi;
        }
 
-       trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;
 
@@ -1800,6 +1905,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_free_ict;
        }
 
+       trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
        return trans;
 
 out_free_ict:
index 3b0c72c1005446d2d93f7eb15ed6d3207e1c3ea1..038940afbdc57d8d176908bb4869fc8791f250eb 100644 (file)
@@ -70,20 +70,20 @@ static int iwl_queue_space(const struct iwl_queue *q)
 
        /*
         * To avoid ambiguity between empty and completely full queues, there
-        * should always be less than q->n_bd elements in the queue.
-        * If q->n_window is smaller than q->n_bd, there is no need to reserve
-        * any queue entries for this purpose.
+        * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
+        * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+        * to reserve any queue entries for this purpose.
         */
-       if (q->n_window < q->n_bd)
+       if (q->n_window < TFD_QUEUE_SIZE_MAX)
                max = q->n_window;
        else
-               max = q->n_bd - 1;
+               max = TFD_QUEUE_SIZE_MAX - 1;
 
        /*
-        * q->n_bd is a power of 2, so the following is equivalent to modulo by
-        * q->n_bd and is well defined for negative dividends.
+        * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
+        * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
         */
-       used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1);
+       used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
 
        if (WARN_ON(used > max))
                return 0;
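
iwl_queue_space() keeps the same idea with the fixed ring size: usage is the masked difference of the two pointers, and the reservable maximum is the software window unless that window spans the whole ring. A standalone worked example using the command queue's 32-entry window:

#include <stdio.h>

#define QUEUE_SIZE 256  /* TFD_QUEUE_SIZE_MAX in the driver */

static int queue_space(int write_ptr, int read_ptr, int n_window)
{
        int max = n_window < QUEUE_SIZE ? n_window : QUEUE_SIZE - 1;
        /* the power-of-two mask keeps this well defined after a wrap */
        int used = (write_ptr - read_ptr) & (QUEUE_SIZE - 1);

        return max - used;
}

int main(void)
{
        /* write pointer has wrapped past the end of the ring */
        printf("free slots: %d\n", queue_space(2, 250, 32));
        return 0;
}
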
@@ -94,17 +94,11 @@ static int iwl_queue_space(const struct iwl_queue *q)
 /*
  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
 {
-       q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;
 
-       /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
-        * and iwl_queue_dec_wrap are broken. */
-       if (WARN_ON(!is_power_of_2(count)))
-               return -EINVAL;
-
        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        if (WARN_ON(!is_power_of_2(slots_num)))
@@ -197,17 +191,17 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
                IWL_ERR(trans,
                        "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
                        i, active ? "" : "in", fifo, tbl_dw,
-                       iwl_read_prph(trans,
-                                     SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+                       iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
+                               (TFD_QUEUE_SIZE_MAX - 1),
                        iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
        }
 
        for (i = q->read_ptr; i != q->write_ptr;
-            i = iwl_queue_inc_wrap(i, q->n_bd))
+            i = iwl_queue_inc_wrap(i))
                IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
                        le32_to_cpu(txq->scratchbufs[i].scratch));
 
-       iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
+       iwl_force_nmi(trans);
 }
 
 /*
@@ -287,14 +281,14 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 /*
  * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
  */
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+                                   struct iwl_txq *txq)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 reg = 0;
        int txq_id = txq->q.id;
 
-       if (txq->need_update == 0)
-               return;
+       lockdep_assert_held(&txq->lock);
 
        /*
         * explicitly wake up the NIC if:
@@ -317,6 +311,7 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
                                       txq_id, reg);
                        iwl_set_bit(trans, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       txq->need_update = true;
                        return;
                }
        }
@@ -327,8 +322,23 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
         */
        IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
        iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int i;
+
+       for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+               struct iwl_txq *txq = &trans_pcie->txq[i];
 
-       txq->need_update = 0;
+               spin_lock_bh(&txq->lock);
+               if (trans_pcie->txq[i].need_update) {
+                       iwl_pcie_txq_inc_wr_ptr(trans, txq);
+                       trans_pcie->txq[i].need_update = false;
+               }
+               spin_unlock_bh(&txq->lock);
+       }
 }
 
 static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -343,13 +353,6 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
        return addr;
 }
 
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
-       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-       return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
 static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
                                       dma_addr_t addr, u16 len)
 {
@@ -409,13 +412,17 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
        struct iwl_tfd *tfd_tmp = txq->tfds;
 
-       /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+       /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+        * idx is bounded by n_window
+        */
        int rd_ptr = txq->q.read_ptr;
        int idx = get_cmd_index(&txq->q, rd_ptr);
 
        lockdep_assert_held(&txq->lock);
 
-       /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+       /* We have only q->n_window txq->entries, but we use
+        * TFD_QUEUE_SIZE_MAX tfds
+        */
        iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
 
        /* free SKB */
@@ -436,7 +443,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 }
 
 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-                                 dma_addr_t addr, u16 len, u8 reset)
+                                 dma_addr_t addr, u16 len, bool reset)
 {
        struct iwl_queue *q;
        struct iwl_tfd *tfd, *tfd_tmp;
@@ -542,15 +549,14 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 {
        int ret;
 
-       txq->need_update = 0;
+       txq->need_update = false;
 
        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
 
        /* Initialize queue's high/low-water marks, and head/tail indexes */
-       ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
-                       txq_id);
+       ret = iwl_queue_init(&txq->q, slots_num, txq_id);
        if (ret)
                return ret;
 
@@ -575,15 +581,12 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
        struct iwl_txq *txq = &trans_pcie->txq[txq_id];
        struct iwl_queue *q = &txq->q;
 
-       if (!q->n_bd)
-               return;
-
        spin_lock_bh(&txq->lock);
        while (q->write_ptr != q->read_ptr) {
                IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
                                   txq_id, q->read_ptr);
                iwl_pcie_txq_free_tfd(trans, txq);
-               q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+               q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
        }
        txq->active = false;
        spin_unlock_bh(&txq->lock);
@@ -620,10 +623,12 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
                }
 
        /* De-alloc circular buffer of TFDs */
-       if (txq->q.n_bd) {
-               dma_free_coherent(dev, sizeof(struct iwl_tfd) *
-                                 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+       if (txq->tfds) {
+               dma_free_coherent(dev,
+                                 sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
+                                 txq->tfds, txq->q.dma_addr);
                txq->q.dma_addr = 0;
+               txq->tfds = NULL;
 
                dma_free_coherent(dev,
                                  sizeof(*txq->scratchbufs) * txq->q.n_window,
@@ -680,7 +685,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
        /* The chain extension of the SCD doesn't work well. This feature is
         * enabled by default by the HW, so we need to disable it manually.
         */
-       iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+       if (trans->cfg->base_params->scd_chain_ext_wa)
+               iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
 
        iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
                                trans_pcie->cmd_fifo);
@@ -931,8 +937,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-       /* n_bd is usually 256 => n_bd - 1 = 0xff */
-       int tfd_num = ssn & (txq->q.n_bd - 1);
+       int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
        struct iwl_queue *q = &txq->q;
        int last_to_free;
 
@@ -956,12 +961,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
        /*Since we free until index _not_ inclusive, the one before index is
         * the last we will free. This one must be used */
-       last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
+       last_to_free = iwl_queue_dec_wrap(tfd_num);
 
        if (!iwl_queue_used(q, last_to_free)) {
                IWL_ERR(trans,
                        "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
-                       __func__, txq_id, last_to_free, q->n_bd,
+                       __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
                        q->write_ptr, q->read_ptr);
                goto out;
        }
@@ -971,7 +976,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
        for (;
             q->read_ptr != tfd_num;
-            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
 
                if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
                        continue;
@@ -1010,25 +1015,26 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 
        lockdep_assert_held(&txq->lock);
 
-       if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
+       if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
                IWL_ERR(trans,
                        "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
-                       __func__, txq_id, idx, q->n_bd,
+                       __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
                        q->write_ptr, q->read_ptr);
                return;
        }
 
-       for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
-            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+       for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
+            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
 
                if (nfreed++ > 0) {
                        IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
                                idx, q->write_ptr, q->read_ptr);
-                       iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
+                       iwl_force_nmi(trans);
                }
        }
 
-       if (q->read_ptr == q->write_ptr) {
+       if (trans->cfg->base_params->apmg_wake_up_wa &&
+           q->read_ptr == q->write_ptr) {
                spin_lock_irqsave(&trans_pcie->reg_lock, flags);
                WARN_ON(!trans_pcie->cmd_in_flight);
                trans_pcie->cmd_in_flight = false;
@@ -1309,28 +1315,39 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        cmd_pos = offsetof(struct iwl_device_cmd, payload);
        copy_size = sizeof(out_cmd->hdr);
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
-               int copy = 0;
+               int copy;
 
                if (!cmd->len[i])
                        continue;
 
-               /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
-               if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
-                       copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
-
-                       if (copy > cmd->len[i])
-                               copy = cmd->len[i];
-               }
-
                /* copy everything if not nocopy/dup */
                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
-                                          IWL_HCMD_DFL_DUP)))
+                                          IWL_HCMD_DFL_DUP))) {
                        copy = cmd->len[i];
 
-               if (copy) {
                        memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
                        cmd_pos += copy;
                        copy_size += copy;
+                       continue;
+               }
+
+               /*
+                * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
+                * in total (for the scratchbuf handling), but copy up to what
+                * we can fit into the payload for debug dump purposes.
+                */
+               copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+               memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+               cmd_pos += copy;
+
+               /* However, keep copy_size accurate here; it is needed below */
+               if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+                       copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+                       if (copy > cmd->len[i])
+                               copy = cmd->len[i];
+                       copy_size += copy;
                }
        }
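
To make the new split between cmd_pos and copy_size concrete, here is a worked trace for a single NOCOPY fragment, using hypothetical sizes (IWL_HCMD_SCRATCHBUF_SIZE taken as 16 bytes, a command header of 4 bytes, and a 100-byte fragment; the real constants are defined elsewhere in the driver and are not shown in this hunk):

	copy_size before the fragment  = 4     (command header only, assumed)
	bytes memcpy'd into out_cmd    = min(TFD_MAX_PAYLOAD_SIZE - cmd_pos, 100)
	                                 (kept only for the debug dump / tracing)
	copy_size after the fragment   = 4 + (16 - 4) = 16
	                                 (capped at IWL_HCMD_SCRATCHBUF_SIZE, since
	                                  only that much goes through the scratch
	                                  buffer mapping below)
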
 
@@ -1345,7 +1362,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
        iwl_pcie_txq_build_tfd(trans, txq,
                               iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
-                              scratch_size, 1);
+                              scratch_size, true);
 
        /* map first command fragment, if any remains */
        if (copy_size > scratch_size) {
@@ -1361,7 +1378,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                }
 
                iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
-                                      copy_size - scratch_size, 0);
+                                      copy_size - scratch_size, false);
        }
 
        /* map the remaining (adjusted) nocopy/dup fragments */
@@ -1384,7 +1401,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                        goto out;
                }
 
-               iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
+               iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
        }
 
        out_meta->flags = cmd->flags;
@@ -1392,8 +1409,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                kfree(txq->entries[idx].free_buf);
        txq->entries[idx].free_buf = dup_buf;
 
-       txq->need_update = 1;
-
        trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
 
        /* start timer if queue currently empty */
@@ -1405,9 +1420,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        /*
         * wake up the NIC to make sure that the firmware will see the host
         * command - we will let the NIC sleep once all the host commands
-        * returned.
+        * returned. This needs to be done only on NICs that have
+        * apmg_wake_up_wa set.
         */
-       if (!trans_pcie->cmd_in_flight) {
+       if (trans->cfg->base_params->apmg_wake_up_wa &&
+           !trans_pcie->cmd_in_flight) {
                trans_pcie->cmd_in_flight = true;
                __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1427,7 +1444,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        }
 
        /* Increment and update queue's write index */
-       q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+       q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
        iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1583,7 +1600,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                               get_cmd_string(trans_pcie, cmd->id));
                ret = -ETIMEDOUT;
 
-               iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
+               iwl_force_nmi(trans);
                iwl_trans_fw_error(trans);
 
                goto cancel;
@@ -1661,7 +1678,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        dma_addr_t tb0_phys, tb1_phys, scratch_phys;
        void *tb1_addr;
        u16 len, tb1_len, tb2_len;
-       u8 wait_write_ptr = 0;
+       bool wait_write_ptr;
        __le16 fc = hdr->frame_control;
        u8 hdr_len = ieee80211_hdrlen(fc);
        u16 wifi_seq;
@@ -1722,7 +1739,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
               IWL_HCMD_SCRATCHBUF_SIZE);
        iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
-                              IWL_HCMD_SCRATCHBUF_SIZE, 1);
+                              IWL_HCMD_SCRATCHBUF_SIZE, true);
 
        /* there must be data left over for TB1 or this code must be changed */
        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
@@ -1732,7 +1749,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
                goto out_err;
-       iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
+       iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
 
        /*
         * Set up TFD's third entry to point directly to remainder
@@ -1748,7 +1765,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                                           &txq->tfds[q->write_ptr]);
                        goto out_err;
                }
-               iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
+               iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
        }
 
        /* Set up entry for this TFD in Tx byte-count array */
@@ -1762,12 +1779,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        trace_iwlwifi_dev_tx_data(trans->dev, skb,
                                  skb->data + hdr_len, tb2_len);
 
-       if (!ieee80211_has_morefrags(fc)) {
-               txq->need_update = 1;
-       } else {
-               wait_write_ptr = 1;
-               txq->need_update = 0;
-       }
+       wait_write_ptr = ieee80211_has_morefrags(fc);
 
        /* start timer if queue currently empty */
        if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1775,22 +1787,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
 
        /* Tell device the write index *just past* this latest filled TFD */
-       q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-       iwl_pcie_txq_inc_wr_ptr(trans, txq);
+       q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+       if (!wait_write_ptr)
+               iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
        /*
         * At this point the frame is "transmitted" successfully
-        * and we will get a TX status notification eventually,
-        * regardless of the value of ret. "ret" only indicates
-        * whether or not we should update the write pointer.
+        * and we will get a TX status notification eventually.
         */
        if (iwl_queue_space(q) < q->high_mark) {
-               if (wait_write_ptr) {
-                       txq->need_update = 1;
+               if (wait_write_ptr)
                        iwl_pcie_txq_inc_wr_ptr(trans, txq);
-               } else {
+               else
                        iwl_stop_queue(trans, txq);
-               }
        }
        spin_unlock(&txq->lock);
        return 0;
index 54e344aed6e05d097c3eb60462d74064757b81fc..47a998d8f99e75bd5f59521a6593ca183cebb77a 100644 (file)
@@ -1006,9 +1006,8 @@ struct cmd_key_material {
 } __packed;
 
 static int lbs_set_key_material(struct lbs_private *priv,
-                               int key_type,
-                               int key_info,
-                               u8 *key, u16 key_len)
+                               int key_type, int key_info,
+                               const u8 *key, u16 key_len)
 {
        struct cmd_key_material cmd;
        int ret;
@@ -1610,7 +1609,7 @@ static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
  */
 
 static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
-                             u8 *mac, struct station_info *sinfo)
+                              const u8 *mac, struct station_info *sinfo)
 {
        struct lbs_private *priv = wiphy_priv(wiphy);
        s8 signal, noise;
index ab966f08024a0013194c8eaf82e472d866f3492e..407784aca627bcd69b8a8632b45be53390a17481 100644 (file)
@@ -90,7 +90,8 @@ do { if ((lbs_debug & (grp)) == (grp)) \
 #define lbs_deb_cfg80211(fmt, args...)  LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args)
 
 #ifdef DEBUG
-static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len)
+static inline void lbs_deb_hex(unsigned int grp, const char *prompt,
+                              const u8 *buf, int len)
 {
        int i = 0;
 
index c7366b07b568ab303985c651679b3bcdb409e051..e446fed7b3459b854b05ab7728c0c15c3ac51e7e 100644 (file)
@@ -71,8 +71,10 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
 
        skb->ip_summed = CHECKSUM_NONE;
 
-       if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
-               return process_rxed_802_11_packet(priv, skb);
+       if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+               ret = process_rxed_802_11_packet(priv, skb);
+               goto done;
+       }
 
        p_rx_pd = (struct rxpd *) skb->data;
        p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd +
@@ -86,7 +88,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
        if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
                lbs_deb_rx("rx err: frame received with bad length\n");
                dev->stats.rx_length_errors++;
-               ret = 0;
+               ret = -EINVAL;
                dev_kfree_skb(skb);
                goto done;
        }
index 9d7a52f5a4102abedd2dbebc03c26c3866da2a64..a312c653d1163fcc5c4ff394a54b0c7a96370d8f 100644 (file)
@@ -1676,7 +1676,9 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
        return 0;
 }
 
-static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                u32 queues, bool drop)
 {
        /* Not implemented, queues only on kernel side */
 }
@@ -2056,6 +2058,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
                            WIPHY_FLAG_AP_UAPSD |
                            WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+       hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
 
        /* ask mac80211 to reserve space for magic */
        hw->vif_data_size = sizeof(struct hwsim_vif_priv);
index c92f27aa71ede1f049c101a0ba185f31d982aaa3..706831df1fa2a4183cb3c5ad849f1aa8df8dbb14 100644 (file)
@@ -212,8 +212,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
                                      sizeof(struct mwifiex_ie_types_header));
                        memcpy((u8 *)vht_op +
                                sizeof(struct mwifiex_ie_types_header),
-                              (u8 *)bss_desc->bcn_vht_oper +
-                              sizeof(struct ieee_types_header),
+                              (u8 *)bss_desc->bcn_vht_oper,
                               le16_to_cpu(vht_op->header.len));
 
                        /* negotiate the channel width and central freq
index d14ead8beca860dba6c984d26df095b104bb1375..e1c2f67ae85e694d52b1f9e4ad69f2d50ab6ba91 100644 (file)
@@ -345,8 +345,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
 
                        memcpy((u8 *) ht_info +
                               sizeof(struct mwifiex_ie_types_header),
-                              (u8 *) bss_desc->bcn_ht_oper +
-                              sizeof(struct ieee_types_header),
+                              (u8 *)bss_desc->bcn_ht_oper,
                               le16_to_cpu(ht_info->header.len));
 
                        if (!(sband->ht_cap.cap &
@@ -750,3 +749,45 @@ void mwifiex_set_ba_params(struct mwifiex_private *priv)
 
        return;
 }
+
+u8 mwifiex_get_sec_chan_offset(int chan)
+{
+       u8 sec_offset;
+
+       switch (chan) {
+       case 36:
+       case 44:
+       case 52:
+       case 60:
+       case 100:
+       case 108:
+       case 116:
+       case 124:
+       case 132:
+       case 140:
+       case 149:
+       case 157:
+               sec_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+               break;
+       case 40:
+       case 48:
+       case 56:
+       case 64:
+       case 104:
+       case 112:
+       case 120:
+       case 128:
+       case 136:
+       case 144:
+       case 153:
+       case 161:
+               sec_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+               break;
+       case 165:
+       default:
+               sec_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+               break;
+       }
+
+       return sec_offset;
+}
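
A quick usage sketch of the new helper (illustrative values only; the BIT(2) handling mirrors the TDLS HT-operation hunk later in this patch, where the result is folded into the HT operation element's ht_param when VHT is enabled):

	/* Sketch only: channel 36 pairs with channel 40 above it, so the
	 * secondary channel sits above the primary.
	 */
	u8 ht_param = mwifiex_get_sec_chan_offset(36);	/* IEEE80211_HT_PARAM_CHA_SEC_ABOVE */
	ht_param |= BIT(2);	/* allow 40 MHz on the link, as the TDLS code below does */
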
index 40b007a00f4bd9e786c24f8f53059c38e29e79a4..0b73fa08f5d466b98d5292d0a5b16e1011c30be3 100644 (file)
@@ -63,6 +63,7 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
                                int cmd_action,
                                struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
 void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
+u8 mwifiex_get_sec_chan_offset(int chan);
 
 static inline u8
 mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
@@ -199,7 +200,7 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
 }
 
 static inline u8
-mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra)
+mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, const u8 *ra)
 {
        struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
        if (node)
index 63211707f93955c851bfb96d71f12d5ef1f4a313..5b32106182f81c11fbc2bd985166dad198f341b1 100644 (file)
@@ -100,6 +100,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
                            struct sk_buff *skb)
 {
        struct txpd *local_tx_pd;
+       struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
 
        skb_push(skb, sizeof(*local_tx_pd));
 
@@ -118,6 +119,9 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
        local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
                                                 sizeof(*local_tx_pd));
 
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+               local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
+
        if (local_tx_pd->tx_control == 0)
                /* TxCtrl set by user or default */
                local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
@@ -160,6 +164,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        int pad = 0, ret;
        struct mwifiex_tx_param tx_param;
        struct txpd *ptx_pd = NULL;
+       struct timeval tv;
        int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
 
        skb_src = skb_peek(&pra_list->skb_head);
@@ -182,8 +187,14 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
        tx_info_aggr->bss_type = tx_info_src->bss_type;
        tx_info_aggr->bss_num = tx_info_src->bss_num;
+
+       if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+               tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
        skb_aggr->priority = skb_src->priority;
 
+       do_gettimeofday(&tv);
+       skb_aggr->tstamp = timeval_to_ktime(tv);
+
        do {
                /* Check if AMSDU can accommodate this MSDU */
                if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
@@ -236,18 +247,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
                                                   skb_aggr, NULL);
        } else {
-               /*
-                * Padding per MSDU will affect the length of next
-                * packet and hence the exact length of next packet
-                * is uncertain here.
-                *
-                * Also, aggregation of transmission buffer, while
-                * downloading the data to the card, wont gain much
-                * on the AMSDU packets as the AMSDU packets utilizes
-                * the transmission buffer space to the maximum
-                * (adapter->tx_buf_size).
-                */
-               tx_param.next_pkt_len = 0;
+               if (skb_src)
+                       tx_param.next_pkt_len =
+                                       skb_src->len + sizeof(struct txpd);
+               else
+                       tx_param.next_pkt_len = 0;
 
                ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
                                                   skb_aggr, &tx_param);
index b9242c3dca435ee9a4d5123fd57ad0733a96a24d..3b55ce5690a54e226c5482f523a3c80d1e95d7bf 100644 (file)
@@ -200,4 +200,11 @@ getlog
 
        cat getlog
 
+fw_dump
+       This command is used to dump firmware memory into files.
+       A separate file will be created for each memory segment.
+       Usage:
+
+       cat fw_dump
+
 ===============================================================================
index 21ee27ab7b745261f9a398bcaacc92b24ce7c8f7..e95dec91a561e1172289dca0d6bbfb35b38add56 100644 (file)
@@ -994,7 +994,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
  */
 static int
 mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
-                            u8 *mac, struct station_info *sinfo)
+                            const u8 *mac, struct station_info *sinfo)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
@@ -1270,7 +1270,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
  */
 static int
 mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
-                            u8 *mac)
+                            const u8 *mac)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        struct mwifiex_sta_node *sta_node;
@@ -2629,7 +2629,7 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
  */
 static int
 mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
-                          u8 *peer, u8 action_code, u8 dialog_token,
+                          const u8 *peer, u8 action_code, u8 dialog_token,
                           u16 status_code, u32 peer_capability,
                           const u8 *extra_ies, size_t extra_ies_len)
 {
@@ -2701,7 +2701,7 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
 
 static int
 mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
-                          u8 *peer, enum nl80211_tdls_operation action)
+                          const u8 *peer, enum nl80211_tdls_operation action)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
@@ -2748,9 +2748,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 }
 
 static int
-mwifiex_cfg80211_add_station(struct wiphy *wiphy,
-                            struct net_device *dev,
-                            u8 *mac, struct station_parameters *params)
+mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
+                            const u8 *mac, struct station_parameters *params)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
@@ -2765,9 +2764,9 @@ mwifiex_cfg80211_add_station(struct wiphy *wiphy,
 }
 
 static int
-mwifiex_cfg80211_change_station(struct wiphy *wiphy,
-                               struct net_device *dev,
-                               u8 *mac, struct station_parameters *params)
+mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
+                               const u8 *mac,
+                               struct station_parameters *params)
 {
        int ret;
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
index 1062c918a7bffb19cf93c1aba0daa4490856ba65..8dee6c86f4f1dc91e65978b6f7443ac9f00c2118 100644 (file)
@@ -955,8 +955,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
                        adapter->cmd_wait_q.status = -ETIMEDOUT;
                        wake_up_interruptible(&adapter->cmd_wait_q.wait);
                        mwifiex_cancel_pending_ioctl(adapter);
-                       /* reset cmd_sent flag to unblock new commands */
-                       adapter->cmd_sent = false;
                }
        }
        if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
index b8a49aad12fd662434ce2a29aaa8edecfb52ba0b..7b419bbcd5444f5c5abdf40ffb2368087b77e89a 100644 (file)
@@ -256,6 +256,29 @@ free_and_exit:
        return ret;
 }
 
+/*
+ * Proc firmware dump read handler.
+ *
+ * This function is called when the 'fw_dump' file is opened for
+ * reading.
+ * This function dumps firmware memory into different files
+ * (e.g. DTCM, ITCM, SQRAM), one file per memory segment, for
+ * debugging.
+ */
+static ssize_t
+mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
+                    size_t count, loff_t *ppos)
+{
+       struct mwifiex_private *priv = file->private_data;
+
+       if (!priv->adapter->if_ops.fw_dump)
+               return -EIO;
+
+       priv->adapter->if_ops.fw_dump(priv->adapter);
+
+       return 0;
+}
+
 /*
  * Proc getlog file read handler.
  *
@@ -699,6 +722,7 @@ static const struct file_operations mwifiex_dfs_##name##_fops = {       \
 MWIFIEX_DFS_FILE_READ_OPS(info);
 MWIFIEX_DFS_FILE_READ_OPS(debug);
 MWIFIEX_DFS_FILE_READ_OPS(getlog);
+MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
 MWIFIEX_DFS_FILE_OPS(regrdwr);
 MWIFIEX_DFS_FILE_OPS(rdeeprom);
 
@@ -722,6 +746,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
        MWIFIEX_DFS_ADD_FILE(getlog);
        MWIFIEX_DFS_ADD_FILE(regrdwr);
        MWIFIEX_DFS_ADD_FILE(rdeeprom);
+       MWIFIEX_DFS_ADD_FILE(fw_dump);
 }
 
 /*
index e7b3e16e5d34f1f8703ec2f6d4e21e9388c49edd..38da6ff6f41623618efa22add335ffef1fa46828 100644 (file)
 #define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED      2
 #define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED      16
 
-#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE        16
-#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE        32
+#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE        64
+#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE        64
 #define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE        32
 #define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE        16
-#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE   32
-#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE   48
+#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE   64
+#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE   64
 #define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE   48
 #define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE   32
 
index b485dc1ae5ebc42c5606e37723189d20e7d98b16..42eaeda1dc823d1007d97111f46bda16a9d45ca8 100644 (file)
@@ -169,6 +169,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_TDLS_IDLE_TIMEOUT  (PROPRIETARY_TLV_BASE_ID + 194)
 #define TLV_TYPE_FW_API_REV         (PROPRIETARY_TLV_BASE_ID + 199)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
@@ -229,6 +230,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
 #define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
 #define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
+#define ISALLOWED_CHANWIDTH40(ht_param) (ht_param & BIT(2))
 
 /* httxcfg bitmap
  * 0           reserved
@@ -487,6 +489,7 @@ enum P2P_MODES {
 #define EVENT_UAP_MIC_COUNTERMEASURES   0x0000004c
 #define EVENT_HOSTWAKE_STAIE           0x0000004d
 #define EVENT_CHANNEL_SWITCH_ANN        0x00000050
+#define EVENT_TDLS_GENERIC_EVENT        0x00000052
 #define EVENT_EXT_SCAN_REPORT           0x00000058
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 
@@ -519,6 +522,7 @@ enum P2P_MODES {
 #define ACT_TDLS_DELETE            0x00
 #define ACT_TDLS_CREATE            0x01
 #define ACT_TDLS_CONFIG            0x02
+#define TDLS_EVENT_LINK_TEAR_DOWN  3
 
 #define MWIFIEX_FW_V15            15
 
@@ -535,6 +539,7 @@ struct mwifiex_ie_types_data {
 #define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
 #define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
 #define MWIFIEX_TXPD_FLAGS_TDLS_PACKET      0x10
+#define MWIFIEX_RXPD_FLAGS_TDLS_PACKET      0x01
 
 struct txpd {
        u8 bss_type;
@@ -577,7 +582,7 @@ struct rxpd {
         * [Bit 7] Reserved
         */
        u8 ht_info;
-       u8 reserved;
+       u8 flags;
 } __packed;
 
 struct uap_txpd {
@@ -708,6 +713,13 @@ struct mwifiex_ie_types_vendor_param_set {
        u8 ie[MWIFIEX_MAX_VSIE_LEN];
 };
 
+#define MWIFIEX_TDLS_IDLE_TIMEOUT      60
+
+struct mwifiex_ie_types_tdls_idle_timeout {
+       struct mwifiex_ie_types_header header;
+       __le16 value;
+} __packed;
+
 struct mwifiex_ie_types_rsn_param_set {
        struct mwifiex_ie_types_header header;
        u8 rsn_ie[1];
@@ -1745,6 +1757,15 @@ struct host_cmd_ds_802_11_subsc_evt {
        __le16 events;
 } __packed;
 
+struct mwifiex_tdls_generic_event {
+       __le16 type;
+       u8 peer_mac[ETH_ALEN];
+       union {
+               __le16 reason_code;
+               __le16 reserved;
+       } u;
+} __packed;
+
 struct mwifiex_ie {
        __le16 ie_index;
        __le16 mgmt_subtype_mask;
index ee494db5406097c35a0f22b365e2ad2e1ac674f8..1b576722671d5e6f228363c36c94c4ef47867980 100644 (file)
@@ -303,7 +303,7 @@ struct mwifiex_ds_ant_cfg {
        u32 rx_ant;
 };
 
-#define MWIFIEX_NUM_OF_CMD_BUFFER      20
+#define MWIFIEX_NUM_OF_CMD_BUFFER      50
 #define MWIFIEX_SIZE_OF_CMD_BUFFER     2048
 
 enum {
index 9c771b3e99186ffe838f771b217216ebc5a43454..cbabc12fbda390d063218375eb2b4cadc3911b8f 100644 (file)
@@ -521,7 +521,6 @@ done:
                release_firmware(adapter->firmware);
                adapter->firmware = NULL;
        }
-       complete(&adapter->fw_load);
        if (init_failed)
                mwifiex_free_adapter(adapter);
        up(sem);
@@ -535,7 +534,6 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
 {
        int ret;
 
-       init_completion(&adapter->fw_load);
        ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
                                      adapter->dev, GFP_KERNEL, adapter,
                                      mwifiex_fw_dpc);
index d53e1e8c9467a62663c4d28df86e623237cdc45f..3f25feb1508ef52fc704b3779988a1dbb6d4304e 100644 (file)
@@ -672,6 +672,7 @@ struct mwifiex_if_ops {
        int (*init_fw_port) (struct mwifiex_adapter *);
        int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
        void (*card_reset) (struct mwifiex_adapter *);
+       void (*fw_dump)(struct mwifiex_adapter *);
        int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
 };
 
@@ -787,7 +788,6 @@ struct mwifiex_adapter {
        struct mwifiex_wait_queue cmd_wait_q;
        u8 scan_wait_q_woken;
        spinlock_t queue_lock;          /* lock for tx queues */
-       struct completion fw_load;
        u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
        u16 max_mgmt_ie_index;
        u8 scan_delay_cnt;
@@ -910,8 +910,6 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
                                  struct sk_buff *skb);
 int mwifiex_process_sta_event(struct mwifiex_private *);
 int mwifiex_process_uap_event(struct mwifiex_private *);
-struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
 void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
 void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
 void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
@@ -1220,26 +1218,26 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
 extern const struct ethtool_ops mwifiex_ethtool_ops;
 
 void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
-void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac);
 void
 mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
                       int ies_len, struct mwifiex_sta_node *node);
 struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac);
+mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
 struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
-int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer,
+mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
                                 u8 action_code, u8 dialog_token,
                                 u16 status_code, const u8 *extra_ies,
                                 size_t extra_ies_len);
-int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
-                                u8 *peer, u8 action_code, u8 dialog_token,
-                                u16 status_code, const u8 *extra_ies,
-                                size_t extra_ies_len);
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
+                                  u8 action_code, u8 dialog_token,
+                                  u16 status_code, const u8 *extra_ies,
+                                  size_t extra_ies_len);
 void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
                                       u8 *buf, int len);
-int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action);
-int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac);
+int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action);
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac);
 void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
 bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
 u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
index a7e8b96b2d9024de8c34e5e04b317c66d2e22820..574d4b59746801cc34ac78e6e4550c7d92aa6e9d 100644 (file)
@@ -221,9 +221,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        if (!adapter || !adapter->priv_num)
                return;
 
-       /* In case driver is removed when asynchronous FW load is in progress */
-       wait_for_completion(&adapter->fw_load);
-
        if (user_rmmod) {
 #ifdef CONFIG_PM_SLEEP
                if (adapter->is_suspended)
@@ -1074,6 +1071,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
  * is mapped to PCI device memory. Tx ring pointers are advanced accordingly.
  * Download ready interrupt to FW is deferred if Tx ring is not full and
  * additional payload can be accommodated.
+ * The caller must ensure the tx_param passed to this function is not NULL.
  */
 static int
 mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
index 7b3af3d29ded478ad658eed5a3836403d6dd7542..d75f4ebd4bdce7767c797fe28c9fd3f9eb4fa2a9 100644 (file)
@@ -29,9 +29,6 @@
 #define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN   14
 
 #define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD      4
-#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD   15
-#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD  27
-#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD  35
 
 /* Memory needed to store a max sized Channel List TLV for a firmware scan */
 #define CHAN_TLV_MAX_SIZE  (sizeof(struct mwifiex_ie_types_header)         \
@@ -1055,20 +1052,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
 
        /*
         * In associated state we will reduce the number of channels scanned per
-        * scan command to avoid any traffic delay/loss. This number is decided
-        * based on total number of channels to be scanned due to constraints
-        * of command buffers.
+        * scan command to 1 to avoid any traffic delay/loss.
         */
-       if (priv->media_connected) {
-               if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
+       if (priv->media_connected)
                        *max_chan_per_scan = 1;
-               else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
-                       *max_chan_per_scan = 2;
-               else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
-                       *max_chan_per_scan = 3;
-               else
-                       *max_chan_per_scan = 4;
-       }
 }
 
 /*
@@ -1353,23 +1340,17 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                                              bss_entry->beacon_buf);
                        break;
                case WLAN_EID_BSS_COEX_2040:
-                       bss_entry->bcn_bss_co_2040 = current_ptr +
-                               sizeof(struct ieee_types_header);
-                       bss_entry->bss_co_2040_offset = (u16) (current_ptr +
-                                       sizeof(struct ieee_types_header) -
-                                               bss_entry->beacon_buf);
+                       bss_entry->bcn_bss_co_2040 = current_ptr;
+                       bss_entry->bss_co_2040_offset =
+                               (u16) (current_ptr - bss_entry->beacon_buf);
                        break;
                case WLAN_EID_EXT_CAPABILITY:
-                       bss_entry->bcn_ext_cap = current_ptr +
-                               sizeof(struct ieee_types_header);
-                       bss_entry->ext_cap_offset = (u16) (current_ptr +
-                                       sizeof(struct ieee_types_header) -
-                                       bss_entry->beacon_buf);
+                       bss_entry->bcn_ext_cap = current_ptr;
+                       bss_entry->ext_cap_offset =
+                               (u16) (current_ptr - bss_entry->beacon_buf);
                        break;
                case WLAN_EID_OPMODE_NOTIF:
-                       bss_entry->oper_mode =
-                               (void *)(current_ptr +
-                                        sizeof(struct ieee_types_header));
+                       bss_entry->oper_mode = (void *)current_ptr;
                        bss_entry->oper_mode_offset =
                                        (u16)((u8 *)bss_entry->oper_mode -
                                              bss_entry->beacon_buf);
index d206f04d499498d6d9c7ba92a80685588ae7bc96..4ce3d7b33991ace2cdd3ba0bcfe50c728e59220d 100644 (file)
@@ -85,6 +85,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                card->supports_sdio_new_mode = data->supports_sdio_new_mode;
                card->has_control_mask = data->has_control_mask;
                card->tx_buf_size = data->tx_buf_size;
+               card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
+               card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
        }
 
        sdio_claim_host(func);
@@ -177,9 +179,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
        if (!adapter || !adapter->priv_num)
                return;
 
-       /* In case driver is removed when asynchronous FW load is in progress */
-       wait_for_completion(&adapter->fw_load);
-
        if (user_rmmod) {
                if (adapter->is_suspended)
                        mwifiex_sdio_resume(adapter->dev);
@@ -1679,8 +1678,12 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
        if (ret) {
                if (type == MWIFIEX_TYPE_CMD)
                        adapter->cmd_sent = false;
-               if (type == MWIFIEX_TYPE_DATA)
+               if (type == MWIFIEX_TYPE_DATA) {
                        adapter->data_sent = false;
+                       /* restore curr_wr_port in error cases */
+                       card->curr_wr_port = port;
+                       card->mp_wr_bitmap |= (u32)(1 << card->curr_wr_port);
+               }
        } else {
                if (type == MWIFIEX_TYPE_DATA) {
                        if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port)))
@@ -1842,8 +1845,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
        card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
                                       card->mp_agg_pkt_limit, GFP_KERNEL);
        ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
-                                            SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
-                                            SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
+                                            card->mp_tx_agg_buf_size,
+                                            card->mp_rx_agg_buf_size);
        if (ret) {
                dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
                kfree(card->mp_regs);
index c71201b2e2a333c20f926bacb4842395230c6926..6eea30b43ed714f3bd81f9453a5008001a11b33d 100644 (file)
 #define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U)
 #define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U)
 
-#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE        (8192)     /* 8K */
-
-/* Multi port RX aggregation buffer size */
-#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE        (16384)    /* 16K */
+#define MWIFIEX_MP_AGGR_BUF_SIZE_16K   (16384)
+#define MWIFIEX_MP_AGGR_BUF_SIZE_32K   (32768)
 
 /* Misc. Config Register : Auto Re-enable interrupts */
 #define AUTO_RE_ENABLE_INT              BIT(4)
@@ -234,6 +232,8 @@ struct sdio_mmc_card {
        bool supports_sdio_new_mode;
        bool has_control_mask;
        u16 tx_buf_size;
+       u32 mp_tx_agg_buf_size;
+       u32 mp_rx_agg_buf_size;
 
        u32 mp_rd_bitmap;
        u32 mp_wr_bitmap;
@@ -258,6 +258,8 @@ struct mwifiex_sdio_device {
        bool supports_sdio_new_mode;
        bool has_control_mask;
        u16 tx_buf_size;
+       u32 mp_tx_agg_buf_size;
+       u32 mp_rx_agg_buf_size;
 };
 
 static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -315,6 +317,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
        .supports_sdio_new_mode = false,
        .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -325,6 +329,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
        .supports_sdio_new_mode = false,
        .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -335,6 +341,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
        .supports_sdio_new_mode = false,
        .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -345,6 +353,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
        .supports_sdio_new_mode = true,
        .has_control_mask = false,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
 };
 
 /*
index e3cac1495cc705bcf3c05b479a16981578deff98..88202ce0c13965fdff679c506a3772ef287e6d54 100644 (file)
@@ -1546,6 +1546,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
        struct mwifiex_ie_types_extcap *extcap;
        struct mwifiex_ie_types_vhtcap *vht_capab;
        struct mwifiex_ie_types_aid *aid;
+       struct mwifiex_ie_types_tdls_idle_timeout *timeout;
        u8 *pos, qos_info;
        u16 config_len = 0;
        struct station_parameters *params = priv->sta_params;
@@ -1643,6 +1644,12 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
                        config_len += sizeof(struct mwifiex_ie_types_aid);
                }
 
+               timeout = (void *)(pos + config_len);
+               timeout->header.type = cpu_to_le16(TLV_TYPE_TDLS_IDLE_TIMEOUT);
+               timeout->header.len = cpu_to_le16(sizeof(timeout->value));
+               timeout->value = cpu_to_le16(MWIFIEX_TDLS_IDLE_TIMEOUT);
+               config_len += sizeof(struct mwifiex_ie_types_tdls_idle_timeout);
+
                break;
        default:
                dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
index bfebb0144df5ac5ae88bd78dd909888d4bb0a0ea..577f2979ed8f2bcacbacc6af6ce9af03d3f86137 100644 (file)
@@ -865,14 +865,20 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
 
        switch (action) {
        case ACT_TDLS_DELETE:
-               if (reason)
-                       dev_err(priv->adapter->dev,
-                               "TDLS link delete for %pM failed: reason %d\n",
-                               cmd_tdls_oper->peer_mac, reason);
-               else
+               if (reason) {
+                       if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
+                               dev_dbg(priv->adapter->dev,
+                                       "TDLS link delete for %pM failed: reason %d\n",
+                                       cmd_tdls_oper->peer_mac, reason);
+                       else
+                               dev_err(priv->adapter->dev,
+                                       "TDLS link delete for %pM failed: reason %d\n",
+                                       cmd_tdls_oper->peer_mac, reason);
+               } else {
                        dev_dbg(priv->adapter->dev,
-                               "TDLS link config for %pM successful\n",
+                               "TDLS link delete for %pM successful\n",
                                cmd_tdls_oper->peer_mac);
+               }
                break;
        case ACT_TDLS_CREATE:
                if (reason) {
index 368450cc56c7d9e19b8c2a74c47d43a9c5c91b1f..f6395ef11a721b8fc6d8ee797fb34a72b7c2f43d 100644 (file)
@@ -134,6 +134,46 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
                netif_carrier_off(priv->netdev);
 }
 
+static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
+                                   struct sk_buff *event_skb)
+{
+       int ret = 0;
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct mwifiex_sta_node *sta_ptr;
+       struct mwifiex_tdls_generic_event *tdls_evt =
+                       (void *)event_skb->data + sizeof(adapter->event_cause);
+
+       /* the 2 reserved bytes are not mandatory in the TDLS event */
+       if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
+                             sizeof(u16) - sizeof(adapter->event_cause))) {
+               dev_err(adapter->dev, "Invalid event length!\n");
+               return -1;
+       }
+
+       sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac);
+       if (!sta_ptr) {
+               dev_err(adapter->dev, "cannot get sta entry!\n");
+               return -1;
+       }
+
+       switch (le16_to_cpu(tdls_evt->type)) {
+       case TDLS_EVENT_LINK_TEAR_DOWN:
+               cfg80211_tdls_oper_request(priv->netdev,
+                                          tdls_evt->peer_mac,
+                                          NL80211_TDLS_TEARDOWN,
+                                          le16_to_cpu(tdls_evt->u.reason_code),
+                                          GFP_KERNEL);
+               ret = mwifiex_tdls_oper(priv, tdls_evt->peer_mac,
+                                       MWIFIEX_TDLS_DISABLE_LINK);
+               queue_work(adapter->workqueue, &adapter->main_work);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
 /*
  * This function handles events generated by firmware.
  *
@@ -459,6 +499,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                        false);
                break;
 
+       case EVENT_TDLS_GENERIC_EVENT:
+               ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
+               break;
+
        default:
                dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
                        eventcause);
index ed26387eccf56db59bca98f7ed6fd7e6a4065aae..8b639d7fe6df263814901554a2c43363be6791e9 100644 (file)
@@ -183,6 +183,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
        struct rx_packet_hdr *rx_pkt_hdr;
        u8 ta[ETH_ALEN];
        u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
+       struct mwifiex_sta_node *sta_ptr;
 
        local_rx_pd = (struct rxpd *) (skb->data);
        rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
@@ -213,14 +214,25 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
         * If the packet is not an unicast packet then send the packet
         * directly to os. Don't pass thru rx reordering
         */
-       if (!IS_11N_ENABLED(priv) ||
+       if ((!IS_11N_ENABLED(priv) &&
+            !(ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+              !(local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET))) ||
            !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
                mwifiex_process_rx_packet(priv, skb);
                return ret;
        }
 
-       if (mwifiex_queuing_ra_based(priv)) {
+       if (mwifiex_queuing_ra_based(priv) ||
+           (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+            local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET)) {
                memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
+               if (local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET &&
+                   local_rx_pd->priority < MAX_NUM_TID) {
+                       sta_ptr = mwifiex_get_sta_entry(priv, ta);
+                       if (sta_ptr)
+                               sta_ptr->rx_seq[local_rx_pd->priority] =
+                                             le16_to_cpu(local_rx_pd->seq_num);
+               }
        } else {
                if (rx_pkt_type != PKT_TYPE_BAR)
                        priv->rx_seq[local_rx_pd->priority] = seq_num;
index 1236a5de7bca833adfd0eab1ed6cce865047479b..5fce7e78a36e773c28875a7636a666b50ced36d5 100644 (file)
@@ -128,6 +128,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
        struct txpd *local_tx_pd;
+       struct mwifiex_tx_param tx_param;
 /* sizeof(struct txpd) + Interface specific header */
 #define NULL_PACKET_HDR 64
        u32 data_len = NULL_PACKET_HDR;
@@ -168,8 +169,9 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
                                                   skb, NULL);
        } else {
                skb_push(skb, INTF_HEADER_LEN);
+               tx_param.next_pkt_len = 0;
                ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
-                                                  skb, NULL);
+                                                  skb, &tx_param);
        }
        switch (ret) {
        case -EBUSY:
index 97662a1ba58cf06a08ff3372f4808e5a0c7cbcd9..e73034fbbde9263b8e234ee7cd7747a1404c8a40 100644 (file)
@@ -25,8 +25,8 @@
 #define TDLS_RESP_FIX_LEN     8
 #define TDLS_CONFIRM_FIX_LEN  6
 
-static void
-mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
+static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
+                                        const u8 *mac, u8 status)
 {
        struct mwifiex_ra_list_tbl *ra_list;
        struct list_head *tid_list;
@@ -84,7 +84,8 @@ mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
        return;
 }
 
-static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
+static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
+                                     const u8 *mac)
 {
        struct mwifiex_ra_list_tbl *ra_list;
        struct list_head *ra_list_head;
@@ -185,8 +186,50 @@ static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
        return 0;
 }
 
+static int
+mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac,
+                        u8 vht_enabled, struct sk_buff *skb)
+{
+       struct ieee80211_ht_operation *ht_oper;
+       struct mwifiex_sta_node *sta_ptr;
+       struct mwifiex_bssdescriptor *bss_desc =
+                                       &priv->curr_bss_params.bss_descriptor;
+       u8 *pos;
+
+       sta_ptr = mwifiex_get_sta_entry(priv, mac);
+       if (unlikely(!sta_ptr)) {
+               dev_warn(priv->adapter->dev,
+                        "TDLS peer station not found in list\n");
+               return -1;
+       }
+
+       pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
+       *pos++ = WLAN_EID_HT_OPERATION;
+       *pos++ = sizeof(struct ieee80211_ht_operation);
+       ht_oper = (void *)pos;
+
+       ht_oper->primary_chan = bss_desc->channel;
+
+       /* follow AP's channel bandwidth */
+       if (ISSUPP_CHANWIDTH40(priv->adapter->hw_dot_11n_dev_cap) &&
+           bss_desc->bcn_ht_cap &&
+           ISALLOWED_CHANWIDTH40(bss_desc->bcn_ht_oper->ht_param))
+               ht_oper->ht_param = bss_desc->bcn_ht_oper->ht_param;
+
+       if (vht_enabled) {
+               ht_oper->ht_param =
+                         mwifiex_get_sec_chan_offset(bss_desc->channel);
+               ht_oper->ht_param |= BIT(2);
+       }
+
+       memcpy(&sta_ptr->tdls_cap.ht_oper, ht_oper,
+              sizeof(struct ieee80211_ht_operation));
+
+       return 0;
+}
+
 static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
-                                    u8 *mac, struct sk_buff *skb)
+                                    const u8 *mac, struct sk_buff *skb)
 {
        struct mwifiex_bssdescriptor *bss_desc;
        struct ieee80211_vht_operation *vht_oper;
@@ -325,8 +368,9 @@ static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
 }
 
 static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
-                            u8 *peer, u8 action_code, u8 dialog_token,
-                            u16 status_code, struct sk_buff *skb)
+                                       const u8 *peer, u8 action_code,
+                                       u8 dialog_token,
+                                       u16 status_code, struct sk_buff *skb)
 {
        struct ieee80211_tdls_data *tf;
        int ret;
@@ -428,6 +472,17 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
                                dev_kfree_skb_any(skb);
                                return ret;
                        }
+                       ret = mwifiex_tdls_add_ht_oper(priv, peer, 1, skb);
+                       if (ret) {
+                               dev_kfree_skb_any(skb);
+                               return ret;
+                       }
+               } else {
+                       ret = mwifiex_tdls_add_ht_oper(priv, peer, 0, skb);
+                       if (ret) {
+                               dev_kfree_skb_any(skb);
+                               return ret;
+                       }
                }
                break;
 
@@ -453,7 +508,8 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
 }
 
 static void
-mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
+mwifiex_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
+                        const u8 *peer, const u8 *bssid)
 {
        struct ieee80211_tdls_lnkie *lnkid;
 
@@ -467,8 +523,8 @@ mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
        memcpy(lnkid->resp_sta, peer, ETH_ALEN);
 }
 
-int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
-                                u8 *peer, u8 action_code, u8 dialog_token,
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
+                                u8 action_code, u8 dialog_token,
                                 u16 status_code, const u8 *extra_ies,
                                 size_t extra_ies_len)
 {
@@ -560,7 +616,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
 }
 
 static int
-mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
+mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+                                   const u8 *peer,
                                    u8 action_code, u8 dialog_token,
                                    u16 status_code, struct sk_buff *skb)
 {
@@ -638,10 +695,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
        return 0;
 }
 
-int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
-                                u8 *peer, u8 action_code, u8 dialog_token,
-                                u16 status_code, const u8 *extra_ies,
-                                size_t extra_ies_len)
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
+                                  u8 action_code, u8 dialog_token,
+                                  u16 status_code, const u8 *extra_ies,
+                                  size_t extra_ies_len)
 {
        struct sk_buff *skb;
        struct mwifiex_txinfo *tx_info;
@@ -848,7 +905,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
 }
 
 static int
-mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
 {
        struct mwifiex_sta_node *sta_ptr;
        struct mwifiex_ds_tdls_oper tdls_oper;
@@ -869,7 +926,7 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
 }
 
 static int
-mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
 {
        struct mwifiex_sta_node *sta_ptr;
        struct mwifiex_ds_tdls_oper tdls_oper;
@@ -896,7 +953,7 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
 }
 
 static int
-mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
 {
        struct mwifiex_sta_node *sta_ptr;
        struct mwifiex_ds_tdls_oper tdls_oper;
@@ -925,7 +982,7 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
 }
 
 static int
-mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
 {
        struct mwifiex_sta_node *sta_ptr;
        struct ieee80211_mcs_info mcs;
@@ -982,7 +1039,7 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
        return 0;
 }
 
-int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
+int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action)
 {
        switch (action) {
        case MWIFIEX_TDLS_ENABLE_LINK:
@@ -997,7 +1054,7 @@ int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
        return 0;
 }
 
-int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac)
 {
        struct mwifiex_sta_node *sta_ptr;
 
index 9be6544bddedf9371e79fd468ebb0aa2dbcabe54..32643555dd2a32a302d1301427e463d877c8260a 100644 (file)
@@ -175,17 +175,19 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
                switch (GET_RXSTBC(cap_info)) {
                case MWIFIEX_RX_STBC1:
                        /* HT_CAP 1X1 mode */
-                       memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
+                       bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
                        break;
                case MWIFIEX_RX_STBC12: /* fall through */
                case MWIFIEX_RX_STBC123:
                        /* HT_CAP 2X2 mode */
-                       memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+                       bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+                       bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
                        break;
                default:
                        dev_warn(priv->adapter->dev,
                                 "Unsupported RX-STBC, default to 2x2\n");
-                       memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+                       bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+                       bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
                        break;
                }
                priv->ap_11n_enabled = 1;
index edbe4aff00d85b569534372ea34e7e017552b234..a8ce8130cfaeeda08a2a08f7b693540fe79d9f85 100644 (file)
@@ -22,9 +22,9 @@
 
 #define USB_VERSION    "1.0"
 
+static u8 user_rmmod;
 static struct mwifiex_if_ops usb_ops;
 static struct semaphore add_remove_card_sem;
-static struct usb_card_rec *usb_card;
 
 static struct usb_device_id mwifiex_usb_table[] = {
        /* 8797 */
@@ -532,28 +532,38 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
 static void mwifiex_usb_disconnect(struct usb_interface *intf)
 {
        struct usb_card_rec *card = usb_get_intfdata(intf);
+       struct mwifiex_adapter *adapter;
 
-       if (!card) {
-               pr_err("%s: card is NULL\n", __func__);
+       if (!card || !card->adapter) {
+               pr_err("%s: card or card->adapter is NULL\n", __func__);
                return;
        }
 
-       mwifiex_usb_free(card);
+       adapter = card->adapter;
+       if (!adapter->priv_num)
+               return;
 
-       if (card->adapter) {
-               struct mwifiex_adapter *adapter = card->adapter;
+       if (user_rmmod) {
+#ifdef CONFIG_PM
+               if (adapter->is_suspended)
+                       mwifiex_usb_resume(intf);
+#endif
 
-               if (!adapter->priv_num)
-                       return;
+               mwifiex_deauthenticate_all(adapter);
 
-               dev_dbg(adapter->dev, "%s: removing card\n", __func__);
-               mwifiex_remove_card(adapter, &add_remove_card_sem);
+               mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+                                                         MWIFIEX_BSS_ROLE_ANY),
+                                        MWIFIEX_FUNC_SHUTDOWN);
        }
 
+       mwifiex_usb_free(card);
+
+       dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+       mwifiex_remove_card(adapter, &add_remove_card_sem);
+
        usb_set_intfdata(intf, NULL);
        usb_put_dev(interface_to_usbdev(intf));
        kfree(card);
-       usb_card = NULL;
 
        return;
 }
@@ -565,6 +575,7 @@ static struct usb_driver mwifiex_usb_driver = {
        .id_table = mwifiex_usb_table,
        .suspend = mwifiex_usb_suspend,
        .resume = mwifiex_usb_resume,
+       .soft_unbind = 1,
 };
 
 static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
@@ -762,7 +773,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 
        card->adapter = adapter;
        adapter->dev = &card->udev->dev;
-       usb_card = card;
 
        switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
        case USB8897_PID_1:
@@ -1025,25 +1035,8 @@ static void mwifiex_usb_cleanup_module(void)
        if (!down_interruptible(&add_remove_card_sem))
                up(&add_remove_card_sem);
 
-       if (usb_card && usb_card->adapter) {
-               struct mwifiex_adapter *adapter = usb_card->adapter;
-
-               /* In case driver is removed when asynchronous FW downloading is
-                * in progress
-                */
-               wait_for_completion(&adapter->fw_load);
-
-#ifdef CONFIG_PM
-               if (adapter->is_suspended)
-                       mwifiex_usb_resume(usb_card->intf);
-#endif
-
-               mwifiex_deauthenticate_all(adapter);
-
-               mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
-                                                         MWIFIEX_BSS_ROLE_ANY),
-                                        MWIFIEX_FUNC_SHUTDOWN);
-       }
+       /* set the flag as the user is removing this module */
+       user_rmmod = 1;
 
        usb_deregister(&mwifiex_usb_driver);
 }
index c3824e37f3f24a30d951510a75cd4640346620d8..6da5abf52e61a4360b236411b09310205e8316ea 100644 (file)
@@ -259,7 +259,7 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
  * NULL is returned if station entry is not found in associated STA list.
  */
 struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
 {
        struct mwifiex_sta_node *node;
 
@@ -280,7 +280,7 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
  * If received mac address is NULL, NULL is returned.
  */
 struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac)
 {
        struct mwifiex_sta_node *node;
        unsigned long flags;
@@ -332,7 +332,7 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
 }
 
 /* This function will delete a station entry from station list */
-void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac)
 {
        struct mwifiex_sta_node *node;
        unsigned long flags;
index 0a7cc742aed71e0fd31267305be7a26ec71b8b52..6d9738a5dc311b73e57b010d54fcaa140cd75ec2 100644 (file)
@@ -92,7 +92,7 @@ mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
  * The function also initializes the list with the provided RA.
  */
 static struct mwifiex_ra_list_tbl *
-mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
+mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
 {
        struct mwifiex_ra_list_tbl *ra_list;
 
@@ -139,8 +139,7 @@ static u8 mwifiex_get_random_ba_threshold(void)
  * This function allocates and adds a RA list for all TIDs
  * with the given RA.
  */
-void
-mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
+void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
 {
        int i;
        struct mwifiex_ra_list_tbl *ra_list;
@@ -426,15 +425,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                                                        priv->tos_to_tid_inv[i];
                }
 
-               priv->aggr_prio_tbl[6].amsdu
-                                       = priv->aggr_prio_tbl[6].ampdu_ap
-                                       = priv->aggr_prio_tbl[6].ampdu_user
-                                       = BA_STREAM_NOT_ALLOWED;
-
-               priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
-                                       = priv->aggr_prio_tbl[7].ampdu_user
-                                       = BA_STREAM_NOT_ALLOWED;
-
                mwifiex_set_ba_params(priv);
                mwifiex_reset_11n_rx_seq_num(priv);
 
@@ -575,7 +565,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
  */
 static struct mwifiex_ra_list_tbl *
 mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
-                           u8 *ra_addr)
+                           const u8 *ra_addr)
 {
        struct mwifiex_ra_list_tbl *ra_list;
 
@@ -596,7 +586,8 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
  * retrieved.
  */
 struct mwifiex_ra_list_tbl *
-mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
+                           const u8 *ra_addr)
 {
        struct mwifiex_ra_list_tbl *ra_list;
 
@@ -657,7 +648,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
                        dev_dbg(adapter->dev,
                                "TDLS setup packet for %pM. Don't block\n", ra);
-               else
+               else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
                        tdls_status = mwifiex_get_tdls_link_status(priv, ra);
        }
 
index 83e42083ebff8cbc3b248f333111691e8a21fc1d..eca56e371a57bb5afb2df6165c24fbbc4d34e561 100644 (file)
@@ -99,7 +99,7 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                                 struct sk_buff *skb);
-void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
+void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
                              struct mwifiex_ra_list_tbl *ra, int tid);
 
@@ -123,7 +123,8 @@ void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
 int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
                               const struct host_cmd_ds_command *resp);
 struct mwifiex_ra_list_tbl *
-mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
+                           const u8 *ra_addr);
 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
 
 #endif /* !_MWIFIEX_WMM_H_ */
index 49300d04efdf0e7221a2b0f64f0475ad6f73ee59..e27e32851f1e50f8116c14c55f3bd2f405aee17f 100644 (file)
@@ -988,8 +988,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
  * tsc must be NULL or up to 8 bytes
  */
 int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
-                             int set_tx, u8 *key, u8 *rsc, size_t rsc_len,
-                             u8 *tsc, size_t tsc_len)
+                             int set_tx, const u8 *key, const u8 *rsc,
+                             size_t rsc_len, const u8 *tsc, size_t tsc_len)
 {
        struct {
                __le16 idx;
index 8f6831f4e328a8b5761611a6c79965ccdfa372ac..466d1ede76f16ee5436b680631e77047af2798af 100644 (file)
@@ -38,8 +38,8 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv);
 int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
 int __orinoco_hw_setup_enc(struct orinoco_private *priv);
 int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
-                             int set_tx, u8 *key, u8 *rsc, size_t rsc_len,
-                             u8 *tsc, size_t tsc_len);
+                             int set_tx, const u8 *key, const u8 *rsc,
+                             size_t rsc_len, const u8 *tsc, size_t tsc_len);
 int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
 int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
                                    struct net_device *dev,
index f9805c9353d2295f076d1f5cbe1eaaa5a1f0be72..1cbb7835806f7c0d96315279c43dc34167bb180f 100644 (file)
@@ -1687,7 +1687,7 @@ static int ezusb_probe(struct usb_interface *interface,
                firmware.code = fw_entry->data;
        }
        if (firmware.size && firmware.code) {
-               if (ezusb_firmware_download(upriv, &firmware))
+               if (ezusb_firmware_download(upriv, &firmware) < 0)
                        goto error;
        } else {
                err("No firmware to download");
index b7a867b50b9476fb2c54ad9ccbb28815e6fa70e6..6abdaf0aa052253800697eb1631d100683ba2ec7 100644 (file)
@@ -52,9 +52,9 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
        priv->keys[index].seq_len = seq_len;
 
        if (key_len)
-               memcpy(priv->keys[index].key, key, key_len);
+               memcpy((void *)priv->keys[index].key, key, key_len);
        if (seq_len)
-               memcpy(priv->keys[index].seq, seq, seq_len);
+               memcpy((void *)priv->keys[index].seq, seq, seq_len);
 
        switch (alg) {
        case ORINOCO_ALG_TKIP:
index eede90b63f847934a8a0bc45e63695696d88a82d..7be3a4839640c6eda6b8254fd7b81bbe5d01fd33 100644 (file)
@@ -669,7 +669,8 @@ static unsigned int p54_flush_count(struct p54_common *priv)
        return total;
 }
 
-static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop)
+static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+                     u32 queues, bool drop)
 {
        struct p54_common *priv = dev->priv;
        unsigned int total, i;
index cbf0a589d32af08c392271c7bbc3afa50f8e9c84..8330fa33e50b1e2f933f813ee187c407184780ae 100644 (file)
@@ -343,7 +343,7 @@ static void ray_detach(struct pcmcia_device *link)
        ray_release(link);
 
        local = netdev_priv(dev);
-       del_timer(&local->timer);
+       del_timer_sync(&local->timer);
 
        if (link->priv) {
                unregister_netdev(dev);
index 39d22a154341019fe8038bdfa1f7e1b4cb560cf2..d2a9a08210be1379b4e56ad266c1695ca6486d17 100644 (file)
@@ -517,7 +517,7 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
                                 u8 key_index, bool unicast, bool multicast);
 
 static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
-                                       u8 *mac, struct station_info *sinfo);
+                            const u8 *mac, struct station_info *sinfo);
 
 static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
                               int idx, u8 *mac, struct station_info *sinfo);
@@ -2490,7 +2490,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
 }
 
 static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
-                                       u8 *mac, struct station_info *sinfo)
+                            const u8 *mac, struct station_info *sinfo)
 {
        struct rndis_wlan_private *priv = wiphy_priv(wiphy);
        struct usbnet *usbdev = priv->usbdev;
index 84164747ace057ee11be9bc25bf8b63bb30407b2..54aaeb09debf568c9dcad47ae3892153c2d827d6 100644 (file)
@@ -656,6 +656,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
        case IEEE80211_AMPDU_TX_START:
                common->vif_info[ii].seq_start = seq_no;
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               status = 0;
                break;
 
        case IEEE80211_AMPDU_TX_STOP_CONT:
index 1b28cda6ca88124deff6c112a060f5af6692cefa..2eefbf159bc0d0abdcead9ff1caf6b867d95793f 100644 (file)
@@ -1083,7 +1083,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
 {
        if (status) {
                rsi_hal_send_sta_notify_frame(common,
-                                             NL80211_IFTYPE_STATION,
+                                             RSI_IFTYPE_STATION,
                                              STA_CONNECTED,
                                              bssid,
                                              qos_enable,
@@ -1092,7 +1092,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
                        rsi_send_auto_rate_request(common);
        } else {
                rsi_hal_send_sta_notify_frame(common,
-                                             NL80211_IFTYPE_STATION,
+                                             RSI_IFTYPE_STATION,
                                              STA_DISCONNECTED,
                                              bssid,
                                              qos_enable,
index ac67c4ad63c2d3177e3e70386ff27538b69b536f..225215a3b8bb484d76b47ed853afb3aeb6eb2130 100644 (file)
@@ -73,6 +73,7 @@
 #define RX_BA_INDICATION                1
 #define RSI_TBL_SZ                      40
 #define MAX_RETRIES                     8
+#define RSI_IFTYPE_STATION              0
 
 #define STD_RATE_MCS7                   0x07
 #define STD_RATE_MCS6                   0x06
index 41d4a8167dc32f368a8fdf061bea4fe9944fd0f1..c17fcf272728cb06ae25e95787003f6f59f52dba 100644 (file)
@@ -1005,10 +1005,9 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
                                   entry->skb->len + padding_len);
 
        /*
-        * Enable beaconing again.
+        * Restore beaconing state.
         */
-       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
 
        /*
         * Clean up beacon skb.
@@ -1039,13 +1038,14 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
 void rt2800_clear_beacon(struct queue_entry *entry)
 {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
-       rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+       rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &orig_reg);
+       reg = orig_reg;
        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
        rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
 
@@ -1055,10 +1055,9 @@ void rt2800_clear_beacon(struct queue_entry *entry)
        rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
 
        /*
-        * Enabled beaconing again.
+        * Restore beaconing state.
         */
-       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
 }
 EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
 
index e3b885d8f7dbfddda2f4ae71161b24edeefdc02c..010b76505243ed1cf15d1f176033cabd5ac23f3d 100644 (file)
@@ -1448,7 +1448,8 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
                      struct ieee80211_vif *vif, u16 queue,
                      const struct ieee80211_tx_queue_params *params);
 void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                    u32 queues, bool drop);
 int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
 int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
 void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
index ddeb5a709aa36d6375e58597104f4cc3cb632d12..212ac4842c1628a0d141104188626d55c616c487 100644 (file)
@@ -620,21 +620,19 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
                                      bss_conf->bssid);
 
-       /*
-        * Update the beacon. This is only required on USB devices. PCI
-        * devices fetch beacons periodically.
-        */
-       if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
-               rt2x00queue_update_beacon(rt2x00dev, vif);
-
        /*
         * Start/stop beaconing.
         */
        if (changes & BSS_CHANGED_BEACON_ENABLED) {
                if (!bss_conf->enable_beacon && intf->enable_beacon) {
-                       rt2x00queue_clear_beacon(rt2x00dev, vif);
                        rt2x00dev->intf_beaconing--;
                        intf->enable_beacon = false;
+                       /*
+                        * Clear beacon in the H/W for this vif. This is needed
+                        * to disable beaconing on this particular interface
+                        * and keep it running on other interfaces.
+                        */
+                       rt2x00queue_clear_beacon(rt2x00dev, vif);
 
                        if (rt2x00dev->intf_beaconing == 0) {
                                /*
@@ -645,11 +643,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                                rt2x00queue_stop_queue(rt2x00dev->bcn);
                                mutex_unlock(&intf->beacon_skb_mutex);
                        }
-
-
                } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
                        rt2x00dev->intf_beaconing++;
                        intf->enable_beacon = true;
+                       /*
+                        * Upload beacon to the H/W. This is only required on
+                        * USB devices. PCI devices fetch beacons periodically.
+                        */
+                       if (rt2x00_is_usb(rt2x00dev))
+                               rt2x00queue_update_beacon(rt2x00dev, vif);
 
                        if (rt2x00dev->intf_beaconing == 1) {
                                /*
@@ -747,7 +749,8 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
 
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                    u32 queues, bool drop)
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct data_queue *queue;
index 10572452cc21b76cc8fe4457298ea06bc3c875be..86c43d112a4b7d4f25bfbf123b7cbbfb98bd75b9 100644 (file)
@@ -68,6 +68,12 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
                }
        }
 
+       /* If the port is powered down, we get a -EPROTO error, and this
+        * leads to an endless loop. So just say that the device is gone.
+        */
+       if (status == -EPROTO)
+               clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+
        rt2x00_err(rt2x00dev,
                   "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
                   request, offset, status);
index 24402984ee5749f272609d82907cda4a68f750f6..9048a9cbe52cb929cfbd60797a00baa40c5d6583 100644 (file)
@@ -2031,13 +2031,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
 static void rt61pci_clear_beacon(struct queue_entry *entry)
 {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
-       rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+       reg = orig_reg;
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
        rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -2048,10 +2049,9 @@ static void rt61pci_clear_beacon(struct queue_entry *entry)
                                  HW_BEACON_OFFSET(entry->entry_idx), 0);
 
        /*
-        * Enable beaconing again.
+        * Restore global beaconing state.
         */
-       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
-       rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
+       rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
 }
 
 /*
index a140170b1eb3e63625ecde7b4cc43ec6bf1b87b1..95724ff9c7268700628866f433b66e82a8a9f7c4 100644 (file)
@@ -1597,13 +1597,14 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
 {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        unsigned int beacon_base;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
-       rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+       reg = orig_reg;
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
        rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -1614,10 +1615,9 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
        rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
 
        /*
-        * Enable beaconing again.
+        * Restore beaconing state.
         */
-       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
-       rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+       rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
 }
 
 static int rt73usb_get_tx_data_len(struct queue_entry *entry)
index 08b056db4a3b795282d1d9e878ab400d7090bcf2..21005bd8b43c973da6ebd24556da848032727bcf 100644 (file)
@@ -1,5 +1,5 @@
-rtl8180-objs           := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
+rtl818x_pci-objs       := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
 
-obj-$(CONFIG_RTL8180)  += rtl8180.o
+obj-$(CONFIG_RTL8180)  += rtl818x_pci.o
 
 ccflags-y += -Idrivers/net/wireless/rtl818x
index 98d8256f037788a4d9af76c02a4e939758a08e0e..2c1c02bafa10bbfe1f198279400a84ad08256bb4 100644 (file)
@@ -284,6 +284,8 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
                        rx_status.band = dev->conf.chandef.chan->band;
                        rx_status.mactime = tsft;
                        rx_status.flag |= RX_FLAG_MACTIME_START;
+                       if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
+                               rx_status.flag |= RX_FLAG_SHORTPRE;
                        if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
                                rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
 
@@ -461,18 +463,23 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
                            RTL818X_TX_DESC_FLAG_NO_ENC;
 
        rc_flags = info->control.rates[0].flags;
+
+       /* HW will perform RTS-CTS when only the RTS flag is set.
+        * HW will perform CTS-to-self when both RTS and CTS flags are set.
+        * The RTS rate and RTS duration will also be used for CTS-to-self.
+        */
        if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
                tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+               rts_duration = ieee80211_rts_duration(dev, priv->vif,
+                                               skb->len, info);
        } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
-               tx_flags |= RTL818X_TX_DESC_FLAG_CTS;
+               tx_flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
                tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+               rts_duration = ieee80211_ctstoself_duration(dev, priv->vif,
+                                               skb->len, info);
        }
 
-       if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
-               rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
-                                                     info);
-
        if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
                unsigned int remainder;
 
@@ -683,9 +690,8 @@ static void rtl8180_int_enable(struct ieee80211_hw *dev)
        struct rtl8180_priv *priv = dev->priv;
 
        if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
-               rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
-                         IMR_TBDER | IMR_THPDER |
-                         IMR_THPDER | IMR_THPDOK |
+               rtl818x_iowrite32(priv, &priv->map->IMR,
+                         IMR_TBDER | IMR_TBDOK |
                          IMR_TVODER | IMR_TVODOK |
                          IMR_TVIDER | IMR_TVIDOK |
                          IMR_TBEDER | IMR_TBEDOK |
@@ -911,7 +917,10 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
                reg32 &= 0x00ffff00;
                reg32 |= 0xb8000054;
                rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
-       }
+       } else
+               /* stop unused queues (no DMA alloc) */
+               rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+                           (1<<1) | (1<<2));
 
        priv->rf->init(dev);
 
index 0ca17cda48fa1c01b3a8dd2ed98c5fe0c5646640..629ad8cfa17b59bf4b5f8b11202602e34c9cce33 100644 (file)
@@ -253,14 +253,21 @@ static void rtl8187_tx(struct ieee80211_hw *dev,
        flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
        if (ieee80211_has_morefrags(tx_hdr->frame_control))
                flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
+
+       /* HW will perform RTS-CTS when only the RTS flag is set.
+        * HW will perform CTS-to-self when both RTS and CTS flags are set.
+        * The RTS rate and RTS duration will also be used for CTS-to-self.
+        */
        if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                flags |= RTL818X_TX_DESC_FLAG_RTS;
                flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
                rts_dur = ieee80211_rts_duration(dev, priv->vif,
                                                 skb->len, info);
        } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
-               flags |= RTL818X_TX_DESC_FLAG_CTS;
+               flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
                flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+               rts_dur = ieee80211_ctstoself_duration(dev, priv->vif,
+                                                skb->len, info);
        }
 
        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -381,6 +388,8 @@ static void rtl8187_rx_cb(struct urb *urb)
        rx_status.freq = dev->conf.chandef.chan->center_freq;
        rx_status.band = dev->conf.chandef.chan->band;
        rx_status.flag |= RX_FLAG_MACTIME_START;
+       if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
+               rx_status.flag |= RX_FLAG_SHORTPRE;
        if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
                rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
index 45ea4e1c4abe157ad952be2d8a5022efc1e1715a..7abef95d278bc61336d2829f40520afc1af81a1a 100644 (file)
@@ -334,9 +334,9 @@ struct rtl818x_csr {
  * I don't like to introduce a ton of "reserved"..
  * They are for RTL8187SE
  */
-#define REG_ADDR1(addr)        ((u8 __iomem *)priv->map + addr)
-#define REG_ADDR2(addr)        ((__le16 __iomem *)priv->map + (addr >> 1))
-#define REG_ADDR4(addr)        ((__le32 __iomem *)priv->map + (addr >> 2))
+#define REG_ADDR1(addr)        ((u8 __iomem *)priv->map + (addr))
+#define REG_ADDR2(addr)        ((__le16 __iomem *)priv->map + ((addr) >> 1))
+#define REG_ADDR4(addr)        ((__le32 __iomem *)priv->map + ((addr) >> 2))
 
 #define FEMR_SE                REG_ADDR2(0x1D4)
 #define ARFR           REG_ADDR2(0x1E0)
index 4ec424f26672028550ab8b19b944d451766ee08c..b1ed6d0796f67e187fb928423edda6977c91f863 100644 (file)
@@ -1387,7 +1387,8 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
  * before switch channel or power save, or tx buffer packet
  * maybe send after offchannel or rf sleep, this may cause
  * dis-association by AP */
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        u32 queues, bool drop)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
index 94cd9df98381008e53f6ecb60cbcb7f56a6aef1b..b14cf5a10f4421127e8f6ce414eee52054129163 100644 (file)
@@ -2515,23 +2515,3 @@ void rtl88ee_suspend(struct ieee80211_hw *hw)
 void rtl88ee_resume(struct ieee80211_hw *hw)
 {
 }
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
-                               bool allow_all_da, bool write_into_reg)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
-       if (allow_all_da) /* Set BIT0 */
-               rtlpci->receive_config |= RCR_AAP;
-        else /* Clear BIT0 */
-               rtlpci->receive_config &= ~RCR_AAP;
-
-       if (write_into_reg)
-               rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
-       RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
-                "receive_config = 0x%08X, write_into_reg =%d\n",
-                rtlpci->receive_config, write_into_reg);
-}
index b4460a41bd0159cac18b1161e489803c98afe244..1850fde881b587c6bbb15a630a58dfaaa16ece07 100644 (file)
@@ -61,8 +61,6 @@ void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw);
 void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw);
 void rtl88ee_suspend(struct ieee80211_hw *hw);
 void rtl88ee_resume(struct ieee80211_hw *hw);
-void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
-                               bool allow_all_da, bool write_into_reg);
 void rtl88ee_fw_clk_off_timer_callback(unsigned long data);
 
 #endif
index 1b4101bf9974e8243124f70f40e295e841a3f0a1..842d69349a37ca0812731beb9d4b6effe2173e39 100644 (file)
@@ -93,7 +93,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
        u8 tid;
 
        rtl8188ee_bt_reg_init(hw);
-       rtlpci->msi_support = true;
+       rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
 
        rtlpriv->dm.dm_initialgain_enable = 1;
        rtlpriv->dm.dm_flag = 0;
@@ -255,7 +255,6 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
        .enable_hw_sec = rtl88ee_enable_hw_security_config,
        .set_key = rtl88ee_set_key,
        .init_sw_leds = rtl88ee_init_sw_leds,
-       .allow_all_destaddr = rtl88ee_allow_all_destaddr,
        .get_bbreg = rtl88e_phy_query_bb_reg,
        .set_bbreg = rtl88e_phy_set_bb_reg,
        .get_rfreg = rtl88e_phy_query_rf_reg,
@@ -267,6 +266,7 @@ static struct rtl_mod_params rtl88ee_mod_params = {
        .inactiveps = true,
        .swctrl_lps = false,
        .fwctrl_lps = true,
+       .msi_support = false,
        .debug = DBG_EMERG,
 };
 
@@ -383,10 +383,12 @@ module_param_named(debug, rtl88ee_mod_params.debug, int, 0444);
 module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl88ee_mod_params.msi_support, bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupt mode (default 0)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
index 06ef47cd62038cc9695078441d545be0ca1b9348..5b4c225396f244cea599bade9755951daa7d15ec 100644 (file)
@@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
        u8 *psaddr;
        __le16 fc;
        u16 type, ufc;
-       bool match_bssid, packet_toself, packet_beacon, addr;
+       bool match_bssid, packet_toself, packet_beacon = false, addr;
 
        tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
 
index 55adf043aef7e250759d9c993b224217ad974d37..cdecb0fd4d8edb531c34cb929b8a13f568f40eed 100644 (file)
@@ -2423,24 +2423,3 @@ void rtl92ce_suspend(struct ieee80211_hw *hw)
 void rtl92ce_resume(struct ieee80211_hw *hw)
 {
 }
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
-       bool allow_all_da, bool write_into_reg)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
-       if (allow_all_da) {/* Set BIT0 */
-               rtlpci->receive_config |= RCR_AAP;
-       } else {/* Clear BIT0 */
-               rtlpci->receive_config &= ~RCR_AAP;
-       }
-
-       if (write_into_reg)
-               rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
-       RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
-                "receive_config=0x%08X, write_into_reg=%d\n",
-                rtlpci->receive_config, write_into_reg);
-}
index 2d063b0c77609a608adc8d831bde0d946263a7a1..5533070f266c4c0706b4d83c2477d4c8421f17eb 100644 (file)
@@ -76,7 +76,5 @@ void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw);
 void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw);
 void rtl92ce_suspend(struct ieee80211_hw *hw);
 void rtl92ce_resume(struct ieee80211_hw *hw);
-void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
-                               bool allow_all_da, bool write_into_reg);
 
 #endif
index b790320d20305427c5ed4fa5fd9f37d944957e31..12f21f4073e887c497d4e7efda968d9217515936 100644 (file)
@@ -229,7 +229,6 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
        .enable_hw_sec = rtl92ce_enable_hw_security_config,
        .set_key = rtl92ce_set_key,
        .init_sw_leds = rtl92ce_init_sw_leds,
-       .allow_all_destaddr = rtl92ce_allow_all_destaddr,
        .get_bbreg = rtl92c_phy_query_bb_reg,
        .set_bbreg = rtl92c_phy_set_bb_reg,
        .set_rfreg = rtl92ce_phy_set_rf_reg,
index 68b5c7e92cfbc2c6a76580a2d1dbb3297730c64f..a903c2671b4d1701c0c71416748b5d4fa8ec6bf9 100644 (file)
@@ -511,7 +511,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
                        pr_info("MAC auto ON okay!\n");
                        break;
                }
-               if (pollingCount++ > 100) {
+               if (pollingCount++ > 1000) {
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
                                 "Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
                        return -ENODEV;
@@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
        err = _rtl92cu_init_mac(hw);
        if (err) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
-               return err;
+               goto exit;
        }
        err = rtl92c_download_fw(hw);
        if (err) {
index 9098558d916dee6ae6daf3567eb91291ad234b72..1c7101bcd79034c486be9713d99c19c335478cb6 100644 (file)
@@ -2544,23 +2544,3 @@ void rtl92se_resume(struct ieee80211_hw *hw)
                pci_write_config_dword(rtlpci->pdev, 0x40,
                        val & 0xffff00ff);
 }
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
-                               bool allow_all_da, bool write_into_reg)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
-       if (allow_all_da) /* Set BIT0 */
-               rtlpci->receive_config |= RCR_AAP;
-       else /* Clear BIT0 */
-               rtlpci->receive_config &= ~RCR_AAP;
-
-       if (write_into_reg)
-               rtl_write_dword(rtlpriv, RCR, rtlpci->receive_config);
-
-       RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
-                "receive_config=0x%08X, write_into_reg=%d\n",
-                rtlpci->receive_config, write_into_reg);
-}
index da48aa8cbe6f8cb8e97ca2943e643217bfaea170..4cacee10f31eb81dd3a34bf8c2cbfd4cab22d60b 100644 (file)
@@ -74,7 +74,5 @@ void rtl92se_set_key(struct ieee80211_hw *hw,
                     u8 enc_algo, bool is_wepkey, bool clear_all);
 void rtl92se_suspend(struct ieee80211_hw *hw);
 void rtl92se_resume(struct ieee80211_hw *hw);
-void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
-                               bool allow_all_da, bool write_into_reg);
 
 #endif
index 2e8e6f8d2d513e18a77a3710542656c2e1953ec5..1bff2a0f760087d0a510e247f01d1c6a33eee891 100644 (file)
@@ -290,7 +290,6 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
        .enable_hw_sec = rtl92se_enable_hw_security_config,
        .set_key = rtl92se_set_key,
        .init_sw_leds = rtl92se_init_sw_leds,
-       .allow_all_destaddr = rtl92se_allow_all_destaddr,
        .get_bbreg = rtl92s_phy_query_bb_reg,
        .set_bbreg = rtl92s_phy_set_bb_reg,
        .get_rfreg = rtl92s_phy_query_rf_reg,
index 36b48be8329c08dad5474f43600f2b11d8fcf279..2b3c78baa9f8b3742020aa377b0449785eeb8ab7 100644 (file)
@@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb,       u8 skb_queue)
        if (ieee80211_is_nullfunc(fc))
                return QSLT_HIGH;
 
+       /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use
+        * queue VO at priority 7; however, the RTL8192SE appears to have
+        * that queue at priority 6.
+        */
+       if (skb->priority == 7)
+               return QSLT_VO;
        return skb->priority;
 }
 
index 65c9e80e1f78ad23988bb962732396153bf1f137..87f69166a7eda86b2517feb6237da073c0ebe9da 100644 (file)
@@ -2383,24 +2383,3 @@ void rtl8723ae_suspend(struct ieee80211_hw *hw)
 void rtl8723ae_resume(struct ieee80211_hw *hw)
 {
 }
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
-       bool allow_all_da, bool write_into_reg)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
-       if (allow_all_da) /* Set BIT0 */
-               rtlpci->receive_config |= RCR_AAP;
-       else /* Clear BIT0 */
-               rtlpci->receive_config &= ~RCR_AAP;
-
-       if (write_into_reg)
-               rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
-
-       RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
-                "receive_config=0x%08X, write_into_reg=%d\n",
-                rtlpci->receive_config, write_into_reg);
-}
index 6fa24f79b1d73551d9b40f4b355d5d1590636b68..d3bc39fb27a559dd0b527ebd5a377bb29a394dfe 100644 (file)
@@ -67,7 +67,5 @@ void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw);
 void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw);
 void rtl8723ae_suspend(struct ieee80211_hw *hw);
 void rtl8723ae_resume(struct ieee80211_hw *hw);
-void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
-                                 bool allow_all_da, bool write_into_reg);
 
 #endif
index 1087a3bd07fa2dbd51f9983540ac44351374c58d..73cba1eec8cf9ce331afde11c69e6fdc7f75e59d 100644 (file)
@@ -238,7 +238,6 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
        .enable_hw_sec = rtl8723ae_enable_hw_security_config,
        .set_key = rtl8723ae_set_key,
        .init_sw_leds = rtl8723ae_init_sw_leds,
-       .allow_all_destaddr = rtl8723ae_allow_all_destaddr,
        .get_bbreg = rtl8723_phy_query_bb_reg,
        .set_bbreg = rtl8723_phy_set_bb_reg,
        .get_rfreg = rtl8723ae_phy_query_rf_reg,
index 0fdf0909321f234c827107a13fd0a839c7c86be8..3d555495b45319b8d287d9edd5e1bc1c6162e625 100644 (file)
@@ -2501,23 +2501,3 @@ void rtl8723be_suspend(struct ieee80211_hw *hw)
 void rtl8723be_resume(struct ieee80211_hw *hw)
 {
 }
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
-                                 bool write_into_reg)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
-       if (allow_all_da) /* Set BIT0 */
-               rtlpci->receive_config |= RCR_AAP;
-       else /* Clear BIT0 */
-               rtlpci->receive_config &= ~RCR_AAP;
-
-       if (write_into_reg)
-               rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
-       RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
-                "receive_config = 0x%08X, write_into_reg =%d\n",
-                rtlpci->receive_config, write_into_reg);
-}
index b7449a9b57e47652489b18df83ab5a4502e40d0e..64c7551af6b797969e98086e600d2ece0240c9f4 100644 (file)
@@ -59,6 +59,4 @@ void rtl8723be_bt_reg_init(struct ieee80211_hw *hw);
 void rtl8723be_bt_hw_init(struct ieee80211_hw *hw);
 void rtl8723be_suspend(struct ieee80211_hw *hw);
 void rtl8723be_resume(struct ieee80211_hw *hw);
-void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
-                                 bool write_into_reg);
 #endif
index b4577ebc4bb0bb5c16d068039cddc7f3fe41b744..ff12bf41644bbfd3909bb6506f825f1d0d7e81fa 100644 (file)
@@ -92,7 +92,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
        rtl8723be_bt_reg_init(hw);
-       rtlpci->msi_support = true;
+       rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
        rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
 
        rtlpriv->dm.dm_initialgain_enable = 1;
@@ -253,6 +253,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
        .inactiveps = true,
        .swctrl_lps = false,
        .fwctrl_lps = true,
+       .msi_support = false,
        .debug = DBG_EMERG,
 };
 
@@ -365,9 +366,11 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
 MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
 MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n");
+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupt mode (default 0)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
index 6965afdf572a9d57c06fabd8b070fbc8cc58b9ce..407a7936d3642f39fb4fb0364ab0568e20f75153 100644 (file)
@@ -1960,8 +1960,6 @@ struct rtl_hal_ops {
                          u32 regaddr, u32 bitmask);
        void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
                           u32 regaddr, u32 bitmask, u32 data);
-       void (*allow_all_destaddr)(struct ieee80211_hw *hw,
-               bool allow_all_da, bool write_into_reg);
        void (*linked_set_reg) (struct ieee80211_hw *hw);
        void (*chk_switch_dmdp) (struct ieee80211_hw *hw);
        void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
@@ -2030,6 +2028,10 @@ struct rtl_mod_params {
 
        /* default: 1 = using linked fw power save */
        bool fwctrl_lps;
+
+       /* default: 0 = not using MSI interrupt mode */
+       /* submodules should set their own default value */
+       bool msi_support;
 };
 
 struct rtl_hal_usbint_cfg {
index 5a4ec56c83d0aa15e7e00226a6cfe69c9146696b..5695628757ee17db6c4da9ba40d81000f9fa57b5 100644 (file)
@@ -2,7 +2,6 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/crc7.h>
 
 #include "wl1251.h"
 #include "reg.h"
index bf1fa18b9786253159d634ec1f846f70d8551bc8..ede31f048ef98d28b96756f912edbdd2b4df8eb5 100644 (file)
@@ -2,7 +2,6 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/crc7.h>
 #include <linux/etherdevice.h>
 
 #include "wl1251.h"
index db0105313745f08a02c9d408564c5d8029aa05ca..c98630394a1a299b77b907ce53851b62b7d84030 100644 (file)
@@ -124,11 +124,12 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
                        return ret;
        }
 
-       if (wl->vif && vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
+       if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
                wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
 
                /* indicate to the stack, that beacons have been lost */
-               ieee80211_beacon_loss(wl->vif);
+               if (wl->vif && wl->vif->type == NL80211_IFTYPE_STATION)
+                       ieee80211_beacon_loss(wl->vif);
        }
 
        if (vector & REGAINED_BSS_EVENT_ID) {
index 757e25784a8a22f503b3483c0b55cedca5a05d3f..4e782f18ae3431600a66923216faa536d42c46b6 100644 (file)
@@ -550,6 +550,34 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
        mutex_unlock(&wl->mutex);
 }
 
+static int wl1251_build_null_data(struct wl1251 *wl)
+{
+       struct sk_buff *skb = NULL;
+       int size;
+       void *ptr;
+       int ret = -ENOMEM;
+
+       if (wl->bss_type == BSS_TYPE_IBSS) {
+               size = sizeof(struct wl12xx_null_data_template);
+               ptr = NULL;
+       } else {
+               skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+               if (!skb)
+                       goto out;
+               size = skb->len;
+               ptr = skb->data;
+       }
+
+       ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, ptr, size);
+
+out:
+       dev_kfree_skb(skb);
+       if (ret)
+               wl1251_warning("cmd build null data failed: %d", ret);
+
+       return ret;
+}
+
 static int wl1251_build_qos_null_data(struct wl1251 *wl)
 {
        struct ieee80211_qos_hdr template;
@@ -687,16 +715,6 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
                wl->power_level = conf->power_level;
        }
 
-       /*
-        * Tell stack that connection is lost because hw encryption isn't
-        * supported in monitor mode.
-        * This requires temporary enabling of the hw connection monitor flag
-        */
-       if ((changed & IEEE80211_CONF_CHANGE_MONITOR) && wl->vif) {
-               wl->hw->flags |= IEEE80211_HW_CONNECTION_MONITOR;
-               ieee80211_connection_loss(wl->vif);
-       }
-
 out_sleep:
        wl1251_ps_elp_sleep(wl);
 
@@ -1103,24 +1121,19 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
                wl->rssi_thold = bss_conf->cqm_rssi_thold;
        }
 
-       if (changed & BSS_CHANGED_BSSID) {
+       if ((changed & BSS_CHANGED_BSSID) &&
+           memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
                memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
 
-               skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
-               if (!skb)
-                       goto out_sleep;
-
-               ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
-                                             skb->data, skb->len);
-               dev_kfree_skb(skb);
-               if (ret < 0)
-                       goto out_sleep;
+               if (!is_zero_ether_addr(wl->bssid)) {
+                       ret = wl1251_build_null_data(wl);
+                       if (ret < 0)
+                               goto out_sleep;
 
-               ret = wl1251_build_qos_null_data(wl);
-               if (ret < 0)
-                       goto out;
+                       ret = wl1251_build_qos_null_data(wl);
+                       if (ret < 0)
+                               goto out_sleep;
 
-               if (wl->bss_type != BSS_TYPE_IBSS) {
                        ret = wl1251_join(wl, wl->bss_type, wl->channel,
                                          wl->beacon_int, wl->dtim_period);
                        if (ret < 0)
@@ -1129,9 +1142,6 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
        }
 
        if (changed & BSS_CHANGED_ASSOC) {
-               /* Disable temporary enabled hw connection monitor flag */
-               wl->hw->flags &= ~IEEE80211_HW_CONNECTION_MONITOR;
-
                if (bss_conf->assoc) {
                        wl->beacon_int = bss_conf->beacon_int;
 
@@ -1216,8 +1226,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
                if (ret < 0)
                        goto out_sleep;
 
-               ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
-                                 wl->channel, wl->dtim_period);
+               ret = wl1251_join(wl, wl->bss_type, wl->channel,
+                                 wl->beacon_int, wl->dtim_period);
 
                if (ret < 0)
                        goto out_sleep;
index b06d36d99362703c1b7aa35e6aadd5cab29e0d8f..a0aa8fa72392830f65f8c8b52303be5ad44019b9 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/swab.h>
 #include <linux/crc7.h>
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
@@ -83,47 +84,44 @@ static void wl1251_spi_reset(struct wl1251 *wl)
 
 static void wl1251_spi_wake(struct wl1251 *wl)
 {
-       u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
        struct spi_transfer t;
        struct spi_message m;
+       u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
 
-       cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
        if (!cmd) {
                wl1251_error("could not allocate cmd for spi init");
                return;
        }
 
-       memset(crc, 0, sizeof(crc));
        memset(&t, 0, sizeof(t));
        spi_message_init(&m);
 
        /* Set WSPI_INIT_COMMAND
         * the data is being send from the MSB to LSB
         */
-       cmd[2] = 0xff;
-       cmd[3] = 0xff;
-       cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
-       cmd[0] = 0;
-       cmd[7] = 0;
-       cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
-       cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+       cmd[0] = 0xff;
+       cmd[1] = 0xff;
+       cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
+       cmd[3] = 0;
+       cmd[4] = 0;
+       cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
+       cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+
+       cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
+               | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
 
        if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
-               cmd[5] |=  WSPI_INIT_CMD_DIS_FIXEDBUSY;
+               cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
        else
-               cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
-
-       cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
-               | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
-
-       crc[0] = cmd[1];
-       crc[1] = cmd[0];
-       crc[2] = cmd[7];
-       crc[3] = cmd[6];
-       crc[4] = cmd[5];
+               cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
 
-       cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
-       cmd[4] |= WSPI_INIT_CMD_END;
+       cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
+       /*
+        * The above is the logical order; it must actually be stored
+        * in the buffer byte-swapped.
+        */
+       __swab32s((u32 *)cmd);
+       __swab32s((u32 *)cmd+1);
 
        t.tx_buf = cmd;
        t.len = WSPI_INIT_CMD_LEN;
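The two __swab32s() calls reproduce, in one step, the byte ordering the old code built by hand one index at a time. A minimal standalone sketch of the effect, with placeholder values rather than the real WSPI command bytes:

#include <stdint.h>
#include <stdio.h>

/* Swap a 32-bit word in place, as the kernel's __swab32s() does. */
static void swab32_inplace(uint8_t *p)
{
        uint8_t t;

        t = p[0]; p[0] = p[3]; p[3] = t;
        t = p[1]; p[1] = p[2]; p[2] = t;
}

int main(void)
{
        /* logical order cmd[0..7]; placeholder values, not the WSPI constants */
        uint8_t cmd[8] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
        int i;

        swab32_inplace(cmd);            /* first 32-bit word  */
        swab32_inplace(cmd + 4);        /* second 32-bit word */

        for (i = 0; i < 8; i++)
                printf("%02x ", cmd[i]);        /* 03 02 01 00 07 06 05 04 */
        printf("\n");
        return 0;
}

Logical byte n therefore lands at offset (n & ~3) + (3 - (n & 3)); for example the CRC byte written at logical index 7 ends up at offset 4, which is where the old code stored it.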
index f7381dd69009a150e1901a876494d225e0267f5e..0f2cfb0d2a9ec38fe013872e6d4339c2db1345e3 100644 (file)
@@ -57,7 +57,7 @@ static const struct file_operations name## _ops = {                   \
                                            wl, &name## _ops);          \
                if (!entry || IS_ERR(entry))                            \
                        goto err;                                       \
-       } while (0);
+       } while (0)
 
 
 #define DEBUGFS_ADD_PREFIX(prefix, name, parent)                       \
@@ -66,7 +66,7 @@ static const struct file_operations name## _ops = {                   \
                                    wl, &prefix## _## name## _ops);     \
                if (!entry || IS_ERR(entry))                            \
                        goto err;                                       \
-       } while (0);
+       } while (0)
 
 #define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type)              \
 static ssize_t sub## _ ##name## _read(struct file *file,               \
index ed88d39134839e34510d83252949dbbb2a964e83..02c91d6db753bdadaa9d5ed3953dff82a7a85065 100644 (file)
@@ -1416,7 +1416,7 @@ void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
 
 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
                                 u16 offset, u8 flags,
-                                u8 *pattern, u8 len)
+                                const u8 *pattern, u8 len)
 {
        struct wl12xx_rx_filter_field *field;
 
@@ -5184,7 +5184,8 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
-static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                           u32 queues, bool drop)
 {
        struct wl1271 *wl = hw->priv;
 
index 29ef2492951fcdaa372611449fbc4c3cde4322f1..d3dd7bfdf3f1f33efbfbd9e0be762f664c892926 100644 (file)
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
 static int wl1271_probe(struct sdio_func *func,
                                  const struct sdio_device_id *id)
 {
-       struct wlcore_platdev_data *pdev_data;
+       struct wlcore_platdev_data pdev_data;
        struct wl12xx_sdio_glue *glue;
        struct resource res[1];
        mmc_pm_flag_t mmcflags;
@@ -228,16 +228,13 @@ static int wl1271_probe(struct sdio_func *func,
        if (func->num != 0x02)
                return -ENODEV;
 
-       pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
-       if (!pdev_data)
-               goto out;
-
-       pdev_data->if_ops = &sdio_ops;
+       memset(&pdev_data, 0x00, sizeof(pdev_data));
+       pdev_data.if_ops = &sdio_ops;
 
        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
        if (!glue) {
                dev_err(&func->dev, "can't allocate glue\n");
-               goto out_free_pdev_data;
+               goto out;
        }
 
        glue->dev = &func->dev;
@@ -248,9 +245,9 @@ static int wl1271_probe(struct sdio_func *func,
        /* Use block mode for transferring over one block size of data */
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-       pdev_data->pdata = wl12xx_get_platform_data();
-       if (IS_ERR(pdev_data->pdata)) {
-               ret = PTR_ERR(pdev_data->pdata);
+       pdev_data.pdata = wl12xx_get_platform_data();
+       if (IS_ERR(pdev_data.pdata)) {
+               ret = PTR_ERR(pdev_data.pdata);
                dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
                goto out_free_glue;
        }
@@ -260,7 +257,7 @@ static int wl1271_probe(struct sdio_func *func,
        dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
 
        if (mmcflags & MMC_PM_KEEP_POWER)
-               pdev_data->pdata->pwr_in_suspend = true;
+               pdev_data.pdata->pwr_in_suspend = true;
 
        sdio_set_drvdata(func, glue);
 
@@ -289,7 +286,7 @@ static int wl1271_probe(struct sdio_func *func,
 
        memset(res, 0x00, sizeof(res));
 
-       res[0].start = pdev_data->pdata->irq;
+       res[0].start = pdev_data.pdata->irq;
        res[0].flags = IORESOURCE_IRQ;
        res[0].name = "irq";
 
@@ -299,8 +296,8 @@ static int wl1271_probe(struct sdio_func *func,
                goto out_dev_put;
        }
 
-       ret = platform_device_add_data(glue->core, pdev_data,
-                                      sizeof(*pdev_data));
+       ret = platform_device_add_data(glue->core, &pdev_data,
+                                      sizeof(pdev_data));
        if (ret) {
                dev_err(glue->dev, "can't add platform data\n");
                goto out_dev_put;
@@ -319,9 +316,6 @@ out_dev_put:
 out_free_glue:
        kfree(glue);
 
-out_free_pdev_data:
-       kfree(pdev_data);
-
 out:
        return ret;
 }
index dbe826dd7c23c49a38a08988cb24c50764d3efaa..392c882b28f03d9da2be51c6487db2d423b58976 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
 #include <linux/crc7.h>
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
 #include <linux/platform_device.h>
-#include <linux/slab.h>
 
 #include "wlcore.h"
 #include "wl12xx_80211.h"
@@ -110,18 +111,16 @@ static void wl12xx_spi_reset(struct device *child)
 static void wl12xx_spi_init(struct device *child)
 {
        struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
-       u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
        struct spi_transfer t;
        struct spi_message m;
+       u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
 
-       cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
        if (!cmd) {
                dev_err(child->parent,
                        "could not allocate cmd for spi init\n");
                return;
        }
 
-       memset(crc, 0, sizeof(crc));
        memset(&t, 0, sizeof(t));
        spi_message_init(&m);
 
@@ -129,30 +128,29 @@ static void wl12xx_spi_init(struct device *child)
         * Set WSPI_INIT_COMMAND
          * the data is being sent from the MSB to LSB
         */
-       cmd[2] = 0xff;
-       cmd[3] = 0xff;
-       cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
-       cmd[0] = 0;
-       cmd[7] = 0;
-       cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
-       cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+       cmd[0] = 0xff;
+       cmd[1] = 0xff;
+       cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
+       cmd[3] = 0;
+       cmd[4] = 0;
+       cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
+       cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+
+       cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
+               | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
 
        if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
-               cmd[5] |=  WSPI_INIT_CMD_DIS_FIXEDBUSY;
+               cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
        else
-               cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
-
-       cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
-               | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
-
-       crc[0] = cmd[1];
-       crc[1] = cmd[0];
-       crc[2] = cmd[7];
-       crc[3] = cmd[6];
-       crc[4] = cmd[5];
+               cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
 
-       cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
-       cmd[4] |= WSPI_INIT_CMD_END;
+       cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
+       /*
+        * The above is the logical order; it must actually be stored
+        * in the buffer byte-swapped.
+        */
+       __swab32s((u32 *)cmd);
+       __swab32s((u32 *)cmd+1);
 
        t.tx_buf = cmd;
        t.len = WSPI_INIT_CMD_LEN;
@@ -327,27 +325,25 @@ static struct wl1271_if_operations spi_ops = {
 static int wl1271_probe(struct spi_device *spi)
 {
        struct wl12xx_spi_glue *glue;
-       struct wlcore_platdev_data *pdev_data;
+       struct wlcore_platdev_data pdev_data;
        struct resource res[1];
        int ret = -ENOMEM;
 
-       pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
-       if (!pdev_data)
-               goto out;
+       memset(&pdev_data, 0x00, sizeof(pdev_data));
 
-       pdev_data->pdata = dev_get_platdata(&spi->dev);
-       if (!pdev_data->pdata) {
+       pdev_data.pdata = dev_get_platdata(&spi->dev);
+       if (!pdev_data.pdata) {
                dev_err(&spi->dev, "no platform data\n");
                ret = -ENODEV;
-               goto out_free_pdev_data;
+               goto out;
        }
 
-       pdev_data->if_ops = &spi_ops;
+       pdev_data.if_ops = &spi_ops;
 
        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
        if (!glue) {
                dev_err(&spi->dev, "can't allocate glue\n");
-               goto out_free_pdev_data;
+               goto out;
        }
 
        glue->dev = &spi->dev;
@@ -385,8 +381,8 @@ static int wl1271_probe(struct spi_device *spi)
                goto out_dev_put;
        }
 
-       ret = platform_device_add_data(glue->core, pdev_data,
-                                      sizeof(*pdev_data));
+       ret = platform_device_add_data(glue->core, &pdev_data,
+                                      sizeof(pdev_data));
        if (ret) {
                dev_err(glue->dev, "can't add platform data\n");
                goto out_dev_put;
@@ -406,9 +402,6 @@ out_dev_put:
 out_free_glue:
        kfree(glue);
 
-out_free_pdev_data:
-       kfree(pdev_data);
-
 out:
        return ret;
 }
index 756e890bc5ee2be0a5f68ef3e37de397ccc8dd52..c2c34a84ff3d4bf7231f84933564b0e78dcc415b 100644 (file)
@@ -512,8 +512,8 @@ int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_queue_recovery_work(struct wl1271 *wl);
 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
-                                       u16 offset, u8 flags,
-                                       u8 *pattern, u8 len);
+                                u16 offset, u8 flags,
+                                const u8 *pattern, u8 len);
 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter);
 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void);
 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter);
index 630a3fcf65bc8113fd6b67ce587f26fbf46c9774..0d4a285cbd7edb45408360a83952ab288debf62d 100644 (file)
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
                              grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
index ef05c5c49d413d5bb23a3e4adbff88cb0c9ad7cd..53cdcdf3dfa13a1c17fa663bd3da2f706c6e8967 100644 (file)
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
        work_done = xenvif_tx_action(vif, budget);
 
        if (work_done < budget) {
-               int more_to_do = 0;
-               unsigned long flags;
-
-               /* It is necessary to disable IRQ before calling
-                * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-                * lose event from the frontend.
-                *
-                * Consider:
-                *   RING_HAS_UNCONSUMED_REQUESTS
-                *   <frontend generates event to trigger napi_schedule>
-                *   __napi_complete
-                *
-                * This handler is still in scheduled state so the
-                * event has no effect at all. After __napi_complete
-                * this handler is descheduled and cannot get
-                * scheduled again. We lose event in this case and the ring
-                * will be completely stalled.
-                */
-
-               local_irq_save(flags);
-
-               RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-               if (!more_to_do)
-                       __napi_complete(napi);
-
-               local_irq_restore(flags);
+               napi_complete(napi);
+               xenvif_napi_schedule_or_enable_events(vif);
        }
 
        return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
@@ -386,7 +362,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
-       SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+       dev->ethtool_ops = &xenvif_ethtool_ops;
 
        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
index 76665405c5aac32d57eeadf355007cbb2b50cbec..7367208ee8cdd8b324ce661b48aa69c1d884855b 100644 (file)
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 
 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
 {
        u16 pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
@@ -322,6 +322,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        }
 }
 
+/*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page was found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+                                               const int i,
+                                               const struct ubuf_info *ubuf)
+{
+       struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+       do {
+               u16 pending_idx = ubuf->desc;
+
+               if (skb_shinfo(skb)->frags[i].page.p ==
+                   foreign_vif->mmap_pages[pending_idx])
+                       break;
+               ubuf = (struct ubuf_info *) ubuf->ctx;
+       } while (ubuf);
+
+       return ubuf;
+}
+
 /*
  * Prepare an SKB to be transmitted to the frontend.
  *
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        int head = 1;
        int old_meta_prod;
        int gso_type;
-       struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-       grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
-       struct xenvif *foreign_vif = NULL;
+       const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+       const struct ubuf_info *const head_ubuf = ubuf;
 
        old_meta_prod = npo->meta_prod;
 
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        npo->copy_off = 0;
        npo->copy_gref = req->gref;
 
-       if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-                (ubuf->callback == &xenvif_zerocopy_callback)) {
-               int i = 0;
-               foreign_vif = ubuf_to_vif(ubuf);
-
-               do {
-                       u16 pending_idx = ubuf->desc;
-                       foreign_grefs[i++] =
-                               foreign_vif->pending_tx_info[pending_idx].req.gref;
-                       ubuf = (struct ubuf_info *) ubuf->ctx;
-               } while (ubuf);
-       }
-
        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
                unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        }
 
        for (i = 0; i < nr_frags; i++) {
+               /* This variable also signals whether foreign_gref has a real
+                * value or not.
+                */
+               struct xenvif *foreign_vif = NULL;
+               grant_ref_t foreign_gref;
+
+               if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+                       (ubuf->callback == &xenvif_zerocopy_callback)) {
+                       const struct ubuf_info *const startpoint = ubuf;
+
+                       /* Ideally ubuf points to the chain element which
+                        * belongs to this frag, or, if frags were removed from
+                        * the beginning, to an element shortly before it.
+                        */
+                       ubuf = xenvif_find_gref(skb, i, ubuf);
+
+                       /* Try again from the beginning of the list, if we
+                        * haven't tried from there. This only makes sense in
+                        * the unlikely event of reordering the original frags.
+                        * For injected local pages it's an unnecessary second
+                        * run.
+                        */
+                       if (unlikely(!ubuf) && startpoint != head_ubuf)
+                               ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+                       if (likely(ubuf)) {
+                               u16 pending_idx = ubuf->desc;
+
+                               foreign_vif = ubuf_to_vif(ubuf);
+                               foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+                               /* Just a safety measure. If this was the last
+                                * element on the list, the for loop will
+                                * iterate again if a local page was added to
+                                * the end. Using head_ubuf here prevents the
+                                * second search on the chain. Or the original
+                                * frags changed order, but that's less likely.
+                                * In either case, ubuf shouldn't be NULL.
+                                */
+                               ubuf = ubuf->ctx ?
+                                       (struct ubuf_info *) ubuf->ctx :
+                                       head_ubuf;
+                       } else
+                               /* This frag was a local page, added to the
+                                * array after the skb left netback.
+                                */
+                               ubuf = head_ubuf;
+               }
                xenvif_gop_frag_copy(vif, skb, npo,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
                                     &head,
                                     foreign_vif,
-                                    foreign_grefs[i]);
+                                    foreign_vif ? foreign_gref : UINT_MAX);
        }
 
        return npo->meta_prod - old_meta_prod;
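The per-frag lookup in the loop above boils down to: search forward from the current position in the ubuf chain, and only if that fails retry once from the head. A generic standalone sketch of the same strategy, using plain list types rather than the xen-netback structures:

#include <stddef.h>

struct node {
        int key;
        struct node *next;
};

/* Walk forward from start until key is found or the chain ends. */
static struct node *find_from(struct node *start, int key)
{
        while (start && start->key != key)
                start = start->next;
        return start;
}

/* Search from cursor first, then retry once from head if needed. */
static struct node *find_with_fallback(struct node *head, struct node *cursor,
                                       int key)
{
        struct node *n = find_from(cursor, key);

        if (!n && cursor != head)
                n = find_from(head, key);
        return n;
}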
@@ -654,7 +716,7 @@ done:
                notify_remote_via_irq(vif->rx_irq);
 }
 
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
 {
        int more_to_do;
 
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
 {
        struct xenvif *vif = (struct xenvif *)data;
        tx_add_credit(vif);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_tx_err(struct xenvif *vif,
index 158b5e639fc7307d5a98580cfd65ca23b9d3db8a..895355de8ac467bdc9a04646becf0f5e85e136a1 100644 (file)
@@ -1332,7 +1332,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
          */
        netdev->features |= netdev->hw_features;
 
-       SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+       netdev->ethtool_ops = &xennet_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
 
        netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
index 65d4ca19d1328ec737c68684c30de01fc8347b8d..26c66a1265518c32f5262d3aa98afb668e2715ff 100644 (file)
@@ -71,5 +71,6 @@ config NFC_PORT100
 source "drivers/nfc/pn544/Kconfig"
 source "drivers/nfc/microread/Kconfig"
 source "drivers/nfc/nfcmrvl/Kconfig"
+source "drivers/nfc/st21nfca/Kconfig"
 
 endmenu
index ae42a3fa60c981b965bbb0cdb004b7c3c591d01d..23225b0287fdf2e7b6f809466e58b5cc5fc53d2c 100644 (file)
@@ -11,5 +11,6 @@ obj-$(CONFIG_NFC_SIM)         += nfcsim.o
 obj-$(CONFIG_NFC_PORT100)      += port100.o
 obj-$(CONFIG_NFC_MRVL)         += nfcmrvl/
 obj-$(CONFIG_NFC_TRF7970A)     += trf7970a.o
+obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca/
 
 ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
index f2acd85be86ea1d2016562c38aa4f9b6767ae754..440291ab7263202fc8017c3b9f09f8318f5f872a 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
 #include <linux/miscdevice.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -857,6 +859,92 @@ exit_state_wait_secure_write_answer:
        }
 }
 
+#ifdef CONFIG_OF
+
+static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
+{
+       struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
+       struct device_node *pp;
+       int ret;
+
+       pp = client->dev.of_node;
+       if (!pp) {
+               ret = -ENODEV;
+               goto err_dt;
+       }
+
+       /* Obtain the EN GPIO from the device tree */
+       ret = of_get_named_gpio(pp, "enable-gpios", 0);
+       if (ret < 0) {
+               if (ret != -EPROBE_DEFER)
+                       nfc_err(&client->dev,
+                               "Failed to get EN gpio, error: %d\n", ret);
+               goto err_dt;
+       }
+       phy->gpio_en = ret;
+
+       /* Configuration of EN GPIO */
+       ret = gpio_request(phy->gpio_en, "pn544_en");
+       if (ret) {
+               nfc_err(&client->dev, "Fail EN pin\n");
+               goto err_dt;
+       }
+       ret = gpio_direction_output(phy->gpio_en, 0);
+       if (ret) {
+               nfc_err(&client->dev, "Fail EN pin direction\n");
+               goto err_gpio_en;
+       }
+
+       /* Obtain the FW GPIO from the device tree */
+       ret = of_get_named_gpio(pp, "firmware-gpios", 0);
+       if (ret < 0) {
+               if (ret != -EPROBE_DEFER)
+                       nfc_err(&client->dev,
+                               "Failed to get FW gpio, error: %d\n", ret);
+               goto err_gpio_en;
+       }
+       phy->gpio_fw = ret;
+
+       /* Configuration of FW GPIO */
+       ret = gpio_request(phy->gpio_fw, "pn544_fw");
+       if (ret) {
+               nfc_err(&client->dev, "Fail FW pin\n");
+               goto err_gpio_en;
+       }
+       ret = gpio_direction_output(phy->gpio_fw, 0);
+       if (ret) {
+               nfc_err(&client->dev, "Fail FW pin direction\n");
+               goto err_gpio_fw;
+       }
+
+       /* IRQ */
+       ret = irq_of_parse_and_map(pp, 0);
+       if (ret < 0) {
+               nfc_err(&client->dev,
+                       "Unable to get irq, error: %d\n", ret);
+               goto err_gpio_fw;
+       }
+       client->irq = ret;
+
+       return 0;
+
+err_gpio_fw:
+       gpio_free(phy->gpio_fw);
+err_gpio_en:
+       gpio_free(phy->gpio_en);
+err_dt:
+       return ret;
+}
+
+#else
+
+static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
+{
+       return -ENODEV;
+}
+
+#endif
+
 static int pn544_hci_i2c_probe(struct i2c_client *client,
                               const struct i2c_device_id *id)
 {
@@ -887,25 +975,36 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
        i2c_set_clientdata(client, phy);
 
        pdata = client->dev.platform_data;
-       if (pdata == NULL) {
-               nfc_err(&client->dev, "No platform data\n");
-               return -EINVAL;
-       }
 
-       if (pdata->request_resources == NULL) {
-               nfc_err(&client->dev, "request_resources() missing\n");
-               return -EINVAL;
-       }
+       /* No platform data, using device tree. */
+       if (!pdata && client->dev.of_node) {
+               r = pn544_hci_i2c_of_request_resources(client);
+               if (r) {
+                       nfc_err(&client->dev, "No DT data\n");
+                       return r;
+               }
+       /* Using platform data. */
+       } else if (pdata) {
 
-       r = pdata->request_resources(client);
-       if (r) {
-               nfc_err(&client->dev, "Cannot get platform resources\n");
-               return r;
-       }
+               if (pdata->request_resources == NULL) {
+                       nfc_err(&client->dev, "request_resources() missing\n");
+                       return -EINVAL;
+               }
 
-       phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
-       phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
-       phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+               r = pdata->request_resources(client);
+               if (r) {
+                       nfc_err(&client->dev,
+                               "Cannot get platform resources\n");
+                       return r;
+               }
+
+               phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
+               phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
+               phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+       } else {
+               nfc_err(&client->dev, "No platform data\n");
+               return -EINVAL;
+       }
 
        pn544_hci_i2c_platform_init(phy);
 
@@ -930,8 +1029,12 @@ err_hci:
        free_irq(client->irq, phy);
 
 err_rti:
-       if (pdata->free_resources != NULL)
+       if (!pdata) {
+               gpio_free(phy->gpio_en);
+               gpio_free(phy->gpio_fw);
+       } else if (pdata->free_resources) {
                pdata->free_resources();
+       }
 
        return r;
 }
@@ -953,15 +1056,30 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
                pn544_hci_i2c_disable(phy);
 
        free_irq(client->irq, phy);
-       if (pdata->free_resources)
+
+       /* No platform data, GPIOs have been requested by this driver */
+       if (!pdata) {
+               gpio_free(phy->gpio_en);
+               gpio_free(phy->gpio_fw);
+       /* Using platform data */
+       } else if (pdata->free_resources) {
                pdata->free_resources();
+       }
 
        return 0;
 }
 
+static const struct of_device_id of_pn544_i2c_match[] = {
+       { .compatible = "nxp,pn544-i2c", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_pn544_i2c_match);
+
 static struct i2c_driver pn544_hci_i2c_driver = {
        .driver = {
                   .name = PN544_HCI_I2C_DRIVER_NAME,
+                  .owner  = THIS_MODULE,
+                  .of_match_table = of_match_ptr(of_pn544_i2c_match),
                  },
        .probe = pn544_hci_i2c_probe,
        .id_table = pn544_hci_i2c_id_table,
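The error unwinding in the OF resource helper above follows the usual reverse-order goto pattern: each label releases only what was successfully acquired before the failure. A standalone sketch of the shape, with hypothetical acquire()/release() helpers rather than the driver's gpio calls:

#include <stdio.h>

static int acquire(const char *what)
{
        printf("acquire %s\n", what);
        return 0;               /* pretend acquisition always succeeds */
}

static void release(const char *what)
{
        printf("release %s\n", what);
}

static int setup(void)
{
        int ret;

        ret = acquire("en-gpio");
        if (ret)
                goto err;
        ret = acquire("fw-gpio");
        if (ret)
                goto err_gpio_en;
        ret = acquire("irq");
        if (ret)
                goto err_gpio_fw;
        return 0;

err_gpio_fw:
        release("fw-gpio");
err_gpio_en:
        release("en-gpio");
err:
        return ret;
}

int main(void)
{
        return setup();
}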
diff --git a/drivers/nfc/st21nfca/Kconfig b/drivers/nfc/st21nfca/Kconfig
new file mode 100644 (file)
index 0000000..ee459f0
--- /dev/null
@@ -0,0 +1,23 @@
+config NFC_ST21NFCA
+       tristate "STMicroelectronics ST21NFCA NFC driver"
+       depends on NFC_HCI
+       select CRC_CCITT
+       default n
+       ---help---
+         STMicroelectronics ST21NFCA core driver. It implements the chipset
+         HCI logic and hooks into the NFC kernel APIs. Physical layers will
+         register against it.
+
+         To compile this driver as a module, choose m here. The module will
+         be called st21nfca.
+         Say N if unsure.
+
+config NFC_ST21NFCA_I2C
+       tristate "NFC ST21NFCA i2c support"
+       depends on NFC_ST21NFCA && I2C && NFC_SHDLC
+       ---help---
+         This module adds support for the STMicroelectronics st21nfca i2c interface.
+         Select this if your platform is using the i2c bus.
+
+         If you choose to build a module, it'll be called st21nfca_i2c.
+         Say N if unsure.
diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile
new file mode 100644 (file)
index 0000000..038ed09
--- /dev/null
@@ -0,0 +1,8 @@
+#
+# Makefile for ST21NFCA HCI based NFC driver
+#
+
+st21nfca_i2c-objs  = i2c.o
+
+obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca.o
+obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
new file mode 100644 (file)
index 0000000..3f954ed
--- /dev/null
@@ -0,0 +1,724 @@
+/*
+ * I2C Link Layer for ST21NFCA HCI based Driver
+ * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc-ccitt.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nfc.h>
+#include <linux/firmware.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/platform_data/st21nfca.h>
+
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+#include <net/nfc/nfc.h>
+
+#include "st21nfca.h"
+
+/*
+ * Every frame starts with ST21NFCA_SOF_EOF and ends with ST21NFCA_SOF_EOF.
+ * Because ST21NFCA_SOF_EOF is a possible data value, a mechanism called
+ * byte stuffing is used.
+ *
+ * if byte == ST21NFCA_SOF_EOF or ST21NFCA_ESCAPE_BYTE_STUFFING
+ * - insert ST21NFCA_ESCAPE_BYTE_STUFFING (escape byte)
+ * - xor byte with ST21NFCA_BYTE_STUFFING_MASK
+ */
+#define ST21NFCA_SOF_EOF               0x7e
+#define ST21NFCA_BYTE_STUFFING_MASK    0x20
+#define ST21NFCA_ESCAPE_BYTE_STUFFING  0x7d
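As a worked example of the rule above: 0x7e is transmitted as the pair 0x7d 0x5e and 0x7d as 0x7d 0x5d, since 0x7e ^ 0x20 = 0x5e and 0x7d ^ 0x20 = 0x5d. A minimal standalone encoder sketch, illustrative only and separate from the driver code below:

#include <stddef.h>
#include <stdint.h>

/* Byte-stuff src[0..len) into dst (at least 2 * len bytes long);
 * returns the stuffed length. */
static size_t byte_stuff(const uint8_t *src, size_t len, uint8_t *dst)
{
        size_t i, j = 0;

        for (i = 0; i < len; i++) {
                if (src[i] == 0x7e || src[i] == 0x7d) {
                        dst[j++] = 0x7d;                /* escape byte */
                        dst[j++] = src[i] ^ 0x20;       /* e.g. 0x7e -> 0x5e */
                } else {
                        dst[j++] = src[i];
                }
        }
        return j;
}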
+
+/* SOF + 00 */
+#define ST21NFCA_FRAME_HEADROOM                        2
+
+/* 2 bytes crc + EOF */
+#define ST21NFCA_FRAME_TAILROOM 3
+#define IS_START_OF_FRAME(buf) (buf[0] == ST21NFCA_SOF_EOF && \
+                               buf[1] == 0)
+
+#define ST21NFCA_HCI_I2C_DRIVER_NAME "st21nfca_hci_i2c"
+
+static struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
+       {ST21NFCA_HCI_DRIVER_NAME, 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table);
+
+struct st21nfca_i2c_phy {
+       struct i2c_client *i2c_dev;
+       struct nfc_hci_dev *hdev;
+
+       unsigned int gpio_ena;
+       unsigned int gpio_irq;
+       unsigned int irq_polarity;
+
+       struct sk_buff *pending_skb;
+       int current_read_len;
+       /*
+        * the crc check might fail because the i2c macrocell
+        * is disabled due to activity on another interface
+        */
+       int crc_trials;
+
+       int powered;
+       int run_mode;
+
+       /*
+        * < 0 if a hardware error occurred (e.g. i2c err)
+        * and prevents normal operation.
+        */
+       int hard_fault;
+       struct mutex phy_lock;
+};
+static u8 len_seq[] = { 13, 24, 15, 29 };
+static u16 wait_tab[] = { 2, 3, 5, 15, 20, 40 };
+
+#define I2C_DUMP_SKB(info, skb)                                        \
+do {                                                           \
+       pr_debug("%s:\n", info);                                \
+       print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \
+                      16, 1, (skb)->data, (skb)->len, 0);      \
+} while (0)
+
+/*
+ * In order to get the CLF in a known state we generate an internal reboot
+ * using a proprietary command.
+ * Once the reboot is completed, we expect to receive a buffer filled
+ * with ST21NFCA_SOF_EOF bytes.
+ */
+static int st21nfca_hci_platform_init(struct st21nfca_i2c_phy *phy)
+{
+       u16 wait_reboot[] = { 50, 300, 1000 };
+       char reboot_cmd[] = { 0x7E, 0x66, 0x48, 0xF6, 0x7E };
+       u8 tmp[ST21NFCA_HCI_LLC_MAX_SIZE];
+       int i, r = -1;
+
+       for (i = 0; i < ARRAY_SIZE(wait_reboot) && r < 0; i++) {
+               r = i2c_master_send(phy->i2c_dev, reboot_cmd,
+                                   sizeof(reboot_cmd));
+               if (r < 0)
+                       msleep(wait_reboot[i]);
+       }
+       if (r < 0)
+               return r;
+
+       /* The CLF takes about 20 ms to complete its internal reboot */
+       msleep(20);
+       r = -1;
+       for (i = 0; i < ARRAY_SIZE(wait_reboot) && r < 0; i++) {
+               r = i2c_master_recv(phy->i2c_dev, tmp,
+                                   ST21NFCA_HCI_LLC_MAX_SIZE);
+               if (r < 0)
+                       msleep(wait_reboot[i]);
+       }
+       if (r < 0)
+               return r;
+
+       for (i = 0; i < ST21NFCA_HCI_LLC_MAX_SIZE &&
+               tmp[i] == ST21NFCA_SOF_EOF; i++)
+               ;
+
+       if (r != ST21NFCA_HCI_LLC_MAX_SIZE)
+               return -ENODEV;
+
+       usleep_range(1000, 1500);
+       return 0;
+}
+
+static int st21nfca_hci_i2c_enable(void *phy_id)
+{
+       struct st21nfca_i2c_phy *phy = phy_id;
+
+       gpio_set_value(phy->gpio_ena, 1);
+       phy->powered = 1;
+       phy->run_mode = ST21NFCA_HCI_MODE;
+
+       usleep_range(10000, 15000);
+
+       return 0;
+}
+
+static void st21nfca_hci_i2c_disable(void *phy_id)
+{
+       struct st21nfca_i2c_phy *phy = phy_id;
+
+       pr_info("\n");
+       gpio_set_value(phy->gpio_ena, 0);
+
+       phy->powered = 0;
+}
+
+static void st21nfca_hci_add_len_crc(struct sk_buff *skb)
+{
+       u16 crc;
+       u8 tmp;
+
+       *skb_push(skb, 1) = 0;
+
+       crc = crc_ccitt(0xffff, skb->data, skb->len);
+       crc = ~crc;
+
+       tmp = crc & 0x00ff;
+       *skb_put(skb, 1) = tmp;
+
+       tmp = (crc >> 8) & 0x00ff;
+       *skb_put(skb, 1) = tmp;
+}
+
+static void st21nfca_hci_remove_len_crc(struct sk_buff *skb)
+{
+       skb_pull(skb, ST21NFCA_FRAME_HEADROOM);
+       skb_trim(skb, skb->len - ST21NFCA_FRAME_TAILROOM);
+}
+
+/*
+ * The write callback must not return the number of bytes written; it must
+ * return either zero for success or a negative error code on failure.
+ * In addition, it must not alter the skb.
+ */
+static int st21nfca_hci_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+       int r = -1, i, j;
+       struct st21nfca_i2c_phy *phy = phy_id;
+       struct i2c_client *client = phy->i2c_dev;
+       u8 tmp[ST21NFCA_HCI_LLC_MAX_SIZE * 2];
+
+       I2C_DUMP_SKB("st21nfca_hci_i2c_write", skb);
+
+       if (phy->hard_fault != 0)
+               return phy->hard_fault;
+
+       /*
+        * Compute the CRC before applying byte stuffing to the frame.
+        * Note that the bytes added by st21nfca_hci_add_len_crc() are
+        * themselves subject to byte stuffing below.
+        */
+       st21nfca_hci_add_len_crc(skb);
+
+       /* add ST21NFCA_SOF_EOF on tail */
+       *skb_put(skb, 1) = ST21NFCA_SOF_EOF;
+       /* add ST21NFCA_SOF_EOF on head */
+       *skb_push(skb, 1) = ST21NFCA_SOF_EOF;
+
+       /*
+        * Compute byte stuffing
+        * if byte == ST21NFCA_SOF_EOF or ST21NFCA_ESCAPE_BYTE_STUFFING
+        * insert ST21NFCA_ESCAPE_BYTE_STUFFING (escape byte)
+        * xor byte with ST21NFCA_BYTE_STUFFING_MASK
+        */
+       tmp[0] = skb->data[0];
+       for (i = 1, j = 1; i < skb->len - 1; i++, j++) {
+               if (skb->data[i] == ST21NFCA_SOF_EOF
+                   || skb->data[i] == ST21NFCA_ESCAPE_BYTE_STUFFING) {
+                       tmp[j] = ST21NFCA_ESCAPE_BYTE_STUFFING;
+                       j++;
+                       tmp[j] = skb->data[i] ^ ST21NFCA_BYTE_STUFFING_MASK;
+               } else {
+                       tmp[j] = skb->data[i];
+               }
+       }
+       tmp[j] = skb->data[i];
+       j++;
+
+       /*
+        * Manage sleep mode
+        * Try 3 times to send data with delay between each
+        */
+       mutex_lock(&phy->phy_lock);
+       for (i = 0; i < ARRAY_SIZE(wait_tab) && r < 0; i++) {
+               r = i2c_master_send(client, tmp, j);
+               if (r < 0)
+                       msleep(wait_tab[i]);
+       }
+       mutex_unlock(&phy->phy_lock);
+
+       if (r >= 0) {
+               if (r != j)
+                       r = -EREMOTEIO;
+               else
+                       r = 0;
+       }
+
+       st21nfca_hci_remove_len_crc(skb);
+
+       return r;
+}
+
+static int get_frame_size(u8 *buf, int buflen)
+{
+       int len = 0;
+       if (buf[len + 1] == ST21NFCA_SOF_EOF)
+               return 0;
+
+       for (len = 1; len < buflen && buf[len] != ST21NFCA_SOF_EOF; len++)
+               ;
+
+       return len;
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+       u16 crc;
+
+       crc = crc_ccitt(0xffff, buf, buflen - 2);
+       crc = ~crc;
+
+       if (buf[buflen - 2] != (crc & 0xff) || buf[buflen - 1] != (crc >> 8)) {
+               pr_err(ST21NFCA_HCI_DRIVER_NAME
+                      ": CRC error 0x%x != 0x%x 0x%x\n", crc, buf[buflen - 1],
+                      buf[buflen - 2]);
+
+               pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+               print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+                              16, 2, buf, buflen, false);
+               return -EPERM;
+       }
+       return 0;
+}
+
+/*
+ * Prepare received data for upper layer.
+ * The received data includes byte stuffing, crc and sof/eof,
+ * which are not usable by the hci layer.
+ * returns:
+ * frame size without sof/eof, header and byte stuffing
+ * -EBADMSG : frame was incorrect and discarded
+ */
+static int st21nfca_hci_i2c_repack(struct sk_buff *skb)
+{
+       int i, j, r, size;
+       if (skb->len < 1 || (skb->len > 1 && skb->data[1] != 0))
+               return -EBADMSG;
+
+       size = get_frame_size(skb->data, skb->len);
+       if (size > 0) {
+               skb_trim(skb, size);
+               /* remove ST21NFCA byte stuffing for upper layer */
+               for (i = 1, j = 0; i < skb->len; i++) {
+                       if (skb->data[i + j] ==
+                                       (u8) ST21NFCA_ESCAPE_BYTE_STUFFING) {
+                               skb->data[i] = skb->data[i + j + 1]
+                                               | ST21NFCA_BYTE_STUFFING_MASK;
+                               i++;
+                               j++;
+                       }
+                       skb->data[i] = skb->data[i + j];
+               }
+               /* remove byte stuffing useless byte */
+               skb_trim(skb, i - j);
+               /* remove ST21NFCA_SOF_EOF from head */
+               skb_pull(skb, 1);
+
+               r = check_crc(skb->data, skb->len);
+               if (r != 0) {
+                       i = 0;
+                       return -EBADMSG;
+               }
+
+               /* remove headbyte */
+               skb_pull(skb, 1);
+               /* remove crc. Byte Stuffing is already removed here */
+               skb_trim(skb, skb->len - 2);
+               return skb->len;
+       }
+       return 0;
+}
+
+/*
+ * Reads an SHDLC frame and returns it in a newly allocated sk_buff. Guarantees
+ * that the i2c bus will be flushed and that the next read will start on a new
+ * frame. The returned skb contains only the LLC header and payload.
+ * returns:
+ * frame size : if received frame is complete (find ST21NFCA_SOF_EOF at
+ * end of read)
+ * -EAGAIN : if received frame is incomplete (not find ST21NFCA_SOF_EOF
+ * at end of read)
+ * -EREMOTEIO : i2c read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * (value returned from st21nfca_hci_i2c_repack)
+ * -EIO : if no ST21NFCA_SOF_EOF is found after reaching
+ * the read length end sequence
+ */
+static int st21nfca_hci_i2c_read(struct st21nfca_i2c_phy *phy,
+                                struct sk_buff *skb)
+{
+       int r, i;
+       u8 len;
+       u8 buf[ST21NFCA_HCI_LLC_MAX_PAYLOAD];
+       struct i2c_client *client = phy->i2c_dev;
+
+       if (phy->current_read_len < ARRAY_SIZE(len_seq)) {
+               len = len_seq[phy->current_read_len];
+
+               /*
+                * Add a retry mechanism:
+                * operations on the I2C interface may fail while the RF or
+                * SWP interface is active.
+                */
+               r = 0;
+               mutex_lock(&phy->phy_lock);
+               for (i = 0; i < ARRAY_SIZE(wait_tab) && r <= 0; i++) {
+                       r = i2c_master_recv(client, buf, len);
+                       if (r < 0)
+                               msleep(wait_tab[i]);
+               }
+               mutex_unlock(&phy->phy_lock);
+
+               if (r != len) {
+                       phy->current_read_len = 0;
+                       return -EREMOTEIO;
+               }
+
+               /*
+                * The first read sequence does not start with SOF.
+                * Data is corrupted so we drop it.
+                */
+               if (!phy->current_read_len && buf[0] != ST21NFCA_SOF_EOF) {
+                       skb_trim(skb, 0);
+                       phy->current_read_len = 0;
+                       return -EIO;
+               } else if (phy->current_read_len &&
+                       IS_START_OF_FRAME(buf)) {
+                       /*
+                        * Previous frame transmission was interrupted and
+                        * the frame got repeated.
+                        * Received frame start with ST21NFCA_SOF_EOF + 00.
+                        */
+                       skb_trim(skb, 0);
+                       phy->current_read_len = 0;
+               }
+
+               memcpy(skb_put(skb, len), buf, len);
+
+               if (skb->data[skb->len - 1] == ST21NFCA_SOF_EOF) {
+                       phy->current_read_len = 0;
+                       return st21nfca_hci_i2c_repack(skb);
+               }
+               phy->current_read_len++;
+               return -EAGAIN;
+       }
+       return -EIO;
+}
+
+/*
+ * Reads an SHDLC frame from the chip. This is not as straightforward as it
+ * seems. The frame format is data-crc, and corruption can occur anywhere
+ * while transiting on the i2c bus, so we could read invalid data.
+ * The tricky case is when we read corrupted data or a corrupted crc. We must
+ * detect this here in order to decide whether the data can be passed to the
+ * hci core. This is the reason why we check the crc here.
+ * The CLF will repeat a frame until we send an RR on that frame.
+ *
+ * On the ST21NFCA, the IRQ line goes idle when a read starts. As no size
+ * information is available in the incoming data, further IRQs may arrive. Every
+ * IRQ will trigger a read sequence of a different length and fill the current
+ * frame. The reception is complete once we reach an ST21NFCA_SOF_EOF.
+ */
+static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
+{
+       struct st21nfca_i2c_phy *phy = phy_id;
+       struct i2c_client *client;
+
+       int r;
+
+       if (!phy || irq != phy->i2c_dev->irq) {
+               WARN_ON_ONCE(1);
+               return IRQ_NONE;
+       }
+
+       client = phy->i2c_dev;
+       dev_dbg(&client->dev, "IRQ\n");
+
+       if (phy->hard_fault != 0)
+               return IRQ_HANDLED;
+
+       r = st21nfca_hci_i2c_read(phy, phy->pending_skb);
+       if (r == -EREMOTEIO) {
+               phy->hard_fault = r;
+
+               nfc_hci_recv_frame(phy->hdev, NULL);
+
+               return IRQ_HANDLED;
+       } else if (r == -EAGAIN || r == -EIO) {
+               return IRQ_HANDLED;
+       } else if (r == -EBADMSG && phy->crc_trials < ARRAY_SIZE(wait_tab)) {
+               /*
+                * With ST21NFCA, only one interface (I2C, RF or SWP)
+                * may be active at a time.
+                * Having incorrect crc is usually due to i2c macrocell
+                * deactivation in the middle of a transmission.
+                * It may generate corrupted data on i2c.
+                * We wait some time for i2c to come back.
+                * The complete frame will be repeated.
+                */
+               msleep(wait_tab[phy->crc_trials]);
+               phy->crc_trials++;
+               phy->current_read_len = 0;
+               kfree_skb(phy->pending_skb);
+       } else if (r > 0) {
+               /*
+                * We successfully read data from the CLF and
+                * data is valid.
+                * Reset counter.
+                */
+               nfc_hci_recv_frame(phy->hdev, phy->pending_skb);
+               phy->crc_trials = 0;
+       }
+
+       phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL);
+       if (phy->pending_skb == NULL) {
+               phy->hard_fault = -ENOMEM;
+               nfc_hci_recv_frame(phy->hdev, NULL);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops i2c_phy_ops = {
+       .write = st21nfca_hci_i2c_write,
+       .enable = st21nfca_hci_i2c_enable,
+       .disable = st21nfca_hci_i2c_disable,
+};
+
+#ifdef CONFIG_OF
+static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client)
+{
+       struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
+       struct device_node *pp;
+       int gpio;
+       int r;
+
+       pp = client->dev.of_node;
+       if (!pp)
+               return -ENODEV;
+
+       /* Get GPIO from device tree */
+       gpio = of_get_named_gpio(pp, "enable-gpios", 0);
+       if (gpio < 0) {
+               nfc_err(&client->dev, "Failed to retrieve enable-gpios from device tree\n");
+               return gpio;
+       }
+
+       /* GPIO request and configuration */
+       r = devm_gpio_request(&client->dev, gpio, "clf_enable");
+       if (r) {
+               nfc_err(&client->dev, "Failed to request enable pin\n");
+               return -ENODEV;
+       }
+
+       r = gpio_direction_output(gpio, 1);
+       if (r) {
+               nfc_err(&client->dev, "Failed to set enable pin direction as output\n");
+               return -ENODEV;
+       }
+       phy->gpio_ena = gpio;
+
+       /* IRQ */
+       r = irq_of_parse_and_map(pp, 0);
+       if (r < 0) {
+               nfc_err(&client->dev,
+                               "Unable to get irq, error: %d\n", r);
+               return r;
+       }
+
+       phy->irq_polarity = irq_get_trigger_type(r);
+       client->irq = r;
+
+       return 0;
+}
+#else
+static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client)
+{
+       return -ENODEV;
+}
+#endif
+
+static int st21nfca_hci_i2c_request_resources(struct i2c_client *client)
+{
+       struct st21nfca_nfc_platform_data *pdata;
+       struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
+       int r;
+       int irq;
+
+       pdata = client->dev.platform_data;
+       if (pdata == NULL) {
+               nfc_err(&client->dev, "No platform data\n");
+               return -EINVAL;
+       }
+
+       /* store for later use */
+       phy->gpio_irq = pdata->gpio_irq;
+       phy->gpio_ena = pdata->gpio_ena;
+       phy->irq_polarity = pdata->irq_polarity;
+
+       r = devm_gpio_request(&client->dev, phy->gpio_irq, "wake_up");
+       if (r) {
+               pr_err("%s : gpio_request failed\n", __FILE__);
+               return -ENODEV;
+       }
+
+       r = gpio_direction_input(phy->gpio_irq);
+       if (r) {
+               pr_err("%s : gpio_direction_input failed\n", __FILE__);
+               return -ENODEV;
+       }
+
+       if (phy->gpio_ena > 0) {
+               r = devm_gpio_request(&client->dev,
+                                       phy->gpio_ena, "clf_enable");
+               if (r) {
+                       pr_err("%s : ena gpio_request failed\n", __FILE__);
+                       return -ENODEV;
+               }
+               r = gpio_direction_output(phy->gpio_ena, 1);
+
+               if (r) {
+                       pr_err("%s : ena gpio_direction_output failed\n",
+                              __FILE__);
+                       return -ENODEV;
+               }
+       }
+
+       /* IRQ */
+       irq = gpio_to_irq(phy->gpio_irq);
+       if (irq < 0) {
+               nfc_err(&client->dev,
+                               "Unable to get irq number for GPIO %d error %d\n",
+                               phy->gpio_irq, irq);
+               return -ENODEV;
+       }
+       client->irq = irq;
+
+       return 0;
+}
+
+static int st21nfca_hci_i2c_probe(struct i2c_client *client,
+                                 const struct i2c_device_id *id)
+{
+       struct st21nfca_i2c_phy *phy;
+       struct st21nfca_nfc_platform_data *pdata;
+       int r;
+
+       dev_dbg(&client->dev, "%s\n", __func__);
+       dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+               nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
+               return -ENODEV;
+       }
+
+       phy = devm_kzalloc(&client->dev, sizeof(struct st21nfca_i2c_phy),
+                          GFP_KERNEL);
+       if (!phy) {
+               nfc_err(&client->dev,
+                       "Cannot allocate memory for st21nfca i2c phy.\n");
+               return -ENOMEM;
+       }
+
+       phy->i2c_dev = client;
+       phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL);
+       if (phy->pending_skb == NULL)
+               return -ENOMEM;
+
+       phy->current_read_len = 0;
+       phy->crc_trials = 0;
+       mutex_init(&phy->phy_lock);
+       i2c_set_clientdata(client, phy);
+
+       pdata = client->dev.platform_data;
+       if (!pdata && client->dev.of_node) {
+               r = st21nfca_hci_i2c_of_request_resources(client);
+               if (r) {
+                       nfc_err(&client->dev, "No platform data\n");
+                       return r;
+               }
+       } else if (pdata) {
+               r = st21nfca_hci_i2c_request_resources(client);
+               if (r) {
+                       nfc_err(&client->dev, "Cannot get platform resources\n");
+                       return r;
+               }
+       } else {
+               nfc_err(&client->dev, "st21nfca platform resources not available\n");
+               return -ENODEV;
+       }
+
+       r = st21nfca_hci_platform_init(phy);
+       if (r < 0) {
+               nfc_err(&client->dev, "Unable to reboot st21nfca\n");
+               return -ENODEV;
+       }
+
+       r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+                               st21nfca_hci_irq_thread_fn,
+                               phy->irq_polarity | IRQF_ONESHOT,
+                               ST21NFCA_HCI_DRIVER_NAME, phy);
+       if (r < 0) {
+               nfc_err(&client->dev, "Unable to register IRQ handler\n");
+               return r;
+       }
+
+       return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+                              ST21NFCA_FRAME_HEADROOM, ST21NFCA_FRAME_TAILROOM,
+                              ST21NFCA_HCI_LLC_MAX_PAYLOAD, &phy->hdev);
+}
+
+static int st21nfca_hci_i2c_remove(struct i2c_client *client)
+{
+       struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
+
+       dev_dbg(&client->dev, "%s\n", __func__);
+
+       st21nfca_hci_remove(phy->hdev);
+
+       if (phy->powered)
+               st21nfca_hci_i2c_disable(phy);
+
+       return 0;
+}
+
+static const struct of_device_id of_st21nfca_i2c_match[] = {
+       { .compatible = "st,st21nfca_i2c", },
+       {}
+};
+
+static struct i2c_driver st21nfca_hci_i2c_driver = {
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = ST21NFCA_HCI_I2C_DRIVER_NAME,
+               .of_match_table = of_match_ptr(of_st21nfca_i2c_match),
+       },
+       .probe = st21nfca_hci_i2c_probe,
+       .id_table = st21nfca_hci_i2c_id_table,
+       .remove = st21nfca_hci_i2c_remove,
+};
+
+module_i2c_driver(st21nfca_hci_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
new file mode 100644 (file)
index 0000000..51e0f00
--- /dev/null
@@ -0,0 +1,698 @@
+/*
+ * HCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "st21nfca.h"
+
+#define DRIVER_DESC "HCI NFC driver for ST21NFCA"
+
+#define FULL_VERSION_LEN 3
+
+/* Proprietary gates, events, commands and registers */
+
+/* Commands that apply to all RF readers */
+#define ST21NFCA_RF_READER_CMD_PRESENCE_CHECK  0x30
+
+#define ST21NFCA_RF_READER_ISO15693_GATE       0x12
+#define ST21NFCA_RF_READER_ISO15693_INVENTORY 0x01
+
+/*
+ * Reader gate for communication with contact-less cards using Type A
+ * protocol ISO14443-3 but not compliant with ISO14443-4
+ */
+#define ST21NFCA_RF_READER_14443_3_A_GATE      0x15
+#define ST21NFCA_RF_READER_14443_3_A_UID       0x02
+#define ST21NFCA_RF_READER_14443_3_A_ATQA      0x03
+#define ST21NFCA_RF_READER_14443_3_A_SAK       0x04
+
+#define ST21NFCA_DEVICE_MGNT_GATE              0x01
+#define ST21NFCA_DEVICE_MGNT_PIPE              0x02
+
+#define ST21NFCA_DM_GETINFO         0x13
+#define ST21NFCA_DM_GETINFO_PIPE_LIST       0x02
+#define ST21NFCA_DM_GETINFO_PIPE_INFO       0x01
+#define ST21NFCA_DM_PIPE_CREATED        0x02
+#define ST21NFCA_DM_PIPE_OPEN           0x04
+#define ST21NFCA_DM_RF_ACTIVE           0x80
+
+#define ST21NFCA_DM_IS_PIPE_OPEN(p) \
+       ((p & 0x0f) == (ST21NFCA_DM_PIPE_CREATED | ST21NFCA_DM_PIPE_OPEN))
+
+#define ST21NFCA_NFC_MODE      0x03    /* NFC_MODE parameter */
+
+static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES);
+
+static struct nfc_hci_gate st21nfca_gates[] = {
+       {NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_PIPE},
+       {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
+       {NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+       {NFC_HCI_LINK_MGMT_GATE, NFC_HCI_LINK_MGMT_PIPE},
+       {NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE},
+       {NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE},
+       {ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE},
+       {ST21NFCA_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
+       {ST21NFCA_RF_READER_14443_3_A_GATE, NFC_HCI_INVALID_PIPE},
+       {ST21NFCA_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
+};
+
+struct st21nfca_pipe_info {
+       u8 pipe_state;
+       u8 src_host_id;
+       u8 src_gate_id;
+       u8 dst_host_id;
+       u8 dst_gate_id;
+} __packed;
+
+/* Largest headroom needed for outgoing custom commands */
+#define ST21NFCA_CMDS_HEADROOM  7
+
+static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+{
+       int i, j, r;
+       struct sk_buff *skb_pipe_list, *skb_pipe_info;
+       struct st21nfca_pipe_info *info;
+
+       u8 pipe_list[] = { ST21NFCA_DM_GETINFO_PIPE_LIST,
+               NFC_HCI_TERMINAL_HOST_ID
+       };
+       u8 pipe_info[] = { ST21NFCA_DM_GETINFO_PIPE_INFO,
+               NFC_HCI_TERMINAL_HOST_ID, 0
+       };
+
+       skb_pipe_list = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
+       if (!skb_pipe_list) {
+               r = -ENOMEM;
+               goto free_list;
+       }
+
+       skb_pipe_info = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
+       if (!skb_pipe_info) {
+               r = -ENOMEM;
+               goto free_info;
+       }
+
+       /* On the ST21NFCA, pipe numbers are dynamic: a maximum of 16 pipes
+        * can be created at the same time.
+        * If pipes are already created, hci_dev_up will fail.
+        * Doing a "clear all pipes" is a bad idea because:
+        * - it causes useless EEPROM cycling
+        * - it might break secure element support
+        *   (such as removing the connectivity or APDU reader pipe)
+        * A better approach on the ST21NFCA is to:
+        * - get the pipe list for each host
+        *   (eg: NFC_HCI_HOST_CONTROLLER_ID for now;
+        *   TODO: later on, UICC HOST and eSE HOST)
+        * - get the pipe information
+        * - match the retrieved pipe list against st21nfca_gates
+        * ST21NFCA_DEVICE_MGNT_GATE is a proprietary gate
+        * with ST21NFCA_DEVICE_MGNT_PIPE.
+        * Pipes can be closed and need to be opened.
+        */
+       r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+               ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE);
+       if (r < 0)
+               goto free_info;
+
+       /* Get pipe list */
+       r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+                       ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
+                       &skb_pipe_list);
+       if (r < 0)
+               goto free_info;
+
+       /* Complete the existing gate_pipe table */
+       for (i = 0; i < skb_pipe_list->len; i++) {
+               pipe_info[2] = skb_pipe_list->data[i];
+               r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+                                       ST21NFCA_DM_GETINFO, pipe_info,
+                                       sizeof(pipe_info), &skb_pipe_info);
+
+               if (r)
+                       continue;
+
+               /*
+                * Match pipe ID and gate ID
+                * Output format from ST21NFCA_DM_GETINFO is:
+                * - pipe state (1 byte)
+                * - source hid (1 byte)
+                * - source gid (1 byte)
+                * - destination hid (1 byte)
+                * - destination gid (1 byte)
+                */
+               info = (struct st21nfca_pipe_info *) skb_pipe_info->data;
+               for (j = 0; (j < ARRAY_SIZE(st21nfca_gates)) &&
+                       (st21nfca_gates[j].gate != info->dst_gate_id);
+                       j++)
+                       ;
+
+               if (j < ARRAY_SIZE(st21nfca_gates) &&
+                       st21nfca_gates[j].gate == info->dst_gate_id &&
+                       ST21NFCA_DM_IS_PIPE_OPEN(info->pipe_state)) {
+                       st21nfca_gates[j].pipe = pipe_info[2];
+                       hdev->gate2pipe[st21nfca_gates[j].gate] =
+                               st21nfca_gates[j].pipe;
+               }
+       }
+
+       /*
+        * 3 gates have a well known pipe ID.
+        * They will never appear in the pipe list
+        */
+       if (skb_pipe_list->len + 3 < ARRAY_SIZE(st21nfca_gates)) {
+               for (i = skb_pipe_list->len + 3;
+                               i < ARRAY_SIZE(st21nfca_gates); i++) {
+                       r = nfc_hci_connect_gate(hdev,
+                                       NFC_HCI_HOST_CONTROLLER_ID,
+                                       st21nfca_gates[i].gate,
+                                       st21nfca_gates[i].pipe);
+                       if (r < 0)
+                               goto free_info;
+               }
+       }
+
+       memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
+free_info:
+       kfree_skb(skb_pipe_info);
+free_list:
+       kfree_skb(skb_pipe_list);
+       return r;
+}
+
+static int st21nfca_hci_open(struct nfc_hci_dev *hdev)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+       int r;
+
+       mutex_lock(&info->info_lock);
+
+       if (info->state != ST21NFCA_ST_COLD) {
+               r = -EBUSY;
+               goto out;
+       }
+
+       r = info->phy_ops->enable(info->phy_id);
+
+       if (r == 0)
+               info->state = ST21NFCA_ST_READY;
+
+out:
+       mutex_unlock(&info->info_lock);
+       return r;
+}
+
+static void st21nfca_hci_close(struct nfc_hci_dev *hdev)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       mutex_lock(&info->info_lock);
+
+       if (info->state == ST21NFCA_ST_COLD)
+               goto out;
+
+       info->phy_ops->disable(info->phy_id);
+       info->state = ST21NFCA_ST_COLD;
+
+out:
+       mutex_unlock(&info->info_lock);
+}
+
+static int st21nfca_hci_ready(struct nfc_hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       u8 param;
+       int r;
+
+       param = NFC_HCI_UICC_HOST_ID;
+       r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+                             NFC_HCI_ADMIN_WHITELIST, &param, 1);
+       if (r < 0)
+               return r;
+
+       /* Enable NFC_MODE in the device management gate if it is disabled */
+       r = nfc_hci_get_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+                             ST21NFCA_NFC_MODE, &skb);
+       if (r < 0)
+               return r;
+
+       if (skb->data[0] == 0) {
+               kfree_skb(skb);
+               param = 1;
+
+               r = nfc_hci_set_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+                                       ST21NFCA_NFC_MODE, &param, 1);
+               if (r < 0)
+                       return r;
+       }
+
+       r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+                              NFC_HCI_EVT_END_OPERATION, NULL, 0);
+       if (r < 0)
+               return r;
+
+       r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+                             NFC_HCI_ID_MGMT_VERSION_SW, &skb);
+       if (r < 0)
+               return r;
+
+       if (skb->len != FULL_VERSION_LEN) {
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       print_hex_dump(KERN_DEBUG, "FULL VERSION SOFTWARE INFO: ",
+                      DUMP_PREFIX_NONE, 16, 1,
+                      skb->data, FULL_VERSION_LEN, false);
+
+       kfree_skb(skb);
+
+       return 0;
+}
+
+static int st21nfca_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       return info->phy_ops->write(info->phy_id, skb);
+}
+
+static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev,
+                                  u32 im_protocols, u32 tm_protocols)
+{
+       int r;
+
+       pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
+               __func__, im_protocols, tm_protocols);
+
+       r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+                              NFC_HCI_EVT_END_OPERATION, NULL, 0);
+       if (r < 0)
+               return r;
+       if (im_protocols) {
+               /*
+                * Enable polling according to im_protocols: close
+                * (disconnect) the reader gates that are not requested
+                */
+               if ((NFC_HCI_RF_READER_B_GATE & im_protocols) == 0) {
+                       r = nfc_hci_disconnect_gate(hdev,
+                                       NFC_HCI_RF_READER_B_GATE);
+                       if (r < 0)
+                               return r;
+               }
+
+               if ((NFC_HCI_RF_READER_A_GATE & im_protocols) == 0) {
+                       r = nfc_hci_disconnect_gate(hdev,
+                                       NFC_HCI_RF_READER_A_GATE);
+                       if (r < 0)
+                               return r;
+               }
+
+               if ((ST21NFCA_RF_READER_F_GATE & im_protocols) == 0) {
+                       r = nfc_hci_disconnect_gate(hdev,
+                                       ST21NFCA_RF_READER_F_GATE);
+                       if (r < 0)
+                               return r;
+               }
+
+               if ((ST21NFCA_RF_READER_14443_3_A_GATE & im_protocols) == 0) {
+                       r = nfc_hci_disconnect_gate(hdev,
+                                       ST21NFCA_RF_READER_14443_3_A_GATE);
+                       if (r < 0)
+                               return r;
+               }
+
+               if ((ST21NFCA_RF_READER_ISO15693_GATE & im_protocols) == 0) {
+                       r = nfc_hci_disconnect_gate(hdev,
+                                       ST21NFCA_RF_READER_ISO15693_GATE);
+                       if (r < 0)
+                               return r;
+               }
+
+               r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+                                      NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+               if (r < 0)
+                       nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+                                          NFC_HCI_EVT_END_OPERATION, NULL, 0);
+       }
+       return r;
+}
+
+static int st21nfca_get_iso14443_3_atqa(struct nfc_hci_dev *hdev, u16 *atqa)
+{
+       int r;
+       struct sk_buff *atqa_skb = NULL;
+
+       r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE,
+                             ST21NFCA_RF_READER_14443_3_A_ATQA, &atqa_skb);
+       if (r < 0)
+               goto exit;
+
+       if (atqa_skb->len != 2) {
+               r = -EPROTO;
+               goto exit;
+       }
+
+       *atqa = be16_to_cpu(*(__be16 *) atqa_skb->data);
+
+exit:
+       kfree_skb(atqa_skb);
+       return r;
+}
+
+static int st21nfca_get_iso14443_3_sak(struct nfc_hci_dev *hdev, u8 *sak)
+{
+       int r;
+       struct sk_buff *sak_skb = NULL;
+
+       r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE,
+                             ST21NFCA_RF_READER_14443_3_A_SAK, &sak_skb);
+       if (r < 0)
+               goto exit;
+
+       if (sak_skb->len != 1) {
+               r = -EPROTO;
+               goto exit;
+       }
+
+       *sak = sak_skb->data[0];
+
+exit:
+       kfree_skb(sak_skb);
+       return r;
+}
+
+static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
+                                      int *len)
+{
+       int r;
+       struct sk_buff *uid_skb = NULL;
+
+       r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE,
+                             ST21NFCA_RF_READER_14443_3_A_UID, &uid_skb);
+       if (r < 0)
+               goto exit;
+
+       if (uid_skb->len == 0 || uid_skb->len > NFC_NFCID1_MAXSIZE) {
+               r = -EPROTO;
+               goto exit;
+       }
+
+       memcpy(gate, uid_skb->data, uid_skb->len);
+       *len = uid_skb->len;
+exit:
+       kfree_skb(uid_skb);
+       return r;
+}
+
+static int st21nfca_get_iso15693_inventory(struct nfc_hci_dev *hdev,
+                                          struct nfc_target *target)
+{
+       int r;
+       struct sk_buff *inventory_skb = NULL;
+
+       r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_ISO15693_GATE,
+                             ST21NFCA_RF_READER_ISO15693_INVENTORY,
+                             &inventory_skb);
+       if (r < 0)
+               goto exit;
+
+       skb_pull(inventory_skb, 2);
+
+       if (inventory_skb->len == 0 ||
+           inventory_skb->len > NFC_ISO15693_UID_MAXSIZE) {
+               r = -EPROTO;
+               goto exit;
+       }
+
+       memcpy(target->iso15693_uid, inventory_skb->data, inventory_skb->len);
+       target->iso15693_dsfid  = inventory_skb->data[1];
+       target->is_iso15693 = 1;
+exit:
+       kfree_skb(inventory_skb);
+       return r;
+}
+
+static int st21nfca_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
+                                        struct nfc_target *target)
+{
+       int r, len;
+       u16 atqa;
+       u8 sak;
+       u8 uid[NFC_NFCID1_MAXSIZE];
+
+       switch (gate) {
+       case ST21NFCA_RF_READER_F_GATE:
+               target->supported_protocols = NFC_PROTO_FELICA_MASK;
+               break;
+       case ST21NFCA_RF_READER_14443_3_A_GATE:
+               /* ISO14443-3 type 1 or 2 tags */
+               r = st21nfca_get_iso14443_3_atqa(hdev, &atqa);
+               if (r < 0)
+                       return r;
+               if (atqa == 0x000c) {
+                       target->supported_protocols = NFC_PROTO_JEWEL_MASK;
+                       target->sens_res = 0x0c00;
+               } else {
+                       r = st21nfca_get_iso14443_3_sak(hdev, &sak);
+                       if (r < 0)
+                               return r;
+
+                       r = st21nfca_get_iso14443_3_uid(hdev, uid, &len);
+                       if (r < 0)
+                               return r;
+
+                       target->supported_protocols =
+                           nfc_hci_sak_to_protocol(sak);
+                       if (target->supported_protocols == 0xffffffff)
+                               return -EPROTO;
+
+                       target->sens_res = atqa;
+                       target->sel_res = sak;
+                       memcpy(target->nfcid1, uid, len);
+                       target->nfcid1_len = len;
+               }
+
+               break;
+       case ST21NFCA_RF_READER_ISO15693_GATE:
+               target->supported_protocols = NFC_PROTO_ISO15693_MASK;
+               r = st21nfca_get_iso15693_inventory(hdev, target);
+               if (r < 0)
+                       return r;
+               break;
+       default:
+               return -EPROTO;
+       }
+
+       return 0;
+}
+
+#define ST21NFCA_CB_TYPE_READER_ISO15693 1
+static void st21nfca_hci_data_exchange_cb(void *context, struct sk_buff *skb,
+                                         int err)
+{
+       struct st21nfca_hci_info *info = context;
+
+       switch (info->async_cb_type) {
+       case ST21NFCA_CB_TYPE_READER_ISO15693:
+               if (err == 0)
+                       skb_trim(skb, skb->len - 1);
+               info->async_cb(info->async_cb_context, skb, err);
+               break;
+       default:
+               if (err == 0)
+                       kfree_skb(skb);
+               break;
+       }
+}
+
+/*
+ * Returns:
+ * <= 0: the driver handled the data exchange
+ *    1: the driver does not handle it specially, please do standard processing
+ */
+static int st21nfca_hci_im_transceive(struct nfc_hci_dev *hdev,
+                                     struct nfc_target *target,
+                                     struct sk_buff *skb,
+                                     data_exchange_cb_t cb, void *cb_context)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       pr_info(DRIVER_DESC ": %s for gate=%d len=%d\n", __func__,
+               target->hci_reader_gate, skb->len);
+
+       switch (target->hci_reader_gate) {
+       case ST21NFCA_RF_READER_F_GATE:
+               *skb_push(skb, 1) = 0x1a;
+               return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                             ST21NFCA_WR_XCHG_DATA, skb->data,
+                                             skb->len, cb, cb_context);
+       case ST21NFCA_RF_READER_14443_3_A_GATE:
+               *skb_push(skb, 1) = 0x1a;       /* CTR, see spec:10.2.2.1 */
+
+               return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                             ST21NFCA_WR_XCHG_DATA, skb->data,
+                                             skb->len, cb, cb_context);
+       case ST21NFCA_RF_READER_ISO15693_GATE:
+               info->async_cb_type = ST21NFCA_CB_TYPE_READER_ISO15693;
+               info->async_cb = cb;
+               info->async_cb_context = cb_context;
+
+               *skb_push(skb, 1) = 0x17;
+
+               return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                             ST21NFCA_WR_XCHG_DATA, skb->data,
+                                             skb->len,
+                                             st21nfca_hci_data_exchange_cb,
+                                             info);
+               break;
+       default:
+               return 1;
+       }
+}
+
+static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev,
+                                      struct nfc_target *target)
+{
+       u8 fwi = 0x11;
+
+       switch (target->hci_reader_gate) {
+       case NFC_HCI_RF_READER_A_GATE:
+       case NFC_HCI_RF_READER_B_GATE:
+               /*
+                * PRESENCE_CHECK is available on those gates.
+                * However, the answer to this command takes 3 * fwi
+                * if the card is not present.
+                * Instead, we send an empty I-Frame with a very short
+                * configurable fwi of ~604µs.
+                */
+               return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+                                       ST21NFCA_WR_XCHG_DATA, &fwi, 1, NULL);
+       case ST21NFCA_RF_READER_14443_3_A_GATE:
+               return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+                                       ST21NFCA_RF_READER_CMD_PRESENCE_CHECK,
+                                       NULL, 0, NULL);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static struct nfc_hci_ops st21nfca_hci_ops = {
+       .open = st21nfca_hci_open,
+       .close = st21nfca_hci_close,
+       .load_session = st21nfca_hci_load_session,
+       .hci_ready = st21nfca_hci_ready,
+       .xmit = st21nfca_hci_xmit,
+       .start_poll = st21nfca_hci_start_poll,
+       .target_from_gate = st21nfca_hci_target_from_gate,
+       .im_transceive = st21nfca_hci_im_transceive,
+       .check_presence = st21nfca_hci_check_presence,
+};
+
+int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
+                      char *llc_name, int phy_headroom, int phy_tailroom,
+                      int phy_payload, struct nfc_hci_dev **hdev)
+{
+       struct st21nfca_hci_info *info;
+       int r = 0;
+       int dev_num;
+       u32 protocols;
+       struct nfc_hci_init_data init_data;
+       unsigned long quirks = 0;
+
+       info = kzalloc(sizeof(struct st21nfca_hci_info), GFP_KERNEL);
+       if (!info) {
+               r = -ENOMEM;
+               goto err_alloc_hdev;
+       }
+
+       info->phy_ops = phy_ops;
+       info->phy_id = phy_id;
+       info->state = ST21NFCA_ST_COLD;
+       mutex_init(&info->info_lock);
+
+       init_data.gate_count = ARRAY_SIZE(st21nfca_gates);
+
+       memcpy(init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
+
+       /*
+        * The session id must include the driver name and the i2c bus
+        * address: persistent information used to tell two identical
+        * chips apart.
+        */
+       dev_num = find_first_zero_bit(dev_mask, ST21NFCA_NUM_DEVICES);
+       if (dev_num >= ST21NFCA_NUM_DEVICES) {
+               r = -ENODEV;
+               goto err_alloc_hdev;
+       }
+
+       scnprintf(init_data.session_id, sizeof(init_data.session_id), "%s%2x",
+                 "ST21AH", dev_num);
+
+       protocols = NFC_PROTO_JEWEL_MASK |
+           NFC_PROTO_MIFARE_MASK |
+           NFC_PROTO_FELICA_MASK |
+           NFC_PROTO_ISO14443_MASK |
+           NFC_PROTO_ISO14443_B_MASK |
+           NFC_PROTO_ISO15693_MASK;
+
+       set_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &quirks);
+
+       info->hdev =
+           nfc_hci_allocate_device(&st21nfca_hci_ops, &init_data, quirks,
+                                   protocols, llc_name,
+                                   phy_headroom + ST21NFCA_CMDS_HEADROOM,
+                                   phy_tailroom, phy_payload);
+
+       if (!info->hdev) {
+               pr_err("Cannot allocate nfc hdev.\n");
+               r = -ENOMEM;
+               goto err_alloc_hdev;
+       }
+
+       nfc_hci_set_clientdata(info->hdev, info);
+
+       r = nfc_hci_register_device(info->hdev);
+       if (r)
+               goto err_regdev;
+
+       *hdev = info->hdev;
+
+       return 0;
+
+err_regdev:
+       nfc_hci_free_device(info->hdev);
+
+err_alloc_hdev:
+       kfree(info);
+
+       return r;
+}
+EXPORT_SYMBOL(st21nfca_hci_probe);
+
+void st21nfca_hci_remove(struct nfc_hci_dev *hdev)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       nfc_hci_unregister_device(hdev);
+       nfc_hci_free_device(hdev);
+       kfree(info);
+}
+EXPORT_SYMBOL(st21nfca_hci_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
new file mode 100644 (file)
index 0000000..334cd90
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_ST21NFCA_H_
+#define __LOCAL_ST21NFCA_H_
+
+#include <net/nfc/hci.h>
+
+#define HCI_MODE 0
+
+/* framing in HCI mode */
+#define ST21NFCA_SOF_EOF_LEN    2
+
+/* Almost all of the time the value is 0 */
+#define ST21NFCA_HCI_LLC_LEN    1
+
+/* Worst-case size:
+ * the CRC is normally 2 bytes long, but byte stuffing
+ * can double it when a CRC byte equals ST21NFCA_SOF_EOF
+ */
+#define ST21NFCA_HCI_LLC_CRC    4
+
+#define ST21NFCA_HCI_LLC_LEN_CRC        (ST21NFCA_SOF_EOF_LEN + \
+                                               ST21NFCA_HCI_LLC_LEN + \
+                                               ST21NFCA_HCI_LLC_CRC)
+#define ST21NFCA_HCI_LLC_MIN_SIZE       (1 + ST21NFCA_HCI_LLC_LEN_CRC)
+
+/* Worst case when adding byte stuffing between each byte */
+#define ST21NFCA_HCI_LLC_MAX_PAYLOAD    29
+#define ST21NFCA_HCI_LLC_MAX_SIZE       (ST21NFCA_HCI_LLC_LEN_CRC + 1 + \
+                                       ST21NFCA_HCI_LLC_MAX_PAYLOAD)
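+/* With the values above: LEN_CRC = 7, MIN_SIZE = 8 and MAX_SIZE = 37 bytes */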
+
+#define DRIVER_DESC "HCI NFC driver for ST21NFCA"
+
+#define ST21NFCA_HCI_MODE 0
+
+#define ST21NFCA_NUM_DEVICES 256
+
+int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
+                      char *llc_name, int phy_headroom, int phy_tailroom,
+                      int phy_payload, struct nfc_hci_dev **hdev);
+void st21nfca_hci_remove(struct nfc_hci_dev *hdev);
+
+enum st21nfca_state {
+       ST21NFCA_ST_COLD,
+       ST21NFCA_ST_READY,
+};
+
+struct st21nfca_hci_info {
+       struct nfc_phy_ops *phy_ops;
+       void *phy_id;
+
+       struct nfc_hci_dev *hdev;
+
+       enum st21nfca_state state;
+
+       struct mutex info_lock;
+
+       int async_cb_type;
+       data_exchange_cb_t async_cb;
+       void *async_cb_context;
+
+} __packed;
+
+/* Reader RF commands */
+#define ST21NFCA_WR_XCHG_DATA            0x10
+
+#define ST21NFCA_RF_READER_F_GATE               0x14
+#define ST21NFCA_RF_READER_F_DATARATE 0x01
+#define ST21NFCA_RF_READER_F_DATARATE_106 0x01
+#define ST21NFCA_RF_READER_F_DATARATE_212 0x02
+#define ST21NFCA_RF_READER_F_DATARATE_424 0x04
+
+#endif /* __LOCAL_ST21NFCA_H_ */
index d9babe986473f11de9a4c62eabac003a3c313e02..efb36593ecb44bf9d81fc804e9c222b46d726115 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/device.h>
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 #include <linux/nfc.h>
 #include <linux/skbuff.h>
 #include <linux/delay.h>
  * only the SRX bit set, it means that all of the data has been received
  * (once what's in the fifo has been read).  However, depending on timing
 * an interrupt status with only the SRX bit set may not be received.  In
- * those cases, the timeout mechanism is used to wait 5 ms in case more
- * data arrives.  After 5 ms, it is assumed that all of the data has been
+ * those cases, the timeout mechanism is used to wait 20 ms in case more
+ * data arrives.  After 20 ms, it is assumed that all of the data has been
  * received and the accumulated rx data is sent upstream.  The
  * 'TRF7970A_ST_WAIT_FOR_RX_DATA_CONT' state is used for this purpose
  * (i.e., it indicates that some data has been received but we're not sure
  * if there is more coming so a timeout in this state means all data has
- * been received and there isn't an error).  The delay is 5 ms since delays
- * over 2 ms have been observed during testing (a little extra just in case).
+ * been received and there isn't an error).  The delay is 20 ms since delays
+ * of ~16 ms have been observed during testing.
  *
  * Type 2 write and sector select commands respond with a 4-bit ACK or NACK.
  * Having only 4 bits in the FIFO won't normally generate an interrupt so
 
 #define TRF7970A_SUPPORTED_PROTOCOLS \
                (NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK |      \
-                NFC_PROTO_ISO15693_MASK)
+                NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_ISO15693_MASK)
+
+#define TRF7970A_AUTOSUSPEND_DELAY             30000 /* 30 seconds */
 
 /* TX data must be prefixed with a FIFO reset cmd, a cmd that depends
  * on what the current framing is, the address of the TX length byte 1
 /* TX length is 3 nibbles long ==> 4KB - 1 bytes max */
 #define TRF7970A_TX_MAX                                (4096 - 1)
 
-#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT      5
+#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT      20
 #define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT   3
 #define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF    20
 
@@ -330,13 +333,15 @@ struct trf7970a {
        struct regulator                *regulator;
        struct nfc_digital_dev          *ddev;
        u32                             quirks;
-       bool                            powering_up;
        bool                            aborting;
        struct sk_buff                  *tx_skb;
        struct sk_buff                  *rx_skb;
        nfc_digital_cmd_complete_t      cb;
        void                            *cb_arg;
+       u8                              chip_status_ctrl;
        u8                              iso_ctrl;
+       u8                              iso_ctrl_tech;
+       u8                              modulator_sys_clk_ctrl;
        u8                              special_fcn_reg1;
        int                             technology;
        int                             framing;
@@ -681,7 +686,9 @@ static irqreturn_t trf7970a_irq(int irq, void *dev_id)
                        trf->ignore_timeout =
                                !cancel_delayed_work(&trf->timeout_work);
                        trf7970a_drain_fifo(trf, status);
-               } else if (!(status & TRF7970A_IRQ_STATUS_TX)) {
+               } else if (status == TRF7970A_IRQ_STATUS_TX) {
+                       trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+               } else {
                        trf7970a_send_err_upstream(trf, -EIO);
                }
                break;
@@ -757,8 +764,8 @@ static int trf7970a_init(struct trf7970a *trf)
        if (ret)
                goto err_out;
 
-       ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
-                       TRF7970A_MODULATOR_DEPTH_OOK);
+       /* Must clear NFC Target Detection Level reg due to erratum */
+       ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0);
        if (ret)
                goto err_out;
 
@@ -774,12 +781,7 @@ static int trf7970a_init(struct trf7970a *trf)
 
        trf->special_fcn_reg1 = 0;
 
-       ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
-                       TRF7970A_CHIP_STATUS_RF_ON |
-                               TRF7970A_CHIP_STATUS_VRS5_3);
-       if (ret)
-               goto err_out;
-
+       trf->iso_ctrl = 0xff;
        return 0;
 
 err_out:
@@ -791,53 +793,29 @@ static void trf7970a_switch_rf_off(struct trf7970a *trf)
 {
        dev_dbg(trf->dev, "Switching rf off\n");
 
-       gpio_set_value(trf->en_gpio, 0);
-       gpio_set_value(trf->en2_gpio, 0);
+       trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON;
+
+       trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl);
 
        trf->aborting = false;
        trf->state = TRF7970A_ST_OFF;
+
+       pm_runtime_mark_last_busy(trf->dev);
+       pm_runtime_put_autosuspend(trf->dev);
 }
 
-static int trf7970a_switch_rf_on(struct trf7970a *trf)
+static void trf7970a_switch_rf_on(struct trf7970a *trf)
 {
-       unsigned long delay;
-       int ret;
-
        dev_dbg(trf->dev, "Switching rf on\n");
 
-       if (trf->powering_up)
-               usleep_range(5000, 6000);
-
-       gpio_set_value(trf->en2_gpio, 1);
-       usleep_range(1000, 2000);
-       gpio_set_value(trf->en_gpio, 1);
+       pm_runtime_get_sync(trf->dev);
 
-       /* The delay between enabling the trf7970a and issuing the first
-        * command is significantly longer the very first time after powering
-        * up.  Make sure the longer delay is only done the first time.
-        */
-       if (trf->powering_up) {
-               delay = 20000;
-               trf->powering_up = false;
-       } else {
-               delay = 5000;
-       }
-
-       usleep_range(delay, delay + 1000);
-
-       ret = trf7970a_init(trf);
-       if (ret)
-               trf7970a_switch_rf_off(trf);
-       else
-               trf->state = TRF7970A_ST_IDLE;
-
-       return ret;
+       trf->state = TRF7970A_ST_IDLE;
 }
 
 static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
 {
        struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
-       int ret = 0;
 
        dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on);
 
@@ -846,7 +824,7 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
        if (on) {
                switch (trf->state) {
                case TRF7970A_ST_OFF:
-                       ret = trf7970a_switch_rf_on(trf);
+                       trf7970a_switch_rf_on(trf);
                        break;
                case TRF7970A_ST_IDLE:
                case TRF7970A_ST_IDLE_RX_BLOCKED:
@@ -871,7 +849,7 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
        }
 
        mutex_unlock(&trf->lock);
-       return ret;
+       return 0;
 }
 
 static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
@@ -882,10 +860,16 @@ static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
 
        switch (tech) {
        case NFC_DIGITAL_RF_TECH_106A:
-               trf->iso_ctrl = TRF7970A_ISO_CTRL_14443A_106;
+               trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443A_106;
+               trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK;
+               break;
+       case NFC_DIGITAL_RF_TECH_106B:
+               trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443B_106;
+               trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10;
                break;
        case NFC_DIGITAL_RF_TECH_ISO15693:
-               trf->iso_ctrl = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+               trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+               trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK;
                break;
        default:
                dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech);
@@ -899,24 +883,29 @@ static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
 
 static int trf7970a_config_framing(struct trf7970a *trf, int framing)
 {
+       u8 iso_ctrl = trf->iso_ctrl_tech;
+       int ret;
+
        dev_dbg(trf->dev, "framing: %d\n", framing);
 
        switch (framing) {
        case NFC_DIGITAL_FRAMING_NFCA_SHORT:
        case NFC_DIGITAL_FRAMING_NFCA_STANDARD:
                trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC;
-               trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+               iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
                break;
        case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A:
        case NFC_DIGITAL_FRAMING_NFCA_T4T:
+       case NFC_DIGITAL_FRAMING_NFCB:
+       case NFC_DIGITAL_FRAMING_NFCB_T4T:
        case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY:
        case NFC_DIGITAL_FRAMING_ISO15693_T5T:
                trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
-               trf->iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
+               iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
                break;
        case NFC_DIGITAL_FRAMING_NFCA_T2T:
                trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
-               trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+               iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
                break;
        default:
                dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing);
@@ -925,24 +914,46 @@ static int trf7970a_config_framing(struct trf7970a *trf, int framing)
 
        trf->framing = framing;
 
-       return trf7970a_write(trf, TRF7970A_ISO_CTRL, trf->iso_ctrl);
+       if (iso_ctrl != trf->iso_ctrl) {
+               ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl);
+               if (ret)
+                       return ret;
+
+               trf->iso_ctrl = iso_ctrl;
+
+               ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
+                               trf->modulator_sys_clk_ctrl);
+               if (ret)
+                       return ret;
+       }
+
+       if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) {
+               ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
+                               trf->chip_status_ctrl |
+                                       TRF7970A_CHIP_STATUS_RF_ON);
+               if (ret)
+                       return ret;
+
+               trf->chip_status_ctrl |= TRF7970A_CHIP_STATUS_RF_ON;
+
+               usleep_range(5000, 6000);
+       }
+
+       return 0;
 }
 
 static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
                int param)
 {
        struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
-       int ret = 0;
+       int ret;
 
        dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param);
 
        mutex_lock(&trf->lock);
 
-       if (trf->state == TRF7970A_ST_OFF) {
-               ret = trf7970a_switch_rf_on(trf);
-               if (ret)
-                       goto err_out;
-       }
+       if (trf->state == TRF7970A_ST_OFF)
+               trf7970a_switch_rf_on(trf);
 
        switch (type) {
        case NFC_DIGITAL_CONFIG_RF_TECH:
@@ -956,7 +967,6 @@ static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
                ret = -EINVAL;
        }
 
-err_out:
        mutex_unlock(&trf->lock);
        return ret;
 }
@@ -1191,7 +1201,18 @@ static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev)
        dev_dbg(trf->dev, "Abort process initiated\n");
 
        mutex_lock(&trf->lock);
-       trf->aborting = true;
+
+       switch (trf->state) {
+       case TRF7970A_ST_WAIT_FOR_TX_FIFO:
+       case TRF7970A_ST_WAIT_FOR_RX_DATA:
+       case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
+       case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
+               trf->aborting = true;
+               break;
+       default:
+               break;
+       }
+
        mutex_unlock(&trf->lock);
 }
 
@@ -1206,12 +1227,25 @@ static struct nfc_digital_ops trf7970a_nfc_ops = {
        .abort_cmd              = trf7970a_abort_cmd,
 };
 
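+/* The optional "autosuspend-delay" DT property overrides the default
+ * 30 second runtime-PM autosuspend delay; the value is in milliseconds,
+ * as expected by pm_runtime_set_autosuspend_delay().
+ */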
+static int trf7970a_get_autosuspend_delay(struct device_node *np)
+{
+       int autosuspend_delay, ret;
+
+       ret = of_property_read_u32(np, "autosuspend-delay", &autosuspend_delay);
+       if (ret)
+               autosuspend_delay = TRF7970A_AUTOSUSPEND_DELAY;
+
+       of_node_put(np);
+
+       return autosuspend_delay;
+}
+
 static int trf7970a_probe(struct spi_device *spi)
 {
        struct device_node *np = spi->dev.of_node;
        const struct spi_device_id *id = spi_get_device_id(spi);
        struct trf7970a *trf;
-       int ret;
+       int uvolts, autosuspend_delay, ret;
 
        if (!np) {
                dev_err(&spi->dev, "No Device Tree entry\n");
@@ -1281,7 +1315,10 @@ static int trf7970a_probe(struct spi_device *spi)
                goto err_destroy_lock;
        }
 
-       trf->powering_up = true;
+       uvolts = regulator_get_voltage(trf->regulator);
+
+       if (uvolts > 4000000)
+               trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
 
        trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops,
                        TRF7970A_SUPPORTED_PROTOCOLS,
@@ -1297,6 +1334,12 @@ static int trf7970a_probe(struct spi_device *spi)
        nfc_digital_set_drvdata(trf->ddev, trf);
        spi_set_drvdata(spi, trf);
 
+       autosuspend_delay = trf7970a_get_autosuspend_delay(np);
+
+       pm_runtime_set_autosuspend_delay(trf->dev, autosuspend_delay);
+       pm_runtime_use_autosuspend(trf->dev);
+       pm_runtime_enable(trf->dev);
+
        ret = nfc_digital_register_device(trf->ddev);
        if (ret) {
                dev_err(trf->dev, "Can't register NFC digital device: %d\n",
@@ -1307,6 +1350,7 @@ static int trf7970a_probe(struct spi_device *spi)
        return 0;
 
 err_free_ddev:
+       pm_runtime_disable(trf->dev);
        nfc_digital_free_device(trf->ddev);
 err_disable_regulator:
        regulator_disable(trf->regulator);
@@ -1321,15 +1365,16 @@ static int trf7970a_remove(struct spi_device *spi)
 
        mutex_lock(&trf->lock);
 
-       trf7970a_switch_rf_off(trf);
-       trf7970a_init(trf);
-
        switch (trf->state) {
        case TRF7970A_ST_WAIT_FOR_TX_FIFO:
        case TRF7970A_ST_WAIT_FOR_RX_DATA:
        case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
        case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
                trf7970a_send_err_upstream(trf, -ECANCELED);
+               /* FALLTHROUGH */
+       case TRF7970A_ST_IDLE:
+       case TRF7970A_ST_IDLE_RX_BLOCKED:
+               pm_runtime_put_sync(trf->dev);
                break;
        default:
                break;
@@ -1337,6 +1382,8 @@ static int trf7970a_remove(struct spi_device *spi)
 
        mutex_unlock(&trf->lock);
 
+       pm_runtime_disable(trf->dev);
+
        nfc_digital_unregister_device(trf->ddev);
        nfc_digital_free_device(trf->ddev);
 
@@ -1347,6 +1394,70 @@ static int trf7970a_remove(struct spi_device *spi)
        return 0;
 }
 
+#ifdef CONFIG_PM_RUNTIME
+static int trf7970a_pm_runtime_suspend(struct device *dev)
+{
+       struct spi_device *spi = container_of(dev, struct spi_device, dev);
+       struct trf7970a *trf = spi_get_drvdata(spi);
+       int ret;
+
+       dev_dbg(dev, "Runtime suspend\n");
+
+       if (trf->state != TRF7970A_ST_OFF) {
+               dev_dbg(dev, "Can't suspend - not in OFF state (%d)\n",
+                               trf->state);
+               return -EBUSY;
+       }
+
+       gpio_set_value(trf->en_gpio, 0);
+       gpio_set_value(trf->en2_gpio, 0);
+
+       ret = regulator_disable(trf->regulator);
+       if (ret)
+               dev_err(dev, "%s - Can't disable VIN: %d\n", __func__, ret);
+
+       return ret;
+}
+
+static int trf7970a_pm_runtime_resume(struct device *dev)
+{
+       struct spi_device *spi = container_of(dev, struct spi_device, dev);
+       struct trf7970a *trf = spi_get_drvdata(spi);
+       int ret;
+
+       dev_dbg(dev, "Runtime resume\n");
+
+       ret = regulator_enable(trf->regulator);
+       if (ret) {
+               dev_err(dev, "%s - Can't enable VIN: %d\n", __func__, ret);
+               return ret;
+       }
+
+       usleep_range(5000, 6000);
+
+       gpio_set_value(trf->en2_gpio, 1);
+       usleep_range(1000, 2000);
+       gpio_set_value(trf->en_gpio, 1);
+
+       usleep_range(20000, 21000);
+
+       ret = trf7970a_init(trf);
+       if (ret) {
+               dev_err(dev, "%s - Can't initialize: %d\n", __func__, ret);
+               return ret;
+       }
+
+       pm_runtime_mark_last_busy(dev);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops trf7970a_pm_ops = {
+       SET_RUNTIME_PM_OPS(trf7970a_pm_runtime_suspend,
+                       trf7970a_pm_runtime_resume, NULL)
+};
+
 static const struct spi_device_id trf7970a_id_table[] = {
        { "trf7970a", TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA },
        { }
@@ -1360,6 +1471,7 @@ static struct spi_driver trf7970a_spi_driver = {
        .driver         = {
                .name   = "trf7970a",
                .owner  = THIS_MODULE,
+               .pm     = &trf7970a_pm_ops,
        },
 };
 
index 6d4ee22708c93791d53860c0243d6a9258678283..32e969d9531909e575a37b80fb0debd3f4ae73d2 100644 (file)
@@ -1831,6 +1831,10 @@ int of_update_property(struct device_node *np, struct property *newprop)
        if (!found)
                return -ENODEV;
 
+       /* At early boot, bail out and defer setup to of_init() */
+       if (!of_kset)
+               return found ? 0 : -ENODEV;
+
        /* Update the sysfs attribute */
        sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
        __of_add_property_sysfs(np, newprop);
index 9bcf2cf19357837dcb6dd8e5cce637150a7a32d0..5aeb89411350a4c98a2d769302e16a8d650d075f 100644 (file)
@@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 
                memset(r, 0, sizeof(*r));
                /*
-                * Get optional "interrupts-names" property to add a name
+                * Get optional "interrupt-names" property to add a name
                 * to the resource.
                 */
                of_property_read_string_index(dev, "interrupt-names", index,
@@ -379,6 +379,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 }
 EXPORT_SYMBOL_GPL(of_irq_to_resource);
 
+/**
+ * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * @dev: pointer to device tree node
+ * @index: zero-based index of the irq
+ *
+ * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+ * is not yet created.
+ */
+int of_irq_get(struct device_node *dev, int index)
+{
+       int rc;
+       struct of_phandle_args oirq;
+       struct irq_domain *domain;
+
+       rc = of_irq_parse_one(dev, index, &oirq);
+       if (rc)
+               return rc;
+
+       domain = irq_find_host(oirq.np);
+       if (!domain)
+               return -EPROBE_DEFER;
+
+       return irq_create_of_mapping(&oirq);
+}
+
 /**
  * of_irq_count - Count the number of IRQs a node uses
  * @dev: pointer to device tree node
index 9a95831bd065c2ba1c5af83f6a73927a3b9d8181..2fe922bfade8c04cd2bcae91a0fb8960ace41cf0 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/netdevice.h>
 #include <linux/err.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
 MODULE_LICENSE("GPL");
 
-static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
-{
-       /* The default values for phydev->supported are provided by the PHY
-        * driver "features" member, we want to reset to sane defaults fist
-        * before supporting higher speeds.
-        */
-       phydev->supported &= PHY_DEFAULT_FEATURES;
-
-       switch (max_speed) {
-       default:
-               return;
-
-       case SPEED_1000:
-               phydev->supported |= PHY_1000BT_FEATURES;
-       case SPEED_100:
-               phydev->supported |= PHY_100BT_FEATURES;
-       case SPEED_10:
-               phydev->supported |= PHY_10BT_FEATURES;
-       }
-}
-
 /* Extract the clause 22 phy ID from the compatible string of the form
  * ethernet-phy-idAAAA.BBBB */
 static int of_get_phy_id(struct device_node *device, u32 *phy_id)
@@ -66,7 +46,6 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
        struct phy_device *phy;
        bool is_c45;
        int rc;
-       u32 max_speed = 0;
        u32 phy_id;
 
        is_c45 = of_device_is_compatible(child,
@@ -103,17 +82,33 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
                return 1;
        }
 
-       /* Set phydev->supported based on the "max-speed" property
-        * if present */
-       if (!of_property_read_u32(child, "max-speed", &max_speed))
-               of_set_phy_supported(phy, max_speed);
-
        dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
                child->name, addr);
 
        return 0;
 }
 
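+/* Read a PHY child node's 'reg' property and validate it; returns the
+ * MDIO address (0 to PHY_MAX_ADDR - 1) or a negative errno.
+ */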
+static int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
+{
+       u32 addr;
+       int ret;
+
+       ret = of_property_read_u32(np, "reg", &addr);
+       if (ret < 0) {
+               dev_err(dev, "%s has invalid PHY address\n", np->full_name);
+               return ret;
+       }
+
+       /* A PHY must have a reg property in the range [0-31] */
+       if (addr >= PHY_MAX_ADDR) {
+               dev_err(dev, "%s PHY address %i is too large\n",
+                       np->full_name, addr);
+               return -EINVAL;
+       }
+
+       return addr;
+}
+
 /**
  * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
  * @mdio: pointer to mii_bus structure
@@ -128,7 +123,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
        const __be32 *paddr;
        u32 addr;
        bool scanphys = false;
-       int rc, i, len;
+       int rc, i;
 
        /* Mask out all PHYs from auto probing.  Instead the PHYs listed in
         * the device tree are populated after the bus has been registered */
@@ -148,19 +143,9 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 
        /* Loop over the child nodes and register a phy_device for each one */
        for_each_available_child_of_node(np, child) {
-               /* A PHY must have a reg property in the range [0-31] */
-               paddr = of_get_property(child, "reg", &len);
-               if (!paddr || len < sizeof(*paddr)) {
+               addr = of_mdio_parse_addr(&mdio->dev, child);
+               if (addr < 0) {
                        scanphys = true;
-                       dev_err(&mdio->dev, "%s has invalid PHY address\n",
-                               child->full_name);
-                       continue;
-               }
-
-               addr = be32_to_cpup(paddr);
-               if (addr >= PHY_MAX_ADDR) {
-                       dev_err(&mdio->dev, "%s PHY address %i is too large\n",
-                               child->full_name, addr);
                        continue;
                }
 
@@ -175,7 +160,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
        /* auto scan for PHYs with empty reg property */
        for_each_available_child_of_node(np, child) {
                /* Skip PHYs with reg property set */
-               paddr = of_get_property(child, "reg", &len);
+               paddr = of_get_property(child, "reg", NULL);
                if (paddr)
                        continue;
 
@@ -198,6 +183,39 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 }
 EXPORT_SYMBOL(of_mdiobus_register);
 
+/**
+ * of_mdiobus_link_phydev - Find a device node for a phy
+ * @mdio: pointer to mii_bus structure
+ * @phydev: phydev for which the of_node pointer should be set
+ *
+ * Walk the list of subnodes of a mdio bus and look for a node that matches the
+ * phy's address with its 'reg' property. If found, set the of_node pointer for
+ * the phy. This allows auto-probed phy devices to be supplied with information
+ * passed in via DT.
+ */
+void of_mdiobus_link_phydev(struct mii_bus *mdio,
+                           struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct device_node *child;
+
+       if (dev->of_node || !mdio->dev.of_node)
+               return;
+
+       for_each_available_child_of_node(mdio->dev.of_node, child) {
+               int addr;
+
+               addr = of_mdio_parse_addr(&mdio->dev, child);
+               if (addr < 0)
+                       continue;
+
+               if (addr == phydev->addr) {
+                       dev->of_node = child;
+                       return;
+               }
+       }
+}
+
 /* Helper function for of_phy_find_device */
 static int of_phy_match(struct device *dev, void *phy_np)
 {
@@ -244,44 +262,6 @@ struct phy_device *of_phy_connect(struct net_device *dev,
 }
 EXPORT_SYMBOL(of_phy_connect);
 
-/**
- * of_phy_connect_fixed_link - Parse fixed-link property and return a dummy phy
- * @dev: pointer to net_device claiming the phy
- * @hndlr: Link state callback for the network device
- * @iface: PHY data interface type
- *
- * This function is a temporary stop-gap and will be removed soon.  It is
- * only to support the fs_enet, ucc_geth and gianfar Ethernet drivers.  Do
- * not call this function from new drivers.
- */
-struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
-                                            void (*hndlr)(struct net_device *),
-                                            phy_interface_t iface)
-{
-       struct device_node *net_np;
-       char bus_id[MII_BUS_ID_SIZE + 3];
-       struct phy_device *phy;
-       const __be32 *phy_id;
-       int sz;
-
-       if (!dev->dev.parent)
-               return NULL;
-
-       net_np = dev->dev.parent->of_node;
-       if (!net_np)
-               return NULL;
-
-       phy_id = of_get_property(net_np, "fixed-link", &sz);
-       if (!phy_id || sz < sizeof(*phy_id))
-               return NULL;
-
-       sprintf(bus_id, PHY_ID_FMT, "fixed-0", be32_to_cpu(phy_id[0]));
-
-       phy = phy_connect(dev, bus_id, hndlr, iface);
-       return IS_ERR(phy) ? NULL : phy;
-}
-EXPORT_SYMBOL(of_phy_connect_fixed_link);
-
 /**
  * of_phy_attach - Attach to a PHY without starting the state machine
  * @dev: pointer to net_device claiming the phy
@@ -301,3 +281,69 @@ struct phy_device *of_phy_attach(struct net_device *dev,
        return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy;
 }
 EXPORT_SYMBOL(of_phy_attach);
+
+#if defined(CONFIG_FIXED_PHY)
+/*
+ * of_phy_is_fixed_link() and of_phy_register_fixed_link() must
+ * support two DT bindings:
+ * - the old DT binding, where 'fixed-link' was a property with 5
+ *   cells encoding various information about the fixed PHY
+ * - the new DT binding, where 'fixed-link' is a sub-node of the
+ *   Ethernet device.
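+ *
+ * As a rough illustration of the two forms parsed below (the ethernet@0
+ * node name is arbitrary), the new binding looks like:
+ *
+ *     ethernet@0 {
+ *             fixed-link {
+ *                     speed = <1000>;
+ *                     full-duplex;
+ *             };
+ *     };
+ *
+ * while the old binding packed <phy-id full-duplex speed pause asym-pause>
+ * into a single property (only cells 1-4 are parsed here):
+ *
+ *     ethernet@0 {
+ *             fixed-link = <0 1 1000 0 0>;
+ *     };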
+ */
+bool of_phy_is_fixed_link(struct device_node *np)
+{
+       struct device_node *dn;
+       int len;
+
+       /* New binding */
+       dn = of_get_child_by_name(np, "fixed-link");
+       if (dn) {
+               of_node_put(dn);
+               return true;
+       }
+
+       /* Old binding */
+       if (of_get_property(np, "fixed-link", &len) &&
+           len == (5 * sizeof(__be32)))
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL(of_phy_is_fixed_link);
+
+int of_phy_register_fixed_link(struct device_node *np)
+{
+       struct fixed_phy_status status = {};
+       struct device_node *fixed_link_node;
+       const __be32 *fixed_link_prop;
+       int len;
+
+       /* New binding */
+       fixed_link_node = of_get_child_by_name(np, "fixed-link");
+       if (fixed_link_node) {
+               status.link = 1;
+               status.duplex = of_property_read_bool(fixed_link_node,
+                                                     "full-duplex");
+               if (of_property_read_u32(fixed_link_node, "speed",
+                                        &status.speed)) {
+                       of_node_put(fixed_link_node);
+                       return -EINVAL;
+               }
+               status.pause = of_property_read_bool(fixed_link_node, "pause");
+               status.asym_pause = of_property_read_bool(fixed_link_node,
+                                                         "asym-pause");
+               of_node_put(fixed_link_node);
+               return fixed_phy_register(PHY_POLL, &status, np);
+       }
+
+       /* Old binding */
+       fixed_link_prop = of_get_property(np, "fixed-link", &len);
+       if (fixed_link_prop && len == (5 * sizeof(__be32))) {
+               status.link = 1;
+               status.duplex = be32_to_cpu(fixed_link_prop[1]);
+               status.speed = be32_to_cpu(fixed_link_prop[2]);
+               status.pause = be32_to_cpu(fixed_link_prop[3]);
+               status.asym_pause = be32_to_cpu(fixed_link_prop[4]);
+               return fixed_phy_register(PHY_POLL, &status, np);
+       }
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL(of_phy_register_fixed_link);
+#endif
index 404d1daebefa7d7a5d02cbfb611e9585077c00b0..bd47fbc53dc96258fba942d073ee320f25e5627f 100644 (file)
@@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np,
                        rc = of_address_to_resource(np, i, res);
                        WARN_ON(rc);
                }
-               WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
+               if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
+                       pr_debug("not all legacy IRQ resources mapped for %s\n",
+                                np->name);
        }
 
        dev->dev.of_node = of_node_get(np);
index ae4450070503f1067579f02576ce5ec14b2a7192..fe70b86bcffb9d086edd51c758a17a60ca45cf7b 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void)
        }
 }
 
+static void __init of_selftest_platform_populate(void)
+{
+       int irq;
+       struct device_node *np;
+       struct platform_device *pdev;
+
+       np = of_find_node_by_path("/testcase-data");
+       of_platform_populate(np, of_default_bus_match_table, NULL, NULL);
+
+       /* Test that a missing irq domain returns -EPROBE_DEFER */
+       np = of_find_node_by_path("/testcase-data/testcase-device1");
+       pdev = of_find_device_by_node(np);
+       if (!pdev)
+               selftest(0, "device 1 creation failed\n");
+       irq = platform_get_irq(pdev, 0);
+       if (irq != -EPROBE_DEFER)
+               selftest(0, "device deferred probe failed - %d\n", irq);
+
+       /* Test that a parsing failure does not return -EPROBE_DEFER */
+       np = of_find_node_by_path("/testcase-data/testcase-device2");
+       pdev = of_find_device_by_node(np);
+       if (!pdev)
+               selftest(0, "device 2 creation failed\n");
+       irq = platform_get_irq(pdev, 0);
+       if (irq >= 0 || irq == -EPROBE_DEFER)
+               selftest(0, "device parsing error failed - %d\n", irq);
+
+       selftest(1, "passed");
+}
+
 static int __init of_selftest(void)
 {
        struct device_node *np;
@@ -445,6 +476,7 @@ static int __init of_selftest(void)
        of_selftest_parse_interrupts();
        of_selftest_parse_interrupts_extended();
        of_selftest_match_node();
+       of_selftest_platform_populate();
        pr_info("end of selftest - %i passed, %i failed\n",
                selftest_results.passed, selftest_results.failed);
        return 0;
index c843720bd3e53d7e7f632f4c75152edbefbc7d5f..da4695f60351ad4c7c24aecf791a5a6c2b71edfc 100644 (file)
                                                      <&test_intmap1 1 2>;
                        };
                };
+
+               testcase-device1 {
+                       compatible = "testcase-device";
+                       interrupt-parent = <&test_intc0>;
+                       interrupts = <1>;
+               };
+
+               testcase-device2 {
+                       compatible = "testcase-device";
+                       interrupt-parent = <&test_intc2>;
+                       interrupts = <1>; /* invalid specifier - too short */
+               };
        };
+
 };
index d3d1cfd51e095f058404d96f063df76d227bd652..e384e2534594731a95eb7524fb75583fce208541 100644 (file)
@@ -293,6 +293,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
        return PCIBIOS_SUCCESSFUL;
 }
 
+/*
+ * Remove windows, starting from the largest ones to the smallest
+ * ones.
+ */
+static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
+                                  phys_addr_t base, size_t size)
+{
+       while (size) {
+               size_t sz = 1 << (fls(size) - 1);
+
+               mvebu_mbus_del_window(base, sz);
+               base += sz;
+               size -= sz;
+       }
+}
+
+/*
+ * MBus windows can only have a power of two size, but PCI BARs do not
+ * have this constraint. Therefore, we have to split the PCI BAR into
+ * areas each having a power of two size. We start from the largest
+ * one (i.e highest order bit set in the size).
+ */
+static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+                                  unsigned int target, unsigned int attribute,
+                                  phys_addr_t base, size_t size,
+                                  phys_addr_t remap)
+{
+       size_t size_mapped = 0;
+
+       while (size) {
+               size_t sz = 1 << (fls(size) - 1);
+               int ret;
+
+               ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+                                                       sz, remap);
+               if (ret) {
+                       dev_err(&port->pcie->pdev->dev,
+                               "Could not create MBus window at 0x%x, size 0x%x: %d\n",
+                               base, sz, ret);
+                       mvebu_pcie_del_windows(port, base - size_mapped,
+                                              size_mapped);
+                       return;
+               }
+
+               size -= sz;
+               size_mapped += sz;
+               base += sz;
+               if (remap != MVEBU_MBUS_NO_REMAP)
+                       remap += sz;
+       }
+}
+
 static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
 {
        phys_addr_t iobase;
@@ -304,8 +356,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
 
                /* If a window was configured, remove it */
                if (port->iowin_base) {
-                       mvebu_mbus_del_window(port->iowin_base,
-                                             port->iowin_size);
+                       mvebu_pcie_del_windows(port, port->iowin_base,
+                                              port->iowin_size);
                        port->iowin_base = 0;
                        port->iowin_size = 0;
                }
@@ -331,11 +383,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
        port->iowin_base = port->pcie->io.start + iobase;
        port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
                            (port->bridge.iolimitupper << 16)) -
-                           iobase);
+                           iobase) + 1;
 
-       mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
-                                         port->iowin_base, port->iowin_size,
-                                         iobase);
+       mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
+                              port->iowin_base, port->iowin_size,
+                              iobase);
 }
 
 static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
@@ -346,8 +398,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
 
                /* If a window was configured, remove it */
                if (port->memwin_base) {
-                       mvebu_mbus_del_window(port->memwin_base,
-                                             port->memwin_size);
+                       mvebu_pcie_del_windows(port, port->memwin_base,
+                                              port->memwin_size);
                        port->memwin_base = 0;
                        port->memwin_size = 0;
                }
@@ -364,10 +416,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
        port->memwin_base  = ((port->bridge.membase & 0xFFF0) << 16);
        port->memwin_size  =
                (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
-               port->memwin_base;
+               port->memwin_base + 1;
 
-       mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr,
-                                   port->memwin_base, port->memwin_size);
+       mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
+                              port->memwin_base, port->memwin_size,
+                              MVEBU_MBUS_NO_REMAP);
 }
 
 /*
@@ -743,14 +796,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
 
        /*
         * On the PCI-to-PCI bridge side, the I/O windows must have at
-        * least a 64 KB size and be aligned on their size, and the
-        * memory windows must have at least a 1 MB size and be
-        * aligned on their size
+        * least a 64 KB size and the memory windows must have at
+        * least a 1 MB size. Moreover, MBus windows need to have a
+        * base address aligned on their size, and their size must be
+        * a power of two. This means that if the BAR doesn't have a
+        * power of two size, several MBus windows will actually be
+        * created. We need to ensure that the biggest MBus window
+        * (which will be the first one) is aligned on its size, which
+        * explains the rounddown_pow_of_two() being done here.
         */
        if (res->flags & IORESOURCE_IO)
-               return round_up(start, max_t(resource_size_t, SZ_64K, size));
+               return round_up(start, max_t(resource_size_t, SZ_64K,
+                                            rounddown_pow_of_two(size)));
        else if (res->flags & IORESOURCE_MEM)
-               return round_up(start, max_t(resource_size_t, SZ_1M, size));
+               return round_up(start, max_t(resource_size_t, SZ_1M,
+                                            rounddown_pow_of_two(size)));
        else
                return start;
 }
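
The two mvebu hunks above boil down to one idea: an MBus window must have a power-of-two size and be aligned to it, so a bridge window of arbitrary size is carved into power-of-two chunks, largest first, and the alignment callback rounds down to the biggest chunk. A minimal userspace sketch of the decomposition (base and size values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Largest power of two not exceeding x (x > 0); stand-in for the
 * kernel's fls()/rounddown_pow_of_two() arithmetic. */
static uint64_t pow2_floor(uint64_t x)
{
        while (x & (x - 1))
                x &= x - 1;     /* clear low bits until one remains */
        return x;
}

int main(void)
{
        uint64_t base = 0xe0000000, size = 0x00b00000; /* 11 MB, not a power of two */

        /* Split the region into power-of-two windows, largest first,
         * mirroring mvebu_pcie_add_windows(). */
        while (size) {
                uint64_t sz = pow2_floor(size);

                printf("window: base 0x%llx size 0x%llx\n",
                       (unsigned long long)base, (unsigned long long)sz);
                base += sz;
                size -= sz;
        }
        return 0;
}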
index 58499277903a4ab4a6225982d88876de399120e2..6efc2ec5e4db0823758a409eb95c2d3054a8ba48 100644 (file)
@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
                return WRONG_BUS_FREQUENCY;
        }
 
-       bsp = ctrl->pci_dev->bus->cur_bus_speed;
-       msp = ctrl->pci_dev->bus->max_bus_speed;
+       bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
+       msp = ctrl->pci_dev->subordinate->max_bus_speed;
 
        /* Check if there are other slots or devices on the same bus */
        if (!list_empty(&ctrl->pci_dev->subordinate->devices))
index 7325d43bf030ce65d5f386f6aeeeb3bfa4d5c482..759475ef6ff3206bd04103043cfed21092766493 100644 (file)
@@ -3067,7 +3067,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
        if (!pci_is_pcie(dev))
                return 1;
 
-       return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
+       return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
+                                   PCI_EXP_DEVSTA_TRPND);
 }
 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
 
@@ -3109,7 +3110,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
                return 0;
 
        /* Wait for Transaction Pending bit clean */
-       if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
+       if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
                goto clear;
 
        dev_err(&dev->dev, "transaction is not cleared; "
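
Both fixes above are the same class of bug: pci_wait_for_pending() takes an absolute config-space offset, while PCI_EXP_DEVSTA and PCI_AF_STATUS are offsets relative to their capability, so the capability base has to be added first. A toy userspace illustration, assuming a fabricated config space with the PCIe capability placed at 0x70:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVSTA          0x0a    /* offset within the PCIe capability */
#define PCI_EXP_DEVSTA_TRPND    0x0020  /* Transactions Pending */

static uint8_t cfg[256];                /* fake config space for the demo */

static uint16_t cfg_read16(int where)
{
        return cfg[where] | (cfg[where + 1] << 8);
}

int main(void)
{
        int pcie_cap = 0x70;            /* what pci_pcie_cap(dev) would return */

        cfg[pcie_cap + PCI_EXP_DEVSTA] = PCI_EXP_DEVSTA_TRPND;

        /* Reading at the bare register offset looks at the wrong place... */
        printf("wrong:   0x%04x\n", cfg_read16(PCI_EXP_DEVSTA));
        /* ...the fix adds the capability base first. */
        printf("correct: 0x%04x\n", cfg_read16(pcie_cap + PCI_EXP_DEVSTA));
        return 0;
}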
index 3bb05f17b9b4edcd05cc15e582aa049e64d01df5..4906c27fa3bd9f5a8fb4da35981fabd170ccbc60 100644 (file)
@@ -33,6 +33,7 @@ config PHY_MVEBU_SATA
 
 config OMAP_CONTROL_PHY
        tristate "OMAP CONTROL PHY Driver"
+       depends on ARCH_OMAP2PLUS || COMPILE_TEST
        help
          Enable this to add support for the PHY part present in the control
          module. This driver has API to power on the USB2 PHY and to write to
index 2faf78edc8649f2c5ef79c3093aaa14de04b0eea..7728518572a4ea4c0fdb9e50ccde640c654d47b1 100644 (file)
@@ -13,8 +13,9 @@ obj-$(CONFIG_TI_PIPE3)                        += phy-ti-pipe3.o
 obj-$(CONFIG_TWL4030_USB)              += phy-twl4030-usb.o
 obj-$(CONFIG_PHY_EXYNOS5250_SATA)      += phy-exynos5250-sata.o
 obj-$(CONFIG_PHY_SUN4I_USB)            += phy-sun4i-usb.o
-obj-$(CONFIG_PHY_SAMSUNG_USB2)         += phy-samsung-usb2.o
-obj-$(CONFIG_PHY_EXYNOS4210_USB2)      += phy-exynos4210-usb2.o
-obj-$(CONFIG_PHY_EXYNOS4X12_USB2)      += phy-exynos4x12-usb2.o
-obj-$(CONFIG_PHY_EXYNOS5250_USB2)      += phy-exynos5250-usb2.o
+obj-$(CONFIG_PHY_SAMSUNG_USB2)         += phy-exynos-usb2.o
+phy-exynos-usb2-y                      += phy-samsung-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2)  += phy-exynos4210-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2)  += phy-exynos4x12-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2)  += phy-exynos5250-usb2.o
 obj-$(CONFIG_PHY_XGENE)                        += phy-xgene.o
index 623b71c54b3e5f0a865d8563c8557919e9603900..c64a2f3b2d624fb9d8c266b979416d6b53ed5b9f 100644 (file)
@@ -64,6 +64,9 @@ static struct phy *phy_lookup(struct device *device, const char *port)
        class_dev_iter_init(&iter, phy_class, NULL, NULL);
        while ((dev = class_dev_iter_next(&iter))) {
                phy = to_phy(dev);
+
+               if (!phy->init_data)
+                       continue;
                count = phy->init_data->num_consumers;
                consumers = phy->init_data->consumers;
                while (count--) {
index 92ed4b2e3c0716cf3f21580b49434fc6e9952fc6..c862f9c0e9ce4cb356222b496c40ca1ac26b0699 100644 (file)
@@ -64,7 +64,6 @@ struct as3722_pin_function {
 };
 
 struct as3722_gpio_pin_control {
-       bool enable_gpio_invert;
        unsigned mode_prop;
        int io_function;
 };
@@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
                return mode;
        }
 
-       if (as_pci->gpio_control[offset].enable_gpio_invert)
-               mode |= AS3722_GPIO_INV;
-
-       return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
+       return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset),
+                               AS3722_GPIO_MODE_MASK, mode);
 }
 
 static const struct pinmux_ops as3722_pinmux_ops = {
@@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
 {
        struct as3722_pctrl_info *as_pci = to_as_pci(chip);
        struct as3722 *as3722 = as_pci->as3722;
-       int en_invert = as_pci->gpio_control[offset].enable_gpio_invert;
+       int en_invert;
        u32 val;
        int ret;
 
+       ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val);
+       if (ret < 0) {
+               dev_err(as_pci->dev,
+                       "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
+               return;
+       }
+       en_invert = !!(val & AS3722_GPIO_INV);
+
        if (value)
                val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
        else
index 81075f2a1d3f87d9ac9d2cf4d62edf94e21bf75f..2960557bfed95c6d79f316c020ec98c426f38f83 100644 (file)
@@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = {
 static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
                unsigned pin_pos)
 {
+       struct pcs_soc_data *pcs_soc = &pcs->socdata;
        struct pinctrl_pin_desc *pin;
        struct pcs_name *pn;
        int i;
@@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
                return -ENOMEM;
        }
 
+       if (pcs_soc->irq_enable_mask) {
+               unsigned val;
+
+               val = pcs->read(pcs->base + offset);
+               if (val & pcs_soc->irq_enable_mask) {
+                       dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n",
+                               (unsigned long)pcs->res->start + offset, val);
+                       val &= ~pcs_soc->irq_enable_mask;
+                       pcs->write(val, pcs->base + offset);
+               }
+       }
+
        pin = &pcs->pins.pa[i];
        pn = &pcs->names[i];
        sprintf(pn->name, "%lx.%d",
index c5e0f6973a3b06c3e197eccbe7ba917a27d1c3ec..26ca6855f478d3018f79ab8aab87550e1eb52610 100644 (file)
@@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl,
         */
        for (i = 0; i < state->pinfuncgrpcnt; i++) {
                const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
-               unsigned int port = pfg->port;
                unsigned int mode = pfg->mode;
-               int j;
+               int j, port = pfg->port;
 
                /*
                 * Skip pin groups which are always mapped and don't need
index 48093719167abd91e27f93eb869eab3f1edd5c51..f5cd3f9618083bacca6414489db5e83b3dc128d9 100644 (file)
@@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
                FN_MSIOF0_SCK_B, 0,
                /* IP5_23_21 [3] */
                FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4,
-               FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B,
-               FN_IERX_C, 0,
+               FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C,
                /* IP5_20_18 [3] */
                FN_WE0_N, FN_IECLK, FN_CAN_CLK,
                FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
index 5186d70c49d43326bc0a3e1f0405332d512cb989..7868bf3a0f911dccfbe7b516c469ac63f6422e4c 100644 (file)
@@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
                /* SEL_SCIF3 [2] */
                FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
                /* SEL_IEB [2] */
-               FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
+               FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
                /* SEL_MMC [1] */
                FN_SEL_MMC_0, FN_SEL_MMC_1,
                /* SEL_SCIF5 [1] */
index 9f611cbbc294ea8c5ae84023e132e02152e36f85..c31aa07b3ba55541ff434adf45aed76adb0adee3 100644 (file)
@@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
 {
        struct acpi_device *acpi_dev;
        acpi_handle handle;
-       struct acpi_buffer buffer;
-       int ret;
+       int ret = 0;
 
        pnp_dbg(&dev->dev, "set resources\n");
 
@@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
        if (WARN_ON_ONCE(acpi_dev != dev->data))
                dev->data = acpi_dev;
 
-       ret = pnpacpi_build_resource_template(dev, &buffer);
-       if (ret)
-               return ret;
-       ret = pnpacpi_encode_resources(dev, &buffer);
-       if (ret) {
+       if (acpi_has_method(handle, METHOD_NAME__SRS)) {
+               struct acpi_buffer buffer;
+
+               ret = pnpacpi_build_resource_template(dev, &buffer);
+               if (ret)
+                       return ret;
+
+               ret = pnpacpi_encode_resources(dev, &buffer);
+               if (!ret) {
+                       acpi_status status;
+
+                       status = acpi_set_current_resources(handle, &buffer);
+                       if (ACPI_FAILURE(status))
+                               ret = -EIO;
+               }
                kfree(buffer.pointer);
-               return ret;
        }
-       if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer)))
-               ret = -EINVAL;
-       else if (acpi_bus_power_manageable(handle))
+       if (!ret && acpi_bus_power_manageable(handle))
                ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
-       kfree(buffer.pointer);
+
        return ret;
 }
 
@@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
 {
        struct acpi_device *acpi_dev;
        acpi_handle handle;
-       int ret;
+       acpi_status status;
 
        dev_dbg(&dev->dev, "disable resources\n");
 
@@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
        }
 
        /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
-       ret = 0;
        if (acpi_bus_power_manageable(handle))
                acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
-               /* continue even if acpi_bus_set_power() fails */
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
-               ret = -ENODEV;
-       return ret;
+
+       /* continue even if acpi_bus_set_power() fails */
+       status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+       if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+               return -ENODEV;
+
+       return 0;
 }
 
 #ifdef CONFIG_ACPI_SLEEP
index deb7f4bcdb7b6b6a770ce08f1d2535dccfa82100..438d4c72c7b36c27982ed8e60b9a42cbff58dc8f 100644 (file)
@@ -37,7 +37,7 @@ __visible struct {
  * kernel begins at offset 3GB...
  */
 
-asmlinkage void pnp_bios_callfunc(void);
+asmlinkage __visible void pnp_bios_callfunc(void);
 
 __asm__(".text                 \n"
        __ALIGN_STR "\n"
index 258fef272ea7d6b61faa565260c8fc7b7a4f995d..ebf0d6710b5a0d4150627313a3edca99e50a8745 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/pci.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/pnp.h>
@@ -334,6 +335,81 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
 }
 #endif
 
+#ifdef CONFIG_PCI
+/* Device IDs of parts that have 32KB MCH space */
+static const unsigned int mch_quirk_devices[] = {
+       0x0154, /* Ivy Bridge */
+       0x0c00, /* Haswell */
+};
+
+static struct pci_dev *get_intel_host(void)
+{
+       int i;
+       struct pci_dev *host;
+
+       for (i = 0; i < ARRAY_SIZE(mch_quirk_devices); i++) {
+               host = pci_get_device(PCI_VENDOR_ID_INTEL, mch_quirk_devices[i],
+                                     NULL);
+               if (host)
+                       return host;
+       }
+       return NULL;
+}
+
+static void quirk_intel_mch(struct pnp_dev *dev)
+{
+       struct pci_dev *host;
+       u32 addr_lo, addr_hi;
+       struct pci_bus_region region;
+       struct resource mch;
+       struct pnp_resource *pnp_res;
+       struct resource *res;
+
+       host = get_intel_host();
+       if (!host)
+               return;
+
+       /*
+        * MCHBAR is not an architected PCI BAR, so MCH space is usually
+        * reported as a PNP0C02 resource.  The MCH space was originally
+        * 16KB, but is 32KB in newer parts.  Some BIOSes still report a
+        * PNP0C02 resource that is only 16KB, which means the rest of the
+        * MCH space is consumed but unreported.
+        */
+
+       /*
+        * Read MCHBAR for Host Member Mapped Register Range Base
+        * https://www-ssl.intel.com/content/www/us/en/processors/core/4th-gen-core-family-desktop-vol-2-datasheet
+        * Sec 3.1.12.
+        */
+       pci_read_config_dword(host, 0x48, &addr_lo);
+       region.start = addr_lo & ~0x7fff;
+       pci_read_config_dword(host, 0x4c, &addr_hi);
+       region.start |= (u64) addr_hi << 32;
+       region.end = region.start + 32*1024 - 1;
+
+       memset(&mch, 0, sizeof(mch));
+       mch.flags = IORESOURCE_MEM;
+       pcibios_bus_to_resource(host->bus, &mch, &region);
+
+       list_for_each_entry(pnp_res, &dev->resources, list) {
+               res = &pnp_res->res;
+               if (res->end < mch.start || res->start > mch.end)
+                       continue;       /* no overlap */
+               if (res->start == mch.start && res->end == mch.end)
+                       continue;       /* exact match */
+
+               dev_info(&dev->dev, FW_BUG "PNP resource %pR covers only part of %s Intel MCH; extending to %pR\n",
+                        res, pci_name(host), &mch);
+               res->start = mch.start;
+               res->end = mch.end;
+               break;
+       }
+
+       pci_dev_put(host);
+}
+#endif
+
 /*
  *  PnP Quirks
  *  Cards or devices that need some tweaking due to incomplete resource info
@@ -363,6 +439,9 @@ static struct pnp_fixup pnp_fixups[] = {
        {"PNP0c02", quirk_system_pci_resources},
 #ifdef CONFIG_AMD_NB
        {"PNP0c01", quirk_amd_mmconfig_area},
+#endif
+#ifdef CONFIG_PCI
+       {"PNP0c02", quirk_intel_mch},
 #endif
        {""}
 };
index 476aa495c110d5814fdf2e3c4ba331cb026baf4f..b95cf71ed69554e8b7c53d77271ba92e39b1e039 100644 (file)
@@ -11,7 +11,7 @@
  * Copyright (C) 2012 ARM Limited
  */
 
-#include <linux/jiffies.h>
+#include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 static void vexpress_reset_do(struct device *dev, const char *what)
 {
        int err = -ENOENT;
-       struct vexpress_config_func *func =
-                       vexpress_config_func_get_by_dev(dev);
+       struct vexpress_config_func *func = dev_get_drvdata(dev);
 
        if (func) {
-               unsigned long timeout;
-
                err = vexpress_config_write(func, 0, 0);
-
-               timeout = jiffies + HZ;
-               while (time_before(jiffies, timeout))
-                       cpu_relax();
+               if (!err)
+                       mdelay(1000);
        }
 
        dev_emerg(dev, "Unable to %s (%d)\n", what, err);
@@ -96,12 +91,18 @@ static int vexpress_reset_probe(struct platform_device *pdev)
        enum vexpress_reset_func func;
        const struct of_device_id *match =
                        of_match_device(vexpress_reset_of_match, &pdev->dev);
+       struct vexpress_config_func *config_func;
 
        if (match)
                func = (enum vexpress_reset_func)match->data;
        else
                func = pdev->id_entry->driver_data;
 
+       config_func = vexpress_config_func_get_by_dev(&pdev->dev);
+       if (!config_func)
+               return -EINVAL;
+       dev_set_drvdata(&pdev->dev, config_func);
+
        switch (func) {
        case FUNC_SHUTDOWN:
                vexpress_power_off_device = &pdev->dev;
index 6963bdf5417593921122694d8ae425ff7a599f7e..6aea373547f65f3743faa7236b5035a83e178966 100644 (file)
@@ -6,6 +6,7 @@ menu "PTP clock support"
 
 config PTP_1588_CLOCK
        tristate "PTP clock support"
+       depends on NET
        select PPS
        select NET_PTP_CLASSIFY
        help
@@ -74,7 +75,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
        depends on X86 || COMPILE_TEST
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && NET
        select PTP_1588_CLOCK
        help
          This driver adds support for using the PCH EG20T as a PTP
index e25d2bc898e5b6e4eb7b43e6df87129a2a19781e..296b0ec8744da915763f8444c2ae8e902376c33e 100644 (file)
@@ -142,7 +142,10 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
                delta = ktime_to_ns(kt);
                err = ops->adjtime(ops, delta);
        } else if (tx->modes & ADJ_FREQUENCY) {
-               err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
+               s32 ppb = scaled_ppm_to_ppb(tx->freq);
+               if (ppb > ops->max_adj || ppb < -ops->max_adj)
+                       return -ERANGE;
+               err = ops->adjfreq(ops, ppb);
                ptp->dialed_frequency = tx->freq;
        } else if (tx->modes == 0) {
                tx->freq = ptp->dialed_frequency;
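
The hunk above rejects frequency adjustments beyond the clock's advertised max_adj instead of handing them to the driver. For reference, a small userspace sketch of the scaled-ppm-to-ppb conversion and the added range check (the 50 ppm limit is only an example value):

#include <stdint.h>
#include <stdio.h>

/* Scaled ppm (16-bit fractional part, as in struct timex.freq) to ppb;
 * mirrors the kernel helper: ppb = ppm * 1000 / 2^16 = ppm * 125 >> 13. */
static int32_t scaled_ppm_to_ppb(long ppm)
{
        int64_t ppb = 1 + ppm;

        ppb *= 125;
        ppb >>= 13;
        return (int32_t)ppb;
}

int main(void)
{
        long freq = 100 << 16;          /* +100 ppm requested via ADJ_FREQUENCY */
        int32_t ppb = scaled_ppm_to_ppb(freq);
        int32_t max_adj = 50000;        /* example clock limit: 50 ppm */

        if (ppb > max_adj || ppb < -max_adj)
                printf("-ERANGE: %d ppb exceeds +/-%d\n", ppb, max_adj);
        else
                printf("adjfreq(%d ppb)\n", ppb);
        return 0;
}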
index ded3b35742091dac92192c45f1c0e7f24565f924..6d38be3d970ca72b48d0a44c950da341071edb53 100644 (file)
@@ -38,66 +38,24 @@ struct pbias_reg_info {
 struct pbias_regulator_data {
        struct regulator_desc desc;
        void __iomem *pbias_addr;
-       unsigned int pbias_reg;
        struct regulator_dev *dev;
        struct regmap *syscon;
        const struct pbias_reg_info *info;
        int voltage;
 };
 
-static int pbias_regulator_set_voltage(struct regulator_dev *dev,
-                       int min_uV, int max_uV, unsigned *selector)
-{
-       struct pbias_regulator_data *data = rdev_get_drvdata(dev);
-       const struct pbias_reg_info *info = data->info;
-       int ret, vmode;
-
-       if (min_uV <= 1800000)
-               vmode = 0;
-       else if (min_uV > 1800000)
-               vmode = info->vmode;
-
-       ret = regmap_update_bits(data->syscon, data->pbias_reg,
-                                               info->vmode, vmode);
-
-       return ret;
-}
-
-static int pbias_regulator_get_voltage(struct regulator_dev *rdev)
-{
-       struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
-       const struct pbias_reg_info *info = data->info;
-       int value, voltage;
-
-       regmap_read(data->syscon, data->pbias_reg, &value);
-       value &= info->vmode;
-
-       voltage = value ? 3000000 : 1800000;
-
-       return voltage;
-}
+static const unsigned int pbias_volt_table[] = {
+       1800000,
+       3000000
+};
 
 static int pbias_regulator_enable(struct regulator_dev *rdev)
 {
        struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
        const struct pbias_reg_info *info = data->info;
-       int ret;
-
-       ret = regmap_update_bits(data->syscon, data->pbias_reg,
-                                       info->enable_mask, info->enable);
-
-       return ret;
-}
-
-static int pbias_regulator_disable(struct regulator_dev *rdev)
-{
-       struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
-       const struct pbias_reg_info *info = data->info;
-       int ret;
 
-       ret = regmap_update_bits(data->syscon, data->pbias_reg,
-                                               info->enable_mask, 0);
-       return ret;
+       return regmap_update_bits(data->syscon, rdev->desc->enable_reg,
+                                 info->enable_mask, info->enable);
 }
 
 static int pbias_regulator_is_enable(struct regulator_dev *rdev)
@@ -106,17 +64,18 @@ static int pbias_regulator_is_enable(struct regulator_dev *rdev)
        const struct pbias_reg_info *info = data->info;
        int value;
 
-       regmap_read(data->syscon, data->pbias_reg, &value);
+       regmap_read(data->syscon, rdev->desc->enable_reg, &value);
 
-       return (value & info->enable_mask) == info->enable_mask;
+       return (value & info->enable_mask) == info->enable;
 }
 
 static struct regulator_ops pbias_regulator_voltage_ops = {
-       .set_voltage    = pbias_regulator_set_voltage,
-       .get_voltage    = pbias_regulator_get_voltage,
-       .enable         = pbias_regulator_enable,
-       .disable        = pbias_regulator_disable,
-       .is_enabled     = pbias_regulator_is_enable,
+       .list_voltage = regulator_list_voltage_table,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .enable = pbias_regulator_enable,
+       .disable = regulator_disable_regmap,
+       .is_enabled = pbias_regulator_is_enable,
 };
 
 static const struct pbias_reg_info pbias_mmc_omap2430 = {
@@ -192,6 +151,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
        if (IS_ERR(syscon))
                return PTR_ERR(syscon);
 
+       cfg.regmap = syscon;
        cfg.dev = &pdev->dev;
 
        for (idx = 0; idx < PBIAS_NUM_REGS && data_idx < count; idx++) {
@@ -207,15 +167,19 @@ static int pbias_regulator_probe(struct platform_device *pdev)
                if (!res)
                        return -EINVAL;
 
-               drvdata[data_idx].pbias_reg = res->start;
                drvdata[data_idx].syscon = syscon;
                drvdata[data_idx].info = info;
                drvdata[data_idx].desc.name = info->name;
                drvdata[data_idx].desc.owner = THIS_MODULE;
                drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
                drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
+               drvdata[data_idx].desc.volt_table = pbias_volt_table;
                drvdata[data_idx].desc.n_voltages = 2;
                drvdata[data_idx].desc.enable_time = info->enable_time;
+               drvdata[data_idx].desc.vsel_reg = res->start;
+               drvdata[data_idx].desc.vsel_mask = info->vmode;
+               drvdata[data_idx].desc.enable_reg = res->start;
+               drvdata[data_idx].desc.enable_mask = info->enable_mask;
 
                cfg.init_data = pbias_matches[idx].init_data;
                cfg.driver_data = &drvdata[data_idx];
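
The pbias conversion above drops the hand-rolled voltage/enable callbacks in favour of the generic regmap helpers, which only need the selector and enable registers described in the regulator_desc. A minimal sketch of such a descriptor, with made-up register offsets and masks; note the real pbias driver keeps a custom .enable because its enable value is not simply the full mask:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

/* Hypothetical two-level regulator: once vsel_reg/vsel_mask and
 * enable_reg/enable_mask are filled in, the generic regmap helpers
 * replace hand-written get/set_voltage and enable/disable callbacks. */
static const unsigned int demo_volt_table[] = { 1800000, 3000000 };

static const struct regulator_ops demo_ops = {
        .list_voltage    = regulator_list_voltage_table,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .enable          = regulator_enable_regmap,
        .disable         = regulator_disable_regmap,
        .is_enabled      = regulator_is_enabled_regmap,
};

static const struct regulator_desc demo_desc = {
        .name        = "demo-ldo",
        .type        = REGULATOR_VOLTAGE,
        .owner       = THIS_MODULE,
        .ops         = &demo_ops,
        .volt_table  = demo_volt_table,
        .n_voltages  = ARRAY_SIZE(demo_volt_table),
        .vsel_reg    = 0x2c,    /* example register holding the voltage-select bit */
        .vsel_mask   = 0x1,
        .enable_reg  = 0x2c,
        .enable_mask = 0x2,
};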
index bd628a6f981d8b550c7e651c845232fcaf5a57ff..e5f13c4310feb368b5855269a043c88bc698da74 100644 (file)
@@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client,
        if (IS_ERR(hym8563->rtc))
                return PTR_ERR(hym8563->rtc);
 
+       /* the hym8563 alarm only supports a minute accuracy */
+       hym8563->rtc->uie_unsupported = 1;
+
 #ifdef CONFIG_COMMON_CLK
        hym8563_clkout_register_clk(hym8563);
 #endif
index 5c8f8226c8485af61dc6b82b52c56861619a3f52..4cdb64be061bd7d175417581010d7f702ed6b006 100644 (file)
@@ -206,7 +206,7 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
        tm->tm_hour = bcd2bin(regs[2] & 0x3f);
        tm->tm_mday = bcd2bin(regs[3] & 0x3f);
        tm->tm_wday = regs[4] & 0x7;
-       tm->tm_mon = bcd2bin(regs[5] & 0x1f);
+       tm->tm_mon = bcd2bin(regs[5] & 0x1f) - 1;
        tm->tm_year = bcd2bin(regs[6]) + 100;
 
        return rtc_valid_tm(tm);
@@ -229,7 +229,7 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
        regs[3] = bin2bcd(tm->tm_hour);
        regs[4] = bin2bcd(tm->tm_mday);
        regs[5] = tm->tm_wday;
-       regs[6] = bin2bcd(tm->tm_mon);
+       regs[6] = bin2bcd(tm->tm_mon + 1);
        regs[7] = bin2bcd(tm->tm_year - 100);
 
        msg.addr = client->addr;
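
The pcf8523 fix above is the classic month off-by-one: struct rtc_time counts months 0-11 while the chip's BCD month register counts 1-12, hence the "+ 1" on write and "- 1" on read. A tiny userspace sketch of the round trip:

#include <stdio.h>
#include <time.h>

static unsigned int bin2bcd(unsigned int v) { return ((v / 10) << 4) | (v % 10); }
static unsigned int bcd2bin(unsigned int v) { return ((v >> 4) * 10) + (v & 0x0f); }

int main(void)
{
        struct tm tm = { .tm_mon = 11 };                /* December, 0-based */
        unsigned int reg = bin2bcd(tm.tm_mon + 1);      /* 0x12 in the register */

        printf("reg=0x%02x -> tm_mon=%u\n", reg, bcd2bin(reg) - 1);
        return 0;
}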
index 9f0ea6cb6922619dfe04803c284002431110e11f..e3bf885f4a6c29fd77f29b51c219a47c6a04ca7e 100644 (file)
@@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
 
 static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
 {
-       do {
+       static int ntsm_unsupported;
+
+       while (true) {
                memset(sei, 0, sizeof(*sei));
                sei->request.length = 0x0010;
                sei->request.code = 0x000e;
-               sei->ntsm = ntsm;
+               if (!ntsm_unsupported)
+                       sei->ntsm = ntsm;
 
                if (chsc(sei))
                        break;
 
                if (sei->response.code != 0x0001) {
-                       CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
-                                     sei->response.code);
+                       CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+                                     sei->response.code, sei->ntsm);
+
+                       if (sei->response.code == 3 && sei->ntsm) {
+                               /* Fallback for old firmware. */
+                               ntsm_unsupported = 1;
+                               continue;
+                       }
                        break;
                }
 
@@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
                        CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
                        break;
                }
-       } while (sei->u.nt0_area.flags & 0x80);
+
+               if (!(sei->u.nt0_area.flags & 0x80))
+                       break;
+       }
 }
 
 /*
index fd7b3bd807896556d0743660069aff8a44c3e072..d837c3c5330fab5c2c77548f31996db3dacad763 100644 (file)
@@ -3348,7 +3348,7 @@ static int __init claw_init(void)
        }
        CLAW_DBF_TEXT(2, setup, "init_mod");
        claw_root_dev = root_device_register("claw");
-       ret = PTR_RET(claw_root_dev);
+       ret = PTR_ERR_OR_ZERO(claw_root_dev);
        if (ret)
                goto register_err;
        ret = ccw_driver_register(&claw_ccw_driver);
index 70b3a023100ef769180d8234f2ac39c3caa91232..03b6ad035577e28553da16fbf4481d9c249a9e6d 100644 (file)
@@ -1837,7 +1837,7 @@ static int __init ctcm_init(void)
        if (ret)
                goto out_err;
        ctcm_root_dev = root_device_register("ctcm");
-       ret = PTR_RET(ctcm_root_dev);
+       ret = PTR_ERR_OR_ZERO(ctcm_root_dev);
        if (ret)
                goto register_err;
        ret = ccw_driver_register(&ctcm_ccw_driver);
index 985b5dcbdac8b348dc6394898ad55bcddf9b8aa9..6bcfbbb20f04c6525fa3363e66422009fe6072b6 100644 (file)
@@ -34,8 +34,9 @@ static ssize_t ctcm_buffer_write(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct net_device *ndev;
-       int bs1;
+       unsigned int bs1;
        struct ctcm_priv *priv = dev_get_drvdata(dev);
+       int rc;
 
        ndev = priv->channel[CTCM_READ]->netdev;
        if (!(priv && priv->channel[CTCM_READ] && ndev)) {
@@ -43,7 +44,9 @@ static ssize_t ctcm_buffer_write(struct device *dev,
                return -ENODEV;
        }
 
-       sscanf(buf, "%u", &bs1);
+       rc = sscanf(buf, "%u", &bs1);
+       if (rc != 1)
+               goto einval;
        if (bs1 > CTCM_BUFSIZE_LIMIT)
                                        goto einval;
        if (bs1 < (576 + LL_HEADER_LENGTH + 2))
@@ -143,13 +146,14 @@ static ssize_t ctcm_proto_show(struct device *dev,
 static ssize_t ctcm_proto_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
-       int value;
+       int value, rc;
        struct ctcm_priv *priv = dev_get_drvdata(dev);
 
        if (!priv)
                return -ENODEV;
-       sscanf(buf, "%u", &value);
-       if (!((value == CTCM_PROTO_S390)  ||
+       rc = sscanf(buf, "%d", &value);
+       if ((rc != 1) ||
+           !((value == CTCM_PROTO_S390)  ||
              (value == CTCM_PROTO_LINUX) ||
              (value == CTCM_PROTO_MPC) ||
              (value == CTCM_PROTO_OS390)))
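
The ctcm and lcs changes above all apply the same pattern: check that sscanf() actually converted one field before trusting the value, and reject the input otherwise. A userspace sketch of the pattern (the size limits are examples only):

#include <stdio.h>

/* Treat anything but exactly one converted field as invalid input,
 * the same way the sysfs store handlers now return -EINVAL. */
static int parse_bufsize(const char *buf, unsigned int *out)
{
        unsigned int val;

        if (sscanf(buf, "%u", &val) != 1)
                return -1;                      /* -EINVAL in the driver */
        if (val > 65535 || val < 592)           /* example limits only */
                return -1;
        *out = val;
        return 0;
}

int main(void)
{
        unsigned int bs;

        printf("\"32768\" -> %d\n", parse_bufsize("32768", &bs));
        printf("\"junk\"  -> %d\n", parse_bufsize("junk", &bs));
        return 0;
}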
index c461f2aac610ea6a8580c504c9a2fbb9dcb92979..0a7d87c372b8414e17c41e05b50cf974e5ba0bd3 100644 (file)
@@ -1943,14 +1943,16 @@ static ssize_t
 lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
         struct lcs_card *card;
-        int value;
+       int value, rc;
 
        card = dev_get_drvdata(dev);
 
         if (!card)
                 return 0;
 
-        sscanf(buf, "%u", &value);
+       rc = sscanf(buf, "%d", &value);
+       if (rc != 1)
+               return -EINVAL;
         /* TODO: sanity checks */
         card->portno = value;
 
@@ -1997,14 +1999,17 @@ static ssize_t
 lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
         struct lcs_card *card;
-        int value;
+       unsigned int value;
+       int rc;
 
        card = dev_get_drvdata(dev);
 
         if (!card)
                 return 0;
 
-        sscanf(buf, "%u", &value);
+       rc = sscanf(buf, "%u", &value);
+       if (rc != 1)
+               return -EINVAL;
         /* TODO: sanity checks */
         card->lancmd_timeout = value;
 
@@ -2442,7 +2447,7 @@ __init lcs_init_module(void)
        if (rc)
                goto out_err;
        lcs_root_dev = root_device_register("lcs");
-       rc = PTR_RET(lcs_root_dev);
+       rc = PTR_ERR_OR_ZERO(lcs_root_dev);
        if (rc)
                goto register_err;
        rc = ccw_driver_register(&lcs_ccw_driver);
index 5333b2c018e781541905e855c7cf3ff0a5d84a9e..a2088af51cc5d809d7513f0c0cb628c0b81910b6 100644 (file)
@@ -268,10 +268,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 #define QETH_NO_PRIO_QUEUEING 0
 #define QETH_PRIO_Q_ING_PREC  1
 #define QETH_PRIO_Q_ING_TOS   2
-#define IP_TOS_LOWDELAY 0x10
-#define IP_TOS_HIGHTHROUGHPUT 0x08
-#define IP_TOS_HIGHRELIABILITY 0x04
-#define IP_TOS_NOTIMPORTANT 0x02
+#define QETH_PRIO_Q_ING_SKB   3
+#define QETH_PRIO_Q_ING_VLAN  4
 
 /* Packing */
 #define QETH_LOW_WATERMARK_PACK  2
index 22470a3b182f0ba04c9a724d1fb26fc716bc19de..549e9fd5bfdcab566a06f8e5beeefc09114d9bd8 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/kthread.h>
 #include <linux/slab.h>
 #include <net/iucv/af_iucv.h>
+#include <net/dsfield.h>
 
 #include <asm/ebcdic.h>
 #include <asm/io.h>
@@ -1012,7 +1013,7 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
 
        card = CARD_FROM_CDEV(cdev);
 
-       if (!IS_ERR(irb))
+       if (!card || !IS_ERR(irb))
                return 0;
 
        switch (PTR_ERR(irb)) {
@@ -1028,7 +1029,7 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
                QETH_CARD_TEXT(card, 2, "ckirberr");
                QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
                if (intparm == QETH_RCD_PARM) {
-                       if (card && (card->data.ccwdev == cdev)) {
+                       if (card->data.ccwdev == cdev) {
                                card->data.state = CH_STATE_DOWN;
                                wake_up(&card->wait_q);
                        }
@@ -3670,42 +3671,56 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
 
+/**
+ * Note: Function assumes that we have 4 outbound queues.
+ */
 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
                        int ipv, int cast_type)
 {
-       if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
-                    card->info.type == QETH_CARD_TYPE_OSX))
-               return card->qdio.default_out_queue;
-       switch (card->qdio.no_out_queues) {
-       case 4:
-               if (cast_type && card->info.is_multicast_different)
-                       return card->info.is_multicast_different &
-                               (card->qdio.no_out_queues - 1);
-               if (card->qdio.do_prio_queueing && (ipv == 4)) {
-                       const u8 tos = ip_hdr(skb)->tos;
-
-                       if (card->qdio.do_prio_queueing ==
-                               QETH_PRIO_Q_ING_TOS) {
-                               if (tos & IP_TOS_NOTIMPORTANT)
-                                       return 3;
-                               if (tos & IP_TOS_HIGHRELIABILITY)
-                                       return 2;
-                               if (tos & IP_TOS_HIGHTHROUGHPUT)
-                                       return 1;
-                               if (tos & IP_TOS_LOWDELAY)
-                                       return 0;
-                       }
-                       if (card->qdio.do_prio_queueing ==
-                               QETH_PRIO_Q_ING_PREC)
-                               return 3 - (tos >> 6);
-               } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
-                       /* TODO: IPv6!!! */
+       __be16 *tci;
+       u8 tos;
+
+       if (cast_type && card->info.is_multicast_different)
+               return card->info.is_multicast_different &
+                       (card->qdio.no_out_queues - 1);
+
+       switch (card->qdio.do_prio_queueing) {
+       case QETH_PRIO_Q_ING_TOS:
+       case QETH_PRIO_Q_ING_PREC:
+               switch (ipv) {
+               case 4:
+                       tos = ipv4_get_dsfield(ip_hdr(skb));
+                       break;
+               case 6:
+                       tos = ipv6_get_dsfield(ipv6_hdr(skb));
+                       break;
+               default:
+                       return card->qdio.default_out_queue;
                }
-               return card->qdio.default_out_queue;
-       case 1: /* fallthrough for single-out-queue 1920-device */
+               if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
+                       return ~tos >> 6 & 3;
+               if (tos & IPTOS_MINCOST)
+                       return 3;
+               if (tos & IPTOS_RELIABILITY)
+                       return 2;
+               if (tos & IPTOS_THROUGHPUT)
+                       return 1;
+               if (tos & IPTOS_LOWDELAY)
+                       return 0;
+               break;
+       case QETH_PRIO_Q_ING_SKB:
+               if (skb->priority > 5)
+                       return 0;
+               return ~skb->priority >> 1 & 3;
+       case QETH_PRIO_Q_ING_VLAN:
+               tci = &((struct ethhdr *)skb->data)->h_proto;
+               if (*tci == ETH_P_8021Q)
+                       return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3;
+               break;
        default:
-               return card->qdio.default_out_queue;
+               break;
        }
+       return card->qdio.default_out_queue;
 }
 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
 
@@ -5824,7 +5839,7 @@ static int __init qeth_core_init(void)
        if (rc)
                goto out_err;
        qeth_core_root_dev = root_device_register("qeth");
-       rc = PTR_RET(qeth_core_root_dev);
+       rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
        if (rc)
                goto register_err;
        qeth_core_header_cache = kmem_cache_create("qeth_hdr",
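
The queue-selection rework above maps IP precedence, TOS flags, skb priority or the VLAN PCP onto the four outbound queues. A userspace sketch of the precedence and TOS mappings on a 4-queue device (the default-queue value is an example):

#include <stdint.h>
#include <stdio.h>

/* TOS flag bits as in <linux/ip.h> */
#define IPTOS_LOWDELAY          0x10
#define IPTOS_THROUGHPUT        0x08
#define IPTOS_RELIABILITY       0x04
#define IPTOS_MINCOST           0x02

/* Precedence mode: higher precedence (top two TOS bits) lands on a
 * lower-numbered, higher-priority queue. */
static int prio_queue_prec(uint8_t tos)
{
        return (~tos & 0xff) >> 6;
}

/* TOS mode: individual TOS flags pick the queue directly. */
static int prio_queue_tos(uint8_t tos)
{
        if (tos & IPTOS_MINCOST)
                return 3;
        if (tos & IPTOS_RELIABILITY)
                return 2;
        if (tos & IPTOS_THROUGHPUT)
                return 1;
        if (tos & IPTOS_LOWDELAY)
                return 0;
        return 2;       /* stand-in for the card's default queue */
}

int main(void)
{
        printf("prec 0xc0 -> queue %d\n", prio_queue_prec(0xc0));       /* 0 */
        printf("tos  0x10 -> queue %d\n", prio_queue_tos(0x10));        /* 0 */
        return 0;
}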
index 425c0ecf1f3b9fd2ae3c2c55f61b1ad10f08c028..8a25a2be9890e7e09af1c9845c5b9b9773472f00 100644 (file)
@@ -217,6 +217,10 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
                return sprintf(buf, "%s\n", "by precedence");
        case QETH_PRIO_Q_ING_TOS:
                return sprintf(buf, "%s\n", "by type of service");
+       case QETH_PRIO_Q_ING_SKB:
+               return sprintf(buf, "%s\n", "by skb-priority");
+       case QETH_PRIO_Q_ING_VLAN:
+               return sprintf(buf, "%s\n", "by VLAN headers");
        default:
                return sprintf(buf, "always queue %i\n",
                               card->qdio.default_out_queue);
@@ -250,11 +254,23 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
        }
 
        tmp = strsep((char **) &buf, "\n");
-       if (!strcmp(tmp, "prio_queueing_prec"))
+       if (!strcmp(tmp, "prio_queueing_prec")) {
                card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
-       else if (!strcmp(tmp, "prio_queueing_tos"))
+               card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+       } else if (!strcmp(tmp, "prio_queueing_skb")) {
+               card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
+               card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+       } else if (!strcmp(tmp, "prio_queueing_tos")) {
                card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
-       else if (!strcmp(tmp, "no_prio_queueing:0")) {
+               card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+       } else if (!strcmp(tmp, "prio_queueing_vlan")) {
+               if (!card->options.layer2) {
+                       rc = -ENOTSUPP;
+                       goto out;
+               }
+               card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
+               card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+       } else if (!strcmp(tmp, "no_prio_queueing:0")) {
                card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
                card->qdio.default_out_queue = 0;
        } else if (!strcmp(tmp, "no_prio_queueing:1")) {
index 8dea3f12ccc1714defe7d4d65869817dd6b69135..5ef5b4f45758cd226bb58becde5a3fa81ff33bb2 100644 (file)
@@ -725,15 +725,20 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        int elements = 0;
        struct qeth_card *card = dev->ml_priv;
        struct sk_buff *new_skb = skb;
-       int ipv = qeth_get_ip_version(skb);
        int cast_type = qeth_l2_get_cast_type(card, skb);
-       struct qeth_qdio_out_q *queue = card->qdio.out_qs
-               [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+       struct qeth_qdio_out_q *queue;
        int tx_bytes = skb->len;
        int data_offset = -1;
        int elements_needed = 0;
        int hd_len = 0;
 
+       if (card->qdio.do_prio_queueing || (cast_type &&
+                                       card->info.is_multicast_different))
+               queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
+                                       qeth_get_ip_version(skb), cast_type)];
+       else
+               queue = card->qdio.out_qs[card->qdio.default_out_queue];
+
        if ((card->state != CARD_STATE_UP) || !card->lan_online) {
                card->stats.tx_carrier_errors++;
                goto tx_drop;
@@ -964,10 +969,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
        card->dev->mtu = card->info.initial_mtu;
        card->dev->netdev_ops = &qeth_l2_netdev_ops;
-       if (card->info.type != QETH_CARD_TYPE_OSN)
-               SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
-       else
-               SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
+       card->dev->ethtool_ops =
+               (card->info.type != QETH_CARD_TYPE_OSN) ?
+               &qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
        card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
index 3524d34ff694c273afefc7d85bfa17b6bce38af9..14e0b5810e8c1cde11a8835552158bab8b59dd1a 100644 (file)
@@ -63,7 +63,7 @@ void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
 int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
 {
        int count = 0, rc = 0;
-       int in[4];
+       unsigned int in[4];
        char c;
 
        rc = sscanf(buf, "%u.%u.%u.%u%c",
@@ -1659,7 +1659,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
        for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
                struct net_device *netdev;
 
-               netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+               netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
                                              vid);
                if (netdev == NULL ||
                    !(netdev->flags & IFF_UP))
@@ -1721,7 +1721,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
        for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
                struct net_device *netdev;
 
-               netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+               netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
                                              vid);
                if (netdev == NULL ||
                    !(netdev->flags & IFF_UP))
@@ -1766,7 +1766,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 4, "frvaddr4");
 
-       netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
+       netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
        if (!netdev)
                return;
        in_dev = in_dev_get(netdev);
@@ -1796,7 +1796,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 4, "frvaddr6");
 
-       netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
+       netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
        if (!netdev)
                return;
        in6_dev = in6_dev_get(netdev);
@@ -2089,7 +2089,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
                struct net_device *netdev;
 
                rcu_read_lock();
-               netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+               netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
                                              vid);
                rcu_read_unlock();
                if (netdev == dev) {
@@ -2926,8 +2926,11 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct sk_buff *new_skb = NULL;
        int ipv = qeth_get_ip_version(skb);
        int cast_type = qeth_l3_get_cast_type(card, skb);
-       struct qeth_qdio_out_q *queue = card->qdio.out_qs
-               [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+       struct qeth_qdio_out_q *queue =
+               card->qdio.out_qs[card->qdio.do_prio_queueing
+                       || (cast_type && card->info.is_multicast_different) ?
+                       qeth_get_priority_queue(card, skb, ipv, cast_type) :
+                       card->qdio.default_out_queue];
        int tx_bytes = skb->len;
        bool large_send;
        int data_offset = -1;
@@ -3298,7 +3301,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
        card->dev->ml_priv = card;
        card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
        card->dev->mtu = card->info.initial_mtu;
-       SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
+       card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
        card->dev->features |=  NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_CTAG_RX |
                                NETIF_F_HW_VLAN_CTAG_FILTER;
index 8cf4a0c69baf4cebfc5ecb4fb28c029c4455d886..9a6e4a2cd072421df1980edfa4c8f914398b3991 100644 (file)
@@ -7463,6 +7463,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
        if (hpsa_simple_mode)
                return;
 
+       trans_support = readl(&(h->cfgtable->TransportSupport));
+       if (!(trans_support & PERFORMANT_MODE))
+               return;
+
        /* Check for I/O accelerator mode support */
        if (trans_support & CFGTBL_Trans_io_accel1) {
                transMethod |= CFGTBL_Trans_io_accel1 |
@@ -7479,10 +7483,6 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
        }
 
        /* TODO, check that this next line h->nreply_queues is correct */
-       trans_support = readl(&(h->cfgtable->TransportSupport));
-       if (!(trans_support & PERFORMANT_MODE))
-               return;
-
        h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
        hpsa_get_max_perf_mode_cmds(h);
        /* Performant mode ring buffer and supporting data structures */
index 11854845393bf9cc13688ff3895339997822fa61..a669f2d11c314e380eb2aa90714b91eb0064f781 100644 (file)
@@ -244,7 +244,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
        sk->sk_data_ready   = tcp_sw_conn->old_data_ready;
        sk->sk_state_change = tcp_sw_conn->old_state_change;
        sk->sk_write_space  = tcp_sw_conn->old_write_space;
-       sk->sk_no_check  = 0;
+       sk->sk_no_check_tx = 0;
        write_unlock_bh(&sk->sk_callback_lock);
 }
 
index 7f0af4fcc0019127ab4d60fc0550ed6daa86ffb6..6fd7d40b2c4dea102e15a2e9c76fef3500c09435 100644 (file)
@@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 
        mpt2sas_base_free_resources(ioc);
        pci_save_state(pdev);
-       pci_disable_device(pdev);
        pci_set_power_state(pdev, device_state);
        return 0;
 }
index 771c16bfdbac4be2fac180459ff5711407c98a01..f17aa7aa78796e7f6d358b8cd5f68fd43cfee4d4 100644 (file)
@@ -189,6 +189,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
                /*
                 * Retry after abort failed, escalate to next level.
                 */
+               scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
                SCSI_LOG_ERROR_RECOVERY(3,
                        scmd_printk(KERN_INFO, scmd,
                                    "scmd %p previous abort failed\n", scmd));
@@ -920,10 +921,12 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
        ses->prot_op = scmd->prot_op;
 
        scmd->prot_op = SCSI_PROT_NORMAL;
+       scmd->eh_eflags = 0;
        scmd->cmnd = ses->eh_cmnd;
        memset(scmd->cmnd, 0, BLK_MAX_CDB);
        memset(&scmd->sdb, 0, sizeof(scmd->sdb));
        scmd->request->next_rq = NULL;
+       scmd->result = 0;
 
        if (sense_bytes) {
                scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1157,6 +1160,15 @@ int scsi_eh_get_sense(struct list_head *work_q,
                                             __func__));
                        break;
                }
+               if (status_byte(scmd->result) != CHECK_CONDITION)
+                       /*
+                        * don't request sense if there's no check condition
+                        * status because the error we're processing isn't one
+                        * that has a sense code (and some devices get
+                        * confused by sense requests out of the blue)
+                        */
+                       continue;
+
                SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
                                                  "%s: requesting sense\n",
                                                  current->comm));
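
The new check above skips REQUEST SENSE unless the command actually returned CHECK CONDITION, since only that status carries sense data and some devices misbehave when sensed out of the blue. A small userspace sketch of the status_byte() test, using the pre-5.x SCSI status encoding:

#include <stdio.h>

#define SAM_STAT_GOOD            0x00
#define SAM_STAT_CHECK_CONDITION 0x02
#define CHECK_CONDITION          0x01   /* status_byte() encoding */

/* The SAM status byte sits in the low byte of scmd->result;
 * status_byte() drops the historical "vendor" bit. */
static unsigned int status_byte(unsigned int result)
{
        return (result >> 1) & 0x7f;
}

int main(void)
{
        unsigned int results[] = { SAM_STAT_GOOD, SAM_STAT_CHECK_CONDITION };

        for (int i = 0; i < 2; i++) {
                if (status_byte(results[i]) != CHECK_CONDITION)
                        printf("0x%02x: skip sense\n", results[i]);
                else
                        printf("0x%02x: request sense\n", results[i]);
        }
        return 0;
}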
index 65a123d9c67649822e2ab0333bf534f820b8b212..9db097a28a74588c793c0521c7f80f8540820f61 100644 (file)
@@ -137,6 +137,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
         * lock such that the kblockd_schedule_work() call happens
         * before blk_cleanup_queue() finishes.
         */
+       cmd->result = 0;
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        kblockd_schedule_work(q, &device->requeue_work);
@@ -1044,6 +1045,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
  */
 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
+       struct scsi_device *sdev = cmd->device;
        struct request *rq = cmd->request;
 
        int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
@@ -1091,7 +1093,7 @@ err_exit:
        scsi_release_buffers(cmd);
        cmd->request->special = NULL;
        scsi_put_command(cmd);
-       put_device(&cmd->device->sdev_gendev);
+       put_device(&sdev->sdev_gendev);
        return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
@@ -1273,7 +1275,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
                        struct scsi_cmnd *cmd = req->special;
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
-                       put_device(&cmd->device->sdev_gendev);
+                       put_device(&sdev->sdev_gendev);
                        req->special = NULL;
                }
                break;
index fe30ea94ffe67ef4e5d355fdc9cdcb71eee9e0d7..109802f776ed71cea6857eda9ae6ccc3e0b41f80 100644 (file)
@@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
                        goto next_msg;
                }
 
-               if (!capable(CAP_SYS_ADMIN)) {
+               if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
                        err = -EPERM;
                        goto next_msg;
                }
index 16bfd50cd3fe65644c5443698d3aa3e96dfd5925..db3b494e5926a423866e0ad3a18b15b6378d3cca 100644 (file)
@@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 
                vscsi->affinity_hint_set = true;
        } else {
-               for (i = 0; i < vscsi->num_queues; i++)
+               for (i = 0; i < vscsi->num_queues; i++) {
+                       if (!vscsi->req_vqs[i].vq)
+                               continue;
+
                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+               }
 
                vscsi->affinity_hint_set = false;
        }
index fc67f564f02cf77eec2350f4435836d409020314..788ed9b59b4e3f04c3a485fefe6d31dfdb6b681f 100644 (file)
@@ -1,10 +1,12 @@
 #
 # Makefile for the SuperH specific drivers.
 #
-obj-y  := intc/
+obj-$(CONFIG_SUPERH)                   += intc/
+obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)     += intc/
+ifneq ($(CONFIG_COMMON_CLK),y)
+obj-$(CONFIG_HAVE_CLK)                 += clk/
+endif
+obj-$(CONFIG_MAPLE)                    += maple/
+obj-$(CONFIG_SUPERHYWAY)               += superhyway/
 
-obj-$(CONFIG_HAVE_CLK)         += clk/
-obj-$(CONFIG_MAPLE)            += maple/
-obj-$(CONFIG_SUPERHYWAY)       += superhyway/
-
-obj-y                          += pm_runtime.o
+obj-y                                  += pm_runtime.o
index 8afa5a4589f2dd03771acaac8be585447e8033c0..10c65eb51f8587ca79b15291a75c04c16e4604a2 100644 (file)
@@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
        .con_ids = { NULL, },
 };
 
+static bool default_pm_on;
+
 static int __init sh_pm_runtime_init(void)
 {
+       if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+               if (!of_machine_is_compatible("renesas,emev2") &&
+                   !of_machine_is_compatible("renesas,r7s72100") &&
+                   !of_machine_is_compatible("renesas,r8a73a4") &&
+                   !of_machine_is_compatible("renesas,r8a7740") &&
+                   !of_machine_is_compatible("renesas,r8a7778") &&
+                   !of_machine_is_compatible("renesas,r8a7779") &&
+                   !of_machine_is_compatible("renesas,r8a7790") &&
+                   !of_machine_is_compatible("renesas,r8a7791") &&
+                   !of_machine_is_compatible("renesas,sh7372") &&
+                   !of_machine_is_compatible("renesas,sh73a0"))
+                       return 0;
+       }
+
+       default_pm_on = true;
        pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
        return 0;
 }
@@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init);
 
 static int __init sh_pm_runtime_late_init(void)
 {
-       pm_genpd_poweroff_unused();
+       if (default_pm_on)
+               pm_genpd_poweroff_unused();
        return 0;
 }
 late_initcall(sh_pm_runtime_late_init);
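
The first hunk gates the legacy clock-notifier setup on a whitelist of machine-compatible strings and records the decision in default_pm_on for the late initcall. A plain-C sketch of the same whitelist idea, using a string table in place of of_machine_is_compatible() (table contents are an illustrative subset):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static const char *const pm_whitelist[] = {
        "renesas,r8a7790",
        "renesas,r8a7791",
        /* ... further legacy SoCs would be listed here ... */
    };

    static bool machine_is_whitelisted(const char *compatible)
    {
        size_t i;

        for (i = 0; i < sizeof(pm_whitelist) / sizeof(pm_whitelist[0]); i++)
            if (strcmp(compatible, pm_whitelist[i]) == 0)
                return true;
        return false;
    }

    int main(void)
    {
        /* only whitelisted machines would keep the default PM behaviour */
        printf("%d\n", machine_is_whitelisted("renesas,r8a7790"));
        printf("%d\n", machine_is_whitelisted("vendor,other-soc"));
        return 0;
    }
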
index 8005f986948173e55d5bc5a5387da80c8a5b9185..079e6b1b0cdb6fbc2e05532f3d8548653f4d4694 100644 (file)
@@ -1115,8 +1115,11 @@ static int atmel_spi_one_transfer(struct spi_master *master,
                        atmel_spi_next_xfer_pio(master, xfer);
                }
 
+               /* interrupts are disabled, so free the lock for schedule */
+               atmel_spi_unlock(as);
                ret = wait_for_completion_timeout(&as->xfer_completion,
                                                        SPI_DMA_TIMEOUT);
+               atmel_spi_lock(as);
                if (WARN_ON(ret == 0)) {
                        dev_err(&spi->dev,
                                "spi trasfer timeout, err %d\n", ret);
index 55e57c3eb9bd051bc7fcca7d5090a2d999bf688b..ebf720b88a2a5ca5c47c389379c50ab18e4cbd2b 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
index 713af4806f265e10b87dcfade6b6989055c2f283..f6759dc0153b4a8c45fb83f7660e34be2a47641b 100644 (file)
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
        struct sg_table *sgt;
        void *buf, *pbuf;
 
-       /*
-        * Some DMA controllers have problems transferring buffers that are
-        * not multiple of 4 bytes. So we truncate the transfer so that it
-        * is suitable for such controllers, and handle the trailing bytes
-        * manually after the DMA completes.
-        *
-        * REVISIT: It would be better if this information could be
-        * retrieved directly from the DMA device in a similar way than
-        * ->copy_align etc. is done.
-        */
-       len = ALIGN(drv_data->len, 4);
-
        if (dir == DMA_TO_DEVICE) {
                dmadev = drv_data->tx_chan->device->dev;
                sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                if (!error) {
                        pxa2xx_spi_unmap_dma_buffers(drv_data);
 
-                       /* Handle the last bytes of unaligned transfer */
                        drv_data->tx += drv_data->tx_map_len;
-                       drv_data->write(drv_data);
-
                        drv_data->rx += drv_data->rx_map_len;
-                       drv_data->read(drv_data);
 
                        msg->actual_length += drv_data->len;
                        msg->state = pxa2xx_spi_next_transfer(drv_data);
index b032e8885e2435b3585810f1266bca9aa3fce6a8..78c66e3c53ed5f88d8d79ac3c16cdb4800af71f0 100644 (file)
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
        int ret;
 
        ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
index 9009456bdf4d29c6febf56b162113fa5e4bf78ef..c8e795ef2e132fcb10dd4ac196decbd887861f5d 100644 (file)
@@ -244,9 +244,9 @@ static int hspi_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       clk = clk_get(NULL, "shyway_clk");
+       clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
-               dev_err(&pdev->dev, "shyway_clk is required\n");
+               dev_err(&pdev->dev, "couldn't get clock\n");
                ret = -EINVAL;
                goto error0;
        }
index 1a77ad52812fd79d3a2524264e7a2a8678ca1464..67d8909dcf3946a4d516d607fd83cef417aaabc1 100644 (file)
@@ -287,8 +287,8 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
                                sspi->left_rx_word)
                        sspi->rx_word(sspi);
 
-       if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY
-                       SIRFSOC_SPI_TXFIFO_THD_REACH))
+       if (spi_stat & (SIRFSOC_SPI_TXFIFO_EMPTY |
+                       SIRFSOC_SPI_TXFIFO_THD_REACH))
                while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
                                & SIRFSOC_SPI_FIFO_FULL)) &&
                                sspi->left_tx_word)
@@ -470,7 +470,16 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
                writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
        } else {
                int gpio = sspi->chipselect[spi->chip_select];
-               gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+               switch (value) {
+               case BITBANG_CS_ACTIVE:
+                       gpio_direction_output(gpio,
+                                       spi->mode & SPI_CS_HIGH ? 1 : 0);
+                       break;
+               case BITBANG_CS_INACTIVE:
+                       gpio_direction_output(gpio,
+                                       spi->mode & SPI_CS_HIGH ? 0 : 1);
+                       break;
+               }
        }
 }
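
The chipselect hunk distinguishes the active and inactive cases and derives the GPIO level from SPI_CS_HIGH, i.e. active-low by default with the flag inverting the sense. The level computation reduces to a two-input truth table; a sketch (the flag value mirrors the SPI mode bits but is illustrative here):

    #include <stdbool.h>
    #include <stdio.h>

    #define SPI_CS_HIGH 0x04   /* illustrative value, patterned after the SPI mode flags */

    /* Active-low by default; SPI_CS_HIGH inverts the sense. */
    static int cs_gpio_level(unsigned int mode, bool activate)
    {
        bool active_high = mode & SPI_CS_HIGH;

        return (activate == active_high) ? 1 : 0;
    }

    int main(void)
    {
        printf("activate,   active-low -> %d\n", cs_gpio_level(0, true));            /* 0 */
        printf("deactivate, active-low -> %d\n", cs_gpio_level(0, false));           /* 1 */
        printf("activate,   CS_HIGH    -> %d\n", cs_gpio_level(SPI_CS_HIGH, true));  /* 1 */
        return 0;
    }
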
 
@@ -559,6 +568,11 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
                regval &= ~SIRFSOC_SPI_CMD_MODE;
                sspi->tx_by_cmd = false;
        }
+       /*
+        * set spi controller in RISC chipselect mode, we are controlling CS by
+        * software BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE.
+        */
+       regval |= SIRFSOC_SPI_CS_IO_MODE;
        writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
 
        if (IS_DMA_VALID(t)) {
index 4eb9bf02996cf179cf3e6365421867aaf8a10593..939edf473235dca2fb7692fe0fec6dbdd649bbd0 100644 (file)
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
                spi->master->set_cs(spi, !enable);
 }
 
+#ifdef CONFIG_HAS_DMA
 static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
        }
 }
 
-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
 {
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
-       void *tmp;
-       unsigned int max_tx, max_rx;
        int ret;
 
-       if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
-               max_tx = 0;
-               max_rx = 0;
-
-               list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-                       if ((master->flags & SPI_MASTER_MUST_TX) &&
-                           !xfer->tx_buf)
-                               max_tx = max(xfer->len, max_tx);
-                       if ((master->flags & SPI_MASTER_MUST_RX) &&
-                           !xfer->rx_buf)
-                               max_rx = max(xfer->len, max_rx);
-               }
-
-               if (max_tx) {
-                       tmp = krealloc(master->dummy_tx, max_tx,
-                                      GFP_KERNEL | GFP_DMA);
-                       if (!tmp)
-                               return -ENOMEM;
-                       master->dummy_tx = tmp;
-                       memset(tmp, 0, max_tx);
-               }
-
-               if (max_rx) {
-                       tmp = krealloc(master->dummy_rx, max_rx,
-                                      GFP_KERNEL | GFP_DMA);
-                       if (!tmp)
-                               return -ENOMEM;
-                       master->dummy_rx = tmp;
-               }
-
-               if (max_tx || max_rx) {
-                       list_for_each_entry(xfer, &msg->transfers,
-                                           transfer_list) {
-                               if (!xfer->tx_buf)
-                                       xfer->tx_buf = master->dummy_tx;
-                               if (!xfer->rx_buf)
-                                       xfer->rx_buf = master->dummy_rx;
-                       }
-               }
-       }
-
        if (!master->can_dma)
                return 0;
 
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 
        return 0;
 }
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+                               struct spi_message *msg)
+{
+       return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+                               struct spi_message *msg)
+{
+       return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+       struct spi_transfer *xfer;
+       void *tmp;
+       unsigned int max_tx, max_rx;
+
+       if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+               max_tx = 0;
+               max_rx = 0;
+
+               list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+                       if ((master->flags & SPI_MASTER_MUST_TX) &&
+                           !xfer->tx_buf)
+                               max_tx = max(xfer->len, max_tx);
+                       if ((master->flags & SPI_MASTER_MUST_RX) &&
+                           !xfer->rx_buf)
+                               max_rx = max(xfer->len, max_rx);
+               }
+
+               if (max_tx) {
+                       tmp = krealloc(master->dummy_tx, max_tx,
+                                      GFP_KERNEL | GFP_DMA);
+                       if (!tmp)
+                               return -ENOMEM;
+                       master->dummy_tx = tmp;
+                       memset(tmp, 0, max_tx);
+               }
+
+               if (max_rx) {
+                       tmp = krealloc(master->dummy_rx, max_rx,
+                                      GFP_KERNEL | GFP_DMA);
+                       if (!tmp)
+                               return -ENOMEM;
+                       master->dummy_rx = tmp;
+               }
+
+               if (max_tx || max_rx) {
+                       list_for_each_entry(xfer, &msg->transfers,
+                                           transfer_list) {
+                               if (!xfer->tx_buf)
+                                       xfer->tx_buf = master->dummy_tx;
+                               if (!xfer->rx_buf)
+                                       xfer->rx_buf = master->dummy_rx;
+                       }
+               }
+       }
+
+       return __spi_map_msg(master, msg);
+}
 
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
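
The relocated spi_map_msg() grows master->dummy_tx/dummy_rx with krealloc() to the largest transfer that lacks a buffer, points those transfers at the dummies, and only then hands off to __spi_map_msg() for DMA mapping. The grow-on-demand part maps directly onto realloc(); a sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Keep one scratch buffer around and only ever grow it. */
    static void *scratch;
    static size_t scratch_len;

    static void *get_scratch(size_t len)
    {
        if (len > scratch_len) {
            void *tmp = realloc(scratch, len);   /* krealloc() analogue */

            if (!tmp)
                return NULL;
            scratch = tmp;
            scratch_len = len;
            memset(scratch, 0, len);             /* dummy TX data is all zeroes */
        }
        return scratch;
    }

    int main(void)
    {
        if (!get_scratch(64) || !get_scratch(256) || !get_scratch(128))
            return 1;
        printf("scratch buffer is %zu bytes\n", scratch_len);   /* 256 */
        free(scratch);
        return 0;
    }
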
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
 {
        int ret;
 
-       master->queued = true;
        master->transfer = spi_queued_transfer;
        if (!master->transfer_one_message)
                master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
                dev_err(&master->dev, "problem initializing queue\n");
                goto err_init_queue;
        }
+       master->queued = true;
        ret = spi_start_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
        return 0;
 
 err_start_queue:
-err_init_queue:
        spi_destroy_queue(master);
+err_init_queue:
        return ret;
 }
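
These hunks set master->queued = true only after the queue has actually been created and swap the error labels so spi_destroy_queue() runs only when initialisation succeeded but starting failed. The underlying idiom is the ordered goto-unwind, sketched below with hypothetical init/start names:

    #include <stdio.h>

    static int init_queue(void)     { return 0; }    /* 0 on success */
    static int start_queue(void)    { return -1; }   /* force the error path */
    static void destroy_queue(void) { puts("destroy_queue"); }

    static int initialize_queue(void)
    {
        int ret;

        ret = init_queue();
        if (ret)
            goto err_init;        /* nothing to undo yet */

        ret = start_queue();
        if (ret)
            goto err_start;       /* queue exists, so it must be torn down */

        return 0;

    err_start:
        destroy_queue();
    err_init:
        return ret;
    }

    int main(void)
    {
        printf("initialize_queue() = %d\n", initialize_queue());
        return 0;
    }
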
 
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
  */
 int spi_setup(struct spi_device *spi)
 {
-       unsigned        bad_bits;
+       unsigned        bad_bits, ugly_bits;
        int             status = 0;
 
        /* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
         * that aren't supported with their current master
         */
        bad_bits = spi->mode & ~spi->master->mode_bits;
+       ugly_bits = bad_bits &
+                   (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+       if (ugly_bits) {
+               dev_warn(&spi->dev,
+                        "setup: ignoring unsupported mode bits %x\n",
+                        ugly_bits);
+               spi->mode &= ~ugly_bits;
+               bad_bits &= ~ugly_bits;
+       }
        if (bad_bits) {
                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
                        bad_bits);
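
spi_setup() now splits the unsupported mode bits into "ugly" dual/quad bits, which are warned about and cleared, and the remaining bad bits, which still abort the setup. The mask arithmetic on its own (flag values patterned after the SPI mode bits; treat them as illustrative):

    #include <stdio.h>

    #define SPI_TX_DUAL 0x100
    #define SPI_TX_QUAD 0x200
    #define SPI_RX_DUAL 0x400
    #define SPI_RX_QUAD 0x800

    static int check_mode(unsigned int requested, unsigned int supported)
    {
        unsigned int bad  = requested & ~supported;
        unsigned int ugly = bad & (SPI_TX_DUAL | SPI_TX_QUAD |
                                   SPI_RX_DUAL | SPI_RX_QUAD);

        if (ugly) {
            printf("ignoring unsupported mode bits %x\n", ugly);
            bad &= ~ugly;                  /* tolerated: fall back to single wire */
        }
        if (bad) {
            printf("unsupported mode bits %x\n", bad);
            return -1;                     /* anything else is still fatal */
        }
        return 0;
    }

    int main(void)
    {
        /* device asks for quad TX on a master that only does plain SPI modes */
        return check_mode(SPI_TX_QUAD, 0x0f) ? 1 : 0;
    }
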
index 71db683098d67399b3c6918256e977af92ab6e85..b59af030358190a9539b16c7fd8d8b20fb16c30a 100644 (file)
@@ -493,7 +493,7 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
                        /* pointer to the DA */
                        *datap++ = val & 0xff;
                        *datap++ = (val >> 8) & 0xff;
-                       *datap++ = chan;
+                       *datap++ = chan << 6;
                        devpriv->ao_readback[chan] = val;
 
                        s->async->events |= COMEDI_CB_BLOCK;
@@ -1040,11 +1040,8 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
        /* set current channel of the running acquisition to zero */
        s->async->cur_chan = 0;
 
-       for (i = 0; i < cmd->chanlist_len; ++i) {
-               unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
-               devpriv->ao_chanlist[i] = chan << 6;
-       }
+       for (i = 0; i < cmd->chanlist_len; ++i)
+               devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
 
        /* we count in steps of 1ms (125us) */
        /* 125us mode not used yet */
index d329cf31436048598f6f0221de7ad4b586c58861..15e0f4da3ce07ba4ea597b021735e025fe83e0fa 100644 (file)
@@ -4604,7 +4604,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
        netdev->netdev_ops     = &et131x_netdev_ops;
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
-       SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
+       netdev->ethtool_ops = &et131x_ethtool_ops;
 
        adapter = et131x_adapter_init(netdev, pdev);
 
index d6421b9b5981c2265a1c796a03ade4ec5416e878..a6158bef58e54c63dc19ebfcb3ef89daf2b0874e 100644 (file)
@@ -2249,7 +2249,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
 
        ft1000InitProc(dev);
        ft1000_card_present = 1;
-       SET_ETHTOOL_OPS(dev, &ops);
+       dev->ethtool_ops = &ops;
        printk(KERN_INFO "ft1000: %s: addr 0x%04lx irq %d, MAC addr %pM\n",
                        dev->name, dev->base_addr, dev->irq, dev->dev_addr);
        return dev;
index 11fb95201545233921f67d912051b27119a1648a..dae8d1a9038e661885e2c3bf51869ba76121d122 100644 (file)
@@ -1526,7 +1526,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
        struct resource *iores;
        int ret = 0, touch_ret;
        int i, s;
-       unsigned int scale_uv;
+       uint64_t scale_uv;
 
        /* Allocate the IIO device. */
        iio = devm_iio_device_alloc(dev, sizeof(*lradc));
index 36eedd8a0ea9815c168889d3ed614597a93847a6..017d2f8379b78ca86f7e30ed0e85855684c7b6ec 100644 (file)
@@ -70,6 +70,7 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev,
                vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
                vel = (vel << 4) >> 4;
                *val = vel;
+               break;
        default:
                mutex_unlock(&st->lock);
                return -EINVAL;
@@ -106,7 +107,7 @@ static int ad2s1200_probe(struct spi_device *spi)
        int pn, ret = 0;
        unsigned short *pins = spi->dev.platform_data;
 
-       for (pn = 0; pn < AD2S1200_PN; pn++)
+       for (pn = 0; pn < AD2S1200_PN; pn++) {
                ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT,
                                            DRV_NAME);
                if (ret) {
@@ -114,6 +115,7 @@ static int ad2s1200_probe(struct spi_device *spi)
                                                        pins[pn]);
                        return ret;
                }
+       }
        indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
        if (!indio_dev)
                return -ENOMEM;
index 4144a75e5f71bc6258d9d8dcdc1a02b3b2a2f035..c270c9ae6d27711d531ebb54a07b1cdc125be9da 100644 (file)
@@ -517,7 +517,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
                of_node_put(port);
                if (port == imx_crtc->port) {
                        ret = of_graph_parse_endpoint(ep, &endpoint);
-                       return ret ? ret : endpoint.id;
+                       return ret ? ret : endpoint.port;
                }
        } while (ep);
 
@@ -675,6 +675,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
                        if (!remote || !of_device_is_available(remote)) {
                                of_node_put(remote);
                                continue;
+                       } else if (!of_device_is_available(remote->parent)) {
+                               dev_warn(&pdev->dev, "parent device of %s is not available\n",
+                                        remote->full_name);
+                               of_node_put(remote);
+                               continue;
                        }
 
                        ret = imx_drm_add_component(&pdev->dev, remote);
index 575533f4fd64fc7d53d38c4a0cb307fa429f9f68..a23f4f773146a8891925165622cb5167d30252df 100644 (file)
@@ -582,7 +582,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
        tve->dev = dev;
        spin_lock_init(&tve->lock);
 
-       ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0);
+       ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
        if (ddc_node) {
                tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
                of_node_put(ddc_node);
index 8c101cbbee97646d2b06166865381bf9144df2cb..acc8184c46cde0d85ebeccd41c000b67ae9da429 100644 (file)
@@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq)
        struct vpfe_fh *fh = vb2_get_drv_priv(vq);
        struct vpfe_video_device *video = fh->video;
 
-       if (!vb2_is_streaming(vq))
-               return 0;
        /* release all active buffers */
+       if (video->cur_frm == video->next_frm) {
+               vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
+       } else {
+               if (video->cur_frm != NULL)
+                       vb2_buffer_done(&video->cur_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+               if (video->next_frm != NULL)
+                       vb2_buffer_done(&video->next_frm->vb,
+                                       VB2_BUF_STATE_ERROR);
+       }
+
        while (!list_empty(&video->dma_queue)) {
                video->next_frm = list_entry(video->dma_queue.next,
                                                struct vpfe_cap_buffer, list);
index b3d2cc729657df34e57bc300033bafc1baf2bdd6..4ba569258498b9d6248de4d58d13c63750c68561 100644 (file)
@@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
        { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
 /*     { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
        { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
-#endif
        { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
        { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
        { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
        { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
        { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
index c83e3375104b5066db2a4991726fbb8a3aa45fde..9d957615e32addcd405ebdfe4ad505b99a42fdc9 100644 (file)
@@ -1066,7 +1066,7 @@ static int xlr_net_probe(struct platform_device *pdev)
        xlr_set_rx_mode(ndev);
 
        priv->num_rx_desc += MAX_NUM_DESC_SPILL;
-       SET_ETHTOOL_OPS(ndev, &xlr_ethtool_ops);
+       ndev->ethtool_ops = &xlr_ethtool_ops;
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        /* Common registers, do one time initialization */
index ff7214aac9dd6ed05c524c69fa23e35767d2dcce..da9dd6bc56600f2fe094df6a59d3836ad491be72 100644 (file)
@@ -469,7 +469,7 @@ int cvm_oct_common_init(struct net_device *dev)
 
        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
-       SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
+       dev->ethtool_ops = &cvm_oct_ethtool_ops;
 
        cvm_oct_phy_setup_device(dev);
        cvm_oct_set_mac_filter(dev);
index 57eca7a45672b94674677f88cfef688ca403c386..4fe751f7c2bf2438f0e49054731d911c4e84d239 100644 (file)
@@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev)
 #endif /* CONFIG_8723AU_P2P */
 
        rtw_scan_abort23a(padapter);
-        /* set this at the end */
-       padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR;
 
        RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
        DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
index c49160e477d8d0245392fea0db2a2d432c186b2f..07e542e5d1562b18fa3caad2903b0928dccdd1e2 100644 (file)
@@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr)
        if (addr == RECV_BULK_IN_ADDR) {
                pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
        } else if (addr == RECV_INT_IN_ADDR) {
-               pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
+               pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]);
        } else if (addr < HW_QUEUE_ENTRY) {
                ep_num = pdvobj->Queue2Pipe[addr];
                pipe = usb_sndbulkpipe(pusbd, ep_num);
index ff3139b6da656f2f985b2c1498fa16508c2030b2..63ae2d1997d3c19fb1bd38c97ac4d39bd1c7cf52 100644 (file)
@@ -1414,23 +1414,15 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
  * before switch channel or power save, or tx buffer packet
  * maybe send after offchannel or rf sleep, this may cause
  * dis-association by AP */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw,
+                        struct ieee80211_vif *vif,
+                        u32 queues, bool drop)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        if (rtlpriv->intf_ops->flush)
                rtlpriv->intf_ops->flush(hw, queues, drop);
 }
-#else
-static void rtl_op_flush(struct ieee80211_hw *hw, bool drop)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       if (rtlpriv->intf_ops->flush)
-               rtlpriv->intf_ops->flush(hw, drop);
-}
-#endif
 
 const struct ieee80211_ops rtl_ops = {
        .start = rtl_op_start,
index f76f95c29617b3b7058a1999b7896d4e3d8a875a..723319ee08f39af6a7a27b21de6f8467742717cb 100644 (file)
@@ -84,7 +84,7 @@ static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data)
 }
 
 static int prism2_domibset_pstr32(wlandevice_t *wlandev,
-                                 u32 did, u8 len, u8 *data)
+                                 u32 did, u8 len, const u8 *data)
 {
        struct p80211msg_dot11req_mibset msg;
        p80211item_pstr32_t *mibitem =
@@ -298,7 +298,7 @@ static int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
 
 
 static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
-                             u8 *mac, struct station_info *sinfo)
+                             const u8 *mac, struct station_info *sinfo)
 {
        wlandevice_t *wlandev = dev->ml_priv;
        struct p80211msg_lnxreq_commsquality quality;
index 78cab13bbb1be3796b0e00af4a0667329ed4a2d8..46588c85d39bd0ce206f213aebba9330db7e33b8 100644 (file)
@@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         * Initiator is expecting a NopIN ping reply..
         */
        if (hdr->itt != RESERVED_ITT) {
-               BUG_ON(!cmd);
+               if (!cmd)
+                       return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+                                               (unsigned char *)hdr);
 
                spin_lock_bh(&conn->cmd_lock);
                list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
index 6960f22909ae2eeddd651d59217d1f09519e9154..302eb3b7871558bb2a1241fedc521e20efebf415 100644 (file)
@@ -775,6 +775,7 @@ struct iscsi_np {
        int                     np_ip_proto;
        int                     np_sock_type;
        enum np_thread_state_table np_thread_state;
+       bool                    enabled;
        enum iscsi_timer_flags_table np_login_timer_flags;
        u32                     np_exports;
        enum np_flags_table     np_flags;
index 8739b98f6f93539b8c6eb95f27d7fde3601b40d7..ca31fa1b8a4b69058290243bc61b0007d2c7d3cc 100644 (file)
@@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2(
                }
                off = mrdsl % PAGE_SIZE;
                if (!off)
-                       return 0;
+                       goto check_prot;
 
                if (mrdsl < PAGE_SIZE)
                        mrdsl = PAGE_SIZE;
@@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2(
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                        return -1;
                }
+               /*
+                * ISER currently requires that ImmediateData + Unsolicited
+                * Data be disabled when protection / signature MRs are enabled.
+                */
+check_prot:
+               if (sess->se_sess->sup_prot_ops &
+                  (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+                   TARGET_PROT_DOUT_INSERT)) {
+
+                       sprintf(buf, "ImmediateData=No");
+                       if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+                               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                                   ISCSI_LOGIN_STATUS_NO_RESOURCES);
+                               return -1;
+                       }
+
+                       sprintf(buf, "InitialR2T=Yes");
+                       if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+                               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                                   ISCSI_LOGIN_STATUS_NO_RESOURCES);
+                               return -1;
+                       }
+                       pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+                                " T10-PI enabled ISER session\n");
+               }
        }
 
        return 0;
@@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket(
        }
 
        np->np_transport = t;
+       np->enabled = true;
        return 0;
 }
 
index eb96b20dc09e13ffe32e226df38b73241f176a63..ca1811858afd01fa4b09ba6e1523f32bd0c3e1b2 100644 (file)
@@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread(
                return;
        }
 
+       tpg_np->tpg_np->enabled = false;
        iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
 }
 
index 65001e1336702966108081443d5a44f39988d5af..26416c15d65c25c1b915f6bdf00b03db341b9152 100644 (file)
@@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("emulate_write_cache not supported for pSCSI\n");
                return -EINVAL;
        }
-       if (dev->transport->get_write_cache) {
-               pr_warn("emulate_write_cache cannot be changed when underlying"
-                       " HW reports WriteCacheEnabled, ignoring request\n");
-               return 0;
+       if (flag &&
+           dev->transport->get_write_cache) {
+               pr_err("emulate_write_cache not supported for this device\n");
+               return -EINVAL;
        }
 
        dev->dev_attrib.emulate_write_cache = flag;
@@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
                return 0;
        }
        if (!dev->transport->init_prot || !dev->transport->free_prot) {
+               /* 0 is only allowed value for non-supporting backends */
+               if (flag == 0)
+                       return 0;
+
                pr_err("DIF protection not supported by backend: %s\n",
                       dev->transport->name);
                return -ENOSYS;
index d4b98690a73680244676b6e608ede6c85ff724cb..789aa9eb0a1e590b8853a49e8f0ae137f04d3241 100644 (file)
@@ -1113,6 +1113,7 @@ void transport_init_se_cmd(
        init_completion(&cmd->cmd_wait_comp);
        init_completion(&cmd->task_stop_comp);
        spin_lock_init(&cmd->t_state_lock);
+       kref_init(&cmd->cmd_kref);
        cmd->transport_state = CMD_T_DEV_ACTIVE;
 
        cmd->se_tfo = tfo;
@@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
        unsigned long flags;
        int ret = 0;
 
-       kref_init(&se_cmd->cmd_kref);
        /*
         * Add a second kref if the fabric caller is expecting to handle
         * fabric acknowledgement that requires two target_put_sess_cmd()
index 01cf37f212c30724ed6a0addbe8c7cbe69dfd6a3..f5fd515b2bee266dd9c8279ea804bc7372d955f3 100644 (file)
@@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
 {
        struct fc_frame *fp;
        struct fc_lport *lport;
-       struct se_session *se_sess;
+       struct ft_sess *sess;
 
        if (!cmd)
                return;
-       se_sess = cmd->sess->se_sess;
+       sess = cmd->sess;
        fp = cmd->req_frame;
        lport = fr_dev(fp);
        if (fr_seq(fp))
                lport->tt.seq_release(fr_seq(fp));
        fc_frame_free(fp);
-       percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
-       ft_sess_put(cmd->sess); /* undo get from lookup at recv */
+       percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+       ft_sess_put(sess);      /* undo get from lookup at recv */
 }
 
 void ft_release_cmd(struct se_cmd *se_cmd)
index 94f9e3a38412f3071d63b4964925b375abec4283..0ff7fda0742f4326113cf82f8a72cb355c840319 100644 (file)
@@ -190,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index)
        return hvc_driver;
 }
 
-static int __init hvc_console_setup(struct console *co, char *options)
+static int hvc_console_setup(struct console *co, char *options)
 {      
        if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
                return -ENODEV;
index 41fe8a047d373cf84b14a9a2f5d8f41e07fd3b5f..fe9d129c87351b47392320a626dedb89e2f0bf55 100644 (file)
@@ -2353,8 +2353,12 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
                        if (tty->ops->flush_chars)
                                tty->ops->flush_chars(tty);
                } else {
+                       struct n_tty_data *ldata = tty->disc_data;
+
                        while (nr > 0) {
+                               mutex_lock(&ldata->output_lock);
                                c = tty->ops->write(tty, b, nr);
+                               mutex_unlock(&ldata->output_lock);
                                if (c < 0) {
                                        retval = c;
                                        goto break_out;
index 81f909c2101f6145f568fd048b60453533cf943d..2d4bd3929e507376f7d4b25f788fbba3b61af1a4 100644 (file)
@@ -555,7 +555,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
         */
        if ((p->port.type == PORT_XR17V35X) ||
           (p->port.type == PORT_XR17D15X)) {
-               serial_out(p, UART_EXAR_SLEEP, 0xff);
+               serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
                return;
        }
 
@@ -1520,7 +1520,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
                        status = serial8250_rx_chars(up, status);
        }
        serial8250_modem_status(up);
-       if (status & UART_LSR_THRE)
+       if (!up->dma && (status & UART_LSR_THRE))
                serial8250_tx_chars(up);
 
        spin_unlock_irqrestore(&port->lock, flags);
index 7046769608d403501158a044fd109203f0e0531f..ab9096dc384976de15c41f0d5f39b6975b72a45d 100644 (file)
@@ -20,12 +20,15 @@ static void __dma_tx_complete(void *param)
        struct uart_8250_port   *p = param;
        struct uart_8250_dma    *dma = p->dma;
        struct circ_buf         *xmit = &p->port.state->xmit;
-
-       dma->tx_running = 0;
+       unsigned long   flags;
 
        dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
                                UART_XMIT_SIZE, DMA_TO_DEVICE);
 
+       spin_lock_irqsave(&p->port.lock, flags);
+
+       dma->tx_running = 0;
+
        xmit->tail += dma->tx_size;
        xmit->tail &= UART_XMIT_SIZE - 1;
        p->port.icount.tx += dma->tx_size;
@@ -35,6 +38,8 @@ static void __dma_tx_complete(void *param)
 
        if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port))
                serial8250_tx_dma(p);
+
+       spin_unlock_irqrestore(&p->port.lock, flags);
 }
 
 static void __dma_rx_complete(void *param)
index 23f4596007382501589476b885d269bcce1484a6..1f5505e7f90dd9de0c84770a164c3a1d95968dc4 100644 (file)
@@ -1446,8 +1446,8 @@ static int s3c24xx_serial_get_poll_char(struct uart_port *port)
 static void s3c24xx_serial_put_poll_char(struct uart_port *port,
                unsigned char c)
 {
-       unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
-       unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+       unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
+       unsigned int ucon = rd_regl(port, S3C2410_UCON);
 
        /* not possible to xmit on unconfigured port */
        if (!s3c24xx_port_configured(ucon))
@@ -1455,7 +1455,7 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
 
        while (!s3c24xx_serial_console_txrdy(port, ufcon))
                cpu_relax();
-       wr_regb(cons_uart, S3C2410_UTXH, c);
+       wr_regb(port, S3C2410_UTXH, c);
 }
 
 #endif /* CONFIG_CONSOLE_POLL */
@@ -1463,22 +1463,23 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
 static void
 s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
 {
-       unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
-       unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
-
-       /* not possible to xmit on unconfigured port */
-       if (!s3c24xx_port_configured(ucon))
-               return;
+       unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
 
        while (!s3c24xx_serial_console_txrdy(port, ufcon))
-               barrier();
-       wr_regb(cons_uart, S3C2410_UTXH, ch);
+               cpu_relax();
+       wr_regb(port, S3C2410_UTXH, ch);
 }
 
 static void
 s3c24xx_serial_console_write(struct console *co, const char *s,
                             unsigned int count)
 {
+       unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+
+       /* not possible to xmit on unconfigured port */
+       if (!s3c24xx_port_configured(ucon))
+               return;
+
        uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
 }
 
index f26834d262b3a8d540a96455480fa4d5b6a1a81e..b68550d95a403dbc6d5ecb0faa4a5bb4d38cd86f 100644 (file)
@@ -136,6 +136,11 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
        if (uport->type == PORT_UNKNOWN)
                return 1;
 
+       /*
+        * Make sure the device is in D0 state.
+        */
+       uart_change_pm(state, UART_PM_STATE_ON);
+
        /*
         * Initialise and allocate the transmit and temporary
         * buffer.
@@ -825,25 +830,29 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
                 * If we fail to request resources for the
                 * new port, try to restore the old settings.
                 */
-               if (retval && old_type != PORT_UNKNOWN) {
+               if (retval) {
                        uport->iobase = old_iobase;
                        uport->type = old_type;
                        uport->hub6 = old_hub6;
                        uport->iotype = old_iotype;
                        uport->regshift = old_shift;
                        uport->mapbase = old_mapbase;
-                       retval = uport->ops->request_port(uport);
-                       /*
-                        * If we failed to restore the old settings,
-                        * we fail like this.
-                        */
-                       if (retval)
-                               uport->type = PORT_UNKNOWN;
 
-                       /*
-                        * We failed anyway.
-                        */
-                       retval = -EBUSY;
+                       if (old_type != PORT_UNKNOWN) {
+                               retval = uport->ops->request_port(uport);
+                               /*
+                                * If we failed to restore the old settings,
+                                * we fail like this.
+                                */
+                               if (retval)
+                                       uport->type = PORT_UNKNOWN;
+
+                               /*
+                                * We failed anyway.
+                                */
+                               retval = -EBUSY;
+                       }
+
                        /* Added to return the correct error -Ram Gupta */
                        goto exit;
                }
@@ -1570,12 +1579,6 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
                goto err_dec_count;
        }
 
-       /*
-        * Make sure the device is in D0 state.
-        */
-       if (port->count == 1)
-               uart_change_pm(state, UART_PM_STATE_ON);
-
        /*
         * Start up the serial port.
         */
index 8ebd9f88a6f69ff85f63139944fad2542c789483..cf78d1985cd851fb2b6615054bfabf5a8e3b2b13 100644 (file)
@@ -258,7 +258,11 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
                        n->flags = flags;
                        buf->tail = n;
                        b->commit = b->used;
-                       smp_mb();
+                       /* paired w/ barrier in flush_to_ldisc(); ensures the
+                        * latest commit value can be read before the head is
+                        * advanced to the next buffer
+                        */
+                       smp_wmb();
                        b->next = n;
                } else if (change)
                        size = 0;
@@ -444,17 +448,24 @@ static void flush_to_ldisc(struct work_struct *work)
 
        while (1) {
                struct tty_buffer *head = buf->head;
+               struct tty_buffer *next;
                int count;
 
                /* Ldisc or user is trying to gain exclusive access */
                if (atomic_read(&buf->priority))
                        break;
 
+               next = head->next;
+               /* paired w/ barrier in __tty_buffer_request_room();
+                * ensures commit value read is not stale if the head
+                * is advancing to the next buffer
+                */
+               smp_rmb();
                count = head->commit - head->read;
                if (!count) {
-                       if (head->next == NULL)
+                       if (next == NULL)
                                break;
-                       buf->head = head->next;
+                       buf->head = next;
                        tty_buffer_free(port, head);
                        continue;
                }
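
The tty-buffer hunks replace a full smp_mb() with a documented smp_wmb()/smp_rmb() pair: the writer publishes commit before advancing next, and the reader loads next before reading commit. The same pairing expressed with C11 fences:

    #include <stdatomic.h>

    struct buf {
        int commit;                 /* how much data the writer has published */
        struct buf *_Atomic next;   /* link to the buffer that follows */
    };

    /* Writer: make the commit count visible before exposing the new buffer. */
    static void publish(struct buf *b, struct buf *n, int used)
    {
        b->commit = used;
        atomic_thread_fence(memory_order_release);   /* smp_wmb() counterpart */
        atomic_store_explicit(&b->next, n, memory_order_relaxed);
    }

    /* Reader: load next first, then commit, with an acquire fence in between. */
    static int consume(struct buf *b, struct buf **nextp)
    {
        *nextp = atomic_load_explicit(&b->next, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);   /* smp_rmb() counterpart */
        return b->commit;
    }

    int main(void)
    {
        static struct buf a, c;
        struct buf *n;

        publish(&a, &c, 128);
        return consume(&a, &n) == 128 ? 0 : 1;
    }
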
index ca6831c5b763053d146d8c7bbae2f10d85e3c3d1..1cd5d0ba587c8ebaf49c876a086570be4cabd8c9 100644 (file)
@@ -276,6 +276,39 @@ static void hw_phymode_configure(struct ci_hdrc *ci)
        }
 }
 
+/**
+ * ci_usb_phy_init: initialize phy according to different phy type
+ * @ci: the controller
+ *
+ * This function returns an error code if usb_phy_init has failed
+ */
+static int ci_usb_phy_init(struct ci_hdrc *ci)
+{
+       int ret;
+
+       switch (ci->platdata->phy_mode) {
+       case USBPHY_INTERFACE_MODE_UTMI:
+       case USBPHY_INTERFACE_MODE_UTMIW:
+       case USBPHY_INTERFACE_MODE_HSIC:
+               ret = usb_phy_init(ci->transceiver);
+               if (ret)
+                       return ret;
+               hw_phymode_configure(ci);
+               break;
+       case USBPHY_INTERFACE_MODE_ULPI:
+       case USBPHY_INTERFACE_MODE_SERIAL:
+               hw_phymode_configure(ci);
+               ret = usb_phy_init(ci->transceiver);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               ret = usb_phy_init(ci->transceiver);
+       }
+
+       return ret;
+}
+
 /**
  * hw_device_reset: resets chip (execute without interruption)
  * @ci: the controller
@@ -543,8 +576,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       hw_phymode_configure(ci);
-
        if (ci->platdata->phy)
                ci->transceiver = ci->platdata->phy;
        else
@@ -564,7 +595,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
                return -EPROBE_DEFER;
        }
 
-       ret = usb_phy_init(ci->transceiver);
+       ret = ci_usb_phy_init(ci);
        if (ret) {
                dev_err(dev, "unable to init phy: %d\n", ret);
                return ret;
index d001417e8e370cd7f3e7fa2d9b9c2678271987dc..10aaaae9af25e8f6b797f063b5e5ba64ec37c847 100644 (file)
@@ -821,6 +821,7 @@ static void dwc3_complete(struct device *dev)
 
        spin_lock_irqsave(&dwc->lock, flags);
 
+       dwc3_event_buffers_setup(dwc);
        switch (dwc->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
        case USB_DR_MODE_OTG:
@@ -828,7 +829,6 @@ static void dwc3_complete(struct device *dev)
                /* FALLTHROUGH */
        case USB_DR_MODE_HOST:
        default:
-               dwc3_event_buffers_setup(dwc);
                break;
        }
 
index a740eac74d56d502675107f03ffbd324e85f1f63..70715eeededda3b4acfab6bff72f13531338a8b2 100644 (file)
@@ -187,15 +187,12 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
         * improve this algorithm so that we better use the internal
         * FIFO space
         */
-       for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
-               struct dwc3_ep  *dep = dwc->eps[num];
-               int             fifo_number = dep->number >> 1;
+       for (num = 0; num < dwc->num_in_eps; num++) {
+               /* bit0 indicates direction; 1 means IN ep */
+               struct dwc3_ep  *dep = dwc->eps[(num << 1) | 1];
                int             mult = 1;
                int             tmp;
 
-               if (!(dep->number & 1))
-                       continue;
-
                if (!(dep->flags & DWC3_EP_ENABLED))
                        continue;
 
@@ -224,8 +221,7 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
                dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
                                dep->name, last_fifo_depth, fifo_size & 0xffff);
 
-               dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
-                               fifo_size);
+               dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
 
                last_fifo_depth += (fifo_size & 0xffff);
        }
index f605ad8c1902fc775cffc39dcd53be8d65fe3a8f..cfd18bcca723ef700d727fa1f8f8a39405eba13f 100644 (file)
@@ -1709,16 +1709,6 @@ static int at91udc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       if (pdev->num_resources != 2) {
-               DBG("invalid num_resources\n");
-               return -ENODEV;
-       }
-       if ((pdev->resource[0].flags != IORESOURCE_MEM)
-                       || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
-               DBG("invalid resource type\n");
-               return -ENODEV;
-       }
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENXIO;
index 2e164dca08e89fc29ea1887f0afe7b2a09e1a5af..1e12b3ee56fd837117a03266cf66205bb418b366 100644 (file)
@@ -745,6 +745,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                 */
                struct usb_gadget *gadget = epfile->ffs->gadget;
 
+               spin_lock_irq(&epfile->ffs->eps_lock);
+               /* In the meantime, endpoint got disabled or changed. */
+               if (epfile->ep != ep) {
+                       spin_unlock_irq(&epfile->ffs->eps_lock);
+                       return -ESHUTDOWN;
+               }
                /*
                 * Controller may require buffer size to be aligned to
                 * maxpacketsize of an out endpoint.
@@ -752,6 +758,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                data_len = io_data->read ?
                           usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
                           io_data->len;
+               spin_unlock_irq(&epfile->ffs->eps_lock);
 
                data = kmalloc(data_len, GFP_KERNEL);
                if (unlikely(!data))
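
The f_fs hunk takes eps_lock and re-checks that epfile->ep is still the endpoint it started with before using gadget-dependent sizing, returning -ESHUTDOWN if it changed underneath. A sketch of that validate-after-locking shape with pthreads and hypothetical names:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t eps_lock = PTHREAD_MUTEX_INITIALIZER;

    struct endpoint { int maxpacket; };
    struct epfile   { struct endpoint *ep; };

    /* 'expected' is what the caller sampled before it last dropped the lock. */
    static int sample_maxpacket(struct epfile *f, struct endpoint *expected,
                                int *maxpacket)
    {
        pthread_mutex_lock(&eps_lock);
        if (f->ep != expected) {            /* endpoint was disabled or swapped */
            pthread_mutex_unlock(&eps_lock);
            return -ESHUTDOWN;
        }
        *maxpacket = f->ep->maxpacket;      /* safe: still the same endpoint */
        pthread_mutex_unlock(&eps_lock);
        return 0;
    }

    int main(void)
    {
        struct endpoint ep = { .maxpacket = 512 };
        struct epfile f = { .ep = &ep };
        int mp;

        if (sample_maxpacket(&f, &ep, &mp) == 0)
            printf("maxpacket %d\n", mp);
        return 0;
    }
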
index c11761ce511302fbc386791937a4fecf8a98cfa0..9a4f49dc6ac4f879cfac9496c49951b21c18fe71 100644 (file)
@@ -377,7 +377,7 @@ static struct sk_buff *rndis_add_header(struct gether *port,
        if (skb2)
                rndis_add_hdr(skb2);
 
-       dev_kfree_skb_any(skb);
+       dev_kfree_skb(skb);
        return skb2;
 }
 
index 15960af0f67ea7805e22e9fea6671c2635e16153..a2f26cdb56fef07a0a15e8bb329c089413efab6a 100644 (file)
@@ -1219,6 +1219,10 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
        struct fsl_udc *udc;
 
        udc = container_of(gadget, struct fsl_udc, gadget);
+
+       if (!udc->vbus_active)
+               return -EOPNOTSUPP;
+
        udc->softconnect = (is_on != 0);
        if (can_pullup(udc))
                fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -2532,8 +2536,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
        if (!udc_controller)
                return -ENODEV;
 
-       usb_del_gadget_udc(&udc_controller->gadget);
        udc_controller->done = &done;
+       usb_del_gadget_udc(&udc_controller->gadget);
 
        fsl_udc_clk_release();
 
index b5be6f0308c270f2a844db3f00e7884380e5c2e6..a925d0cbcd4199d777071408f8e335119932b3ab 100644 (file)
@@ -2043,6 +2043,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
                return -ESRCH;
 
        /* fake probe to determine $CHIP */
+       CHIP = NULL;
        usb_gadget_probe_driver(&probe_driver);
        if (!CHIP)
                return -ENODEV;
index d822d822efb34d38b1621d4d63478c82cd6ec9e3..7ed452d90f4d76c9d48c2bb3c2f55a8d40a0f5e9 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
+#include "u_rndis.h"
 
 #undef VERBOSE_DEBUG
 
index 50d09c289137024271a480acab22ff1d8443d96e..ce8e28146162723a1b9b2f0bf7db34f66065d09b 100644 (file)
@@ -48,8 +48,6 @@
 
 #define UETH__VERSION  "29-May-2008"
 
-#define GETHER_NAPI_WEIGHT     32
-
 struct eth_dev {
        /* lock is held while accessing port_usb
         */
@@ -74,7 +72,6 @@ struct eth_dev {
                                                struct sk_buff_head *list);
 
        struct work_struct      work;
-       struct napi_struct      rx_napi;
 
        unsigned long           todo;
 #define        WORK_RX_MEMORY          0
@@ -256,16 +253,18 @@ enomem:
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
+               spin_lock_irqsave(&dev->req_lock, flags);
+               list_add(&req->list, &dev->rx_reqs);
+               spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-       struct sk_buff  *skb = req->context;
+       struct sk_buff  *skb = req->context, *skb2;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;
-       bool            rx_queue = 0;
 
        switch (status) {
 
@@ -289,8 +288,30 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
-               if (!status)
-                       rx_queue = 1;
+               skb = NULL;
+
+               skb2 = skb_dequeue(&dev->rx_frames);
+               while (skb2) {
+                       if (status < 0
+                                       || ETH_HLEN > skb2->len
+                                       || skb2->len > VLAN_ETH_FRAME_LEN) {
+                               dev->net->stats.rx_errors++;
+                               dev->net->stats.rx_length_errors++;
+                               DBG(dev, "rx length %d\n", skb2->len);
+                               dev_kfree_skb_any(skb2);
+                               goto next_frame;
+                       }
+                       skb2->protocol = eth_type_trans(skb2, dev->net);
+                       dev->net->stats.rx_packets++;
+                       dev->net->stats.rx_bytes += skb2->len;
+
+                       /* no buffer copies needed, unless hardware can't
+                        * use skb buffers.
+                        */
+                       status = netif_rx(skb2);
+next_frame:
+                       skb2 = skb_dequeue(&dev->rx_frames);
+               }
                break;
 
        /* software-driven interface shutdown */
@@ -313,20 +334,22 @@ quiesce:
                /* FALLTHROUGH */
 
        default:
-               rx_queue = 1;
-               dev_kfree_skb_any(skb);
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }
 
+       if (skb)
+               dev_kfree_skb_any(skb);
+       if (!netif_running(dev->net)) {
 clean:
                spin_lock(&dev->req_lock);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock(&dev->req_lock);
-
-       if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi)))
-               __napi_schedule(&dev->rx_napi);
+               req = NULL;
+       }
+       if (req)
+               rx_submit(dev, req, GFP_ATOMIC);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -391,24 +414,16 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
        struct usb_request      *req;
        unsigned long           flags;
-       int                     rx_counts = 0;
 
        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
-
-               if (++rx_counts > qlen(dev->gadget, dev->qmult))
-                       break;
-
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);
 
                if (rx_submit(dev, req, gfp_flags) < 0) {
-                       spin_lock_irqsave(&dev->req_lock, flags);
-                       list_add(&req->list, &dev->rx_reqs);
-                       spin_unlock_irqrestore(&dev->req_lock, flags);
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }
@@ -418,41 +433,6 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
        spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
-static int gether_poll(struct napi_struct *napi, int budget)
-{
-       struct eth_dev  *dev = container_of(napi, struct eth_dev, rx_napi);
-       struct sk_buff  *skb;
-       unsigned int    work_done = 0;
-       int             status = 0;
-
-       while ((skb = skb_dequeue(&dev->rx_frames))) {
-               if (status < 0
-                               || ETH_HLEN > skb->len
-                               || skb->len > VLAN_ETH_FRAME_LEN) {
-                       dev->net->stats.rx_errors++;
-                       dev->net->stats.rx_length_errors++;
-                       DBG(dev, "rx length %d\n", skb->len);
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
-               skb->protocol = eth_type_trans(skb, dev->net);
-               dev->net->stats.rx_packets++;
-               dev->net->stats.rx_bytes += skb->len;
-
-               status = netif_rx_ni(skb);
-       }
-
-       if (netif_running(dev->net)) {
-               rx_fill(dev, GFP_KERNEL);
-               work_done++;
-       }
-
-       if (work_done < budget)
-               napi_complete(&dev->rx_napi);
-
-       return work_done;
-}
-
 static void eth_work(struct work_struct *work)
 {
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);
@@ -645,7 +625,6 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
        /* and open the tx floodgates */
        atomic_set(&dev->tx_qlen, 0);
        netif_wake_queue(dev->net);
-       napi_enable(&dev->rx_napi);
 }
 
 static int eth_open(struct net_device *net)
@@ -672,7 +651,6 @@ static int eth_stop(struct net_device *net)
        unsigned long   flags;
 
        VDBG(dev, "%s\n", __func__);
-       napi_disable(&dev->rx_napi);
        netif_stop_queue(net);
 
        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
@@ -790,7 +768,6 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
                return ERR_PTR(-ENOMEM);
 
        dev = netdev_priv(net);
-       netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
@@ -816,7 +793,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
 
        net->netdev_ops = &eth_netdev_ops;
 
-       SET_ETHTOOL_OPS(net, &ops);
+       net->ethtool_ops = &ops;
 
        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
@@ -853,7 +830,6 @@ struct net_device *gether_setup_name_default(const char *netname)
                return ERR_PTR(-ENOMEM);
 
        dev = netdev_priv(net);
-       netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
@@ -874,7 +850,7 @@ struct net_device *gether_setup_name_default(const char *netname)
 
        net->netdev_ops = &eth_netdev_ops;
 
-       SET_ETHTOOL_OPS(net, &ops);
+       net->ethtool_ops = &ops;
        SET_NETDEV_DEVTYPE(net, &gadget_type);
 
        return net;
@@ -1137,7 +1113,6 @@ void gether_disconnect(struct gether *link)
 {
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;
-       struct sk_buff          *skb;
 
        WARN_ON(!dev);
        if (!dev)
@@ -1164,12 +1139,6 @@ void gether_disconnect(struct gether *link)
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
-
-       spin_lock(&dev->rx_frames.lock);
-       while ((skb = __skb_dequeue(&dev->rx_frames)))
-               dev_kfree_skb_any(skb);
-       spin_unlock(&dev->rx_frames.lock);
-
        link->in_ep->driver_data = NULL;
        link->in_ep->desc = NULL;
 
index 9f170c53e3d9a96d97c6978502ba6f4240189438..134f354ede62e1b9f35ff189b0feda75aad4ac92 100644 (file)
@@ -300,7 +300,7 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
        ss_opts->isoc_interval = gzero_options.isoc_interval;
        ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
        ss_opts->isoc_mult = gzero_options.isoc_mult;
-       ss_opts->isoc_maxburst = gzero_options.isoc_maxpacket;
+       ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
        ss_opts->bulk_buflen = gzero_options.bulk_buflen;
 
        func_ss = usb_get_function(func_inst_ss);
index 6f2c8d3899d2cfb00f14fe641341f68944943213..cf2734b532a7ab288d24dd13fd79db364490a748 100644 (file)
@@ -248,7 +248,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
                break;
        }
 
-       if (pdata->have_sysif_regs && pdata->controller_ver &&
+       if (pdata->have_sysif_regs &&
+           pdata->controller_ver > FSL_USB_VER_1_6 &&
            (phy_mode == FSL_USB2_PHY_ULPI)) {
                /* check PHY_CLK_VALID to get phy clk valid */
                if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
index c81c8721cc5a9e3d07e20c29d9d40e540136409e..cd871b89501325407af3ab3ebf0eb56ab0814a79 100644 (file)
@@ -90,6 +90,24 @@ __acquires(ohci->lock)
        dl_done_list (ohci);
        finish_unlinks (ohci, ohci_frame_no(ohci));
 
+       /*
+        * Some controllers don't handle "global" suspend properly if
+        * there are unsuspended ports.  For these controllers, put all
+        * the enabled ports into suspend before suspending the root hub.
+        */
+       if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
+               __hc32 __iomem  *portstat = ohci->regs->roothub.portstatus;
+               int             i;
+               unsigned        temp;
+
+               for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
+                       temp = ohci_readl(ohci, portstat);
+                       if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
+                                       RH_PS_PES)
+                               ohci_writel(ohci, RH_PS_PSS, portstat);
+               }
+       }
+
        /* maybe resume can wake root hub */
        if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
                ohci->hc_control |= OHCI_CTRL_RWE;
index 90879e9ccbec302e8c5272d45e4847009b3730c4..bb1509675727b374586d61917920578cc7631a45 100644 (file)
@@ -160,6 +160,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
                ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
        }
 
+       ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
        return 0;
 }
 
index 9250cada13f0b3e9a22711de3345b67de1627e01..4550ce05af7fa1d1b96c03dc2590c5c0cc943615 100644 (file)
@@ -405,6 +405,8 @@ struct ohci_hcd {
 #define        OHCI_QUIRK_HUB_POWER    0x100                   /* distrust firmware power/oc setup */
 #define        OHCI_QUIRK_AMD_PLL      0x200                   /* AMD PLL quirk*/
 #define        OHCI_QUIRK_AMD_PREFETCH 0x400                   /* pre-fetch for ISO transfer */
+#define        OHCI_QUIRK_GLOBAL_SUSPEND       0x800           /* must suspend ports */
+
        // there are also chip quirks/bugs in init logic
 
        struct work_struct      nec_work;       /* Worker for NEC quirk */
index 47390e369cd402f776c20bbb35b4c37223f04057..35d44778070786109262d96c95341758675a41e3 100644 (file)
@@ -134,6 +134,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 */
                if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
                        xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+
+               xhci->quirks |= XHCI_SPURIOUS_REBOOT;
        }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
@@ -143,9 +145,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        }
        if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
-                       pdev->device == 0x0015 &&
-                       pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
-                       pdev->subsystem_device == 0xc0cd)
+                       pdev->device == 0x0015)
                xhci->quirks |= XHCI_RESET_ON_RESUME;
        if (pdev->vendor == PCI_VENDOR_ID_VIA)
                xhci->quirks |= XHCI_RESET_ON_RESUME;
index 5f926bea5ab1d87a54054ad68555ea31bd62cb63..7a0e3c720c005faed89e3caa55eeff73f1fd113c 100644 (file)
@@ -550,6 +550,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        struct xhci_ring *ep_ring;
        struct xhci_generic_trb *trb;
        dma_addr_t addr;
+       u64 hw_dequeue;
 
        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
@@ -559,16 +560,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                                stream_id);
                return;
        }
-       state->new_cycle_state = 0;
-       xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-                       "Finding segment containing stopped TRB.");
-       state->new_deq_seg = find_trb_seg(cur_td->start_seg,
-                       dev->eps[ep_index].stopped_trb,
-                       &state->new_cycle_state);
-       if (!state->new_deq_seg) {
-               WARN_ON(1);
-               return;
-       }
 
        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -577,46 +568,57 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        if (ep->ep_state & EP_HAS_STREAMS) {
                struct xhci_stream_ctx *ctx =
                        &ep->stream_info->stream_ctx_array[stream_id];
-               state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
+               hw_dequeue = le64_to_cpu(ctx->stream_ring);
        } else {
                struct xhci_ep_ctx *ep_ctx
                        = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-               state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+               hw_dequeue = le64_to_cpu(ep_ctx->deq);
        }
 
+       /* Find virtual address and segment of hardware dequeue pointer */
+       state->new_deq_seg = ep_ring->deq_seg;
+       state->new_deq_ptr = ep_ring->dequeue;
+       while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+                       != (dma_addr_t)(hw_dequeue & ~0xf)) {
+               next_trb(xhci, ep_ring, &state->new_deq_seg,
+                                       &state->new_deq_ptr);
+               if (state->new_deq_ptr == ep_ring->dequeue) {
+                       WARN_ON(1);
+                       return;
+               }
+       }
+       /*
+        * Find cycle state for last_trb, starting at old cycle state of
+        * hw_dequeue. If the ring has only one segment, find_trb_seg() will
+        * return immediately and cannot toggle the cycle state if this search
+        * wraps around, so add one more toggle manually in that case.
+        */
+       state->new_cycle_state = hw_dequeue & 0x1;
+       if (ep_ring->first_seg == ep_ring->first_seg->next &&
+                       cur_td->last_trb < state->new_deq_ptr)
+               state->new_cycle_state ^= 0x1;
+
        state->new_deq_ptr = cur_td->last_trb;
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Finding segment containing last TRB in TD.");
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
-                       state->new_deq_ptr,
-                       &state->new_cycle_state);
+                       state->new_deq_ptr, &state->new_cycle_state);
        if (!state->new_deq_seg) {
                WARN_ON(1);
                return;
        }
 
+       /* Increment to find next TRB after last_trb. Cycle if appropriate. */
        trb = &state->new_deq_ptr->generic;
        if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
            (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
                state->new_cycle_state ^= 0x1;
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
-       /*
-        * If there is only one segment in a ring, find_trb_seg()'s while loop
-        * will not run, and it will return before it has a chance to see if it
-        * needs to toggle the cycle bit.  It can't tell if the stalled transfer
-        * ended just before the link TRB on a one-segment ring, or if the TD
-        * wrapped around the top of the ring, because it doesn't have the TD in
-        * question.  Look for the one-segment case where stalled TRB's address
-        * is greater than the new dequeue pointer address.
-        */
-       if (ep_ring->first_seg == ep_ring->first_seg->next &&
-                       state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
-               state->new_cycle_state ^= 0x1;
+       /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Cycle state = 0x%x", state->new_cycle_state);
 
-       /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "New dequeue segment = %p (virtual)",
                        state->new_deq_seg);
@@ -799,7 +801,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
        if (list_empty(&ep->cancelled_td_list)) {
                xhci_stop_watchdog_timer_in_irq(xhci, ep);
                ep->stopped_td = NULL;
-               ep->stopped_trb = NULL;
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                return;
        }
@@ -867,11 +868,9 @@ remove_finished_td:
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
 
-       /* Clear stopped_td and stopped_trb if endpoint is not halted */
-       if (!(ep->ep_state & EP_HALTED)) {
+       /* Clear stopped_td if endpoint is not halted */
+       if (!(ep->ep_state & EP_HALTED))
                ep->stopped_td = NULL;
-               ep->stopped_trb = NULL;
-       }
 
        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1941,14 +1940,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        ep->ep_state |= EP_HALTED;
        ep->stopped_td = td;
-       ep->stopped_trb = event_trb;
        ep->stopped_stream = stream_id;
 
        xhci_queue_reset_ep(xhci, slot_id, ep_index);
        xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
 
        ep->stopped_td = NULL;
-       ep->stopped_trb = NULL;
        ep->stopped_stream = 0;
 
        xhci_ring_cmd_db(xhci);
@@ -2030,7 +2027,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                 * the ring dequeue pointer or take this TD off any lists yet.
                 */
                ep->stopped_td = td;
-               ep->stopped_trb = event_trb;
                return 0;
        } else {
                if (trb_comp_code == COMP_STALL) {
@@ -2042,7 +2038,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                         * USB class driver clear the stall later.
                         */
                        ep->stopped_td = td;
-                       ep->stopped_trb = event_trb;
                        ep->stopped_stream = ep_ring->stream_id;
                } else if (xhci_requires_manual_halt_cleanup(xhci,
                                        ep_ctx, trb_comp_code)) {
index 8fe4e124ddd49f17fb3ac6d2088cedc15758d7b2..300836972faa41cb11b3c61bf4423f702e2d1d0c 100644 (file)
@@ -408,16 +408,16 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
 
 #else
 
-static int xhci_try_enable_msi(struct usb_hcd *hcd)
+static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
 {
        return 0;
 }
 
-static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
 }
 
-static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 {
 }
 
@@ -2954,7 +2954,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
                xhci_ring_cmd_db(xhci);
        }
        virt_ep->stopped_td = NULL;
-       virt_ep->stopped_trb = NULL;
        virt_ep->stopped_stream = 0;
        spin_unlock_irqrestore(&xhci->lock, flags);
 
index d280e9213d08614002030573afcb2b93a5bbe9c8..4746816aed3e7c42097d10986acfff09e19ac27b 100644 (file)
@@ -865,8 +865,6 @@ struct xhci_virt_ep {
 #define EP_GETTING_NO_STREAMS  (1 << 5)
        /* ----  Related to URB cancellation ---- */
        struct list_head        cancelled_td_list;
-       /* The TRB that was last reported in a stopped endpoint ring */
-       union xhci_trb          *stopped_trb;
        struct xhci_td          *stopped_td;
        unsigned int            stopped_stream;
        /* Watchdog timer for stop endpoint command to cancel URBs */
index 3372ded5def79853e75ac79ecc66b74d73c63542..e2fd263585de3fb2d8cb5cab3e782eff5675a198 100644 (file)
@@ -470,8 +470,9 @@ static int dsps_musb_exit(struct musb *musb)
        struct dsps_glue *glue = dev_get_drvdata(dev->parent);
 
        del_timer_sync(&glue->timer);
-
        usb_phy_shutdown(musb->xceiv);
+       debugfs_remove_recursive(glue->dbgfs_root);
+
        return 0;
 }
 
@@ -708,8 +709,6 @@ static int dsps_remove(struct platform_device *pdev)
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       debugfs_remove_recursive(glue->dbgfs_root);
-
        return 0;
 }
 
index d341c149a2f90c1372201b9a78aff8a89d4f3e6f..d369bf1f3936cba910de97a4859c27174dbed8a5 100644 (file)
@@ -316,7 +316,13 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
 {
        struct omap2430_glue *glue = container_of(mailbox_work,
                                struct omap2430_glue, omap_musb_mailbox_work);
+       struct musb *musb = glue_to_musb(glue);
+       struct device *dev = musb->controller;
+
+       pm_runtime_get_sync(dev);
        omap_musb_set_mailbox(glue);
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
 }
 
 static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
@@ -416,6 +422,7 @@ static int omap2430_musb_init(struct musb *musb)
                omap_musb_set_mailbox(glue);
 
        phy_init(musb->phy);
+       phy_power_on(musb->phy);
 
        pm_runtime_put_noidle(musb->controller);
        return 0;
@@ -478,6 +485,7 @@ static int omap2430_musb_exit(struct musb *musb)
        del_timer_sync(&musb_idle_timer);
 
        omap2430_low_level_exit(musb);
+       phy_power_off(musb->phy);
        phy_exit(musb->phy);
 
        return 0;
index d75196ad5f2f6e7a13a76e16f283464ae875507e..35b6083b799949bc30f4f359ee62a55542959ca9 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/err.h>
 #include <linux/of.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 #include "am35x-phy-control.h"
 
 struct am335x_control_usb {
@@ -86,6 +87,14 @@ static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on)
        }
 
        writel(val, usb_ctrl->phy_reg + reg);
+
+       /*
+        * Give the PHY ~1ms to complete the power up operation.
+        * Tests have shown unstable behaviour if other USB-PHY-related
+        * registers are written too soon after such a transition.
+        */
+       if (on)
+               mdelay(1);
 }
 
 static const struct phy_control ctrl_am335x = {
index c47e5a6edde28a1d35d64d6f375df401a8519709..d03fadd2629f1419b00a60ef9d842913c5ae6183 100644 (file)
@@ -303,17 +303,18 @@ int otg_statemachine(struct otg_fsm *fsm)
                        otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE);
                break;
        case OTG_STATE_A_WAIT_VRISE:
-               if (fsm->id || fsm->a_bus_drop || fsm->a_vbus_vld ||
-                               fsm->a_wait_vrise_tmout) {
+               if (fsm->a_vbus_vld)
                        otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
-               }
+               else if (fsm->id || fsm->a_bus_drop ||
+                               fsm->a_wait_vrise_tmout)
+                       otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
                break;
        case OTG_STATE_A_WAIT_BCON:
                if (!fsm->a_vbus_vld)
                        otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
                else if (fsm->b_conn)
                        otg_set_state(fsm, OTG_STATE_A_HOST);
-               else if (fsm->id | fsm->a_bus_drop | fsm->a_wait_bcon_tmout)
+               else if (fsm->id || fsm->a_bus_drop || fsm->a_wait_bcon_tmout)
                        otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
                break;
        case OTG_STATE_A_HOST:
index 8afa813d690bc6f7aa15c9b9c7523cf96b24099a..36b6bce33b20c17df4583e5ce5e9b07298335842 100644 (file)
@@ -132,6 +132,9 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
        if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
                pr_debug("PHY: unable to find transceiver of type %s\n",
                        usb_phy_type_string(type));
+               if (!IS_ERR(phy))
+                       phy = ERR_PTR(-ENODEV);
+
                goto err0;
        }
 
index a2db5be9c30534ceae25eb262bde00242a152e2d..df90dae53eb97cde7d9e1d6ebf47233c7b3bb149 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 #include <linux/serial.h>
+#include <linux/swab.h>
 #include <linux/kfifo.h>
 #include <linux/ioctl.h>
 #include <linux/firmware.h>
@@ -280,7 +281,7 @@ static int read_download_mem(struct usb_device *dev, int start_address,
 {
        int status = 0;
        __u8 read_length;
-       __be16 be_start_address;
+       u16 be_start_address;
 
        dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
 
@@ -296,10 +297,14 @@ static int read_download_mem(struct usb_device *dev, int start_address,
                if (read_length > 1) {
                        dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
                }
-               be_start_address = cpu_to_be16(start_address);
+               /*
+                * NOTE: Must use swab as wIndex is sent in little-endian
+                *       byte order regardless of host byte order.
+                */
+               be_start_address = swab16((u16)start_address);
                status = ti_vread_sync(dev, UMPC_MEMORY_READ,
                                        (__u16)address_type,
-                                       (__force __u16)be_start_address,
+                                       be_start_address,
                                        buffer, read_length);
 
                if (status) {
@@ -394,7 +399,7 @@ static int write_i2c_mem(struct edgeport_serial *serial,
        struct device *dev = &serial->serial->dev->dev;
        int status = 0;
        int write_length;
-       __be16 be_start_address;
+       u16 be_start_address;
 
        /* We can only send a maximum of 1 aligned byte page at a time */
 
@@ -409,11 +414,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
                __func__, start_address, write_length);
        usb_serial_debug_data(dev, __func__, write_length, buffer);
 
-       /* Write first page */
-       be_start_address = cpu_to_be16(start_address);
+       /*
+        * Write first page.
+        *
+        * NOTE: Must use swab as wIndex is sent in little-endian byte order
+        *       regardless of host byte order.
+        */
+       be_start_address = swab16((u16)start_address);
        status = ti_vsend_sync(serial->serial->dev,
                                UMPC_MEMORY_WRITE, (__u16)address_type,
-                               (__force __u16)be_start_address,
+                               be_start_address,
                                buffer, write_length);
        if (status) {
                dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
@@ -436,11 +446,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
                        __func__, start_address, write_length);
                usb_serial_debug_data(dev, __func__, write_length, buffer);
 
-               /* Write next page */
-               be_start_address = cpu_to_be16(start_address);
+               /*
+                * Write next page.
+                *
+                * NOTE: Must use swab as wIndex is sent in little-endian byte
+                *       order regardless of host byte order.
+                */
+               be_start_address = swab16((u16)start_address);
                status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
                                (__u16)address_type,
-                               (__force __u16)be_start_address,
+                               be_start_address,
                                buffer, write_length);
                if (status) {
                        dev_err(dev, "%s - ERROR %d\n", __func__, status);
@@ -585,8 +600,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial,
                if (rom_desc->Type == desc_type)
                        return start_address;
 
-               start_address = start_address + sizeof(struct ti_i2c_desc)
-                                                       + rom_desc->Size;
+               start_address = start_address + sizeof(struct ti_i2c_desc) +
+                                               le16_to_cpu(rom_desc->Size);
 
        } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
 
@@ -599,7 +614,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
        __u16 i;
        __u8 cs = 0;
 
-       for (i = 0; i < rom_desc->Size; i++)
+       for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
                cs = (__u8)(cs + buffer[i]);
 
        if (cs != rom_desc->CheckSum) {
@@ -650,7 +665,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
                        break;
 
                if ((start_address + sizeof(struct ti_i2c_desc) +
-                                       rom_desc->Size) > TI_MAX_I2C_SIZE) {
+                       le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
                        status = -ENODEV;
                        dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
                        break;
@@ -665,7 +680,8 @@ static int check_i2c_image(struct edgeport_serial *serial)
                        /* Read the descriptor data */
                        status = read_rom(serial, start_address +
                                                sizeof(struct ti_i2c_desc),
-                                               rom_desc->Size, buffer);
+                                               le16_to_cpu(rom_desc->Size),
+                                               buffer);
                        if (status)
                                break;
 
@@ -674,7 +690,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
                                break;
                }
                start_address = start_address + sizeof(struct ti_i2c_desc) +
-                                                               rom_desc->Size;
+                                               le16_to_cpu(rom_desc->Size);
 
        } while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
                                (start_address < TI_MAX_I2C_SIZE));
@@ -712,7 +728,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
 
        /* Read the descriptor data */
        status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
-                                               rom_desc->Size, buffer);
+                                       le16_to_cpu(rom_desc->Size), buffer);
        if (status)
                goto exit;
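
The swab16() conversions above follow from the note in the new comments: the firmware wants the memory address byte-swapped in wIndex on every host, whereas cpu_to_be16() only swaps on little-endian builds. A standalone illustration, assuming nothing beyond standard C (my_swab16() merely mimics the kernel's swab16(); the address value is arbitrary):

/* Illustration only: unconditional swab16() vs. host-dependent cpu_to_be16(). */
#include <stdint.h>
#include <stdio.h>

static uint16_t my_swab16(uint16_t x)           /* stand-in for the kernel helper */
{
        return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
        uint16_t addr = 0x1234;

        /*
         * On a little-endian host, (u16)cpu_to_be16(addr) happens to equal
         * my_swab16(addr) (0x3412), which is why the old code worked there.
         * On a big-endian host, cpu_to_be16() is a no-op, so the address
         * reached the device un-swapped; swab16() swaps on every host.
         */
        printf("swab16(0x%04x) = 0x%04x\n", addr, my_swab16(addr));
        return 0;
}
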
 
index 367c7f08b27c53c154c4c387a434f159939b4730..f213ee97851650af87a0d22e5173b34a55e1b2de 100644 (file)
@@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb);
 #define QUALCOMM_VENDOR_ID                     0x05C6
 
 #define CMOTECH_VENDOR_ID                      0x16d8
-#define CMOTECH_PRODUCT_6008                   0x6008
-#define CMOTECH_PRODUCT_6280                   0x6280
+#define CMOTECH_PRODUCT_6001                   0x6001
+#define CMOTECH_PRODUCT_CMU_300                        0x6002
+#define CMOTECH_PRODUCT_6003                   0x6003
+#define CMOTECH_PRODUCT_6004                   0x6004
+#define CMOTECH_PRODUCT_6005                   0x6005
+#define CMOTECH_PRODUCT_CGU_628A               0x6006
+#define CMOTECH_PRODUCT_CHE_628S               0x6007
+#define CMOTECH_PRODUCT_CMU_301                        0x6008
+#define CMOTECH_PRODUCT_CHU_628                        0x6280
+#define CMOTECH_PRODUCT_CHU_628S               0x6281
+#define CMOTECH_PRODUCT_CDU_680                        0x6803
+#define CMOTECH_PRODUCT_CDU_685A               0x6804
+#define CMOTECH_PRODUCT_CHU_720S               0x7001
+#define CMOTECH_PRODUCT_7002                   0x7002
+#define CMOTECH_PRODUCT_CHU_629K               0x7003
+#define CMOTECH_PRODUCT_7004                   0x7004
+#define CMOTECH_PRODUCT_7005                   0x7005
+#define CMOTECH_PRODUCT_CGU_629                        0x7006
+#define CMOTECH_PRODUCT_CHU_629S               0x700a
+#define CMOTECH_PRODUCT_CHU_720I               0x7211
+#define CMOTECH_PRODUCT_7212                   0x7212
+#define CMOTECH_PRODUCT_7213                   0x7213
+#define CMOTECH_PRODUCT_7251                   0x7251
+#define CMOTECH_PRODUCT_7252                   0x7252
+#define CMOTECH_PRODUCT_7253                   0x7253
 
 #define TELIT_VENDOR_ID                                0x1bc7
 #define TELIT_PRODUCT_UC864E                   0x1003
@@ -287,6 +310,7 @@ static void option_instat_callback(struct urb *urb);
 #define ALCATEL_PRODUCT_X060S_X200             0x0000
 #define ALCATEL_PRODUCT_X220_X500D             0x0017
 #define ALCATEL_PRODUCT_L100V                  0x011e
+#define ALCATEL_PRODUCT_L800MA                 0x0203
 
 #define PIRELLI_VENDOR_ID                      0x1266
 #define PIRELLI_PRODUCT_C100_1                 0x1002
@@ -349,6 +373,7 @@ static void option_instat_callback(struct urb *urb);
 #define OLIVETTI_PRODUCT_OLICARD100            0xc000
 #define OLIVETTI_PRODUCT_OLICARD145            0xc003
 #define OLIVETTI_PRODUCT_OLICARD200            0xc005
+#define OLIVETTI_PRODUCT_OLICARD500            0xc00b
 
 /* Celot products */
 #define CELOT_VENDOR_ID                                0x211f
@@ -502,6 +527,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = {
        .reserved = BIT(1) | BIT(2),
 };
 
+static const struct option_blacklist_info net_intf0_blacklist = {
+       .reserved = BIT(0),
+};
+
 static const struct option_blacklist_info net_intf1_blacklist = {
        .reserved = BIT(1),
 };
@@ -1035,8 +1064,47 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
-       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
-       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
+         .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
+         .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
+         .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
+         .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
+         .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1500,6 +1568,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
+         .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
        { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
@@ -1545,6 +1615,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
                .driver_info = (kernel_ulong_t)&net_intf6_blacklist
        },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist
+       },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
index 968a40201e5f6e2f2fed8de8e1668977e3f47db4..6c0a542e8ec1820d60d03f5a7896843d4a6b96f6 100644 (file)
@@ -136,12 +136,36 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)},       /* Sierra Wireless MC7710 Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)},       /* Sierra Wireless MC7710 NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)},       /* Sierra Wireless MC7710 Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)},       /* Sierra Wireless MC73xx Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)},       /* Sierra Wireless MC73xx NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)},       /* Sierra Wireless MC73xx Modem */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)},       /* Sierra Wireless EM7700 Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)},       /* Sierra Wireless EM7700 NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)},       /* Sierra Wireless EM7700 Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)},       /* Sierra Wireless EM7355 Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)},       /* Sierra Wireless EM7355 NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)},       /* Sierra Wireless EM7355 Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)},       /* Sierra Wireless MC7305/MC7355 Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)},       /* Sierra Wireless MC7305/MC7355 NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)},       /* Sierra Wireless MC7305/MC7355 Modem */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)},       /* Netgear AirCard 340U Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)},       /* Netgear AirCard 340U NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)},       /* Netgear AirCard 340U Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)},       /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)},       /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)},       /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)},       /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)},       /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)},       /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)},       /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)},       /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)},       /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)},       /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)},       /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)},       /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)},       /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)},       /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)},       /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */
 
        { }                             /* Terminating entry */
 };
index 81fc0dfcfdcf6a073dc408ae7203747b1cd6167b..6d40d56378d77d0a1e4c65f66a2b46cbd81fe396 100644 (file)
@@ -1347,10 +1347,12 @@ static int usb_serial_register(struct usb_serial_driver *driver)
 static void usb_serial_deregister(struct usb_serial_driver *device)
 {
        pr_info("USB Serial deregistering driver %s\n", device->description);
+
        mutex_lock(&table_lock);
        list_del(&device->driver_list);
-       usb_serial_bus_deregister(device);
        mutex_unlock(&table_lock);
+
+       usb_serial_bus_deregister(device);
 }
 
 /**
index 4ef2a80728f74521d103dc8d33b1fed8735b1947..008d805c3d21cde7458058a6a2a5b0803da92bcf 100644 (file)
@@ -1851,7 +1851,7 @@ static int usbat_probe(struct usb_interface *intf,
        us->transport_name = "Shuttle USBAT";
        us->transport = usbat_flash_transport;
        us->transport_reset = usb_stor_CB_reset;
-       us->max_lun = 1;
+       us->max_lun = 0;
 
        result = usb_stor_probe2(us);
        return result;
index f4a82291894ab2964754ac6eb06b1c7dfb4b3978..174a447868cd6924fd81f39ea0da8666b88e2110 100644 (file)
@@ -234,6 +234,20 @@ UNUSUAL_DEV(  0x0421, 0x0495, 0x0370, 0x0370,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_MAX_SECTORS_64 ),
 
+/* Reported by Daniele Forsi <dforsi@gmail.com> */
+UNUSUAL_DEV(  0x0421, 0x04b9, 0x0350, 0x0350,
+               "Nokia",
+               "5300",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_MAX_SECTORS_64 ),
+
+/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
+UNUSUAL_DEV(  0x0421, 0x05af, 0x0742, 0x0742,
+               "Nokia",
+               "305",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_MAX_SECTORS_64),
+
 /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
 UNUSUAL_DEV(  0x0421, 0x06aa, 0x1110, 0x1110,
                "Nokia",
index 44741267c917672149474fea1aef4efcd86d2210..3f485df9622643f1da5345d83a80c62078a80563 100644 (file)
@@ -301,7 +301,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
 
        if (chid)
                result = uwb_radio_start(&wusbhc->pal);
-       else
+       else if (wusbhc->uwb_rc)
                uwb_radio_stop(&wusbhc->pal);
 
        return result;
index c8e2a47d62a77eee093820f4eee1a3af613ed61a..3e2e4ed2015739bf9acd3cea2a7fd22fa8c3a1d8 100644 (file)
@@ -2390,10 +2390,10 @@ error_complete:
                done) {
 
                dev_info(dev, "Control EP stall.  Queue delayed work.\n");
-               spin_lock_irq(&wa->xfer_list_lock);
+               spin_lock(&wa->xfer_list_lock);
                /* move xfer from xfer_list to xfer_errored_list. */
                list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
-               spin_unlock_irq(&wa->xfer_list_lock);
+               spin_unlock(&wa->xfer_list_lock);
                spin_unlock_irqrestore(&xfer->lock, flags);
                queue_work(wusbd, &wa->xfer_error_work);
        } else {
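
The hunk above switches the inner xfer_list_lock to plain spin_lock()/spin_unlock() because xfer->lock is already held with spin_lock_irqsave() at that point; spin_unlock_irq() would have re-enabled interrupts inside the outer critical section. A generic sketch of the rule, with illustrative names only:

#include <linux/list.h>
#include <linux/spinlock.h>

static void move_to_errored(spinlock_t *xfer_lock, spinlock_t *list_lock,
                            struct list_head *node, struct list_head *errored)
{
        unsigned long flags;

        spin_lock_irqsave(xfer_lock, flags);    /* local interrupts now off */
        spin_lock(list_lock);                   /* nested lock: plain variant */
        list_move_tail(node, errored);
        spin_unlock(list_lock);                 /* must not re-enable IRQs here */
        spin_unlock_irqrestore(xfer_lock, flags);
}
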
index 1a2fd9795367cc719aec064b8443bacd5348f5c0..468c89fb6a1689a60ff6d0ccc41d0d1fcb2bd1ef 100644 (file)
@@ -59,6 +59,7 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
                                    struct uwb_rceb *reply, ssize_t reply_size)
 {
        struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
+       unsigned long flags;
 
        if (r != NULL) {
                if (r->bResultCode != UWB_RC_RES_SUCCESS)
@@ -67,14 +68,14 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
        } else
                dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
 
-       spin_lock_irq(&rc->rsvs_lock);
+       spin_lock_irqsave(&rc->rsvs_lock, flags);
        if (rc->set_drp_ie_pending > 1) {
                rc->set_drp_ie_pending = 0;
-               uwb_rsv_queue_update(rc);       
+               uwb_rsv_queue_update(rc);
        } else {
-               rc->set_drp_ie_pending = 0;     
+               rc->set_drp_ie_pending = 0;
        }
-       spin_unlock_irq(&rc->rsvs_lock);
+       spin_unlock_irqrestore(&rc->rsvs_lock, flags);
 }
 
 /**
index 96109a9972b6113cdb88d1861bf00353a00a0a93..84b4bfb843443ef934d2d80abc12131bb8616a5a 100644 (file)
@@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
 static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
 static unsigned event_array_pages __read_mostly;
 
+/*
+ * sync_set_bit() and friends must be unsigned long aligned on non-x86
+ * platforms.
+ */
+#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
+
+#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
+#define EVTCHN_FIFO_BIT(b, w) \
+    (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)
+
+#else
+
 #define BM(w) ((unsigned long *)(w))
+#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b
+
+#endif
 
 static inline event_word_t *event_word_from_port(unsigned port)
 {
@@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
 static void evtchn_fifo_clear_pending(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word));
+       sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static void evtchn_fifo_set_pending(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       sync_set_bit(EVTCHN_FIFO_PENDING, BM(word));
+       sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_is_pending(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word));
+       return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_test_and_set_mask(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+       return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
 static void evtchn_fifo_mask(unsigned port)
 {
        event_word_t *word = event_word_from_port(port);
-       sync_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+       sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
+static bool evtchn_fifo_is_masked(unsigned port)
+{
+       event_word_t *word = event_word_from_port(port);
+       return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+}
 /*
  * Clear MASKED, spinning if BUSY is set.
  */
@@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port)
        BUG_ON(!irqs_disabled());
 
        clear_masked(word);
-       if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) {
+       if (evtchn_fifo_is_pending(port)) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        }
@@ -243,7 +263,7 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
                              struct evtchn_fifo_control_block *control_block,
-                             unsigned priority, uint32_t *ready)
+                             unsigned priority, unsigned long *ready)
 {
        struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
        uint32_t head;
@@ -273,10 +293,9 @@ static void consume_one_event(unsigned cpu,
         * copy of the ready word.
         */
        if (head == 0)
-               clear_bit(priority, BM(ready));
+               clear_bit(priority, ready);
 
-       if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))
-           && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
+       if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
                handle_irq_for_port(port);
 
        q->head[priority] = head;
@@ -285,7 +304,7 @@ static void consume_one_event(unsigned cpu,
 static void evtchn_fifo_handle_events(unsigned cpu)
 {
        struct evtchn_fifo_control_block *control_block;
-       uint32_t ready;
+       unsigned long ready;
        unsigned q;
 
        control_block = per_cpu(cpu_control_block, cpu);
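
The BM()/EVTCHN_FIFO_BIT() pair added at the top of this file rounds a 32-bit event word down to a long-aligned address and compensates by raising the bit index, so sync_set_bit() and friends never see a misaligned pointer on 64-bit non-x86 builds. A small userspace illustration of the same arithmetic, assuming a little-endian 64-bit host (the array layout and the bit number are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t words[4] __attribute__((aligned(8))) = { 0 };
        uint32_t *word = &words[3];             /* upper half of the second long */

        unsigned long *base = (unsigned long *)((uintptr_t)word & ~0x7UL);
        unsigned int bit = 5 + (((uintptr_t)word & 0x4UL) ? 32 : 0);

        *base |= 1UL << bit;                    /* same effect as words[3] |= 1u << 5 */
        printf("words[3] = 0x%08x\n", words[3]); /* prints 0x00000020 */
        return 0;
}
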
index 6d589f28bf9b849bae629cea4d61f7dba60eda2d..895ac7dc9dbf9ef1ab800b713373887ebc17f903 100644 (file)
@@ -340,8 +340,6 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
                                &blocksize,&sbi->s_prefix,
                                sbi->s_volume, &mount_flags)) {
                printk(KERN_ERR "AFFS: Error parsing options\n");
-               kfree(sbi->s_prefix);
-               kfree(sbi);
                return -EINVAL;
        }
        /* N.B. after this point s_prefix must be released */
index 12a3de0ee6dacbdea873ec9ea28bdd88d1ea999d..a0ed6c7d2cd2a3e91a5d12e48af705d75afe315a 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -112,6 +112,11 @@ struct kioctx {
 
        struct work_struct      free_work;
 
+       /*
+        * signals when all in-flight requests are done
+        */
+       struct completion *requests_done;
+
        struct {
                /*
                 * This counts the number of available slots in the ringbuffer,
@@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 {
        struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
 
+       /* At this point we know that there are no in-flight requests left */
+       if (ctx->requests_done)
+               complete(ctx->requests_done);
+
        INIT_WORK(&ctx->free_work, free_ioctx);
        schedule_work(&ctx->free_work);
 }
@@ -718,7 +727,8 @@ err:
  *     when the processes owning a context have all exited to encourage
  *     the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+               struct completion *requests_done)
 {
        if (!atomic_xchg(&ctx->dead, 1)) {
                struct kioctx_table *table;
@@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
                if (ctx->mmap_size)
                        vm_munmap(ctx->mmap_base, ctx->mmap_size);
 
+               ctx->requests_done = requests_done;
                percpu_ref_kill(&ctx->users);
+       } else {
+               if (requests_done)
+                       complete(requests_done);
        }
 }
 
@@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm)
                 */
                ctx->mmap_size = 0;
 
-               kill_ioctx(mm, ctx);
+               kill_ioctx(mm, ctx, NULL);
        }
 }
 
@@ -1185,7 +1199,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
                if (ret)
-                       kill_ioctx(current->mm, ioctx);
+                       kill_ioctx(current->mm, ioctx, NULL);
                percpu_ref_put(&ioctx->users);
        }
 
@@ -1203,8 +1217,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
-               kill_ioctx(current->mm, ioctx);
+               struct completion requests_done =
+                       COMPLETION_INITIALIZER_ONSTACK(requests_done);
+
+               /* Pass requests_done to kill_ioctx() where it can be set
+                * in a thread-safe way. If we tried to set it here, two
+                * simultaneous io_destroy() calls could race on it.
+                */
+               kill_ioctx(current->mm, ioctx, &requests_done);
                percpu_ref_put(&ioctx->users);
+
+               /* Wait until all IO for the context is done. Otherwise the
+                * kernel keeps using user-space buffers even though the user
+                * thinks the context has been destroyed.
+                */
+               wait_for_completion(&requests_done);
+
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
@@ -1299,10 +1327,8 @@ rw_common:
                                                &iovec, compat)
                        : aio_setup_single_vector(req, rw, buf, &nr_segs,
                                                  iovec);
-               if (ret)
-                       return ret;
-
-               ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+               if (!ret)
+                       ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
                if (ret < 0) {
                        if (iovec != &inline_vec)
                                kfree(iovec);
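
A minimal sketch of the synchronisation pattern these aio hunks introduce, with purely illustrative names: the destroy path publishes an on-stack completion, the last in-flight reference signals it, and the caller blocks until then so user-space buffers are no longer touched after io_destroy() returns.

#include <linux/completion.h>

struct example_ctx {
        struct completion *all_done;    /* set by the teardown initiator */
        /* ... request bookkeeping ... */
};

/* Called once the final in-flight request reference is gone. */
static void example_reqs_gone(struct example_ctx *ctx)
{
        if (ctx->all_done)
                complete(ctx->all_done);
}

static void example_destroy_and_wait(struct example_ctx *ctx)
{
        DECLARE_COMPLETION_ONSTACK(all_done);

        ctx->all_done = &all_done;      /* hand over before dropping our ref */
        /* ... drop the reference that keeps requests counted ... */
        wait_for_completion(&all_done); /* buffers are guaranteed idle now */
}
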
index 2caf36ac3e93e41d4f14ac48ef4bcedc4b682548..cc87c1abac9710169cd56ad44a78cf7185da2834 100644 (file)
@@ -179,7 +179,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
                spin_lock(&active->d_lock);
 
                /* Already gone? */
-               if (!d_count(active))
+               if ((int) d_count(active) <= 0)
                        goto next;
 
                qstr = &active->d_name;
@@ -230,7 +230,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
 
                spin_lock(&expiring->d_lock);
 
-               /* Bad luck, we've already been dentry_iput */
+               /* We've already been dentry_iput or unlinked */
                if (!expiring->d_inode)
                        goto next;
 
index 4c48df572bd65d74636df643c77acda204af5418..ba6b88528dc7eb28b9fe282cc2370f43fa7ea7d4 100644 (file)
@@ -2058,6 +2058,20 @@ struct btrfs_ioctl_defrag_range_args {
 #define btrfs_raw_test_opt(o, opt)     ((o) & BTRFS_MOUNT_##opt)
 #define btrfs_test_opt(root, opt)      ((root)->fs_info->mount_opt & \
                                         BTRFS_MOUNT_##opt)
+#define btrfs_set_and_info(root, opt, fmt, args...)                    \
+{                                                                      \
+       if (!btrfs_test_opt(root, opt))                                 \
+               btrfs_info(root->fs_info, fmt, ##args);                 \
+       btrfs_set_opt(root->fs_info->mount_opt, opt);                   \
+}
+
+#define btrfs_clear_and_info(root, opt, fmt, args...)                  \
+{                                                                      \
+       if (btrfs_test_opt(root, opt))                                  \
+               btrfs_info(root->fs_info, fmt, ##args);                 \
+       btrfs_clear_opt(root->fs_info->mount_opt, opt);                 \
+}
+
 /*
  * Inode flags
  */
index 029d46c2e17048a20a02e7cb34955d22ff80e752..983314932af3cc51260753b8acf807a98ce0988a 100644 (file)
@@ -2861,7 +2861,7 @@ retry_root_backup:
                        printk(KERN_ERR "BTRFS: failed to read log tree\n");
                        free_extent_buffer(log_tree_root->node);
                        kfree(log_tree_root);
-                       goto fail_trans_kthread;
+                       goto fail_qgroup;
                }
                /* returns with log_tree_root freed on success */
                ret = btrfs_recover_log_trees(log_tree_root);
@@ -2870,24 +2870,24 @@ retry_root_backup:
                                    "Failed to recover log tree");
                        free_extent_buffer(log_tree_root->node);
                        kfree(log_tree_root);
-                       goto fail_trans_kthread;
+                       goto fail_qgroup;
                }
 
                if (sb->s_flags & MS_RDONLY) {
                        ret = btrfs_commit_super(tree_root);
                        if (ret)
-                               goto fail_trans_kthread;
+                               goto fail_qgroup;
                }
        }
 
        ret = btrfs_find_orphan_roots(tree_root);
        if (ret)
-               goto fail_trans_kthread;
+               goto fail_qgroup;
 
        if (!(sb->s_flags & MS_RDONLY)) {
                ret = btrfs_cleanup_fs_roots(fs_info);
                if (ret)
-                       goto fail_trans_kthread;
+                       goto fail_qgroup;
 
                ret = btrfs_recover_relocation(tree_root);
                if (ret < 0) {
index 1306487c82cf6a05c8c528f8851fbe70d84c1f80..5590af92094bb67ea61c8ae397cc393b58b75ae6 100644 (file)
@@ -1542,6 +1542,7 @@ again:
                                ret = 0;
                }
                if (ret) {
+                       key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = num_bytes;
                        btrfs_release_path(path);
@@ -3542,11 +3543,13 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
        return extended_to_chunk(flags | tmp);
 }
 
-static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
+static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
 {
        unsigned seq;
+       u64 flags;
 
        do {
+               flags = orig_flags;
                seq = read_seqbegin(&root->fs_info->profiles_lock);
 
                if (flags & BTRFS_BLOCK_GROUP_DATA)
@@ -5719,6 +5722,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 
                        if (ret > 0 && skinny_metadata) {
                                skinny_metadata = false;
+                               key.objectid = bytenr;
                                key.type = BTRFS_EXTENT_ITEM_KEY;
                                key.offset = num_bytes;
                                btrfs_release_path(path);
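
The get_alloc_profile() change above reflects a general seqlock rule: anything derived inside a read_seqbegin()/read_seqretry() loop must be recomputed from pristine inputs on each retry, otherwise bits accumulated during a torn first pass leak into the second. A generic sketch with illustrative names:

#include <linux/seqlock.h>
#include <linux/types.h>

/* 'reduce' may OR extra bits into 'flags' based on state guarded by 'lock'. */
static u64 read_reduced_flags(seqlock_t *lock, u64 orig_flags,
                              u64 (*reduce)(u64 flags))
{
        u64 flags;
        unsigned int seq;

        do {
                flags = orig_flags;             /* reset before every pass */
                seq = read_seqbegin(lock);
                flags = reduce(flags);
        } while (read_seqretry(lock, seq));

        return flags;
}
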
index eb742c07e7a41aacdb595b0252a12b3584bbee83..ae6af072b635e195e26f3199c3aabd427964881f 100644 (file)
@@ -800,7 +800,7 @@ next_slot:
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
-                               ret = -EINVAL;
+                               ret = -EOPNOTSUPP;
                                break;
                        }
 
@@ -846,7 +846,7 @@ next_slot:
                 */
                if (start <= key.offset && end < extent_end) {
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
-                               ret = -EINVAL;
+                               ret = -EOPNOTSUPP;
                                break;
                        }
 
@@ -872,7 +872,7 @@ next_slot:
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
-                               ret = -EINVAL;
+                               ret = -EOPNOTSUPP;
                                break;
                        }
 
@@ -1777,7 +1777,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        start_pos = round_down(pos, root->sectorsize);
        if (start_pos > i_size_read(inode)) {
                /* Expand hole size to cover write data, preventing empty gap */
-               end_pos = round_up(pos + iov->iov_len, root->sectorsize);
+               end_pos = round_up(pos + count, root->sectorsize);
                err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
                if (err) {
                        mutex_unlock(&inode->i_mutex);
index cc8ca193d830f62ec5202933f49d9867b1427969..86935f5ae29162b0b3dec1ed30c34df4c93db921 100644 (file)
@@ -176,7 +176,11 @@ static void start_caching(struct btrfs_root *root)
 
        tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
                          root->root_key.objectid);
-       BUG_ON(IS_ERR(tsk)); /* -ENOMEM */
+       if (IS_ERR(tsk)) {
+               btrfs_warn(root->fs_info, "failed to start inode caching task");
+               btrfs_clear_and_info(root, CHANGE_INODE_CACHE,
+                               "disabling inode map caching");
+       }
 }
 
 int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
@@ -205,24 +209,14 @@ again:
 
 void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
 {
-       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
 
        if (!btrfs_test_opt(root, INODE_MAP_CACHE))
                return;
-
 again:
        if (root->cached == BTRFS_CACHE_FINISHED) {
-               __btrfs_add_free_space(ctl, objectid, 1);
+               __btrfs_add_free_space(pinned, objectid, 1);
        } else {
-               /*
-                * If we are in the process of caching free ino chunks,
-                * to avoid adding the same inode number to the free_ino
-                * tree twice due to cross transaction, we'll leave it
-                * in the pinned tree until a transaction is committed
-                * or the caching work is done.
-                */
-
                down_write(&root->fs_info->commit_root_sem);
                spin_lock(&root->cache_lock);
                if (root->cached == BTRFS_CACHE_FINISHED) {
@@ -234,11 +228,7 @@ again:
 
                start_caching(root);
 
-               if (objectid <= root->cache_progress ||
-                   objectid >= root->highest_objectid)
-                       __btrfs_add_free_space(ctl, objectid, 1);
-               else
-                       __btrfs_add_free_space(pinned, objectid, 1);
+               __btrfs_add_free_space(pinned, objectid, 1);
 
                up_write(&root->fs_info->commit_root_sem);
        }
index e79ff6b90cb71bb131426b97838c369ae0e6f48c..2f6d7b13b5bdacaba4df505b57b1c3cd9417ee13 100644 (file)
@@ -3066,7 +3066,7 @@ process_slot:
                                                         new_key.offset + datal,
                                                         1);
                                if (ret) {
-                                       if (ret != -EINVAL)
+                                       if (ret != -EOPNOTSUPP)
                                                btrfs_abort_transaction(trans,
                                                                root, ret);
                                        btrfs_end_transaction(trans, root);
@@ -3120,6 +3120,8 @@ process_slot:
                        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                                u64 skip = 0;
                                u64 trim = 0;
+                               u64 aligned_end = 0;
+
                                if (off > key.offset) {
                                        skip = off - key.offset;
                                        new_key.offset += skip;
@@ -3136,12 +3138,14 @@ process_slot:
                                size -= skip + trim;
                                datal -= skip + trim;
 
+                               aligned_end = ALIGN(new_key.offset + datal,
+                                                   root->sectorsize);
                                ret = btrfs_drop_extents(trans, root, inode,
                                                         new_key.offset,
-                                                        new_key.offset + datal,
+                                                        aligned_end,
                                                         1);
                                if (ret) {
-                                       if (ret != -EINVAL)
+                                       if (ret != -EOPNOTSUPP)
                                                btrfs_abort_transaction(trans,
                                                        root, ret);
                                        btrfs_end_transaction(trans, root);
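
The aligned_end computed above relies on the kernel's ALIGN() macro; a small illustration (names and values here are chosen for clarity, not taken from the patch):

#include <linux/kernel.h>
#include <linux/types.h>

/* ALIGN(x, a) rounds x up to the next multiple of a (a must be a power of two) */
static u64 sector_aligned_end(u64 offset, u64 len, u32 sectorsize)
{
        return ALIGN(offset + len, sectorsize); /* e.g. 4097 -> 8192 with 4K sectors */
}

This mirrors why the inline-extent clone path above extends the drop range to a sector boundary before calling btrfs_drop_extents().
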
index 1ac3ca98c4294ae54781f97931278386e8bb4bcc..fd38b5053479cf62f3297d43038288027646ebe0 100644 (file)
@@ -349,6 +349,11 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
        if (p->buf_len >= len)
                return 0;
 
+       if (len > PATH_MAX) {
+               WARN_ON(1);
+               return -ENOMEM;
+       }
+
        path_len = p->end - p->start;
        old_buf_len = p->buf_len;
 
@@ -1663,7 +1668,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
                goto out;
        }
 
-       if (key.type == BTRFS_INODE_REF_KEY) {
+       if (found_key.type == BTRFS_INODE_REF_KEY) {
                struct btrfs_inode_ref *iref;
                iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_inode_ref);
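
A minimal sketch of the bounded-growth check that the fs_path_ensure_buf() hunk earlier in this file introduces: refuse to grow a path buffer past PATH_MAX instead of attempting an unbounded allocation. The names below are illustrative, not the btrfs implementation.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/slab.h>

/* grow *buf to at least len bytes, but never beyond PATH_MAX */
static int ensure_buf(char **buf, int *buf_len, int len)
{
        char *tmp;

        if (*buf_len >= len)
                return 0;

        if (len > PATH_MAX) {
                WARN_ON(1);             /* a path should never get this long */
                return -ENOMEM;
        }

        tmp = krealloc(*buf, len, GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        *buf = tmp;
        *buf_len = len;
        return 0;
}
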
index 5011aadacab8e4cf1ac291f061f3c6ada24f8b9a..9601d25a46075eaa44c0151ba890213efae8ec29 100644 (file)
@@ -385,20 +385,6 @@ static match_table_t tokens = {
        {Opt_err, NULL},
 };
 
-#define btrfs_set_and_info(root, opt, fmt, args...)                    \
-{                                                                      \
-       if (!btrfs_test_opt(root, opt))                                 \
-               btrfs_info(root->fs_info, fmt, ##args);                 \
-       btrfs_set_opt(root->fs_info->mount_opt, opt);                   \
-}
-
-#define btrfs_clear_and_info(root, opt, fmt, args...)                  \
-{                                                                      \
-       if (btrfs_test_opt(root, opt))                                  \
-               btrfs_info(root->fs_info, fmt, ##args);                 \
-       btrfs_clear_opt(root->fs_info->mount_opt, opt);                 \
-}
-
 /*
  * Regular mount options parser.  Everything that is needed only when
  * reading in a new superblock is parsed here.
@@ -1186,7 +1172,6 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
                return ERR_PTR(-ENOMEM);
        mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name,
                             newargs);
-       kfree(newargs);
 
        if (PTR_RET(mnt) == -EBUSY) {
                if (flags & MS_RDONLY) {
@@ -1196,17 +1181,22 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
                        int r;
                        mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY, device_name,
                                             newargs);
-                       if (IS_ERR(mnt))
+                       if (IS_ERR(mnt)) {
+                               kfree(newargs);
                                return ERR_CAST(mnt);
+                       }
 
                        r = btrfs_remount(mnt->mnt_sb, &flags, NULL);
                        if (r < 0) {
                                /* FIXME: release vfsmount mnt ??*/
+                               kfree(newargs);
                                return ERR_PTR(r);
                        }
                }
        }
 
+       kfree(newargs);
+
        if (IS_ERR(mnt))
                return ERR_CAST(mnt);
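
The mount_subvol() hunk above plugs a leak of newargs by freeing it on every return path. An alternative, commonly used pattern for the same problem is a single free after the last use; the sketch below is illustrative only, with try_mount() standing in for the vfs_kern_mount() attempts.

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/types.h>

static struct vfsmount *try_mount(const char *args, bool rdonly)
{
        return ERR_PTR(-EBUSY);         /* stub for illustration */
}

static struct dentry *mount_with_args(char *newargs)
{
        struct vfsmount *mnt;
        struct dentry *ret;

        mnt = try_mount(newargs, false);
        if (IS_ERR(mnt) && PTR_ERR(mnt) == -EBUSY)
                mnt = try_mount(newargs, true); /* retry read-only */

        if (IS_ERR(mnt))
                ret = ERR_CAST(mnt);
        else
                ret = dget(mnt->mnt_root);

        kfree(newargs);                 /* freed exactly once, on every path */
        return ret;
}
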
 
index 2e5e648eb5c3dc3bd82bea5ce8dead051864cf75..c561b628ebce519d111d159f541b9df88242a5b1 100644 (file)
@@ -3261,7 +3261,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                        rel->seq = cpu_to_le32(cap->seq);
                        rel->issue_seq = cpu_to_le32(cap->issue_seq),
                        rel->mseq = cpu_to_le32(cap->mseq);
-                       rel->caps = cpu_to_le32(cap->issued);
+                       rel->caps = cpu_to_le32(cap->implemented);
                        rel->wanted = cpu_to_le32(cap->mds_wanted);
                        rel->dname_len = 0;
                        rel->dname_seq = 0;
index 766410a12c2cb209a224fcfd97f63a055fd20801..c29d6ae6887489c29902bec33c4118d4d807e9dc 100644 (file)
@@ -141,7 +141,7 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
 
        /* start at beginning? */
        if (ctx->pos == 2 || last == NULL ||
-           ctx->pos < ceph_dentry(last)->offset) {
+           fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
@@ -182,9 +182,16 @@ more:
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
 
+       /* make sure a dentry wasn't dropped while we didn't have parent lock */
+       if (!ceph_dir_is_complete(dir)) {
+               dout(" lost dir complete on %p; falling back to mds\n", dir);
+               dput(dentry);
+               err = -EAGAIN;
+               goto out;
+       }
+
        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
-       ctx->pos = di->offset;
        if (!dir_emit(ctx, dentry->d_name.name,
                      dentry->d_name.len,
                      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
@@ -198,19 +205,12 @@ more:
                return 0;
        }
 
+       ctx->pos = di->offset + 1;
+
        if (last)
                dput(last);
        last = dentry;
 
-       ctx->pos++;
-
-       /* make sure a dentry wasn't dropped while we didn't have parent lock */
-       if (!ceph_dir_is_complete(dir)) {
-               dout(" lost dir complete on %p; falling back to mds\n", dir);
-               err = -EAGAIN;
-               goto out;
-       }
-
        spin_lock(&parent->d_lock);
        p = p->prev;    /* advance to next dentry */
        goto more;
@@ -296,6 +296,8 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
                err = __dcache_readdir(file, ctx, shared_gen);
                if (err != -EAGAIN)
                        return err;
+               frag = fpos_frag(ctx->pos);
+               off = fpos_off(ctx->pos);
        } else {
                spin_unlock(&ci->i_ceph_lock);
        }
@@ -446,7 +448,6 @@ more:
        if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                __ceph_dir_set_complete(ci, fi->dir_release_count);
-               ci->i_max_offset = ctx->pos;
        }
        spin_unlock(&ci->i_ceph_lock);
 
@@ -935,14 +936,16 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                 * to do it here.
                 */
 
-               /* d_move screws up d_subdirs order */
-               ceph_dir_clear_complete(new_dir);
-
                d_move(old_dentry, new_dentry);
 
                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
+
+               /* d_move screws up sibling dentries' offsets */
+               ceph_dir_clear_complete(old_dir);
+               ceph_dir_clear_complete(new_dir);
+
        }
        ceph_mdsc_put_request(req);
        return err;
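
For reference, a minimal (hypothetical) iterate implementation showing the convention the readdir hunks above move to: ctx->pos is advanced only after dir_emit() has accepted the entry, so a full user buffer leaves the position pointing at the entry to retry.

#include <linux/fs.h>

static int example_iterate(struct file *file, struct dir_context *ctx)
{
        if (ctx->pos == 0) {
                if (!dir_emit(ctx, "first", 5, 100, DT_REG))
                        return 0;       /* buffer full: pos unchanged, retried later */
                ctx->pos = 1;
        }
        if (ctx->pos == 1) {
                if (!dir_emit(ctx, "second", 6, 101, DT_REG))
                        return 0;
                ctx->pos = 2;           /* advance only after a successful emit */
        }
        return 0;
}
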
index 39da1c2efa5030216d18bc6bb3020a78afb4c5f6..88a6df4cbe6d8a52bd083a756ac452b798c33708 100644 (file)
@@ -1221,9 +1221,6 @@ static long ceph_fallocate(struct file *file, int mode,
        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;
 
-       if (IS_SWAPFILE(inode))
-               return -ETXTBSY;
-
        mutex_lock(&inode->i_mutex);
 
        if (ceph_snap(inode) != CEPH_NOSNAP) {
index 0b0728e5be2d7cba589a935159b88f9d26f0b2e9..233c6f96910abc78d2b120e4e30a44ba0009b88a 100644 (file)
@@ -744,7 +744,6 @@ static int fill_inode(struct inode *inode,
            !__ceph_dir_is_complete(ci)) {
                dout(" marking %p complete (empty)\n", inode);
                __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
-               ci->i_max_offset = 2;
        }
 no_change:
        /* only update max_size on auth cap */
@@ -889,41 +888,6 @@ out_unlock:
        return;
 }
 
-/*
- * Set dentry's directory position based on the current dir's max, and
- * order it in d_subdirs, so that dcache_readdir behaves.
- *
- * Always called under directory's i_mutex.
- */
-static void ceph_set_dentry_offset(struct dentry *dn)
-{
-       struct dentry *dir = dn->d_parent;
-       struct inode *inode = dir->d_inode;
-       struct ceph_inode_info *ci;
-       struct ceph_dentry_info *di;
-
-       BUG_ON(!inode);
-
-       ci = ceph_inode(inode);
-       di = ceph_dentry(dn);
-
-       spin_lock(&ci->i_ceph_lock);
-       if (!__ceph_dir_is_complete(ci)) {
-               spin_unlock(&ci->i_ceph_lock);
-               return;
-       }
-       di->offset = ceph_inode(inode)->i_max_offset++;
-       spin_unlock(&ci->i_ceph_lock);
-
-       spin_lock(&dir->d_lock);
-       spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
-       list_move(&dn->d_u.d_child, &dir->d_subdirs);
-       dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
-            dn->d_u.d_child.prev, dn->d_u.d_child.next);
-       spin_unlock(&dn->d_lock);
-       spin_unlock(&dir->d_lock);
-}
-
 /*
  * splice a dentry to an inode.
  * caller must hold directory i_mutex for this to be safe.
@@ -933,7 +897,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
  * the caller) if we fail.
  */
 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
-                                   bool *prehash, bool set_offset)
+                                   bool *prehash)
 {
        struct dentry *realdn;
 
@@ -965,8 +929,6 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
-       if (set_offset)
-               ceph_set_dentry_offset(dn);
 out:
        return dn;
 }
@@ -987,7 +949,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 {
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
-       struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int err = 0;
@@ -1161,6 +1122,9 @@ retry_lookup:
 
                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
+                       struct inode *olddir = req->r_old_dentry_dir;
+                       BUG_ON(!olddir);
+
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
@@ -1180,13 +1144,10 @@ retry_lookup:
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);
 
-                       /*
-                        * d_move() puts the renamed dentry at the end of
-                        * d_subdirs.  We need to assign it an appropriate
-                        * directory offset so we can behave when dir is
-                        * complete.
-                        */
-                       ceph_set_dentry_offset(req->r_old_dentry);
+                       /* d_move screws up sibling dentries' offsets */
+                       ceph_dir_clear_complete(dir);
+                       ceph_dir_clear_complete(olddir);
+
                        dout("dn %p gets new offset %lld\n", req->r_old_dentry,
                             ceph_dentry(req->r_old_dentry)->offset);
 
@@ -1213,8 +1174,9 @@ retry_lookup:
 
                /* attach proper inode */
                if (!dn->d_inode) {
+                       ceph_dir_clear_complete(dir);
                        ihold(in);
-                       dn = splice_dentry(dn, in, &have_lease, true);
+                       dn = splice_dentry(dn, in, &have_lease);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
@@ -1235,17 +1197,16 @@ retry_lookup:
                   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                    req->r_op == CEPH_MDS_OP_MKSNAP)) {
                struct dentry *dn = req->r_dentry;
+               struct inode *dir = req->r_locked_dir;
 
                /* fill out a snapdir LOOKUPSNAP dentry */
                BUG_ON(!dn);
-               BUG_ON(!req->r_locked_dir);
-               BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
-               ininfo = rinfo->targeti.in;
-               vino.ino = le64_to_cpu(ininfo->ino);
-               vino.snap = le64_to_cpu(ininfo->snapid);
+               BUG_ON(!dir);
+               BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
                dout(" linking snapped dir %p to dn %p\n", in, dn);
+               ceph_dir_clear_complete(dir);
                ihold(in);
-               dn = splice_dentry(dn, in, NULL, true);
+               dn = splice_dentry(dn, in, NULL);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
@@ -1407,7 +1368,7 @@ retry_lookup:
                }
 
                if (!dn->d_inode) {
-                       dn = splice_dentry(dn, in, NULL, false);
+                       dn = splice_dentry(dn, in, NULL);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                dn = NULL;
index fdf941b44ff103a2590a3804aa850e468ec980d6..a822a6e58290bbedfb0e363bf3bbb601075891fb 100644 (file)
@@ -109,6 +109,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
+       req->r_num_caps = 1;
+
        req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
 
        req->r_args.setlayout.layout.fl_stripe_unit =
@@ -153,6 +155,7 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
+       req->r_num_caps = 1;
 
        req->r_args.setlayout.layout.fl_stripe_unit =
                        cpu_to_le32(l.stripe_unit);
index d94ba0df9f4d195cabf677fcdcd41cc01096c7e7..191398852a2e8927b1ac193b53ceea623829caf1 100644 (file)
@@ -45,6 +45,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
+       req->r_num_caps = 1;
 
        /* mds requires start and length rather than start and end */
        if (LLONG_MAX == fl->fl_end)
index 7866cd05a6bbee4afd2478f3737d0ccd8ac28975..ead05cc1f447562271578131ab25769257080915 100644 (file)
@@ -266,7 +266,6 @@ struct ceph_inode_info {
        struct timespec i_rctime;
        u64 i_rbytes, i_rfiles, i_rsubdirs;
        u64 i_files, i_subdirs;
-       u64 i_max_offset;  /* largest readdir offset, set with complete dir */
 
        struct rb_root i_fragtree;
        struct mutex i_fragtree_mutex;
index aadc2b68678b7d70c0381d10c847c86624589c4c..a22d667f1069e5eb8485d121f241725de732b8a9 100644 (file)
@@ -1737,6 +1737,9 @@ cifs_inode_needs_reval(struct inode *inode)
        if (cifs_i->time == 0)
                return true;
 
+       if (!cifs_sb->actimeo)
+               return true;
+
        if (!time_in_range(jiffies, cifs_i->time,
                                cifs_i->time + cifs_sb->actimeo))
                return true;
index ca926ad0430cf715af12ebc3f0fa86d8c273d492..66d3d3c6b4b248878af6d751178d055310224c3d 100644 (file)
@@ -457,9 +457,9 @@ COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
        case F_GETLK64:
        case F_SETLK64:
        case F_SETLKW64:
-       case F_GETLKP:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_GETLK:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
                ret = get_compat_flock64(&f, compat_ptr(arg));
                if (ret != 0)
                        break;
@@ -468,7 +468,7 @@ COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                conv_cmd = convert_fcntl_cmd(cmd);
                ret = sys_fcntl(fd, conv_cmd, (unsigned long)&f);
                set_fs(old_fs);
-               if ((conv_cmd == F_GETLK || conv_cmd == F_GETLKP) && ret == 0) {
+               if ((conv_cmd == F_GETLK || conv_cmd == F_OFD_GETLK) && ret == 0) {
                        /* need to return lock information - see above for commentary */
                        if (f.l_start > COMPAT_LOFF_T_MAX)
                                ret = -EOVERFLOW;
@@ -493,9 +493,9 @@ COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
        case F_GETLK64:
        case F_SETLK64:
        case F_SETLKW64:
-       case F_GETLKP:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_GETLK:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
                return -EINVAL;
        }
        return compat_sys_fcntl64(fd, cmd, arg);
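
The F_OFD_* names above are the open-file-description lock commands exposed to userspace. A small userspace example, assuming a libc new enough to define F_OFD_SETLK (otherwise the constant comes from the kernel's uapi fcntl.h):

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* take a write lock on the whole file, owned by the open file description */
static int lock_whole_file(int fd)
{
        struct flock fl;

        memset(&fl, 0, sizeof(fl));
        fl.l_type = F_WRLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 0;                   /* 0 means "through EOF" */
        fl.l_pid = 0;                   /* must be 0 for OFD locks */

        return fcntl(fd, F_OFD_SETLK, &fl);
}

Unlike traditional POSIX record locks, these locks are owned by the open file description rather than the process, which avoids the surprising lock loss when another descriptor for the same file is closed.
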
index e3ad709a4232f414b91fe1ed18989a07d0f7aaa3..0b2528fb640e77e4a38a351c51f8a01f12102c14 100644 (file)
@@ -73,10 +73,15 @@ static int expand_corename(struct core_name *cn, int size)
 static int cn_vprintf(struct core_name *cn, const char *fmt, va_list arg)
 {
        int free, need;
+       va_list arg_copy;
 
 again:
        free = cn->size - cn->used;
-       need = vsnprintf(cn->corename + cn->used, free, fmt, arg);
+
+       va_copy(arg_copy, arg);
+       need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
+       va_end(arg_copy);
+
        if (need < free) {
                cn->used += need;
                return 0;
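
The coredump hunk above is a standard varargs fix: a va_list may only be traversed once, so it must be va_copy()'d before each vsnprintf() attempt when the call can be retried with a larger buffer. A self-contained userspace analogue (illustrative, not the kernel code):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* *buf may start as NULL with *size == 0; vsnprintf(NULL, 0, ...) just measures */
static int format_into(char **buf, size_t *size, const char *fmt, va_list args)
{
        for (;;) {
                va_list copy;
                char *tmp;
                int need;

                va_copy(copy, args);                    /* keep args reusable */
                need = vsnprintf(*buf, *size, fmt, copy);
                va_end(copy);

                if (need < 0)
                        return -1;
                if ((size_t)need < *size)
                        return need;                    /* output fit */

                tmp = realloc(*buf, (size_t)need + 1);  /* grow and retry */
                if (!tmp)
                        return -1;
                *buf = tmp;
                *size = (size_t)need + 1;
        }
}
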
index 40707d88a9452aea7c995198c2d94bfb1e9775c5..42ae01eefc0767902b4827ef4113199fd6e57a1a 100644 (file)
@@ -246,16 +246,8 @@ static void __d_free(struct rcu_head *head)
        kmem_cache_free(dentry_cache, dentry); 
 }
 
-/*
- * no locks, please.
- */
-static void d_free(struct dentry *dentry)
+static void dentry_free(struct dentry *dentry)
 {
-       BUG_ON((int)dentry->d_lockref.count > 0);
-       this_cpu_dec(nr_dentry);
-       if (dentry->d_op && dentry->d_op->d_release)
-               dentry->d_op->d_release(dentry);
-
        /* if dentry was never visible to RCU, immediate free is OK */
        if (!(dentry->d_flags & DCACHE_RCUACCESS))
                __d_free(&dentry->d_u.d_rcu);
@@ -403,56 +395,6 @@ static void dentry_lru_add(struct dentry *dentry)
                d_lru_add(dentry);
 }
 
-/*
- * Remove a dentry with references from the LRU.
- *
- * If we are on the shrink list, then we can get to try_prune_one_dentry() and
- * lose our last reference through the parent walk. In this case, we need to
- * remove ourselves from the shrink list, not the LRU.
- */
-static void dentry_lru_del(struct dentry *dentry)
-{
-       if (dentry->d_flags & DCACHE_LRU_LIST) {
-               if (dentry->d_flags & DCACHE_SHRINK_LIST)
-                       return d_shrink_del(dentry);
-               d_lru_del(dentry);
-       }
-}
-
-/**
- * d_kill - kill dentry and return parent
- * @dentry: dentry to kill
- * @parent: parent dentry
- *
- * The dentry must already be unhashed and removed from the LRU.
- *
- * If this is the root of the dentry tree, return NULL.
- *
- * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
- * d_kill.
- */
-static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
-       __releases(dentry->d_lock)
-       __releases(parent->d_lock)
-       __releases(dentry->d_inode->i_lock)
-{
-       list_del(&dentry->d_u.d_child);
-       /*
-        * Inform d_walk() that we are no longer attached to the
-        * dentry tree
-        */
-       dentry->d_flags |= DCACHE_DENTRY_KILLED;
-       if (parent)
-               spin_unlock(&parent->d_lock);
-       dentry_iput(dentry);
-       /*
-        * dentry_iput drops the locks, at which point nobody (except
-        * transient RCU lookups) can reach this dentry.
-        */
-       d_free(dentry);
-       return parent;
-}
-
 /**
  * d_drop - drop a dentry
  * @dentry: dentry to drop
@@ -510,7 +452,14 @@ dentry_kill(struct dentry *dentry, int unlock_on_failure)
        __releases(dentry->d_lock)
 {
        struct inode *inode;
-       struct dentry *parent;
+       struct dentry *parent = NULL;
+       bool can_free = true;
+
+       if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
+               can_free = dentry->d_flags & DCACHE_MAY_FREE;
+               spin_unlock(&dentry->d_lock);
+               goto out;
+       }
 
        inode = dentry->d_inode;
        if (inode && !spin_trylock(&inode->i_lock)) {
@@ -521,9 +470,7 @@ relock:
                }
                return dentry; /* try again with same dentry */
        }
-       if (IS_ROOT(dentry))
-               parent = NULL;
-       else
+       if (!IS_ROOT(dentry))
                parent = dentry->d_parent;
        if (parent && !spin_trylock(&parent->d_lock)) {
                if (inode)
@@ -543,10 +490,40 @@ relock:
        if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
                dentry->d_op->d_prune(dentry);
 
-       dentry_lru_del(dentry);
+       if (dentry->d_flags & DCACHE_LRU_LIST) {
+               if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
+                       d_lru_del(dentry);
+       }
        /* if it was on the hash then remove it */
        __d_drop(dentry);
-       return d_kill(dentry, parent);
+       list_del(&dentry->d_u.d_child);
+       /*
+        * Inform d_walk() that we are no longer attached to the
+        * dentry tree
+        */
+       dentry->d_flags |= DCACHE_DENTRY_KILLED;
+       if (parent)
+               spin_unlock(&parent->d_lock);
+       dentry_iput(dentry);
+       /*
+        * dentry_iput drops the locks, at which point nobody (except
+        * transient RCU lookups) can reach this dentry.
+        */
+       BUG_ON((int)dentry->d_lockref.count > 0);
+       this_cpu_dec(nr_dentry);
+       if (dentry->d_op && dentry->d_op->d_release)
+               dentry->d_op->d_release(dentry);
+
+       spin_lock(&dentry->d_lock);
+       if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+               dentry->d_flags |= DCACHE_MAY_FREE;
+               can_free = false;
+       }
+       spin_unlock(&dentry->d_lock);
+out:
+       if (likely(can_free))
+               dentry_free(dentry);
+       return parent;
 }
 
 /* 
@@ -815,65 +792,13 @@ restart:
 }
 EXPORT_SYMBOL(d_prune_aliases);
 
-/*
- * Try to throw away a dentry - free the inode, dput the parent.
- * Requires dentry->d_lock is held, and dentry->d_count == 0.
- * Releases dentry->d_lock.
- *
- * This may fail if locks cannot be acquired no problem, just try again.
- */
-static struct dentry * try_prune_one_dentry(struct dentry *dentry)
-       __releases(dentry->d_lock)
-{
-       struct dentry *parent;
-
-       parent = dentry_kill(dentry, 0);
-       /*
-        * If dentry_kill returns NULL, we have nothing more to do.
-        * if it returns the same dentry, trylocks failed. In either
-        * case, just loop again.
-        *
-        * Otherwise, we need to prune ancestors too. This is necessary
-        * to prevent quadratic behavior of shrink_dcache_parent(), but
-        * is also expected to be beneficial in reducing dentry cache
-        * fragmentation.
-        */
-       if (!parent)
-               return NULL;
-       if (parent == dentry)
-               return dentry;
-
-       /* Prune ancestors. */
-       dentry = parent;
-       while (dentry) {
-               if (lockref_put_or_lock(&dentry->d_lockref))
-                       return NULL;
-               dentry = dentry_kill(dentry, 1);
-       }
-       return NULL;
-}
-
 static void shrink_dentry_list(struct list_head *list)
 {
-       struct dentry *dentry;
+       struct dentry *dentry, *parent;
 
-       rcu_read_lock();
-       for (;;) {
-               dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
-               if (&dentry->d_lru == list)
-                       break; /* empty */
-
-               /*
-                * Get the dentry lock, and re-verify that the dentry is
-                * this on the shrinking list. If it is, we know that
-                * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
-                */
+       while (!list_empty(list)) {
+               dentry = list_entry(list->prev, struct dentry, d_lru);
                spin_lock(&dentry->d_lock);
-               if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
-                       spin_unlock(&dentry->d_lock);
-                       continue;
-               }
-
                /*
                 * The dispose list is isolated and dentries are not accounted
                 * to the LRU here, so we can simply remove it from the list
@@ -885,30 +810,38 @@ static void shrink_dentry_list(struct list_head *list)
                 * We found an inuse dentry which was not removed from
                 * the LRU because of laziness during lookup. Do not free it.
                 */
-               if (dentry->d_lockref.count) {
+               if ((int)dentry->d_lockref.count > 0) {
                        spin_unlock(&dentry->d_lock);
                        continue;
                }
-               rcu_read_unlock();
 
+               parent = dentry_kill(dentry, 0);
                /*
-                * If 'try_to_prune()' returns a dentry, it will
-                * be the same one we passed in, and d_lock will
-                * have been held the whole time, so it will not
-                * have been added to any other lists. We failed
-                * to get the inode lock.
-                *
-                * We just add it back to the shrink list.
+                * If dentry_kill returns NULL, we have nothing more to do.
                 */
-               dentry = try_prune_one_dentry(dentry);
+               if (!parent)
+                       continue;
 
-               rcu_read_lock();
-               if (dentry) {
+               if (unlikely(parent == dentry)) {
+                       /*
+                        * trylocks have failed and d_lock has been held the
+                        * whole time, so it could not have been added to any
+                        * other lists. Just add it back to the shrink list.
+                        */
                        d_shrink_add(dentry, list);
                        spin_unlock(&dentry->d_lock);
+                       continue;
                }
+               /*
+                * We need to prune ancestors too. This is necessary to prevent
+                * quadratic behavior of shrink_dcache_parent(), but is also
+                * expected to be beneficial in reducing dentry cache
+                * fragmentation.
+                */
+               dentry = parent;
+               while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
+                       dentry = dentry_kill(dentry, 1);
        }
-       rcu_read_unlock();
 }
 
 static enum lru_status
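
The reworked shrink_dentry_list() above folds the old try_prune_one_dentry() ancestor walk into the loop. The shape of that walk, reduced to a sketch with hypothetical node/kill_node() stand-ins for the dentry internals: lockref_put_or_lock() drops a reference while more than one remains, and otherwise returns false with the lock held so the caller can destroy the object and continue with its parent.

#include <linux/lockref.h>
#include <linux/spinlock.h>

struct node {
        struct lockref ref;
        struct node *parent;
};

static struct node *kill_node(struct node *n)
{
        struct node *parent = n->parent;

        /* real code would unhash and free n here */
        spin_unlock(&n->ref.lock);
        return parent;
}

static void prune_ancestors(struct node *n)
{
        while (n && !lockref_put_or_lock(&n->ref))
                n = kill_node(n);       /* we held the last ref: destroy, go up */
}
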
@@ -1261,34 +1194,23 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
        if (data->start == dentry)
                goto out;
 
-       /*
-        * move only zero ref count dentries to the dispose list.
-        *
-        * Those which are presently on the shrink list, being processed
-        * by shrink_dentry_list(), shouldn't be moved.  Otherwise the
-        * loop in shrink_dcache_parent() might not make any progress
-        * and loop forever.
-        */
-       if (dentry->d_lockref.count) {
-               dentry_lru_del(dentry);
-       } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-               /*
-                * We can't use d_lru_shrink_move() because we
-                * need to get the global LRU lock and do the
-                * LRU accounting.
-                */
-               d_lru_del(dentry);
-               d_shrink_add(dentry, &data->dispose);
+       if (dentry->d_flags & DCACHE_SHRINK_LIST) {
                data->found++;
-               ret = D_WALK_NORETRY;
+       } else {
+               if (dentry->d_flags & DCACHE_LRU_LIST)
+                       d_lru_del(dentry);
+               if (!dentry->d_lockref.count) {
+                       d_shrink_add(dentry, &data->dispose);
+                       data->found++;
+               }
        }
        /*
         * We can return to the caller if we have found some (this
         * ensures forward progress). We'll be coming back to find
         * the rest.
         */
-       if (data->found && need_resched())
-               ret = D_WALK_QUIT;
+       if (!list_empty(&data->dispose))
+               ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
 out:
        return ret;
 }
@@ -1318,45 +1240,35 @@ void shrink_dcache_parent(struct dentry *parent)
 }
 EXPORT_SYMBOL(shrink_dcache_parent);
 
-static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
+static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
 {
-       struct select_data *data = _data;
-       enum d_walk_ret ret = D_WALK_CONTINUE;
+       /* it has busy descendants; complain about those instead */
+       if (!list_empty(&dentry->d_subdirs))
+               return D_WALK_CONTINUE;
 
-       if (dentry->d_lockref.count) {
-               dentry_lru_del(dentry);
-               if (likely(!list_empty(&dentry->d_subdirs)))
-                       goto out;
-               if (dentry == data->start && dentry->d_lockref.count == 1)
-                       goto out;
-               printk(KERN_ERR
-                      "BUG: Dentry %p{i=%lx,n=%s}"
-                      " still in use (%d)"
-                      " [unmount of %s %s]\n",
+       /* root with refcount 1 is fine */
+       if (dentry == _data && dentry->d_lockref.count == 1)
+               return D_WALK_CONTINUE;
+
+       printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
+                       " still in use (%d) [unmount of %s %s]\n",
                       dentry,
                       dentry->d_inode ?
                       dentry->d_inode->i_ino : 0UL,
-                      dentry->d_name.name,
+                      dentry,
                       dentry->d_lockref.count,
                       dentry->d_sb->s_type->name,
                       dentry->d_sb->s_id);
-               BUG();
-       } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-               /*
-                * We can't use d_lru_shrink_move() because we
-                * need to get the global LRU lock and do the
-                * LRU accounting.
-                */
-               if (dentry->d_flags & DCACHE_LRU_LIST)
-                       d_lru_del(dentry);
-               d_shrink_add(dentry, &data->dispose);
-               data->found++;
-               ret = D_WALK_NORETRY;
-       }
-out:
-       if (data->found && need_resched())
-               ret = D_WALK_QUIT;
-       return ret;
+       WARN_ON(1);
+       return D_WALK_CONTINUE;
+}
+
+static void do_one_tree(struct dentry *dentry)
+{
+       shrink_dcache_parent(dentry);
+       d_walk(dentry, dentry, umount_check, NULL);
+       d_drop(dentry);
+       dput(dentry);
 }
 
 /*
@@ -1366,40 +1278,15 @@ void shrink_dcache_for_umount(struct super_block *sb)
 {
        struct dentry *dentry;
 
-       if (down_read_trylock(&sb->s_umount))
-               BUG();
+       WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
 
        dentry = sb->s_root;
        sb->s_root = NULL;
-       for (;;) {
-               struct select_data data;
-
-               INIT_LIST_HEAD(&data.dispose);
-               data.start = dentry;
-               data.found = 0;
-
-               d_walk(dentry, &data, umount_collect, NULL);
-               if (!data.found)
-                       break;
-
-               shrink_dentry_list(&data.dispose);
-               cond_resched();
-       }
-       d_drop(dentry);
-       dput(dentry);
+       do_one_tree(dentry);
 
        while (!hlist_bl_empty(&sb->s_anon)) {
-               struct select_data data;
-               dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
-
-               INIT_LIST_HEAD(&data.dispose);
-               data.start = NULL;
-               data.found = 0;
-
-               d_walk(dentry, &data, umount_collect, NULL);
-               if (data.found)
-                       shrink_dentry_list(&data.dispose);
-               cond_resched();
+               dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
+               do_one_tree(dentry);
        }
 }
 
@@ -1647,8 +1534,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        unsigned add_flags = d_flags_for_inode(inode);
 
        spin_lock(&dentry->d_lock);
-       dentry->d_flags &= ~DCACHE_ENTRY_TYPE;
-       dentry->d_flags |= add_flags;
+       __d_set_type(dentry, add_flags);
        if (inode)
                hlist_add_head(&dentry->d_alias, &inode->i_dentry);
        dentry->d_inode = inode;
index 476f3ebf437ef40ddd7432200080825b7e9e992c..238b7aa26f68ab538df0cc219073a3d26541cc11 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -657,10 +657,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
        unsigned long rlim_stack;
 
 #ifdef CONFIG_STACK_GROWSUP
-       /* Limit stack size to 1GB */
+       /* Limit stack size */
        stack_base = rlimit_max(RLIMIT_STACK);
-       if (stack_base > (1 << 30))
-               stack_base = 1 << 30;
+       if (stack_base > STACK_SIZE_MAX)
+               stack_base = STACK_SIZE_MAX;
 
        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
index 6ea7b1436bbc201e872d6ee18f7321b2e099f156..5c56785007e0e36fec78e6535aa210e09247a983 100644 (file)
@@ -667,7 +667,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
                        continue;
 
                x = ext4_count_free(bitmap_bh->b_data,
-                                   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+                                   EXT4_CLUSTERS_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                        i, ext4_free_group_clusters(sb, gdp), x);
                bitmap_count += x;
index f1c65dc7cc0ad268a9fccc7b6f1aeaf078d84a0a..66946aa621270716c580a2617bceecbcadb6bda7 100644 (file)
@@ -2466,23 +2466,6 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
        up_write(&EXT4_I(inode)->i_data_sem);
 }
 
-/*
- * Update i_disksize after writeback has been started. Races with truncate
- * are avoided by checking i_size under i_data_sem.
- */
-static inline void ext4_wb_update_i_disksize(struct inode *inode, loff_t newsize)
-{
-       loff_t i_size;
-
-       down_write(&EXT4_I(inode)->i_data_sem);
-       i_size = i_size_read(inode);
-       if (newsize > i_size)
-               newsize = i_size;
-       if (newsize > EXT4_I(inode)->i_disksize)
-               EXT4_I(inode)->i_disksize = newsize;
-       up_write(&EXT4_I(inode)->i_data_sem);
-}
-
 struct ext4_group_info {
        unsigned long   bb_state;
        struct rb_root  bb_free_root;
index 82df3ce9874ab7f3a65abc10e2bd2238b1ae2af3..01b0c208f62507e12f50ddd4fd3669972797f823 100644 (file)
@@ -3313,6 +3313,11 @@ static int ext4_split_extent(handle_t *handle,
                return PTR_ERR(path);
        depth = ext_depth(inode);
        ex = path[depth].p_ext;
+       if (!ex) {
+               EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+                                (unsigned long) map->m_lblk);
+               return -EIO;
+       }
        uninitialized = ext4_ext_is_uninitialized(ex);
        split_flag1 = 0;
 
@@ -3694,6 +3699,12 @@ static int ext4_convert_initialized_extents(handle_t *handle,
                }
                depth = ext_depth(inode);
                ex = path[depth].p_ext;
+               if (!ex) {
+                       EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+                                        (unsigned long) map->m_lblk);
+                       err = -EIO;
+                       goto out;
+               }
        }
 
        err = ext4_ext_get_access(handle, inode, path + depth);
@@ -4730,6 +4741,9 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
        trace_ext4_zero_range(inode, offset, len, mode);
 
+       if (!S_ISREG(inode->i_mode))
+               return -EINVAL;
+
        /*
         * Write out all dirty pages to avoid race conditions
         * Then release them.
@@ -4878,9 +4892,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (mode & FALLOC_FL_PUNCH_HOLE)
                return ext4_punch_hole(inode, offset, len);
 
-       if (mode & FALLOC_FL_COLLAPSE_RANGE)
-               return ext4_collapse_range(inode, offset, len);
-
        ret = ext4_convert_inline_data(inode);
        if (ret)
                return ret;
@@ -4892,6 +4903,9 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return -EOPNOTSUPP;
 
+       if (mode & FALLOC_FL_COLLAPSE_RANGE)
+               return ext4_collapse_range(inode, offset, len);
+
        if (mode & FALLOC_FL_ZERO_RANGE)
                return ext4_zero_range(file, offset, len, mode);
 
@@ -5229,18 +5243,19 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
                        if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
                                update = 1;
 
-                       *start = ex_last->ee_block +
+                       *start = le32_to_cpu(ex_last->ee_block) +
                                ext4_ext_get_actual_len(ex_last);
 
                        while (ex_start <= ex_last) {
-                               ex_start->ee_block -= shift;
-                               if (ex_start >
-                                       EXT_FIRST_EXTENT(path[depth].p_hdr)) {
-                                       if (ext4_ext_try_to_merge_right(inode,
-                                               path, ex_start - 1))
-                                               ex_last--;
-                               }
-                               ex_start++;
+                               le32_add_cpu(&ex_start->ee_block, -shift);
+                               /* Try to merge to the left. */
+                               if ((ex_start >
+                                    EXT_FIRST_EXTENT(path[depth].p_hdr)) &&
+                                   ext4_ext_try_to_merge_right(inode,
+                                                       path, ex_start - 1))
+                                       ex_last--;
+                               else
+                                       ex_start++;
                        }
                        err = ext4_ext_dirty(handle, inode, path + depth);
                        if (err)
@@ -5255,7 +5270,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
                if (err)
                        goto out;
 
-               path[depth].p_idx->ei_block -= shift;
+               le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto out;
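
The ee_block changes above are endianness fixes: ext4's on-disk extent fields are little-endian (__le32) and must go through the le32_* helpers rather than plain C arithmetic. A reduced sketch (the struct here is hypothetical, mirroring only the relevant field):

#include <asm/byteorder.h>
#include <linux/types.h>

struct ondisk_extent {
        __le32 ee_block;                /* stored little-endian on disk */
};

static u32 shift_extent_left(struct ondisk_extent *ex, u32 shift)
{
        le32_add_cpu(&ex->ee_block, -shift);    /* endian-safe in-place adjust */
        return le32_to_cpu(ex->ee_block);       /* CPU-order value for callers */
}

On big-endian systems the open-coded arithmetic the hunk removes would operate on byte-swapped values, which is what the le32_* conversion avoids.
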
@@ -5300,7 +5315,8 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
                return ret;
        }
 
-       stop_block = extent->ee_block + ext4_ext_get_actual_len(extent);
+       stop_block = le32_to_cpu(extent->ee_block) +
+                       ext4_ext_get_actual_len(extent);
        ext4_ext_drop_refs(path);
        kfree(path);
 
@@ -5313,10 +5329,18 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
         * enough to accommodate the shift.
         */
        path = ext4_ext_find_extent(inode, start - 1, NULL, 0);
+       if (IS_ERR(path))
+               return PTR_ERR(path);
        depth = path->p_depth;
        extent =  path[depth].p_ext;
-       ex_start = extent->ee_block;
-       ex_end = extent->ee_block + ext4_ext_get_actual_len(extent);
+       if (extent) {
+               ex_start = le32_to_cpu(extent->ee_block);
+               ex_end = le32_to_cpu(extent->ee_block) +
+                       ext4_ext_get_actual_len(extent);
+       } else {
+               ex_start = 0;
+               ex_end = 0;
+       }
        ext4_ext_drop_refs(path);
        kfree(path);
 
@@ -5331,7 +5355,13 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
                        return PTR_ERR(path);
                depth = path->p_depth;
                extent = path[depth].p_ext;
-               current_block = extent->ee_block;
+               if (!extent) {
+                       EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+                                        (unsigned long) start);
+                       return -EIO;
+               }
+
+               current_block = le32_to_cpu(extent->ee_block);
                if (start > current_block) {
                        /* Hole, move to the next extent */
                        ret = mext_next_extent(inode, path, &extent);
@@ -5365,17 +5395,18 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        ext4_lblk_t punch_start, punch_stop;
        handle_t *handle;
        unsigned int credits;
-       loff_t new_size;
+       loff_t new_size, ioffset;
        int ret;
 
-       BUG_ON(offset + len > i_size_read(inode));
-
        /* Collapse range works only on fs block size aligned offsets. */
        if (offset & (EXT4_BLOCK_SIZE(sb) - 1) ||
            len & (EXT4_BLOCK_SIZE(sb) - 1))
                return -EINVAL;
 
        if (!S_ISREG(inode->i_mode))
+               return -EINVAL;
+
+       if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1)
                return -EOPNOTSUPP;
 
        trace_ext4_collapse_range(inode, offset, len);
@@ -5383,22 +5414,34 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
        punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
 
+       /* Call ext4_force_commit to flush all data in case of data=journal. */
+       if (ext4_should_journal_data(inode)) {
+               ret = ext4_force_commit(inode->i_sb);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * Need to round down offset to be aligned with page size boundary
+        * for page size > block size.
+        */
+       ioffset = round_down(offset, PAGE_SIZE);
+
        /* Write out all dirty pages */
-       ret = filemap_write_and_wait_range(inode->i_mapping, offset, -1);
+       ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+                                          LLONG_MAX);
        if (ret)
                return ret;
 
        /* Take mutex lock */
        mutex_lock(&inode->i_mutex);
 
-       /* It's not possible punch hole on append only file */
-       if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
-               ret = -EPERM;
-               goto out_mutex;
-       }
-
-       if (IS_SWAPFILE(inode)) {
-               ret = -ETXTBSY;
+       /*
+        * There is no need to overlap collapse range with EOF, in which case
+        * it is effectively a truncate operation
+        */
+       if (offset + len >= i_size_read(inode)) {
+               ret = -EINVAL;
                goto out_mutex;
        }
 
@@ -5408,7 +5451,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
                goto out_mutex;
        }
 
-       truncate_pagecache_range(inode, offset, -1);
+       truncate_pagecache(inode, ioffset);
 
        /* Wait for existing dio to complete */
        ext4_inode_block_unlocked_dio(inode);
@@ -5425,7 +5468,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        ext4_discard_preallocations(inode);
 
        ret = ext4_es_remove_extent(inode, punch_start,
-                                   EXT_MAX_BLOCKS - punch_start - 1);
+                                   EXT_MAX_BLOCKS - punch_start);
        if (ret) {
                up_write(&EXT4_I(inode)->i_data_sem);
                goto out_stop;
@@ -5436,6 +5479,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
                up_write(&EXT4_I(inode)->i_data_sem);
                goto out_stop;
        }
+       ext4_discard_preallocations(inode);
 
        ret = ext4_ext_shift_extents(inode, handle, punch_stop,
                                     punch_stop - punch_start);
@@ -5445,10 +5489,9 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        }
 
        new_size = i_size_read(inode) - len;
-       truncate_setsize(inode, new_size);
+       i_size_write(inode, new_size);
        EXT4_I(inode)->i_disksize = new_size;
 
-       ext4_discard_preallocations(inode);
        up_write(&EXT4_I(inode)->i_data_sem);
        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
index 0a014a7194b28cac95e56f21b59f3776fcf8c9fc..0ebc21204b5184841405f890fa11dd5ae11ef54c 100644 (file)
@@ -810,7 +810,7 @@ retry:
 
                        newes.es_lblk = end + 1;
                        newes.es_len = len2;
-                       block = 0x7FDEADBEEF;
+                       block = 0x7FDEADBEEFULL;
                        if (ext4_es_is_written(&orig_es) ||
                            ext4_es_is_unwritten(&orig_es))
                                block = ext4_es_pblock(&orig_es) +
index ca7502d89fdee07b96585c768854375b207daaf6..063fc1538355972d912553ad6c8e419390f057de 100644 (file)
@@ -82,7 +82,7 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
        size_t count = iov_length(iov, nr_segs);
        loff_t final_size = pos + count;
 
-       if (pos >= inode->i_size)
+       if (pos >= i_size_read(inode))
                return 0;
 
        if ((pos & blockmask) || (final_size & blockmask))
index 5b0d2c7d54080dea4080909fe8ec6a74ecf19b56..d7b7462a0e13e11e7131f2b148d1323a3de5c996 100644 (file)
@@ -522,6 +522,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
        if (unlikely(map->m_len > INT_MAX))
                map->m_len = INT_MAX;
 
+       /* We can only handle block numbers less than EXT_MAX_BLOCKS */
+       if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
+               return -EIO;
+
        /* Lookup extent status tree firstly */
        if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
                ext4_es_lru_add(inode);
@@ -2243,13 +2247,23 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                        return err;
        } while (map->m_len);
 
-       /* Update on-disk size after IO is submitted */
+       /*
+        * Update on-disk size after IO is submitted.  Races with
+        * truncate are avoided by checking i_size under i_data_sem.
+        */
        disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
        if (disksize > EXT4_I(inode)->i_disksize) {
                int err2;
-
-               ext4_wb_update_i_disksize(inode, disksize);
+               loff_t i_size;
+
+               down_write(&EXT4_I(inode)->i_data_sem);
+               i_size = i_size_read(inode);
+               if (disksize > i_size)
+                       disksize = i_size;
+               if (disksize > EXT4_I(inode)->i_disksize)
+                       EXT4_I(inode)->i_disksize = disksize;
                err2 = ext4_mark_inode_dirty(handle, inode);
+               up_write(&EXT4_I(inode)->i_data_sem);
                if (err2)
                        ext4_error(inode->i_sb,
                                   "Failed to mark inode %lu dirty",
@@ -3527,15 +3541,6 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
        }
 
        mutex_lock(&inode->i_mutex);
-       /* It's not possible punch hole on append only file */
-       if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
-               ret = -EPERM;
-               goto out_mutex;
-       }
-       if (IS_SWAPFILE(inode)) {
-               ret = -ETXTBSY;
-               goto out_mutex;
-       }
 
        /* No need to punch hole beyond i_size */
        if (offset >= inode->i_size)
@@ -3616,7 +3621,6 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
                ret = ext4_free_hole_blocks(handle, inode, first_block,
                                            stop_block);
 
-       ext4_discard_preallocations(inode);
        up_write(&EXT4_I(inode)->i_data_sem);
        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
@@ -4423,21 +4427,20 @@ out_brelse:
  *
  * We are called from a few places:
  *
- * - Within generic_file_write() for O_SYNC files.
+ * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
  *   Here, there will be no transaction running. We wait for any running
  *   transaction to commit.
  *
- * - Within sys_sync(), kupdate and such.
- *   We wait on commit, if tol to.
+ * - Within flush work (sys_sync(), kupdate and such).
+ *   We wait on commit, if told to.
  *
- * - Within prune_icache() (PF_MEMALLOC == true)
- *   Here we simply return.  We can't afford to block kswapd on the
- *   journal commit.
+ * - Within iput_final() -> write_inode_now()
+ *   We wait on commit, if told to.
  *
  * In all cases it is actually safe for us to return without doing anything,
  * because the inode has been copied into a raw inode buffer in
- * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
- * knfsd.
+ * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
+ * writeback.
  *
  * Note that we are absolutely dependent upon all inode dirtiers doing the
  * right thing: they *must* call mark_inode_dirty() after dirtying info in
@@ -4449,15 +4452,15 @@ out_brelse:
  *     stuff();
  *     inode->i_size = expr;
  *
- * is in error because a kswapd-driven write_inode() could occur while
- * `stuff()' is running, and the new i_size will be lost.  Plus the inode
- * will no longer be on the superblock's dirty inode list.
+ * is in error because write_inode() could occur while `stuff()' is running,
+ * and the new i_size will be lost.  Plus the inode will no longer be on the
+ * superblock's dirty inode list.
  */
 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
        int err;
 
-       if (current->flags & PF_MEMALLOC)
+       if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
                return 0;
 
        if (EXT4_SB(inode->i_sb)->s_journal) {
index a888cac76e9c55c34002f930a7bc8a8df53376bf..c8238a26818cd9ef7567d0552a60a461bfd1f76e 100644 (file)
@@ -989,7 +989,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
        poff = block % blocks_per_page;
        page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
        if (!page)
-               return -EIO;
+               return -ENOMEM;
        BUG_ON(page->mapping != inode->i_mapping);
        e4b->bd_bitmap_page = page;
        e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
@@ -1003,7 +1003,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
        pnum = block / blocks_per_page;
        page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
        if (!page)
-               return -EIO;
+               return -ENOMEM;
        BUG_ON(page->mapping != inode->i_mapping);
        e4b->bd_buddy_page = page;
        return 0;
@@ -1168,7 +1168,11 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
                        unlock_page(page);
                }
        }
-       if (page == NULL || !PageUptodate(page)) {
+       if (page == NULL) {
+               ret = -ENOMEM;
+               goto err;
+       }
+       if (!PageUptodate(page)) {
                ret = -EIO;
                goto err;
        }
@@ -1197,7 +1201,11 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
                        unlock_page(page);
                }
        }
-       if (page == NULL || !PageUptodate(page)) {
+       if (page == NULL) {
+               ret = -ENOMEM;
+               goto err;
+       }
+       if (!PageUptodate(page)) {
                ret = -EIO;
                goto err;
        }
@@ -5008,6 +5016,8 @@ error_return:
  */
 static int ext4_trim_extent(struct super_block *sb, int start, int count,
                             ext4_group_t group, struct ext4_buddy *e4b)
+__releases(bitlock)
+__acquires(bitlock)
 {
        struct ext4_free_extent ex;
        int ret = 0;
index ab95508e3d4018eab92647c6d2308e98524080d1..c18d95b5054081c75e0c7a2fab975976838f9b02 100644 (file)
@@ -308,13 +308,14 @@ static void ext4_end_bio(struct bio *bio, int error)
        if (error) {
                struct inode *inode = io_end->inode;
 
-               ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+               ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
-                            inode->i_ino,
+                            error, inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
+               mapping_set_error(inode->i_mapping, error);
        }
 
        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
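
The mapping_set_error() call added above records a writeback failure on the address_space so that a later fsync()/msync() reports it instead of silently succeeding. A minimal sketch of the helper's use:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* record an I/O error from a completed write so fsync() can see it */
static void note_write_error(struct address_space *mapping, int error)
{
        if (error)
                mapping_set_error(mapping, error);      /* sets AS_EIO or AS_ENOSPC */
}
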
index f3c667091618d8b26e09964dafe2f673a4c6cbd3..6f9e6fadac04e1c8af1d4a98d6258cdbc2f45dea 100644 (file)
@@ -3869,19 +3869,38 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
+
+       /*
+        * set up enough so that it can read an inode,
+        * and create new inode for buddy allocator
+        */
+       sbi->s_gdb_count = db_count;
+       if (!test_opt(sb, NOLOAD) &&
+           EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
+               sb->s_op = &ext4_sops;
+       else
+               sb->s_op = &ext4_nojournal_sops;
+
+       ext4_ext_init(sb);
+       err = ext4_mb_init(sb);
+       if (err) {
+               ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
+                        err);
+               goto failed_mount2;
+       }
+
        if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
-               goto failed_mount2;
+               goto failed_mount2a;
        }
        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
                if (!ext4_fill_flex_info(sb)) {
                        ext4_msg(sb, KERN_ERR,
                               "unable to initialize "
                               "flex_bg meta info!");
-                       goto failed_mount2;
+                       goto failed_mount2a;
                }
 
-       sbi->s_gdb_count = db_count;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
@@ -3916,14 +3935,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_stripe = ext4_get_stripe_size(sbi);
        sbi->s_extent_max_zeroout_kb = 32;
 
-       /*
-        * set up enough so that it can read an inode
-        */
-       if (!test_opt(sb, NOLOAD) &&
-           EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
-               sb->s_op = &ext4_sops;
-       else
-               sb->s_op = &ext4_nojournal_sops;
        sb->s_export_op = &ext4_export_ops;
        sb->s_xattr = ext4_xattr_handlers;
 #ifdef CONFIG_QUOTA
@@ -4113,21 +4124,13 @@ no_journal:
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
                         "reserved pool", ext4_calculate_resv_clusters(sb));
-               goto failed_mount4a;
+               goto failed_mount5;
        }
 
        err = ext4_setup_system_zone(sb);
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to initialize system "
                         "zone (%d)", err);
-               goto failed_mount4a;
-       }
-
-       ext4_ext_init(sb);
-       err = ext4_mb_init(sb);
-       if (err) {
-               ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
-                        err);
                goto failed_mount5;
        }
 
@@ -4204,11 +4207,8 @@ failed_mount8:
 failed_mount7:
        ext4_unregister_li_request(sb);
 failed_mount6:
-       ext4_mb_release(sb);
-failed_mount5:
-       ext4_ext_release(sb);
        ext4_release_system_zone(sb);
-failed_mount4a:
+failed_mount5:
        dput(sb->s_root);
        sb->s_root = NULL;
 failed_mount4:
@@ -4232,11 +4232,14 @@ failed_mount3:
        percpu_counter_destroy(&sbi->s_extent_cache_cnt);
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
+failed_mount2a:
+       ext4_mb_release(sb);
 failed_mount2:
        for (i = 0; i < db_count; i++)
                brelse(sbi->s_group_desc[i]);
        ext4_kvfree(sbi->s_group_desc);
 failed_mount:
+       ext4_ext_release(sb);
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        if (sbi->s_proc) {
index 1f5cf5880718d28c8ca7893f7165807f78101c6b..4eec399ec807bc6733d1a90b8c3d0d205eb795c1 100644 (file)
@@ -520,8 +520,8 @@ static void ext4_xattr_update_super_block(handle_t *handle,
 }
 
 /*
- * Release the xattr block BH: If the reference count is > 1, decrement
- * it; otherwise free the block.
+ * Release the xattr block BH: If the reference count is > 1, decrement it;
+ * otherwise free the block.
  */
 static void
 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
@@ -542,16 +542,31 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                if (ce)
                        mb_cache_entry_free(ce);
                get_bh(bh);
+               unlock_buffer(bh);
                ext4_free_blocks(handle, inode, bh, 0, 1,
                                 EXT4_FREE_BLOCKS_METADATA |
                                 EXT4_FREE_BLOCKS_FORGET);
-               unlock_buffer(bh);
        } else {
                le32_add_cpu(&BHDR(bh)->h_refcount, -1);
                if (ce)
                        mb_cache_entry_release(ce);
+               /*
+                * Beware of this ugliness: Releasing of xattr block references
+                * from different inodes can race and so we have to protect
+                * from a race where someone else frees the block (and releases
+                * its journal_head) before we are done dirtying the buffer. In
+                * nojournal mode this race is harmless and we actually cannot
+                * call ext4_handle_dirty_xattr_block() with locked buffer as
+                * that function can call sync_dirty_buffer() so for that case
+                * we handle the dirtying after unlocking the buffer.
+                */
+               if (ext4_handle_valid(handle))
+                       error = ext4_handle_dirty_xattr_block(handle, inode,
+                                                             bh);
                unlock_buffer(bh);
-               error = ext4_handle_dirty_xattr_block(handle, inode, bh);
+               if (!ext4_handle_valid(handle))
+                       error = ext4_handle_dirty_xattr_block(handle, inode,
+                                                             bh);
                if (IS_SYNC(inode))
                        ext4_handle_sync(handle);
                dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
index 9ead1596399a12ef66ecf5f087b59463424ccf5d..72c82f69b01b28594e56bb9518df6f211f0d51a9 100644 (file)
@@ -274,15 +274,15 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
                break;
 #if BITS_PER_LONG != 32
        /* 32-bit arches must use fcntl64() */
-       case F_GETLKP:
+       case F_OFD_GETLK:
 #endif
        case F_GETLK:
                err = fcntl_getlk(filp, cmd, (struct flock __user *) arg);
                break;
 #if BITS_PER_LONG != 32
        /* 32-bit arches must use fcntl64() */
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
 #endif
                /* Fallthrough */
        case F_SETLK:
@@ -399,13 +399,13 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
        
        switch (cmd) {
        case F_GETLK64:
-       case F_GETLKP:
+       case F_OFD_GETLK:
                err = fcntl_getlk64(f.file, cmd, (struct flock64 __user *) arg);
                break;
        case F_SETLK64:
        case F_SETLKW64:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
                err = fcntl_setlk64(fd, f.file, cmd,
                                (struct flock64 __user *) arg);
                break;
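
For reference, a minimal userspace sketch of taking an open-file-description lock under the new F_OFD_SETLK name (illustrative only; it assumes the build environment's <fcntl.h> already exposes the F_OFD_* constants, which is not guaranteed on older headers):

        /* Sketch only: grab a whole-file write lock tied to this open file
         * description rather than to the calling process. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                struct flock fl;
                int fd = open("demo.dat", O_RDWR | O_CREAT, 0644);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                memset(&fl, 0, sizeof(fl));
                fl.l_type = F_WRLCK;
                fl.l_whence = SEEK_SET;
                fl.l_start = 0;
                fl.l_len = 0;           /* 0 means "to end of file" */
                fl.l_pid = 0;           /* must be 0 for the OFD commands */

                if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
                        perror("F_OFD_SETLK");

                close(fd);
                return 0;
        }

The l_pid == 0 requirement is enforced by the fcntl_getlk()/fcntl_setlk() hunks in fs/locks.c further down, which return EINVAL otherwise.
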
index a0b0855d00a985c78288074f9adc813d14addfe1..205e0d5d530752532bcb045b4630ddee0ded7ffd 100644 (file)
@@ -348,7 +348,7 @@ int __init fuse_ctl_init(void)
        return register_filesystem(&fuse_ctl_fs_type);
 }
 
-void fuse_ctl_cleanup(void)
+void __exit fuse_ctl_cleanup(void)
 {
        unregister_filesystem(&fuse_ctl_fs_type);
 }
index 5b4e035b364cc604d68e72411337ef061a766fea..42198359fa1b472557e44f325e9f55c305237e99 100644 (file)
@@ -679,6 +679,14 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
        return create_new_entry(fc, req, dir, entry, S_IFLNK);
 }
 
+static inline void fuse_update_ctime(struct inode *inode)
+{
+       if (!IS_NOCMTIME(inode)) {
+               inode->i_ctime = current_fs_time(inode->i_sb);
+               mark_inode_dirty_sync(inode);
+       }
+}
+
 static int fuse_unlink(struct inode *dir, struct dentry *entry)
 {
        int err;
@@ -713,6 +721,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
                fuse_invalidate_attr(inode);
                fuse_invalidate_attr(dir);
                fuse_invalidate_entry_cache(entry);
+               fuse_update_ctime(inode);
        } else if (err == -EINTR)
                fuse_invalidate_entry(entry);
        return err;
@@ -743,23 +752,26 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
        return err;
 }
 
-static int fuse_rename(struct inode *olddir, struct dentry *oldent,
-                      struct inode *newdir, struct dentry *newent)
+static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
+                             struct inode *newdir, struct dentry *newent,
+                             unsigned int flags, int opcode, size_t argsize)
 {
        int err;
-       struct fuse_rename_in inarg;
+       struct fuse_rename2_in inarg;
        struct fuse_conn *fc = get_fuse_conn(olddir);
-       struct fuse_req *req = fuse_get_req_nopages(fc);
+       struct fuse_req *req;
 
+       req = fuse_get_req_nopages(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
-       memset(&inarg, 0, sizeof(inarg));
+       memset(&inarg, 0, argsize);
        inarg.newdir = get_node_id(newdir);
-       req->in.h.opcode = FUSE_RENAME;
+       inarg.flags = flags;
+       req->in.h.opcode = opcode;
        req->in.h.nodeid = get_node_id(olddir);
        req->in.numargs = 3;
-       req->in.args[0].size = sizeof(inarg);
+       req->in.args[0].size = argsize;
        req->in.args[0].value = &inarg;
        req->in.args[1].size = oldent->d_name.len + 1;
        req->in.args[1].value = oldent->d_name.name;
@@ -771,15 +783,22 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
        if (!err) {
                /* ctime changes */
                fuse_invalidate_attr(oldent->d_inode);
+               fuse_update_ctime(oldent->d_inode);
+
+               if (flags & RENAME_EXCHANGE) {
+                       fuse_invalidate_attr(newent->d_inode);
+                       fuse_update_ctime(newent->d_inode);
+               }
 
                fuse_invalidate_attr(olddir);
                if (olddir != newdir)
                        fuse_invalidate_attr(newdir);
 
                /* newent will end up negative */
-               if (newent->d_inode) {
+               if (!(flags & RENAME_EXCHANGE) && newent->d_inode) {
                        fuse_invalidate_attr(newent->d_inode);
                        fuse_invalidate_entry_cache(newent);
+                       fuse_update_ctime(newent->d_inode);
                }
        } else if (err == -EINTR) {
                /* If request was interrupted, DEITY only knows if the
@@ -795,6 +814,36 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
        return err;
 }
 
+static int fuse_rename(struct inode *olddir, struct dentry *oldent,
+                      struct inode *newdir, struct dentry *newent)
+{
+       return fuse_rename_common(olddir, oldent, newdir, newent, 0,
+                                 FUSE_RENAME, sizeof(struct fuse_rename_in));
+}
+
+static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
+                       struct inode *newdir, struct dentry *newent,
+                       unsigned int flags)
+{
+       struct fuse_conn *fc = get_fuse_conn(olddir);
+       int err;
+
+       if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+               return -EINVAL;
+
+       if (fc->no_rename2 || fc->minor < 23)
+               return -EINVAL;
+
+       err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+                                FUSE_RENAME2, sizeof(struct fuse_rename2_in));
+       if (err == -ENOSYS) {
+               fc->no_rename2 = 1;
+               err = -EINVAL;
+       }
+       return err;
+
+}
+
 static int fuse_link(struct dentry *entry, struct inode *newdir,
                     struct dentry *newent)
 {
@@ -829,6 +878,7 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
                inc_nlink(inode);
                spin_unlock(&fc->lock);
                fuse_invalidate_attr(inode);
+               fuse_update_ctime(inode);
        } else if (err == -EINTR) {
                fuse_invalidate_attr(inode);
        }
@@ -846,6 +896,8 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
                attr->size = i_size_read(inode);
                attr->mtime = inode->i_mtime.tv_sec;
                attr->mtimensec = inode->i_mtime.tv_nsec;
+               attr->ctime = inode->i_ctime.tv_sec;
+               attr->ctimensec = inode->i_ctime.tv_nsec;
        }
 
        stat->dev = inode->i_sb->s_dev;
@@ -1504,7 +1556,7 @@ static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
 }
 
 static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg,
-                          bool trust_local_mtime)
+                          bool trust_local_cmtime)
 {
        unsigned ivalid = iattr->ia_valid;
 
@@ -1523,13 +1575,18 @@ static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg,
                if (!(ivalid & ATTR_ATIME_SET))
                        arg->valid |= FATTR_ATIME_NOW;
        }
-       if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_mtime)) {
+       if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
                arg->valid |= FATTR_MTIME;
                arg->mtime = iattr->ia_mtime.tv_sec;
                arg->mtimensec = iattr->ia_mtime.tv_nsec;
-               if (!(ivalid & ATTR_MTIME_SET) && !trust_local_mtime)
+               if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
                        arg->valid |= FATTR_MTIME_NOW;
        }
+       if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
+               arg->valid |= FATTR_CTIME;
+               arg->ctime = iattr->ia_ctime.tv_sec;
+               arg->ctimensec = iattr->ia_ctime.tv_nsec;
+       }
 }
 
 /*
@@ -1597,39 +1654,38 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_req *req,
 /*
  * Flush inode->i_mtime to the server
  */
-int fuse_flush_mtime(struct file *file, bool nofail)
+int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
 {
-       struct inode *inode = file->f_mapping->host;
-       struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_conn *fc = get_fuse_conn(inode);
-       struct fuse_req *req = NULL;
+       struct fuse_req *req;
        struct fuse_setattr_in inarg;
        struct fuse_attr_out outarg;
        int err;
 
-       if (nofail) {
-               req = fuse_get_req_nofail_nopages(fc, file);
-       } else {
-               req = fuse_get_req_nopages(fc);
-               if (IS_ERR(req))
-                       return PTR_ERR(req);
-       }
+       req = fuse_get_req_nopages(fc);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
 
        memset(&inarg, 0, sizeof(inarg));
        memset(&outarg, 0, sizeof(outarg));
 
-       inarg.valid |= FATTR_MTIME;
+       inarg.valid = FATTR_MTIME;
        inarg.mtime = inode->i_mtime.tv_sec;
        inarg.mtimensec = inode->i_mtime.tv_nsec;
-
+       if (fc->minor >= 23) {
+               inarg.valid |= FATTR_CTIME;
+               inarg.ctime = inode->i_ctime.tv_sec;
+               inarg.ctimensec = inode->i_ctime.tv_nsec;
+       }
+       if (ff) {
+               inarg.valid |= FATTR_FH;
+               inarg.fh = ff->fh;
+       }
        fuse_setattr_fill(fc, req, inode, &inarg, &outarg);
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
 
-       if (!err)
-               clear_bit(FUSE_I_MTIME_DIRTY, &fi->state);
-
        return err;
 }
 
@@ -1653,7 +1709,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
        bool is_wb = fc->writeback_cache;
        loff_t oldsize;
        int err;
-       bool trust_local_mtime = is_wb && S_ISREG(inode->i_mode);
+       bool trust_local_cmtime = is_wb && S_ISREG(inode->i_mode);
 
        if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
                attr->ia_valid |= ATTR_FORCE;
@@ -1678,11 +1734,13 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
        if (is_truncate) {
                fuse_set_nowrite(inode);
                set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+               if (trust_local_cmtime && attr->ia_size != inode->i_size)
+                       attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
        }
 
        memset(&inarg, 0, sizeof(inarg));
        memset(&outarg, 0, sizeof(outarg));
-       iattr_to_fattr(attr, &inarg, trust_local_mtime);
+       iattr_to_fattr(attr, &inarg, trust_local_cmtime);
        if (file) {
                struct fuse_file *ff = file->private_data;
                inarg.valid |= FATTR_FH;
@@ -1711,9 +1769,12 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
 
        spin_lock(&fc->lock);
        /* the kernel maintains i_mtime locally */
-       if (trust_local_mtime && (attr->ia_valid & ATTR_MTIME)) {
-               inode->i_mtime = attr->ia_mtime;
-               clear_bit(FUSE_I_MTIME_DIRTY, &fi->state);
+       if (trust_local_cmtime) {
+               if (attr->ia_valid & ATTR_MTIME)
+                       inode->i_mtime = attr->ia_mtime;
+               if (attr->ia_valid & ATTR_CTIME)
+                       inode->i_ctime = attr->ia_ctime;
+               /* FIXME: clear I_DIRTY_SYNC? */
        }
 
        fuse_change_attributes_common(inode, &outarg.attr,
@@ -1810,8 +1871,10 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
                fc->no_setxattr = 1;
                err = -EOPNOTSUPP;
        }
-       if (!err)
+       if (!err) {
                fuse_invalidate_attr(inode);
+               fuse_update_ctime(inode);
+       }
        return err;
 }
 
@@ -1941,20 +2004,11 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
                fc->no_removexattr = 1;
                err = -EOPNOTSUPP;
        }
-       if (!err)
+       if (!err) {
                fuse_invalidate_attr(inode);
-       return err;
-}
-
-static int fuse_update_time(struct inode *inode, struct timespec *now,
-                           int flags)
-{
-       if (flags & S_MTIME) {
-               inode->i_mtime = *now;
-               set_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state);
-               BUG_ON(!S_ISREG(inode->i_mode));
+               fuse_update_ctime(inode);
        }
-       return 0;
+       return err;
 }
 
 static const struct inode_operations fuse_dir_inode_operations = {
@@ -1964,6 +2018,7 @@ static const struct inode_operations fuse_dir_inode_operations = {
        .unlink         = fuse_unlink,
        .rmdir          = fuse_rmdir,
        .rename         = fuse_rename,
+       .rename2        = fuse_rename2,
        .link           = fuse_link,
        .setattr        = fuse_setattr,
        .create         = fuse_create,
@@ -1996,7 +2051,6 @@ static const struct inode_operations fuse_common_inode_operations = {
        .getxattr       = fuse_getxattr,
        .listxattr      = fuse_listxattr,
        .removexattr    = fuse_removexattr,
-       .update_time    = fuse_update_time,
 };
 
 static const struct inode_operations fuse_symlink_inode_operations = {
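
With ->rename2 wired up above, userspace reaches FUSE_RENAME2 through the renameat2() system call. A caller sketch for reference (glibc had no wrapper at the time, so the raw syscall is used; __NR_renameat2 and the RENAME_NOREPLACE value are taken from the >= 3.15 uapi headers and are assumptions if your headers are older):

        #define _GNU_SOURCE
        #include <fcntl.h>              /* AT_FDCWD */
        #include <stdio.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        #ifndef RENAME_NOREPLACE
        #define RENAME_NOREPLACE (1 << 0)       /* include/uapi/linux/fs.h */
        #endif

        int main(void)
        {
                /* Fails with EEXIST instead of silently replacing "new".  A FUSE
                 * server that answers FUSE_RENAME2 with ENOSYS causes the kernel
                 * to set fc->no_rename2 and return EINVAL, as in fuse_rename2()
                 * above. */
                if (syscall(__NR_renameat2, AT_FDCWD, "old", AT_FDCWD, "new",
                            RENAME_NOREPLACE) == -1) {
                        perror("renameat2");
                        return 1;
                }
                return 0;
        }
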
index 13f8bdec5110d1a7db12b2a262bb5e2ecb0e4f82..96d513e01a5d534e8717622a4fb2c7b20bb98d64 100644 (file)
@@ -223,6 +223,8 @@ void fuse_finish_open(struct inode *inode, struct file *file)
                i_size_write(inode, 0);
                spin_unlock(&fc->lock);
                fuse_invalidate_attr(inode);
+               if (fc->writeback_cache)
+                       file_update_time(file);
        }
        if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
                fuse_link_write_file(file);
@@ -232,18 +234,26 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;
+       bool lock_inode = (file->f_flags & O_TRUNC) &&
+                         fc->atomic_o_trunc &&
+                         fc->writeback_cache;
 
        err = generic_file_open(inode, file);
        if (err)
                return err;
 
+       if (lock_inode)
+               mutex_lock(&inode->i_mutex);
+
        err = fuse_do_open(fc, get_node_id(inode), file, isdir);
-       if (err)
-               return err;
 
-       fuse_finish_open(inode, file);
+       if (!err)
+               fuse_finish_open(inode, file);
 
-       return 0;
+       if (lock_inode)
+               mutex_unlock(&inode->i_mutex);
+
+       return err;
 }
 
 static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
@@ -314,10 +324,7 @@ static int fuse_release(struct inode *inode, struct file *file)
 
        /* see fuse_vma_close() for !writeback_cache case */
        if (fc->writeback_cache)
-               filemap_write_and_wait(file->f_mapping);
-
-       if (test_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state))
-               fuse_flush_mtime(file, true);
+               write_inode_now(inode, 1);
 
        fuse_release_common(file, FUSE_RELEASE);
 
@@ -439,7 +446,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
        if (fc->no_flush)
                return 0;
 
-       err = filemap_write_and_wait(file->f_mapping);
+       err = write_inode_now(inode, 1);
        if (err)
                return err;
 
@@ -480,13 +487,6 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
        if (is_bad_inode(inode))
                return -EIO;
 
-       err = filemap_write_and_wait_range(inode->i_mapping, start, end);
-       if (err)
-               return err;
-
-       if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
-               return 0;
-
        mutex_lock(&inode->i_mutex);
 
        /*
@@ -494,17 +494,17 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
         * wait for all outstanding writes, before sending the FSYNC
         * request.
         */
-       err = write_inode_now(inode, 0);
+       err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                goto out;
 
        fuse_sync_writes(inode);
+       err = sync_inode_metadata(inode, 1);
+       if (err)
+               goto out;
 
-       if (test_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state)) {
-               int err = fuse_flush_mtime(file, false);
-               if (err)
-                       goto out;
-       }
+       if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
+               goto out;
 
        req = fuse_get_req_nopages(fc);
        if (IS_ERR(req)) {
@@ -1659,13 +1659,13 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
        fuse_writepage_free(fc, req);
 }
 
-static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
-                                            struct fuse_inode *fi)
+static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
+                                              struct fuse_inode *fi)
 {
        struct fuse_file *ff = NULL;
 
        spin_lock(&fc->lock);
-       if (!WARN_ON(list_empty(&fi->write_files))) {
+       if (!list_empty(&fi->write_files)) {
                ff = list_entry(fi->write_files.next, struct fuse_file,
                                write_entry);
                fuse_file_get(ff);
@@ -1675,6 +1675,29 @@ static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
        return ff;
 }
 
+static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
+                                            struct fuse_inode *fi)
+{
+       struct fuse_file *ff = __fuse_write_file_get(fc, fi);
+       WARN_ON(!ff);
+       return ff;
+}
+
+int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+       struct fuse_file *ff;
+       int err;
+
+       ff = __fuse_write_file_get(fc, fi);
+       err = fuse_flush_times(inode, ff);
+       if (ff)
+               fuse_file_put(ff, 0);
+
+       return err;
+}
+
 static int fuse_writepage_locked(struct page *page)
 {
        struct address_space *mapping = page->mapping;
@@ -2972,6 +2995,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
                           (mode & FALLOC_FL_PUNCH_HOLE);
 
+       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+               return -EOPNOTSUPP;
+
        if (fc->no_fallocate)
                return -EOPNOTSUPP;
 
@@ -3017,12 +3043,8 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        if (!(mode & FALLOC_FL_KEEP_SIZE)) {
                bool changed = fuse_write_update_size(inode, offset + length);
 
-               if (changed && fc->writeback_cache) {
-                       struct fuse_inode *fi = get_fuse_inode(inode);
-
-                       inode->i_mtime = current_fs_time(inode->i_sb);
-                       set_bit(FUSE_I_MTIME_DIRTY, &fi->state);
-               }
+               if (changed && fc->writeback_cache)
+                       file_update_time(file);
        }
 
        if (mode & FALLOC_FL_PUNCH_HOLE)
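
The new mode check in fuse_file_fallocate() lets only plain preallocation (with or without FALLOC_FL_KEEP_SIZE) and hole punching through; any other mode bits now fail with EOPNOTSUPP before a request is sent. A hole-punch sketch for reference (per fallocate(2), FALLOC_FL_PUNCH_HOLE must be paired with FALLOC_FL_KEEP_SIZE; the flags are assumed to come from <linux/falloc.h>):

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <linux/falloc.h>       /* FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE */
        #include <stdio.h>

        /* Deallocate [offset, offset + len) without changing the file size. */
        int punch_hole(int fd, off_t offset, off_t len)
        {
                if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                              offset, len) == -1) {
                        perror("fallocate");
                        return -1;
                }
                return 0;
        }
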
index a257ed8ebee6c6db62339e1a8d3f2f87479310ea..7aa5c75e0de13dcc9728ba890983e842c1a98554 100644 (file)
@@ -119,8 +119,6 @@ enum {
        FUSE_I_INIT_RDPLUS,
        /** An operation changing file size is in progress  */
        FUSE_I_SIZE_UNSTABLE,
-       /** i_mtime has been updated locally; a flush to userspace needed */
-       FUSE_I_MTIME_DIRTY,
 };
 
 struct fuse_conn;
@@ -544,6 +542,9 @@ struct fuse_conn {
        /** Is fallocate not implemented by fs? */
        unsigned no_fallocate:1;
 
+       /** Is rename with flags implemented by fs? */
+       unsigned no_rename2:1;
+
        /** Use enhanced/automatic page cache invalidation. */
        unsigned auto_inval_data:1;
 
@@ -725,7 +726,7 @@ int fuse_dev_init(void);
 void fuse_dev_cleanup(void);
 
 int fuse_ctl_init(void);
-void fuse_ctl_cleanup(void);
+void __exit fuse_ctl_cleanup(void);
 
 /**
  * Allocate a request
@@ -891,7 +892,8 @@ int fuse_dev_release(struct inode *inode, struct file *file);
 
 bool fuse_write_update_size(struct inode *inode, loff_t pos);
 
-int fuse_flush_mtime(struct file *file, bool nofail);
+int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
+int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
 
 int fuse_do_setattr(struct inode *inode, struct iattr *attr,
                    struct file *file);
index 8d611696fcad303dfe4137a2ff9bc9e1d2a5b97d..754dcf23de8abf10ceee81926f022731b810cb54 100644 (file)
@@ -175,9 +175,9 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
        if (!fc->writeback_cache || !S_ISREG(inode->i_mode)) {
                inode->i_mtime.tv_sec   = attr->mtime;
                inode->i_mtime.tv_nsec  = attr->mtimensec;
+               inode->i_ctime.tv_sec   = attr->ctime;
+               inode->i_ctime.tv_nsec  = attr->ctimensec;
        }
-       inode->i_ctime.tv_sec   = attr->ctime;
-       inode->i_ctime.tv_nsec  = attr->ctimensec;
 
        if (attr->blksize != 0)
                inode->i_blkbits = ilog2(attr->blksize);
@@ -256,6 +256,8 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
        inode->i_size = attr->size;
        inode->i_mtime.tv_sec  = attr->mtime;
        inode->i_mtime.tv_nsec = attr->mtimensec;
+       inode->i_ctime.tv_sec  = attr->ctime;
+       inode->i_ctime.tv_nsec = attr->ctimensec;
        if (S_ISREG(inode->i_mode)) {
                fuse_init_common(inode);
                fuse_init_file_inode(inode);
@@ -303,7 +305,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
 
        if ((inode->i_state & I_NEW)) {
                inode->i_flags |= S_NOATIME;
-               if (!fc->writeback_cache || !S_ISREG(inode->i_mode))
+               if (!fc->writeback_cache || !S_ISREG(attr->mode))
                        inode->i_flags |= S_NOCMTIME;
                inode->i_generation = generation;
                inode->i_data.backing_dev_info = &fc->bdi;
@@ -788,6 +790,7 @@ static const struct super_operations fuse_super_operations = {
        .alloc_inode    = fuse_alloc_inode,
        .destroy_inode  = fuse_destroy_inode,
        .evict_inode    = fuse_evict_inode,
+       .write_inode    = fuse_write_inode,
        .drop_inode     = generic_delete_inode,
        .remount_fs     = fuse_remount_fs,
        .put_super      = fuse_put_super,
@@ -890,6 +893,11 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->async_dio = 1;
                        if (arg->flags & FUSE_WRITEBACK_CACHE)
                                fc->writeback_cache = 1;
+                       if (arg->time_gran && arg->time_gran <= 1000000000)
+                               fc->sb->s_time_gran = arg->time_gran;
+                       else
+                               fc->sb->s_time_gran = 1000000000;
+
                } else {
                        ra_pages = fc->max_read / PAGE_CACHE_SIZE;
                        fc->no_lock = 1;
@@ -996,7 +1004,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        if (sb->s_flags & MS_MANDLOCK)
                goto err;
 
-       sb->s_flags &= ~MS_NOSEC;
+       sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
 
        if (!parse_fuse_opt((char *) data, &d, is_bdev))
                goto err;
index 2040275209371d53d751a2cbbf92b251933c6dc6..e19d4c0cacae176bcb003e608c6872079908d92d 100644 (file)
@@ -1030,6 +1030,11 @@ static int __init init_hugetlbfs_fs(void)
        int error;
        int i;
 
+       if (!hugepages_supported()) {
+               pr_info("hugetlbfs: disabling because there are no supported hugepage sizes\n");
+               return -ENOTSUPP;
+       }
+
        error = bdi_init(&hugetlbfs_backing_dev_info);
        if (error)
                return error;
index 78f3403300afd5d825ab833964a443953d58f995..ac127cd008bfeb268c5be789e339b03538d2597c 100644 (file)
@@ -232,9 +232,6 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
        struct rb_node **node = &kn->parent->dir.children.rb_node;
        struct rb_node *parent = NULL;
 
-       if (kernfs_type(kn) == KERNFS_DIR)
-               kn->parent->dir.subdirs++;
-
        while (*node) {
                struct kernfs_node *pos;
                int result;
@@ -249,9 +246,15 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
                else
                        return -EEXIST;
        }
+
        /* add new node and rebalance the tree */
        rb_link_node(&kn->rb, parent, node);
        rb_insert_color(&kn->rb, &kn->parent->dir.children);
+
+       /* successfully added, account subdir number */
+       if (kernfs_type(kn) == KERNFS_DIR)
+               kn->parent->dir.subdirs++;
+
        return 0;
 }
 
index 8034706a7af87523bfc40e8660f21cc54238563f..5e9a80cfc3d8857c7cfe496eae627c921c47c6f2 100644 (file)
@@ -484,6 +484,8 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
 
        ops = kernfs_ops(of->kn);
        rc = ops->mmap(of, vma);
+       if (rc)
+               goto out_put;
 
        /*
         * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
@@ -608,6 +610,7 @@ static void kernfs_put_open_node(struct kernfs_node *kn,
 static int kernfs_fop_open(struct inode *inode, struct file *file)
 {
        struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
+       struct kernfs_root *root = kernfs_root(kn);
        const struct kernfs_ops *ops;
        struct kernfs_open_file *of;
        bool has_read, has_write, has_mmap;
@@ -622,14 +625,16 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
        has_write = ops->write || ops->mmap;
        has_mmap = ops->mmap;
 
-       /* check perms and supported operations */
-       if ((file->f_mode & FMODE_WRITE) &&
-           (!(inode->i_mode & S_IWUGO) || !has_write))
-               goto err_out;
+       /* see the flag definition for details */
+       if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
+               if ((file->f_mode & FMODE_WRITE) &&
+                   (!(inode->i_mode & S_IWUGO) || !has_write))
+                       goto err_out;
 
-       if ((file->f_mode & FMODE_READ) &&
-           (!(inode->i_mode & S_IRUGO) || !has_read))
-               goto err_out;
+               if ((file->f_mode & FMODE_READ) &&
+                   (!(inode->i_mode & S_IRUGO) || !has_read))
+                       goto err_out;
+       }
 
        /* allocate a kernfs_open_file for the file */
        error = -ENOMEM;
index 13fc7a6d380ae6648945c8956cc53901de2d0ccc..e390bd9ae068696d4a5425057559d3037e6f4518 100644 (file)
 #define IS_POSIX(fl)   (fl->fl_flags & FL_POSIX)
 #define IS_FLOCK(fl)   (fl->fl_flags & FL_FLOCK)
 #define IS_LEASE(fl)   (fl->fl_flags & (FL_LEASE|FL_DELEG))
-#define IS_FILE_PVT(fl)        (fl->fl_flags & FL_FILE_PVT)
+#define IS_OFDLCK(fl)  (fl->fl_flags & FL_OFDLCK)
 
 static bool lease_breaking(struct file_lock *fl)
 {
@@ -389,18 +389,6 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
        fl->fl_ops = NULL;
        fl->fl_lmops = NULL;
 
-       /* Ensure that fl->fl_filp has compatible f_mode */
-       switch (l->l_type) {
-       case F_RDLCK:
-               if (!(filp->f_mode & FMODE_READ))
-                       return -EBADF;
-               break;
-       case F_WRLCK:
-               if (!(filp->f_mode & FMODE_WRITE))
-                       return -EBADF;
-               break;
-       }
-
        return assign_type(fl, l->l_type);
 }
 
@@ -564,7 +552,7 @@ static void __locks_insert_block(struct file_lock *blocker,
        BUG_ON(!list_empty(&waiter->fl_block));
        waiter->fl_next = blocker;
        list_add_tail(&waiter->fl_block, &blocker->fl_block);
-       if (IS_POSIX(blocker) && !IS_FILE_PVT(blocker))
+       if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
                locks_insert_global_blocked(waiter);
 }
 
@@ -759,12 +747,12 @@ EXPORT_SYMBOL(posix_test_lock);
  * of tasks (such as posix threads) sharing the same open file table.
  * To handle those cases, we just bail out after a few iterations.
  *
- * For FL_FILE_PVT locks, the owner is the filp, not the files_struct.
+ * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
  * Because the owner is not even nominally tied to a thread of
  * execution, the deadlock detection below can't reasonably work well. Just
  * skip it for those.
  *
- * In principle, we could do a more limited deadlock detection on FL_FILE_PVT
+ * In principle, we could do a more limited deadlock detection on FL_OFDLCK
  * locks that just checks for the case where two tasks are attempting to
  * upgrade from read to write locks on the same inode.
  */
@@ -791,9 +779,9 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
 
        /*
         * This deadlock detector can't reasonably detect deadlocks with
-        * FL_FILE_PVT locks, since they aren't owned by a process, per-se.
+        * FL_OFDLCK locks, since they aren't owned by a process, per-se.
         */
-       if (IS_FILE_PVT(caller_fl))
+       if (IS_OFDLCK(caller_fl))
                return 0;
 
        while ((block_fl = what_owner_is_waiting_for(block_fl))) {
@@ -1391,11 +1379,10 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 
 restart:
        break_time = flock->fl_break_time;
-       if (break_time != 0) {
+       if (break_time != 0)
                break_time -= jiffies;
-               if (break_time == 0)
-                       break_time++;
-       }
+       if (break_time == 0)
+               break_time++;
        locks_insert_block(flock, new_fl);
        spin_unlock(&inode->i_lock);
        error = wait_event_interruptible_timeout(new_fl->fl_wait,
@@ -1891,7 +1878,7 @@ EXPORT_SYMBOL_GPL(vfs_test_lock);
 
 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
 {
-       flock->l_pid = IS_FILE_PVT(fl) ? -1 : fl->fl_pid;
+       flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
 #if BITS_PER_LONG == 32
        /*
         * Make sure we can represent the posix lock via
@@ -1913,7 +1900,7 @@ static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
 #if BITS_PER_LONG == 32
 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
 {
-       flock->l_pid = IS_FILE_PVT(fl) ? -1 : fl->fl_pid;
+       flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
        flock->l_start = fl->fl_start;
        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
                fl->fl_end - fl->fl_start + 1;
@@ -1942,13 +1929,13 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
        if (error)
                goto out;
 
-       if (cmd == F_GETLKP) {
+       if (cmd == F_OFD_GETLK) {
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_GETLK;
-               file_lock.fl_flags |= FL_FILE_PVT;
+               file_lock.fl_flags |= FL_OFDLCK;
                file_lock.fl_owner = (fl_owner_t)filp;
        }
 
@@ -2035,6 +2022,22 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
        return error;
 }
 
+/* Ensure that fl->fl_filp has compatible f_mode for F_SETLK calls */
+static int
+check_fmode_for_setlk(struct file_lock *fl)
+{
+       switch (fl->fl_type) {
+       case F_RDLCK:
+               if (!(fl->fl_file->f_mode & FMODE_READ))
+                       return -EBADF;
+               break;
+       case F_WRLCK:
+               if (!(fl->fl_file->f_mode & FMODE_WRITE))
+                       return -EBADF;
+       }
+       return 0;
+}
+
 /* Apply the lock described by l to an open file descriptor.
  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
  */
@@ -2072,27 +2075,31 @@ again:
        if (error)
                goto out;
 
+       error = check_fmode_for_setlk(file_lock);
+       if (error)
+               goto out;
+
        /*
         * If the cmd is requesting file-private locks, then set the
-        * FL_FILE_PVT flag and override the owner.
+        * FL_OFDLCK flag and override the owner.
         */
        switch (cmd) {
-       case F_SETLKP:
+       case F_OFD_SETLK:
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_SETLK;
-               file_lock->fl_flags |= FL_FILE_PVT;
+               file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = (fl_owner_t)filp;
                break;
-       case F_SETLKPW:
+       case F_OFD_SETLKW:
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_SETLKW;
-               file_lock->fl_flags |= FL_FILE_PVT;
+               file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = (fl_owner_t)filp;
                /* Fallthrough */
        case F_SETLKW:
@@ -2144,13 +2151,13 @@ int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
        if (error)
                goto out;
 
-       if (cmd == F_GETLKP) {
+       if (cmd == F_OFD_GETLK) {
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_GETLK64;
-               file_lock.fl_flags |= FL_FILE_PVT;
+               file_lock.fl_flags |= FL_OFDLCK;
                file_lock.fl_owner = (fl_owner_t)filp;
        }
 
@@ -2207,27 +2214,31 @@ again:
        if (error)
                goto out;
 
+       error = check_fmode_for_setlk(file_lock);
+       if (error)
+               goto out;
+
        /*
         * If the cmd is requesting file-private locks, then set the
-        * FL_FILE_PVT flag and override the owner.
+        * FL_OFDLCK flag and override the owner.
         */
        switch (cmd) {
-       case F_SETLKP:
+       case F_OFD_SETLK:
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_SETLK64;
-               file_lock->fl_flags |= FL_FILE_PVT;
+               file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = (fl_owner_t)filp;
                break;
-       case F_SETLKPW:
+       case F_OFD_SETLKW:
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_SETLKW64;
-               file_lock->fl_flags |= FL_FILE_PVT;
+               file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = (fl_owner_t)filp;
                /* Fallthrough */
        case F_SETLKW64:
@@ -2413,8 +2424,8 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
        if (IS_POSIX(fl)) {
                if (fl->fl_flags & FL_ACCESS)
                        seq_printf(f, "ACCESS");
-               else if (IS_FILE_PVT(fl))
-                       seq_printf(f, "FLPVT ");
+               else if (IS_OFDLCK(fl))
+                       seq_printf(f, "OFDLCK");
                else
                        seq_printf(f, "POSIX ");
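
The IS_OFDLCK() handling in posix_lock_to_flock() above is also visible to callers of F_OFD_GETLK: a conflicting lock that is itself an OFD lock is reported with l_pid set to -1, since it belongs to an open file description rather than to a process. A query sketch, under the same F_OFD_* header assumption as the earlier fcntl example:

        #include <fcntl.h>

        /* Returns 1 if a conflicting lock (OFD or classic POSIX) is held
         * elsewhere, 0 if the whole file could be write-locked, -1 on error. */
        int ofd_wrlock_conflict(int fd)
        {
                struct flock fl = {
                        .l_type   = F_WRLCK,
                        .l_whence = SEEK_SET,
                        .l_start  = 0,
                        .l_len    = 0,  /* whole file */
                        .l_pid    = 0,  /* must be 0 on input for F_OFD_GETLK */
                };

                if (fcntl(fd, F_OFD_GETLK, &fl) == -1)
                        return -1;
                /* On conflict, fl.l_pid is -1 when the blocker is an OFD lock. */
                return fl.l_type != F_UNLCK;
        }
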
 
index c6157c894fce234c333d5a2d787f81ee3e5e7ba9..80168273396bbaeb4490677854fe34d637a78551 100644 (file)
@@ -1542,7 +1542,7 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
                inode = path->dentry->d_inode;
        }
        err = -ENOENT;
-       if (!inode)
+       if (!inode || d_is_negative(path->dentry))
                goto out_path_put;
 
        if (should_follow_link(path->dentry, follow)) {
@@ -2249,7 +2249,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
        mutex_unlock(&dir->d_inode->i_mutex);
 
 done:
-       if (!dentry->d_inode) {
+       if (!dentry->d_inode || d_is_negative(dentry)) {
                error = -ENOENT;
                dput(dentry);
                goto out;
@@ -2994,7 +2994,7 @@ retry_lookup:
 finish_lookup:
        /* we _can_ be in RCU mode here */
        error = -ENOENT;
-       if (d_is_negative(path->dentry)) {
+       if (!inode || d_is_negative(path->dentry)) {
                path_to_nameidata(path, nd);
                goto out;
        }
index 6f3f392d48af76d9b7bdb752f1d13eff9580be1b..b6f46013dddf26bc5306349a26160aa5c9450eb7 100644 (file)
@@ -402,8 +402,10 @@ sort_pacl(struct posix_acl *pacl)
         * by uid/gid. */
        int i, j;
 
-       if (pacl->a_count <= 4)
-               return; /* no users or groups */
+       /* no users or groups */
+       if (!pacl || pacl->a_count <= 4)
+               return;
+
        i = 1;
        while (pacl->a_entries[i].e_tag == ACL_USER)
                i++;
@@ -530,13 +532,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
 
        /*
         * ACLs with no ACEs are treated differently in the inheritable
-        * and effective cases: when there are no inheritable ACEs, we
-        * set a zero-length default posix acl:
+        * and effective cases: when there are no inheritable ACEs,
+        * calls ->set_acl with a NULL ACL structure.
         */
-       if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
-               pacl = posix_acl_alloc(0, GFP_KERNEL);
-               return pacl ? pacl : ERR_PTR(-ENOMEM);
-       }
+       if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
+               return NULL;
+
        /*
         * When there are no effective ACEs, the following will end
         * up setting a 3-element effective posix ACL with all
index 39c8ef875f91b5a93b57c8886b56569d679296a3..2c73cae9899d25007818373e11eaec1c8fdb9436 100644 (file)
@@ -654,9 +654,11 @@ static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
 
 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
 {
+       int maxtime = max_cb_time(clp->net);
        struct rpc_timeout      timeparms = {
-               .to_initval     = max_cb_time(clp->net),
+               .to_initval     = maxtime,
                .to_retries     = 0,
+               .to_maxval      = maxtime,
        };
        struct rpc_create_args args = {
                .net            = clp->net,
index 3ba65979a3cde006e73ed2c310e2bc42f6988fb2..32b699bebb9c3e7281ba58e1d123dc6c2f0ce361 100644 (file)
@@ -1078,6 +1078,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
                return NULL;
        }
        clp->cl_name.len = name.len;
+       INIT_LIST_HEAD(&clp->cl_sessions);
+       idr_init(&clp->cl_stateids);
+       atomic_set(&clp->cl_refcount, 0);
+       clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+       INIT_LIST_HEAD(&clp->cl_idhash);
+       INIT_LIST_HEAD(&clp->cl_openowners);
+       INIT_LIST_HEAD(&clp->cl_delegations);
+       INIT_LIST_HEAD(&clp->cl_lru);
+       INIT_LIST_HEAD(&clp->cl_callbacks);
+       INIT_LIST_HEAD(&clp->cl_revoked);
+       spin_lock_init(&clp->cl_lock);
+       rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        return clp;
 }
 
@@ -1095,6 +1107,7 @@ free_client(struct nfs4_client *clp)
                WARN_ON_ONCE(atomic_read(&ses->se_ref));
                free_session(ses);
        }
+       rpc_destroy_wait_queue(&clp->cl_cb_waitq);
        free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_name.data);
        idr_destroy(&clp->cl_stateids);
@@ -1347,7 +1360,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
        if (clp == NULL)
                return NULL;
 
-       INIT_LIST_HEAD(&clp->cl_sessions);
        ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
        if (ret) {
                spin_lock(&nn->client_lock);
@@ -1355,20 +1367,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
                spin_unlock(&nn->client_lock);
                return NULL;
        }
-       idr_init(&clp->cl_stateids);
-       atomic_set(&clp->cl_refcount, 0);
-       clp->cl_cb_state = NFSD4_CB_UNKNOWN;
-       INIT_LIST_HEAD(&clp->cl_idhash);
-       INIT_LIST_HEAD(&clp->cl_openowners);
-       INIT_LIST_HEAD(&clp->cl_delegations);
-       INIT_LIST_HEAD(&clp->cl_lru);
-       INIT_LIST_HEAD(&clp->cl_callbacks);
-       INIT_LIST_HEAD(&clp->cl_revoked);
-       spin_lock_init(&clp->cl_lock);
        nfsd4_init_callback(&clp->cl_cb_null);
        clp->cl_time = get_seconds();
        clear_bit(0, &clp->cl_cb_slot_busy);
-       rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        copy_verf(clp, verf);
        rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
        gen_confirm(clp);
index 2723c1badd01276f9c1802d6cac210aa40f8f0a3..18881f34737ad89e8259042fd33ee5d6a9c26edc 100644 (file)
@@ -3627,14 +3627,6 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
        /* nfsd4_check_resp_size guarantees enough room for error status */
        if (!op->status)
                op->status = nfsd4_check_resp_size(resp, 0);
-       if (op->status == nfserr_resource && nfsd4_has_session(&resp->cstate)) {
-               struct nfsd4_slot *slot = resp->cstate.slot;
-
-               if (slot->sl_flags & NFSD4_SLOT_CACHETHIS)
-                       op->status = nfserr_rep_too_big_to_cache;
-               else
-                       op->status = nfserr_rep_too_big;
-       }
        if (so) {
                so->so_replay.rp_status = op->status;
                so->so_replay.rp_buflen = (char *)resp->p - (char *)(statp+1);
index 4e565c814309d2637c7d9bb8081f03a4d19216d0..732648b270dc1072f679b550fb386147e65b5db0 100644 (file)
@@ -698,6 +698,8 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
        }
        group->overflow_event = &oevent->fse;
 
+       if (force_o_largefile())
+               event_f_flags |= O_LARGEFILE;
        group->fanotify_data.f_flags = event_f_flags;
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        spin_lock_init(&group->fanotify_data.access_lock);
index af3f7aa73e13a007d06fd528d8c8d101d2553087..ee1f88419cb0640d38203eb3a16c7b03094f7c56 100644 (file)
@@ -472,11 +472,15 @@ bail:
 
 void dlm_destroy_master_caches(void)
 {
-       if (dlm_lockname_cache)
+       if (dlm_lockname_cache) {
                kmem_cache_destroy(dlm_lockname_cache);
+               dlm_lockname_cache = NULL;
+       }
 
-       if (dlm_lockres_cache)
+       if (dlm_lockres_cache) {
                kmem_cache_destroy(dlm_lockres_cache);
+               dlm_lockres_cache = NULL;
+       }
 }
 
 static void dlm_lockres_release(struct kref *kref)
index 3d30eb1fc95e383e50e91605d3526161bcfdebde..9d64679cec73b00fc4685e23d69374ca122fed09 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -254,16 +254,21 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                return -EBADF;
 
        /*
-        * It's not possible to punch hole or perform collapse range
-        * on append only file
+        * We can only allow pure fallocate on append only files
         */
-       if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)
-           && IS_APPEND(inode))
+       if ((mode & ~FALLOC_FL_KEEP_SIZE) && IS_APPEND(inode))
                return -EPERM;
 
        if (IS_IMMUTABLE(inode))
                return -EPERM;
 
+       /*
+        * We can not allow to do any fallocate operation on an active
+        * swapfile
+        */
+       if (IS_SWAPFILE(inode))
+               ret = -ETXTBSY;
+
        /*
         * Revalidate the write permissions, in case security policy has
         * changed since the files were opened.
@@ -286,14 +291,6 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
                return -EFBIG;
 
-       /*
-        * There is no need to overlap collapse range with EOF, in which case
-        * it is effectively a truncate operation
-        */
-       if ((mode & FALLOC_FL_COLLAPSE_RANGE) &&
-           (offset + len >= i_size_read(inode)))
-               return -EINVAL;
-
        if (!file->f_op->fallocate)
                return -EOPNOTSUPP;
 
index 9e363e41dacc8c2ebc007661a598c01e84da45c9..0855f772cd41599d6c1d1091e7da616d32cccf53 100644 (file)
@@ -246,6 +246,12 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
        umode_t mode = 0;
        int not_equiv = 0;
 
+       /*
+        * A null ACL can always be presented as mode bits.
+        */
+       if (!acl)
+               return 0;
+
        FOREACH_ACL_ENTRY(pa, acl, pe) {
                switch (pa->e_tag) {
                        case ACL_USER_OBJ:
index 28cc1acd5439bf8caeb6d93e8bf68804ad738b1b..e9ef59b3abb1e5552cdc2a8880df37b85093d249 100644 (file)
@@ -47,12 +47,13 @@ static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
        ssize_t count;
        char *buf;
 
-       /* acquire buffer and ensure that it's >= PAGE_SIZE */
+       /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */
        count = seq_get_buf(sf, &buf);
        if (count < PAGE_SIZE) {
                seq_commit(sf, -1);
                return 0;
        }
+       memset(buf, 0, PAGE_SIZE);
 
        /*
         * Invoke show().  Control may reach here via seq file lseek even
index a66ad6196f59cca2f61a31c4a966d41b92497ad8..8794423f7efbe2c90408ba9dfa6cbb7e55e6c3d9 100644 (file)
@@ -63,7 +63,8 @@ int __init sysfs_init(void)
 {
        int err;
 
-       sysfs_root = kernfs_create_root(NULL, 0, NULL);
+       sysfs_root = kernfs_create_root(NULL, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
+                                       NULL);
        if (IS_ERR(sysfs_root))
                return PTR_ERR(sysfs_root);
 
index a1266089eca1fc0054065dfc723e697daf5691e6..a81c7b556896115a4afbdea5452523057ccd195f 100644 (file)
@@ -1556,7 +1556,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
        if (c->space_fixup) {
                err = ubifs_fixup_free_space(c);
                if (err)
-                       return err;
+                       goto out;
        }
 
        err = check_free_space(c);
index 01b6a0102fbdd4d153612740d95c4f00fda691f7..abda1124a70f66fbd1a79b28f8eaf5bf1916da1e 100644 (file)
@@ -213,7 +213,7 @@ xfs_attr_calc_size(
                 * Out of line attribute, cannot double split, but
                 * make room for the attribute value itself.
                 */
-               uint    dblocks = XFS_B_TO_FSB(mp, valuelen);
+               uint    dblocks = xfs_attr3_rmt_blocks(mp, valuelen);
                nblks += dblocks;
                nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
        }
@@ -698,11 +698,22 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
 
                trace_xfs_attr_leaf_replace(args);
 
+               /* save the attribute state for later removal*/
                args->op_flags |= XFS_DA_OP_RENAME;     /* an atomic rename */
                args->blkno2 = args->blkno;             /* set 2nd entry info*/
                args->index2 = args->index;
                args->rmtblkno2 = args->rmtblkno;
                args->rmtblkcnt2 = args->rmtblkcnt;
+               args->rmtvaluelen2 = args->rmtvaluelen;
+
+               /*
+                * clear the remote attr state now that it is saved so that the
+                * values reflect the state of the attribute we are about to
+                * add, not the attribute we just found and will remove later.
+                */
+               args->rmtblkno = 0;
+               args->rmtblkcnt = 0;
+               args->rmtvaluelen = 0;
        }
 
        /*
@@ -794,6 +805,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
                args->blkno = args->blkno2;
                args->rmtblkno = args->rmtblkno2;
                args->rmtblkcnt = args->rmtblkcnt2;
+               args->rmtvaluelen = args->rmtvaluelen2;
                if (args->rmtblkno) {
                        error = xfs_attr_rmtval_remove(args);
                        if (error)
@@ -999,13 +1011,22 @@ restart:
 
                trace_xfs_attr_node_replace(args);
 
+               /* save the attribute state for later removal*/
                args->op_flags |= XFS_DA_OP_RENAME;     /* atomic rename op */
                args->blkno2 = args->blkno;             /* set 2nd entry info*/
                args->index2 = args->index;
                args->rmtblkno2 = args->rmtblkno;
                args->rmtblkcnt2 = args->rmtblkcnt;
+               args->rmtvaluelen2 = args->rmtvaluelen;
+
+               /*
+                * clear the remote attr state now that it is saved so that the
+                * values reflect the state of the attribute we are about to
+                * add, not the attribute we just found and will remove later.
+                */
                args->rmtblkno = 0;
                args->rmtblkcnt = 0;
+               args->rmtvaluelen = 0;
        }
 
        retval = xfs_attr3_leaf_add(blk->bp, state->args);
@@ -1133,6 +1154,7 @@ restart:
                args->blkno = args->blkno2;
                args->rmtblkno = args->rmtblkno2;
                args->rmtblkcnt = args->rmtblkcnt2;
+               args->rmtvaluelen = args->rmtvaluelen2;
                if (args->rmtblkno) {
                        error = xfs_attr_rmtval_remove(args);
                        if (error)
index fe9587fab17a6822d9bec1b5862a6a133adf9dc7..511c283459b19441d782114c4f832207860deab1 100644 (file)
@@ -1229,6 +1229,7 @@ xfs_attr3_leaf_add_work(
                name_rmt->valueblk = 0;
                args->rmtblkno = 1;
                args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
+               args->rmtvaluelen = args->valuelen;
        }
        xfs_trans_log_buf(args->trans, bp,
             XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
@@ -2167,11 +2168,11 @@ xfs_attr3_leaf_lookup_int(
                        if (!xfs_attr_namesp_match(args->flags, entry->flags))
                                continue;
                        args->index = probe;
-                       args->valuelen = be32_to_cpu(name_rmt->valuelen);
+                       args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
                        args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
                        args->rmtblkcnt = xfs_attr3_rmt_blocks(
                                                        args->dp->i_mount,
-                                                       args->valuelen);
+                                                       args->rmtvaluelen);
                        return XFS_ERROR(EEXIST);
                }
        }
@@ -2220,19 +2221,19 @@ xfs_attr3_leaf_getvalue(
                name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
                ASSERT(name_rmt->namelen == args->namelen);
                ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
-               valuelen = be32_to_cpu(name_rmt->valuelen);
+               args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
                args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
                args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
-                                                      valuelen);
+                                                      args->rmtvaluelen);
                if (args->flags & ATTR_KERNOVAL) {
-                       args->valuelen = valuelen;
+                       args->valuelen = args->rmtvaluelen;
                        return 0;
                }
-               if (args->valuelen < valuelen) {
-                       args->valuelen = valuelen;
+               if (args->valuelen < args->rmtvaluelen) {
+                       args->valuelen = args->rmtvaluelen;
                        return XFS_ERROR(ERANGE);
                }
-               args->valuelen = valuelen;
+               args->valuelen = args->rmtvaluelen;
        }
        return 0;
 }
@@ -2519,7 +2520,7 @@ xfs_attr3_leaf_clearflag(
                ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
                name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
                name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
-               name_rmt->valuelen = cpu_to_be32(args->valuelen);
+               name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
                xfs_trans_log_buf(args->trans, bp,
                         XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
        }
@@ -2677,7 +2678,7 @@ xfs_attr3_leaf_flipflags(
                ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
                name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
                name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
-               name_rmt->valuelen = cpu_to_be32(args->valuelen);
+               name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
                xfs_trans_log_buf(args->trans, bp1,
                         XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
        }
index 01db96f60cf05abf1f42e20252cdff4de0ebea0d..833fe5d98d806783ca6bb6cf2f4ecb83424bfdca 100644 (file)
@@ -447,6 +447,7 @@ xfs_attr3_leaf_list_int(
                                args.dp = context->dp;
                                args.whichfork = XFS_ATTR_FORK;
                                args.valuelen = valuelen;
+                               args.rmtvaluelen = valuelen;
                                args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
                                args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
                                args.rmtblkcnt = xfs_attr3_rmt_blocks(
index 6e37823e2932aeb45d112b55010ab627adb5bde2..d2e6e948cec7be3013b853033b6e1a64b365a682 100644 (file)
@@ -337,7 +337,7 @@ xfs_attr_rmtval_get(
        struct xfs_buf          *bp;
        xfs_dablk_t             lblkno = args->rmtblkno;
        __uint8_t               *dst = args->value;
-       int                     valuelen = args->valuelen;
+       int                     valuelen;
        int                     nmap;
        int                     error;
        int                     blkcnt = args->rmtblkcnt;
@@ -347,7 +347,9 @@ xfs_attr_rmtval_get(
        trace_xfs_attr_rmtval_get(args);
 
        ASSERT(!(args->flags & ATTR_KERNOVAL));
+       ASSERT(args->rmtvaluelen == args->valuelen);
 
+       valuelen = args->rmtvaluelen;
        while (valuelen > 0) {
                nmap = ATTR_RMTVALUE_MAPSIZE;
                error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
@@ -415,7 +417,7 @@ xfs_attr_rmtval_set(
         * attributes have headers, we can't just do a straight byte to FSB
         * conversion and have to take the header space into account.
         */
-       blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
+       blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
        error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
                                                   XFS_ATTR_FORK);
        if (error)
@@ -480,7 +482,7 @@ xfs_attr_rmtval_set(
         */
        lblkno = args->rmtblkno;
        blkcnt = args->rmtblkcnt;
-       valuelen = args->valuelen;
+       valuelen = args->rmtvaluelen;
        while (valuelen > 0) {
                struct xfs_buf  *bp;
                xfs_daddr_t     dblkno;
index 6e95ea79f5d73aa2447fc49eb0e4c7e9bb73a25d..201c6091d26abfa2e38dd17fd960e3efcb69bcd7 100644 (file)
@@ -60,10 +60,12 @@ typedef struct xfs_da_args {
        int             index;          /* index of attr of interest in blk */
        xfs_dablk_t     rmtblkno;       /* remote attr value starting blkno */
        int             rmtblkcnt;      /* remote attr value block count */
+       int             rmtvaluelen;    /* remote attr value length in bytes */
        xfs_dablk_t     blkno2;         /* blkno of 2nd attr leaf of interest */
        int             index2;         /* index of 2nd attr in blk */
        xfs_dablk_t     rmtblkno2;      /* remote attr value starting blkno */
        int             rmtblkcnt2;     /* remote attr value block count */
+       int             rmtvaluelen2;   /* remote attr value length in bytes */
        int             op_flags;       /* operation flags */
        enum xfs_dacmp  cmpresult;      /* name compare result for lookups */
 } xfs_da_args_t;
index 1399e187d425dc7af0f8b5062afaf312fe5a0927..753e467aa1a5991d0175087284ff9cde35591c40 100644 (file)
@@ -237,7 +237,7 @@ xfs_fs_nfs_commit_metadata(
 
        if (!lsn)
                return 0;
-       return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+       return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
 }
 
 const struct export_operations xfs_export_operations = {
index 82afdcb33183951350df18d3ce05b3aeecdf3e76..830c1c937b8888e7adba5557997d8d30dfc91713 100644 (file)
@@ -155,7 +155,7 @@ xfs_dir_fsync(
 
        if (!lsn)
                return 0;
-       return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+       return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
 }
 
 STATIC int
@@ -295,7 +295,7 @@ xfs_file_aio_read(
                xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 
                if (inode->i_mapping->nrpages) {
-                       ret = -filemap_write_and_wait_range(
+                       ret = filemap_write_and_wait_range(
                                                        VFS_I(ip)->i_mapping,
                                                        pos, -1);
                        if (ret) {
@@ -837,11 +837,19 @@ xfs_file_fallocate(
                unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
 
                if (offset & blksize_mask || len & blksize_mask) {
-                       error = -EINVAL;
+                       error = EINVAL;
+                       goto out_unlock;
+               }
+
+               /*
+                * The collapse range must not reach or overlap EOF;
+                * in that case it is effectively a truncate operation.
+                */
+               if (offset + len >= i_size_read(inode)) {
+                       error = EINVAL;
                        goto out_unlock;
                }
 
-               ASSERT(offset + len < i_size_read(inode));
                new_size = i_size_read(inode) - len;
 
                error = xfs_collapse_file_space(ip, offset, len);
index ef1ca010f417713358c0d0f3869189121e2f0ff0..36d630319a2784c7fe39f83fc9d66b74ee3add7b 100644 (file)
@@ -72,8 +72,8 @@ xfs_initxattrs(
        int                     error = 0;
 
        for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-               error = xfs_attr_set(ip, xattr->name, xattr->value,
-                                    xattr->value_len, ATTR_SECURE);
+               error = -xfs_attr_set(ip, xattr->name, xattr->value,
+                                     xattr->value_len, ATTR_SECURE);
                if (error < 0)
                        break;
        }
@@ -93,8 +93,8 @@ xfs_init_security(
        struct inode    *dir,
        const struct qstr *qstr)
 {
-       return security_inode_init_security(inode, dir, qstr,
-                                           &xfs_initxattrs, NULL);
+       return -security_inode_init_security(inode, dir, qstr,
+                                            &xfs_initxattrs, NULL);
 }
 
 static void
@@ -124,15 +124,15 @@ xfs_cleanup_inode(
        xfs_dentry_to_name(&teardown, dentry, 0);
 
        xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
-       iput(inode);
 }
 
 STATIC int
-xfs_vn_mknod(
+xfs_generic_create(
        struct inode    *dir,
        struct dentry   *dentry,
        umode_t         mode,
-       dev_t           rdev)
+       dev_t           rdev,
+       bool            tmpfile)        /* unnamed file */
 {
        struct inode    *inode;
        struct xfs_inode *ip = NULL;
@@ -156,8 +156,12 @@ xfs_vn_mknod(
        if (error)
                return error;
 
-       xfs_dentry_to_name(&name, dentry, mode);
-       error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
+       if (!tmpfile) {
+               xfs_dentry_to_name(&name, dentry, mode);
+               error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
+       } else {
+               error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
+       }
        if (unlikely(error))
                goto out_free_acl;
 
@@ -169,18 +173,22 @@ xfs_vn_mknod(
 
 #ifdef CONFIG_XFS_POSIX_ACL
        if (default_acl) {
-               error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+               error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
                if (error)
                        goto out_cleanup_inode;
        }
        if (acl) {
-               error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+               error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
                if (error)
                        goto out_cleanup_inode;
        }
 #endif
 
-       d_instantiate(dentry, inode);
+       if (tmpfile)
+               d_tmpfile(dentry, inode);
+       else
+               d_instantiate(dentry, inode);
+
  out_free_acl:
        if (default_acl)
                posix_acl_release(default_acl);
@@ -189,10 +197,22 @@ xfs_vn_mknod(
        return -error;
 
  out_cleanup_inode:
-       xfs_cleanup_inode(dir, inode, dentry);
+       if (!tmpfile)
+               xfs_cleanup_inode(dir, inode, dentry);
+       iput(inode);
        goto out_free_acl;
 }
 
+STATIC int
+xfs_vn_mknod(
+       struct inode    *dir,
+       struct dentry   *dentry,
+       umode_t         mode,
+       dev_t           rdev)
+{
+       return xfs_generic_create(dir, dentry, mode, rdev, false);
+}
+
 STATIC int
 xfs_vn_create(
        struct inode    *dir,
@@ -353,6 +373,7 @@ xfs_vn_symlink(
 
  out_cleanup_inode:
        xfs_cleanup_inode(dir, inode, dentry);
+       iput(inode);
  out:
        return -error;
 }
@@ -1053,25 +1074,7 @@ xfs_vn_tmpfile(
        struct dentry   *dentry,
        umode_t         mode)
 {
-       int                     error;
-       struct xfs_inode        *ip;
-       struct inode            *inode;
-
-       error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
-       if (unlikely(error))
-               return -error;
-
-       inode = VFS_I(ip);
-
-       error = xfs_init_security(inode, dir, &dentry->d_name);
-       if (unlikely(error)) {
-               iput(inode);
-               return -error;
-       }
-
-       d_tmpfile(dentry, inode);
-
-       return 0;
+       return xfs_generic_create(dir, dentry, mode, 0, true);
 }
 
 static const struct inode_operations xfs_inode_operations = {
index 08624dc67317185b1e044fd7d67ef4573d941ef6..a5f8bd9899d37a811af38a616da44f66d73d399f 100644 (file)
@@ -616,11 +616,13 @@ xfs_log_mount(
        int             error = 0;
        int             min_logfsbs;
 
-       if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
-               xfs_notice(mp, "Mounting Filesystem");
-       else {
+       if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
+               xfs_notice(mp, "Mounting V%d Filesystem",
+                          XFS_SB_VERSION_NUM(&mp->m_sb));
+       } else {
                xfs_notice(mp,
-"Mounting filesystem in no-recovery mode.  Filesystem will be inconsistent.");
+"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
+                          XFS_SB_VERSION_NUM(&mp->m_sb));
                ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
        }
 
index 993cb19e7d390e03220f265e9ba97e0f0c8dc7fa..944f3d9456a8b4f6f0fe44721fe98f9f52c3bd69 100644 (file)
@@ -743,8 +743,6 @@ xfs_mountfs(
                new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
                if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
                        mp->m_inode_cluster_size = new_size;
-               xfs_info(mp, "Using inode cluster size of %d bytes",
-                        mp->m_inode_cluster_size);
        }
 
        /*
index 348e4d2ed6e6e9621b82c535212e32093b51da0c..dc977b6e6a365a4222fb79b212f5c2b23b46b84c 100644 (file)
@@ -843,22 +843,17 @@ xfs_qm_init_quotainfo(
 
        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 
-       if ((error = list_lru_init(&qinf->qi_lru))) {
-               kmem_free(qinf);
-               mp->m_quotainfo = NULL;
-               return error;
-       }
+       error = -list_lru_init(&qinf->qi_lru);
+       if (error)
+               goto out_free_qinf;
 
        /*
         * See if quotainodes are setup, and if not, allocate them,
         * and change the superblock accordingly.
         */
-       if ((error = xfs_qm_init_quotainos(mp))) {
-               list_lru_destroy(&qinf->qi_lru);
-               kmem_free(qinf);
-               mp->m_quotainfo = NULL;
-               return error;
-       }
+       error = xfs_qm_init_quotainos(mp);
+       if (error)
+               goto out_free_lru;
 
        INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
@@ -918,7 +913,7 @@ xfs_qm_init_quotainfo(
                qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
                qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
                qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
+
                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -935,6 +930,13 @@ xfs_qm_init_quotainfo(
        qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
        register_shrinker(&qinf->qi_shrinker);
        return 0;
+
+out_free_lru:
+       list_lru_destroy(&qinf->qi_lru);
+out_free_qinf:
+       kmem_free(qinf);
+       mp->m_quotainfo = NULL;
+       return error;
 }
 
 
index 0c0e41bbe4e369d7bf5267ed906e59159a4e2ff7..8baf61afae1ddf132757caad16c359ec447dcc39 100644 (file)
@@ -201,10 +201,6 @@ xfs_mount_validate_sb(
         * write validation, we don't need to check feature masks.
         */
        if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
-               xfs_alert(mp,
-"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
-"Use of these features in this kernel is at your own risk!");
-
                if (xfs_sb_has_compat_feature(sbp,
                                        XFS_SB_FEAT_COMPAT_UNKNOWN)) {
                        xfs_warn(mp,
index 2053767763773b60d7b7f8aca31f3b31a9219785..3494eff8e4ebfbce7256fd8d95769e2a1799619c 100644 (file)
@@ -1433,11 +1433,11 @@ xfs_fs_fill_super(
        if (error)
                goto out_free_fsname;
 
-       error = xfs_init_mount_workqueues(mp);
+       error = -xfs_init_mount_workqueues(mp);
        if (error)
                goto out_close_devices;
 
-       error = xfs_icsb_init_counters(mp);
+       error = -xfs_icsb_init_counters(mp);
        if (error)
                goto out_destroy_workqueues;
 
index 5a64ca4621f3f650e3c6718137a8ea549198d171..f23174fb9ec4340378df59b5cc89b43ecf342bec 100644 (file)
@@ -93,5 +93,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 #define set_fixmap_io(idx, phys) \
        __set_fixmap(idx, phys, FIXMAP_PAGE_IO)
 
+#define set_fixmap_offset_io(idx, phys) \
+       __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_GENERIC_FIXMAP_H */
index b4ea8f50fc65ed409335bf37b3088dfde626d4a2..5e752b9590548448b42080151169cc7e38a1245c 100644 (file)
@@ -12,7 +12,7 @@
        [RLIMIT_CPU]            = {  RLIM_INFINITY,  RLIM_INFINITY },   \
        [RLIMIT_FSIZE]          = {  RLIM_INFINITY,  RLIM_INFINITY },   \
        [RLIMIT_DATA]           = {  RLIM_INFINITY,  RLIM_INFINITY },   \
-       [RLIMIT_STACK]          = {       _STK_LIM,   _STK_LIM_MAX },   \
+       [RLIMIT_STACK]          = {       _STK_LIM,  RLIM_INFINITY },   \
        [RLIMIT_CORE]           = {              0,  RLIM_INFINITY },   \
        [RLIMIT_RSS]            = {  RLIM_INFINITY,  RLIM_INFINITY },   \
        [RLIMIT_NPROC]          = {              0,              0 },   \
index d3909effd7256ee1334910f72ab57becccff93f9..94f9ea8abcae35af8ca36560403fbd25facb7c65 100644 (file)
@@ -50,11 +50,7 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 }
 
 #ifndef zero_bytemask
-#ifdef CONFIG_64BIT
-#define zero_bytemask(mask)    (~0ul << fls64(mask))
-#else
-#define zero_bytemask(mask)    (~0ul << fls(mask))
-#endif /* CONFIG_64BIT */
-#endif /* zero_bytemask */
+#define zero_bytemask(mask) (~1ul << __fls(mask))
+#endif
 
 #endif /* _ASM_WORD_AT_A_TIME_H */
index 49376aec2fbb8a9e054b9605fd9de36e4cd39c5f..6dfd64b3a6042d34176e85f25e73ed477a7c600e 100644 (file)
        {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
index 940ece4934bab0e0c17f491b28da059454967621..012d58fa8ff0ebb69812e2b863e6dee6a2ae6c01 100644 (file)
        INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
        INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
-       INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
-       INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
+       INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
+       INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
        INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
        INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
diff --git a/include/dt-bindings/clk/at91.h b/include/dt-bindings/clk/at91.h
deleted file mode 100644 (file)
index 0b4cb99..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * This header provides constants for AT91 pmc status.
- *
- * The constants defined in this header are being used in dts.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef _DT_BINDINGS_CLK_AT91_H
-#define _DT_BINDINGS_CLK_AT91_H
-
-#define AT91_PMC_MOSCS         0               /* MOSCS Flag */
-#define AT91_PMC_LOCKA         1               /* PLLA Lock */
-#define AT91_PMC_LOCKB         2               /* PLLB Lock */
-#define AT91_PMC_MCKRDY                3               /* Master Clock */
-#define AT91_PMC_LOCKU         6               /* UPLL Lock */
-#define AT91_PMC_PCKRDY(id)    (8 + (id))      /* Programmable Clock */
-#define AT91_PMC_MOSCSELS      16              /* Main Oscillator Selection */
-#define AT91_PMC_MOSCRCS       17              /* Main On-Chip RC */
-#define AT91_PMC_CFDEV         18              /* Clock Failure Detector Event */
-
-#endif
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
new file mode 100644 (file)
index 0000000..0b4cb99
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * This header provides constants for AT91 pmc status.
+ *
+ * The constants defined in this header are being used in dts.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#ifndef _DT_BINDINGS_CLK_AT91_H
+#define _DT_BINDINGS_CLK_AT91_H
+
+#define AT91_PMC_MOSCS         0               /* MOSCS Flag */
+#define AT91_PMC_LOCKA         1               /* PLLA Lock */
+#define AT91_PMC_LOCKB         2               /* PLLB Lock */
+#define AT91_PMC_MCKRDY                3               /* Master Clock */
+#define AT91_PMC_LOCKU         6               /* UPLL Lock */
+#define AT91_PMC_PCKRDY(id)    (8 + (id))      /* Programmable Clock */
+#define AT91_PMC_MOSCSELS      16              /* Main Oscillator Selection */
+#define AT91_PMC_MOSCRCS       17              /* Main On-Chip RC */
+#define AT91_PMC_CFDEV         18              /* Clock Failure Detector Event */
+
+#endif
index 8c1603b10665d141a2b0cb67e2643c0db7452146..433528ab51611ad684b546f6d9fccaf15de970ca 100644 (file)
@@ -29,7 +29,7 @@
 /* 10 (register bit affects spdif_in and spdif_out) */
 #define TEGRA124_CLK_I2S1 11
 #define TEGRA124_CLK_I2C1 12
-#define TEGRA124_CLK_NDFLASH 13
+/* 13 */
 #define TEGRA124_CLK_SDMMC1 14
 #define TEGRA124_CLK_SDMMC4 15
 /* 16 */
@@ -83,7 +83,7 @@
 
 /* 64 */
 #define TEGRA124_CLK_UARTD 65
-#define TEGRA124_CLK_UARTE 66
+/* 66 */
 #define TEGRA124_CLK_I2C3 67
 #define TEGRA124_CLK_SBC4 68
 #define TEGRA124_CLK_SDMMC3 69
@@ -97,7 +97,7 @@
 #define TEGRA124_CLK_TRACE 77
 #define TEGRA124_CLK_SOC_THERM 78
 #define TEGRA124_CLK_DTV 79
-#define TEGRA124_CLK_NDSPEED 80
+/* 80 */
 #define TEGRA124_CLK_I2CSLOW 81
 #define TEGRA124_CLK_DSIB 82
 #define TEGRA124_CLK_TSEC 83
index 8598f8eacb20e175aacb638edd41e514afc7f9d8..a495a959e8a754939b9c8c9d9bd748a73f54f587 100644 (file)
@@ -36,6 +36,8 @@ struct ath9k_platform_data {
 
        int (*get_mac_revision)(void);
        int (*external_reset)(void);
+
+       bool use_eeprom;
 };
 
 #endif /* _LINUX_ATH9K_PLATFORM_H */
index 78c6c52073ad62948a7d750951ed94d232957048..a0875001b13c84ad70a9b2909654e9ffb6824c58 100644 (file)
@@ -10,8 +10,8 @@
  *
  */
 
-#ifndef CAN_CORE_H
-#define CAN_CORE_H
+#ifndef _CAN_CORE_H
+#define _CAN_CORE_H
 
 #include <linux/can.h>
 #include <linux/skbuff.h>
@@ -58,4 +58,4 @@ extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
 extern int can_send(struct sk_buff *skb, int loop);
 extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 
-#endif /* CAN_CORE_H */
+#endif /* !_CAN_CORE_H */
index 3ce5e526525f852f37ea242700363036c85068ca..6992afc6ba7f96fda9dba202f771fe7e28f807fe 100644 (file)
@@ -10,8 +10,8 @@
  *
  */
 
-#ifndef CAN_DEV_H
-#define CAN_DEV_H
+#ifndef _CAN_DEV_H
+#define _CAN_DEV_H
 
 #include <linux/can.h>
 #include <linux/can/netlink.h>
@@ -132,4 +132,4 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
 struct sk_buff *alloc_can_err_skb(struct net_device *dev,
                                  struct can_frame **cf);
 
-#endif /* CAN_DEV_H */
+#endif /* !_CAN_DEV_H */
index 9c1167baf273e7e62cb29dbfb89266e11f1c29d6..e0475c5cbb92aac6fe1163bc2ac65a4664048152 100644 (file)
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef CAN_LED_H
-#define CAN_LED_H
+#ifndef _CAN_LED_H
+#define _CAN_LED_H
 
 #include <linux/if.h>
 #include <linux/leds.h>
@@ -48,4 +48,4 @@ static inline void can_led_notifier_exit(void)
 
 #endif
 
-#endif
+#endif /* !_CAN_LED_H */
index 7702641f87ee032b76d2cfa68fdd5f75aec4ec2b..78b2d44f04cffc83f7ce7feb4cd08c94cea84ce8 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _CAN_PLATFORM_CC770_H_
-#define _CAN_PLATFORM_CC770_H_
+#ifndef _CAN_PLATFORM_CC770_H
+#define _CAN_PLATFORM_CC770_H
 
 /* CPU Interface Register (0x02) */
 #define CPUIF_CEN      0x01    /* Clock Out Enable */
@@ -30,4 +30,4 @@ struct cc770_platform_data {
        u8 bcr;         /* Bus Configuration Register */
 };
 
-#endif /* !_CAN_PLATFORM_CC770_H_ */
+#endif /* !_CAN_PLATFORM_CC770_H */
index dc029dba7a030d384d9389038b6f7d600eee3dbf..d44fcae274ff2a0877c06091bfaafe68e0cc8d6d 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __CAN_PLATFORM_MCP251X_H__
-#define __CAN_PLATFORM_MCP251X_H__
+#ifndef _CAN_PLATFORM_MCP251X_H
+#define _CAN_PLATFORM_MCP251X_H
 
 /*
  *
@@ -18,4 +18,4 @@ struct mcp251x_platform_data {
        unsigned long oscillator_frequency;
 };
 
-#endif /* __CAN_PLATFORM_MCP251X_H__ */
+#endif /* !_CAN_PLATFORM_MCP251X_H */
diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h
new file mode 100644 (file)
index 0000000..0f4a2f3
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef _CAN_PLATFORM_RCAR_CAN_H_
+#define _CAN_PLATFORM_RCAR_CAN_H_
+
+#include <linux/types.h>
+
+/* Clock Select Register settings */
+enum CLKR {
+       CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */
+       CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */
+       CLKR_CLKEXT = 3 /* Externally input clock */
+};
+
+struct rcar_can_platform_data {
+       enum CLKR clock_select; /* Clock source select */
+};
+
+#endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */
index 96f8fcc78d787a3b826967b77328ea6f4e1dc8ef..93570b61ec6c58bfa433e6b4f710fb9a4e466121 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _CAN_PLATFORM_SJA1000_H_
-#define _CAN_PLATFORM_SJA1000_H_
+#ifndef _CAN_PLATFORM_SJA1000_H
+#define _CAN_PLATFORM_SJA1000_H
 
 /* clock divider register */
 #define CDR_CLKOUT_MASK 0x07
@@ -32,4 +32,4 @@ struct sja1000_platform_data {
        u8 cdr;         /* clock divider register */
 };
 
-#endif /* !_CAN_PLATFORM_SJA1000_H_ */
+#endif /* !_CAN_PLATFORM_SJA1000_H */
index af17cb3f7a8402bdd3816911abb9f98ebf60632f..a52f47ca6c8ad9c5159c34b4f568b5d84eccbd18 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __CAN_PLATFORM_TI_HECC_H__
-#define __CAN_PLATFORM_TI_HECC_H__
+#ifndef _CAN_PLATFORM_TI_HECC_H
+#define _CAN_PLATFORM_TI_HECC_H
 
 /*
  * TI HECC (High End CAN Controller) driver platform header
@@ -41,4 +41,4 @@ struct ti_hecc_platform_data {
        u32 version;
        void (*transceiver_switch) (int);
 };
-#endif
+#endif /* !_CAN_PLATFORM_TI_HECC_H */
index f9bbbb472663af08aef78ac152e8293f36736ea4..cc00d15c6107be8893b024e3eabb2ef1ba243016 100644 (file)
@@ -7,8 +7,8 @@
  *
  */
 
-#ifndef CAN_SKB_H
-#define CAN_SKB_H
+#ifndef _CAN_SKB_H
+#define _CAN_SKB_H
 
 #include <linux/types.h>
 #include <linux/skbuff.h>
@@ -80,4 +80,4 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
        return skb;
 }
 
-#endif /* CAN_SKB_H */
+#endif /* !_CAN_SKB_H */
index c2515851c1aa2038c5298c8ab3288d469cd3bb5b..d60904b9e50532410af2956c3546fa5072cae08e 100644 (file)
@@ -473,6 +473,7 @@ struct cftype {
 };
 
 extern struct cgroup_root cgrp_dfl_root;
+extern struct css_set init_css_set;
 
 static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
 {
@@ -700,6 +701,20 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
        return task_css_check(task, subsys_id, false);
 }
 
+/**
+ * task_css_is_root - test whether a task belongs to the root css
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Test whether @task belongs to the root css on the specified subsystem.
+ * May be invoked in any context.
+ */
+static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
+{
+       return task_css_check(task, subsys_id, true) ==
+               init_css_set.subsys[subsys_id];
+}
+
 static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
 {
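An editor's sketch of how the new task_css_is_root() helper above might be used; example_cgrp_id is only a placeholder for a generated <name>_cgrp_id subsystem id:

        /* Sketch: treat tasks attached to a subsystem's root css as unrestricted. */
        static bool example_task_is_unrestricted(struct task_struct *task)
        {
                return task_css_is_root(task, example_cgrp_id);
        }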
index 1786e772d5c6e9a2634eb13d916d8d8bd6c25fcb..d590765106f3d226449ccf5228181baef53c4c17 100644 (file)
@@ -2,13 +2,13 @@
 #define _LINUX_CRC7_H
 #include <linux/types.h>
 
-extern const u8 crc7_syndrome_table[256];
+extern const u8 crc7_be_syndrome_table[256];
 
-static inline u8 crc7_byte(u8 crc, u8 data)
+static inline u8 crc7_be_byte(u8 crc, u8 data)
 {
-       return crc7_syndrome_table[(crc << 1) ^ data];
+       return crc7_be_syndrome_table[crc ^ data];
 }
 
-extern u8 crc7(u8 crc, const u8 *buffer, size_t len);
+extern u8 crc7_be(u8 crc, const u8 *buffer, size_t len);
 
 #endif
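A hedged sketch of the renamed helper in use, assuming SD/MMC command framing in which the 7-bit CRC occupies the upper bits of the final byte and the low bit is the end bit:

        u8 cmd[5] = { 0x40, 0x00, 0x00, 0x00, 0x00 };          /* CMD0, zero argument */
        u8 crc_byte = crc7_be(0, cmd, sizeof(cmd)) | 0x01;     /* CRC7 | end bit */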
index 3b9bfdb83ba6f698cbe1ddf8409f8b6e0dc17427..3c7ec327ebd2eecc2fb63ac4f742ca0d75c56ec8 100644 (file)
@@ -221,6 +221,8 @@ struct dentry_operations {
 #define DCACHE_SYMLINK_TYPE            0x00300000 /* Symlink */
 #define DCACHE_FILE_TYPE               0x00400000 /* Other file type */
 
+#define DCACHE_MAY_FREE                        0x00800000
+
 extern seqlock_t rename_lock;
 
 static inline int dname_external(const struct dentry *dentry)
index 759abf78dd61945422e20792f50cab73dec7c622..625f4de9bdf2d841b4f71579672469e0ebfdb5a8 100644 (file)
 #define BPF_CALL       0x80    /* function call */
 #define BPF_EXIT       0x90    /* function return */
 
+/* Register numbers */
+enum {
+       BPF_REG_0 = 0,
+       BPF_REG_1,
+       BPF_REG_2,
+       BPF_REG_3,
+       BPF_REG_4,
+       BPF_REG_5,
+       BPF_REG_6,
+       BPF_REG_7,
+       BPF_REG_8,
+       BPF_REG_9,
+       BPF_REG_10,
+       __MAX_BPF_REG,
+};
+
 /* BPF has 10 general purpose 64-bit registers and stack frame. */
-#define MAX_BPF_REG    11
+#define MAX_BPF_REG    __MAX_BPF_REG
+
+/* ArgX, context and stack frame pointer register positions. Note,
+ * Arg1, Arg2, Arg3, etc are used as argument mappings of function
+ * calls in BPF_CALL instruction.
+ */
+#define BPF_REG_ARG1   BPF_REG_1
+#define BPF_REG_ARG2   BPF_REG_2
+#define BPF_REG_ARG3   BPF_REG_3
+#define BPF_REG_ARG4   BPF_REG_4
+#define BPF_REG_ARG5   BPF_REG_5
+#define BPF_REG_CTX    BPF_REG_6
+#define BPF_REG_FP     BPF_REG_10
+
+/* Additional register mappings for converted user programs. */
+#define BPF_REG_A      BPF_REG_0
+#define BPF_REG_X      BPF_REG_7
+#define BPF_REG_TMP    BPF_REG_8
 
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK  512
 
-/* Arg1, context and stack frame pointer register positions. */
-#define ARG1_REG       1
-#define CTX_REG                6
-#define FP_REG         10
+/* bpf_add|sub|...: a += x, bpf_mov: a = x */
+#define BPF_ALU64_REG(op, a, x) \
+       ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0})
+#define BPF_ALU32_REG(op, a, x) \
+       ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0})
+
+/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */
+#define BPF_ALU64_IMM(op, a, imm) \
+       ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+#define BPF_ALU32_IMM(op, a, imm) \
+       ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+
+/* R0 = *(uint *) (skb->data + off) */
+#define BPF_LD_ABS(size, off) \
+       ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off})
+
+/* R0 = *(uint *) (skb->data + x + off) */
+#define BPF_LD_IND(size, x, off) \
+       ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off})
+
+/* a = *(uint *) (x + off) */
+#define BPF_LDX_MEM(sz, a, x, off) \
+       ((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0})
+
+/* if (a 'op' x) goto pc+off */
+#define BPF_JMP_REG(op, a, x, off) \
+       ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0})
+
+/* if (a 'op' imm) goto pc+off */
+#define BPF_JMP_IMM(op, a, imm, off) \
+       ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm})
+
+#define BPF_EXIT_INSN() \
+       ((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0})
+
+static inline int size_to_bpf(int size)
+{
+       switch (size) {
+       case 1:
+               return BPF_B;
+       case 2:
+               return BPF_H;
+       case 4:
+               return BPF_W;
+       case 8:
+               return BPF_DW;
+       default:
+               return -EINVAL;
+       }
+}
+
+/* Macro to invoke filter function. */
+#define SK_RUN_FILTER(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
 
 struct sock_filter_int {
        __u8    code;           /* opcode */
@@ -97,21 +179,16 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
 #define sk_filter_proglen(fprog)                       \
                (fprog->len * sizeof(fprog->filter[0]))
 
-#define SK_RUN_FILTER(filter, ctx)                     \
-               (*filter->bpf_func)(ctx, filter->insnsi)
-
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
-                             const struct sock_filter_int *insni);
-u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
-                         const struct sock_filter_int *insni);
+void sk_filter_select_runtime(struct sk_filter *fp);
+void sk_filter_free(struct sk_filter *fp);
 
 int sk_convert_filter(struct sock_filter *prog, int len,
                      struct sock_filter_int *new_prog, int *new_len);
 
 int sk_unattached_filter_create(struct sk_filter **pfp,
-                               struct sock_fprog *fprog);
+                               struct sock_fprog_kern *fprog);
 void sk_unattached_filter_destroy(struct sk_filter *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
@@ -125,6 +202,9 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
 void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
+u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+void bpf_int_jit_compile(struct sk_filter *fp);
+
 #ifdef CONFIG_BPF_JIT
 #include <stdarg.h>
 #include <linux/linkage.h>
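As a sketch of the construction macros added above (assuming the BPF_MOV ALU opcode of the internal instruction set), the smallest useful program moves an immediate into R0, the return-value register, and exits:

        /* Sketch: internal BPF program that unconditionally returns 42. */
        static const struct sock_filter_int example_prog[] = {
                BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42),
                BPF_EXIT_INSN(),
        };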
index 7a9c5bca2b7694f5496dbcf793eea2920fd37af9..878031227c57a0b41be7cde3070ee90f1921b570 100644 (file)
@@ -815,7 +815,7 @@ static inline struct file *get_file(struct file *f)
 #define FL_SLEEP       128     /* A blocking lock */
 #define FL_DOWNGRADE_PENDING   256 /* Lease is being downgraded */
 #define FL_UNLOCK_PENDING      512 /* Lease is being broken */
-#define FL_FILE_PVT    1024    /* lock is private to the file */
+#define FL_OFDLCK      1024    /* lock is "owned" by struct file */
 
 /*
  * Special return value from posix_lock_file() and vfs_lock_file() for
index 9212b017bc7236cfc63afe5c268cc741995a1af4..ae9504b4b67d3026cd9c1c1fcd30a8cfc928c984 100644 (file)
@@ -535,6 +535,7 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
 extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
 extern int skip_trace(unsigned long ip);
+extern void ftrace_module_init(struct module *mod);
 
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
@@ -544,6 +545,7 @@ static inline int ftrace_force_update(void) { return 0; }
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) {}
 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
        return -EINVAL;
index 5b337cf8fb8655755fd00360df587745e8cc34f6..b65166de1d9d1f754f787aa1f58444e9d3da4ff0 100644 (file)
@@ -412,6 +412,16 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
        return &mm->page_table_lock;
 }
 
+static inline bool hugepages_supported(void)
+{
+       /*
+        * Some platforms decide whether they support huge pages at boot
+        * time. On those, such as powerpc, HPAGE_SHIFT is set to 0 when
+        * there is no such support.
+        */
+       return HPAGE_SHIFT != 0;
+}
+
 #else  /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
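A minimal sketch of the new hugepages_supported() check; the function name and error code here are illustrative only:

        static int example_hugetlb_setup(void)
        {
                /* Bail out early where HPAGE_SHIFT is 0 (no huge page support). */
                if (!hugepages_supported())
                        return -EOPNOTSUPP;
                return 0;
        }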
index a86784dec3d34fcafb7e5d1b377a9d73b2d7717e..119130e9298b21ef0aa2a75e90a7c756318492a7 100644 (file)
@@ -10,8 +10,9 @@ struct ifla_vf_info {
        __u8 mac[32];
        __u32 vlan;
        __u32 qos;
-       __u32 tx_rate;
        __u32 spoofchk;
        __u32 linkstate;
+       __u32 min_tx_rate;
+       __u32 max_tx_rate;
 };
 #endif /* _LINUX_IF_LINK_H */
index 7c8b20b120eac680f27e3cd1e99630b1fe521799..a9a53b12397b0c4fd29a11cbf5f9cbbd18944fd8 100644 (file)
@@ -56,6 +56,7 @@ struct macvlan_dev {
        int                     numqueues;
        netdev_features_t       tap_features;
        int                     minor;
+       int                     nest_level;
 };
 
 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
index 13bbbde00e68de454c8cf30f0581796d55eb0a40..4967916fe4ac8c6732930d768a2e60fe1bec35c7 100644 (file)
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-static inline int is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
 }
@@ -106,7 +106,7 @@ struct vlan_pcpu_stats {
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
-extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
                                               __be16 vlan_proto, u16 vlan_id);
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
@@ -159,6 +159,7 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                          *netpoll;
 #endif
+       unsigned int                            nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -197,9 +198,15 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
                                 const struct net_device *by_dev);
 
 extern bool vlan_uses_dev(const struct net_device *dev);
+
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG_ON(!is_vlan_dev(dev));
+       return vlan_dev_priv(dev)->nest_level;
+}
 #else
 static inline struct net_device *
-__vlan_find_dev_deep(struct net_device *real_dev,
+__vlan_find_dev_deep_rcu(struct net_device *real_dev,
                     __be16 vlan_proto, u16 vlan_id)
 {
        return NULL;
@@ -263,6 +270,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 {
        return false;
 }
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+       BUG();
+       return 0;
+}
 #endif
 
 static inline bool vlan_hw_offload_capable(netdev_features_t features,
@@ -483,4 +495,5 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
                 */
                skb->protocol = htons(ETH_P_802_2);
 }
+
 #endif /* !(_LINUX_IF_VLAN_H_) */
index c7bfac1c4a7b8f6c82742b4d9f97c058131ae4fc..051c85032f481ae8fa1fb5ca6542df83588a9153 100644 (file)
@@ -203,7 +203,40 @@ static inline int check_wakeup_irqs(void) { return 0; }
 
 extern cpumask_var_t irq_default_affinity;
 
-extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+/* Internal implementation. Use the helpers below */
+extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+                             bool force);
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq:       Interrupt to set affinity
+ * @cpumask:   cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+static inline int
+irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return __irq_set_affinity(irq, cpumask, false);
+}
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq:       Interrupt to set affinity
+ * @cpumask:   cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low-level CPU hotplug code, where we need to make per-CPU
+ * interrupts affine before the CPU becomes online.
+ */
+static inline int
+irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return __irq_set_affinity(irq, cpumask, true);
+}
+
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
@@ -239,6 +272,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
        return -EINVAL;
 }
 
+static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return 0;
+}
+
 static inline int irq_can_set_affinity(unsigned int irq)
 {
        return 0;
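A hedged sketch contrasting the two helpers above; example_pin_irq() is a hypothetical caller, not code from this series:

        static int example_pin_irq(unsigned int irq, unsigned int cpu)
        {
                /* Normal path: the target CPU must already be online. */
                if (cpu_online(cpu))
                        return irq_set_affinity(irq, cpumask_of(cpu));

                /* Hotplug bring-up path: bypass the online-mask check. */
                return irq_force_affinity(irq, cpumask_of(cpu));
        }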
index d278838908cbc3f1cdac08ae3f3714934f01f188..5c57efb863d08e5937a36e06778efa8047060156 100644 (file)
@@ -394,7 +394,8 @@ extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
-extern int __irq_set_affinity_locked(struct irq_data *data,  const struct cpumask *cpumask);
+extern int irq_set_affinity_locked(struct irq_data *data,
+                                  const struct cpumask *cpumask, bool force);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void irq_move_irq(struct irq_data *data);
@@ -602,6 +603,8 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
        return d ? irqd_get_trigger_type(d) : 0;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from);
+
 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                struct module *owner);
 
index b0122dc6f96a0a21324f86b5a28c725ac3ea74cf..ca1be5c9136c4557de511143b4c9b36e1eec3734 100644 (file)
@@ -50,7 +50,24 @@ enum kernfs_node_flag {
 
 /* @flags for kernfs_create_root() */
 enum kernfs_root_flag {
-       KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
+       /*
+        * kernfs_nodes are created in the deactivated state and invisible.
+        * They require explicit kernfs_activate() to become visible.  This
+        * can be used to make related nodes become visible atomically
+        * after all nodes are created successfully.
+        */
+       KERNFS_ROOT_CREATE_DEACTIVATED          = 0x0001,
+
+       /*
+        * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
+        * succeeds regardless of the RW permissions.  sysfs had an extra
+        * layer of enforcement where open(2) fails with -EACCES regardless
+        * of CAP_DAC_OVERRIDE if the permission doesn't have the
+        * respective read or write access at all (none of S_IRUGO or
+        * S_IWUGO) or the respective operation isn't implemented.  The
+        * following flag enables that behavior.
+        */
+       KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK       = 0x0002,
 };
 
 /* type-specific structures for kernfs_node union members */
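A sketch of requesting the stricter open(2) behavior described above, assuming the existing kernfs_create_root(scops, flags, priv) entry point; example_scops and example_priv are placeholders:

        struct kernfs_root *root;

        root = kernfs_create_root(example_scops,
                                  KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
                                  example_priv);
        if (IS_ERR(root))
                return PTR_ERR(root);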
index 1de36be64df4d1516a2a451901733d36fd32ec4e..5ab4e3a76721760e4d5a70f88de282a687858990 100644 (file)
@@ -822,6 +822,7 @@ struct ata_port {
        unsigned long           qc_allocated;
        unsigned int            qc_active;
        int                     nr_active_links; /* #links with active qcs */
+       unsigned int            last_tag;       /* track next tag hw expects */
 
        struct ata_link         link;           /* host default link */
        struct ata_link         *slave_link;    /* see ata_slave_link_init() */
index 34a513a2727bbe83adff047613a1ad3458684ac2..a6a42dd024661324dbeed5b9cfaa028744bae154 100644 (file)
@@ -12,9 +12,9 @@
 #endif
 
 #ifdef __cplusplus
-#define CPP_ASMLINKAGE extern "C" __visible
+#define CPP_ASMLINKAGE extern "C"
 #else
-#define CPP_ASMLINKAGE __visible
+#define CPP_ASMLINKAGE
 #endif
 
 #ifndef asmlinkage
index 7c36cc55d2c79b8fe90cdec3898751ac4988165f..443176ee1ab04e1f9d2788b51d700eb2a913610c 100644 (file)
@@ -45,7 +45,6 @@ struct platform_device;
 struct rtsx_slot {
        struct platform_device  *p_dev;
        void                    (*card_event)(struct platform_device *p_dev);
-       void                    (*done_transfer)(struct platform_device *p_dev);
 };
 
 #endif
index 8d6bbd609ad9b6142357b0203bf7e0250e923ae4..a3835976f7c639e8f24e17cf2df6dc495f07fae4 100644 (file)
@@ -943,12 +943,6 @@ void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr);
 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout);
 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
                int num_sg, bool read, int timeout);
-int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read);
-int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int num_sg, bool read);
-int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-               int sg_count, bool read);
 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card);
index ba87bd21295a533c8d6941bc4c11d29bceba0112..ca38871a585cf2f0f0d60b7dc6aa44b73257e388 100644 (file)
@@ -449,7 +449,6 @@ struct mlx4_caps {
        int                     reserved_qps_base[MLX4_NUM_QP_REGION];
        int                     log_num_macs;
        int                     log_num_vlans;
-       int                     log_num_prios;
        enum mlx4_port_type     port_type[MLX4_MAX_PORTS + 1];
        u8                      supported_type[MLX4_MAX_PORTS + 1];
        u8                      suggested_type[MLX4_MAX_PORTS + 1];
@@ -577,6 +576,9 @@ struct mlx4_cq {
 
        u32                     cons_index;
 
+       u16                     irq;
+       bool                    irq_affinity_change;
+
        __be32                 *set_ci_db;
        __be32                 *arm_db;
        int                     arm_sn;
index b66e7610d4eec9f4d67e5f8bbd745bb6cbd3c99a..7040dc98ff8baa59118cd42dd728d3152afb7c86 100644 (file)
@@ -421,6 +421,17 @@ struct mlx4_wqe_inline_seg {
        __be32                  byte_count;
 };
 
+enum mlx4_update_qp_attr {
+       MLX4_UPDATE_QP_SMAC             = 1 << 0,
+};
+
+struct mlx4_update_qp_params {
+       u8      smac_index;
+};
+
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params);
 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
                   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
index bf9811e1321a5ae80eceeba7664f7d677da9648e..d6777060449fc633df907744a0237152ef9f8c4b 100644 (file)
@@ -370,6 +370,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 }
 #endif
 
+extern void kvfree(const void *addr);
+
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 94734a6259a4d9ee36e25b342c86d3c1bf9d5fcf..17d83393afcc4337d50f00cfa837030d9429c6f8 100644 (file)
@@ -248,24 +248,17 @@ do {                                                              \
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
                           struct static_key *done_key);
 
-#ifdef HAVE_JUMP_LABEL
-#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
-               { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
-#else /* !HAVE_JUMP_LABEL */
-#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
-#endif /* HAVE_JUMP_LABEL */
-
 #define net_get_random_once(buf, nbytes)                               \
        ({                                                              \
                bool ___ret = false;                                    \
                static bool ___done = false;                            \
-               static struct static_key ___done_key =                  \
-                       ___NET_RANDOM_STATIC_KEY_INIT;                  \
-               if (!static_key_true(&___done_key))                     \
+               static struct static_key ___once_key =                  \
+                       STATIC_KEY_INIT_TRUE;                           \
+               if (static_key_true(&___once_key))                      \
                        ___ret = __net_get_random_once(buf,             \
                                                       nbytes,          \
                                                       &___done,        \
-                                                      &___done_key);   \
+                                                      &___once_key);   \
                ___ret;                                                 \
        })
 
index a803d792df1e6c9c6d3a83ed2ae81a0b9265cae0..2db1610bf109dc5edea26d006e65aaa6521c854b 100644 (file)
@@ -56,9 +56,6 @@ struct device;
 struct phy_device;
 /* 802.11 specific */
 struct wireless_dev;
-                                       /* source back-compat hooks */
-#define SET_ETHTOOL_OPS(netdev,ops) \
-       ( (netdev)->ethtool_ops = (ops) )
 
 void netdev_set_default_ethtool_ops(struct net_device *dev,
                                    const struct ethtool_ops *ops);
@@ -853,7 +850,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     SR-IOV management functions.
  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
- * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
+ * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
+ *                       int max_tx_rate);
  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
  * int (*ndo_get_vf_config)(struct net_device *dev,
  *                         int vf, struct ifla_vf_info *ivf);
@@ -1047,8 +1045,9 @@ struct net_device_ops {
                                                  int queue, u8 *mac);
        int                     (*ndo_set_vf_vlan)(struct net_device *dev,
                                                   int queue, u16 vlan, u8 qos);
-       int                     (*ndo_set_vf_tx_rate)(struct net_device *dev,
-                                                     int vf, int rate);
+       int                     (*ndo_set_vf_rate)(struct net_device *dev,
+                                                  int vf, int min_tx_rate,
+                                                  int max_tx_rate);
        int                     (*ndo_set_vf_spoofchk)(struct net_device *dev,
                                                       int vf, bool setting);
        int                     (*ndo_get_vf_config)(struct net_device *dev,
@@ -1144,6 +1143,7 @@ struct net_device_ops {
        netdev_tx_t             (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
                                                        struct net_device *dev,
                                                        void *priv);
+       int                     (*ndo_get_lock_subclass)(struct net_device *dev);
 };
 
 /**
@@ -2951,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev)
 
 static inline void netif_addr_lock_nested(struct net_device *dev)
 {
-       spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+       int subclass = SINGLE_DEPTH_NESTING;
+
+       if (dev->netdev_ops->ndo_get_lock_subclass)
+               subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+
+       spin_lock_nested(&dev->addr_list_lock, subclass);
 }
 
 static inline void netif_addr_lock_bh(struct net_device *dev)
@@ -3051,9 +3056,18 @@ extern int               weight_p;
 extern int             bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter);
 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                     struct list_head **iter);
 
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
+       for (iter = &(dev)->adj_list.upper, \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+            updev; \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
 /* iterate through upper list, must be called under RCU read lock */
 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->all_adj_list.upper, \
@@ -3078,6 +3092,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
             priv; \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 
+void *netdev_lower_get_next(struct net_device *dev,
+                               struct list_head **iter);
+#define netdev_for_each_lower_dev(dev, ldev, iter) \
+       for (iter = &(dev)->adj_list.lower, \
+            ldev = netdev_lower_get_next(dev, &(iter)); \
+            ldev; \
+            ldev = netdev_lower_get_next(dev, &(iter)))
+
 void *netdev_adjacent_get_private(struct list_head *adj_list);
 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3093,6 +3115,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev));
 int skb_checksum_help(struct sk_buff *skb);
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path);
@@ -3156,6 +3180,20 @@ const char *netdev_drivername(const struct net_device *dev);
 
 void linkwatch_run_queue(void);
 
+static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
+                                                         netdev_features_t f2)
+{
+       if (f1 & NETIF_F_GEN_CSUM)
+               f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+       if (f2 & NETIF_F_GEN_CSUM)
+               f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+       f1 &= f2;
+       if (f1 & NETIF_F_GEN_CSUM)
+               f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+
+       return f1;
+}
+
 static inline netdev_features_t netdev_get_wanted_features(
        struct net_device *dev)
 {
@@ -3181,12 +3219,7 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev);
-static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
-{
-       return netif_skb_dev_features(skb, skb->dev);
-}
+netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
index 5146ce06649889ab9b122d244aff7b8b8b649b8e..7a28115dd3965a7f64487378f5661bda8592b33c 100644 (file)
@@ -170,4 +170,11 @@ struct netlink_tap {
 extern int netlink_add_tap(struct netlink_tap *nt);
 extern int netlink_remove_tap(struct netlink_tap *nt);
 
+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
+                         struct user_namespace *ns, int cap);
+bool netlink_ns_capable(const struct sk_buff *skb,
+                       struct user_namespace *ns, int cap);
+bool netlink_capable(const struct sk_buff *skb, int cap);
+bool netlink_net_capable(const struct sk_buff *skb, int cap);
+
 #endif /* __LINUX_NETLINK_H */
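A hedged sketch of the intended call pattern for the new capability helpers: a netlink request handler checks the capabilities of the socket opener rather than of the current task. The handler name is made up:

/* hypothetical doit handler: require CAP_NET_ADMIN in the sender's net namespace */
static int example_nl_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        if (!netlink_net_capable(skb, CAP_NET_ADMIN))
                return -EPERM;

        /* ... process the request ... */
        return 0;
}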
index c8d7f3965fff913a55f158a53b7bc0944c1d4b61..20163b9a0eae70cfdfba688bab5dc08eed5fcfdb 100644 (file)
@@ -80,6 +80,22 @@ enum {
 
        IEEE802154_ATTR_FRAME_RETRIES,
 
+       IEEE802154_ATTR_LLSEC_ENABLED,
+       IEEE802154_ATTR_LLSEC_SECLEVEL,
+       IEEE802154_ATTR_LLSEC_KEY_MODE,
+       IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
+       IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
+       IEEE802154_ATTR_LLSEC_KEY_ID,
+       IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+       IEEE802154_ATTR_LLSEC_KEY_BYTES,
+       IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
+       IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
+       IEEE802154_ATTR_LLSEC_FRAME_TYPE,
+       IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
+       IEEE802154_ATTR_LLSEC_SECLEVELS,
+       IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
+       IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
+
        __IEEE802154_ATTR_MAX,
 };
 
@@ -134,6 +150,21 @@ enum {
 
        IEEE802154_SET_MACPARAMS,
 
+       IEEE802154_LLSEC_GETPARAMS,
+       IEEE802154_LLSEC_SETPARAMS,
+       IEEE802154_LLSEC_LIST_KEY,
+       IEEE802154_LLSEC_ADD_KEY,
+       IEEE802154_LLSEC_DEL_KEY,
+       IEEE802154_LLSEC_LIST_DEV,
+       IEEE802154_LLSEC_ADD_DEV,
+       IEEE802154_LLSEC_DEL_DEV,
+       IEEE802154_LLSEC_LIST_DEVKEY,
+       IEEE802154_LLSEC_ADD_DEVKEY,
+       IEEE802154_LLSEC_DEL_DEVKEY,
+       IEEE802154_LLSEC_LIST_SECLEVEL,
+       IEEE802154_LLSEC_ADD_SECLEVEL,
+       IEEE802154_LLSEC_DEL_SECLEVEL,
+
        __IEEE802154_CMD_MAX,
 };
 
index 3bad8d106e0ea01b2ae7524e216c83b4e654ced3..e6f0988c1c68a18dbc96c804d5d1c01c0aa6d9fb 100644 (file)
@@ -349,7 +349,7 @@ int of_device_is_stdout_path(struct device_node *dn);
 
 #else /* CONFIG_OF */
 
-static inline const char* of_node_full_name(struct device_node *np)
+static inline const char* of_node_full_name(const struct device_node *np)
 {
        return "<no-node>";
 }
index 3f23b4472c3150990237a566ba2d46acb1d1aa82..6404253d810d7482a64fa9e959e99c8bad05c912 100644 (file)
@@ -44,11 +44,16 @@ extern void of_irq_init(const struct of_device_id *matches);
 
 #ifdef CONFIG_OF_IRQ
 extern int of_irq_count(struct device_node *dev);
+extern int of_irq_get(struct device_node *dev, int index);
 #else
 static inline int of_irq_count(struct device_node *dev)
 {
        return 0;
 }
+static inline int of_irq_get(struct device_node *dev, int index)
+{
+       return 0;
+}
 #endif
 
 #if defined(CONFIG_OF)
index 6fe8464ed767f0dac6481a6ffc7b39ecaebdd648..a70c9493d55a4c01976b093b965cc91b270aaf92 100644 (file)
@@ -22,16 +22,21 @@ extern struct phy_device *of_phy_connect(struct net_device *dev,
 struct phy_device *of_phy_attach(struct net_device *dev,
                                 struct device_node *phy_np, u32 flags,
                                 phy_interface_t iface);
-extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
-                                        void (*hndlr)(struct net_device *),
-                                        phy_interface_t iface);
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
+extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
+                                  struct phy_device *phydev);
+
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
-       return -ENOSYS;
+       /*
+        * Fall back to the non-DT function to register a bus.
+        * This way, we don't have to keep compat bits around in drivers.
+        */
+
+       return mdiobus_register(mdio);
 }
 
 static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
@@ -54,17 +59,30 @@ static inline struct phy_device *of_phy_attach(struct net_device *dev,
        return NULL;
 }
 
-static inline struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
-                                                          void (*hndlr)(struct net_device *),
-                                                          phy_interface_t iface)
+static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 {
        return NULL;
 }
 
-static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
+static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
+                                         struct phy_device *phydev)
 {
-       return NULL;
 }
 #endif /* CONFIG_OF */
 
+#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
+extern int of_phy_register_fixed_link(struct device_node *np);
+extern bool of_phy_is_fixed_link(struct device_node *np);
+#else
+static inline int of_phy_register_fixed_link(struct device_node *np)
+{
+       return -ENOSYS;
+}
+static inline bool of_phy_is_fixed_link(struct device_node *np)
+{
+       return false;
+}
+#endif
+
+
 #endif /* __LINUX_OF_MDIO_H */
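A minimal sketch (names hypothetical) of how an Ethernet driver might use the new fixed-link helpers in place of the removed of_phy_connect_fixed_link(); drivers of this era then typically pass the MAC node itself to of_phy_connect():

/* hypothetical probe fragment: register a fixed PHY when the DT describes one */
static int example_setup_phy(struct device_node *np)
{
        if (of_phy_is_fixed_link(np)) {
                int err = of_phy_register_fixed_link(np);

                if (err)
                        return err;
                /* the MAC node itself now carries the fixed PHY */
        }
        return 0;
}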
index 3356abcfff184e707eccb08d6f99042a8fd51acc..3ef6ea12806a297107ff65b2ad554f5176c8e301 100644 (file)
@@ -402,6 +402,8 @@ struct perf_event {
 
        struct ring_buffer              *rb;
        struct list_head                rb_entry;
+       unsigned long                   rcu_batches;
+       int                             rcu_pending;
 
        /* poll related */
        wait_queue_head_t               waitq;
index 51d15f684e7e0b0259b2183c83097ec77baf541e..864ddafad8cc2f0697b181d9c113ca02ef715613 100644 (file)
@@ -198,6 +198,13 @@ static inline struct mii_bus *mdiobus_alloc(void)
 int mdiobus_register(struct mii_bus *bus);
 void mdiobus_unregister(struct mii_bus *bus);
 void mdiobus_free(struct mii_bus *bus);
+struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
+static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
+{
+       return devm_mdiobus_alloc_size(dev, 0);
+}
+
+void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
 struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
 int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
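A short, hedged example of the devres-managed allocation (driver and bus names invented); only the allocation is managed, so mdiobus_unregister() in the remove path remains the driver's responsibility:

/* hypothetical MDIO probe using the new devm_ helpers */
static int example_mdio_probe(struct platform_device *pdev)
{
        struct mii_bus *bus = devm_mdiobus_alloc(&pdev->dev);

        if (!bus)
                return -ENOMEM;

        bus->name = "example-mdio";
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
        /* ... fill in bus->read / bus->write ... */

        return mdiobus_register(bus);   /* bus memory is freed automatically on detach */
}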
index e2f5ca96cddc521fcc62a868039c8361f4e48997..2760744cb2a75ff59e3d172a6dd864ef42b32106 100644 (file)
@@ -174,21 +174,29 @@ void devm_of_phy_provider_unregister(struct device *dev,
 #else
 static inline int phy_pm_runtime_get(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_pm_runtime_get_sync(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_pm_runtime_put(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_pm_runtime_put_sync(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
@@ -204,21 +212,29 @@ static inline void phy_pm_runtime_forbid(struct phy *phy)
 
 static inline int phy_init(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_exit(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_power_on(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_power_off(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
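The effect of the NULL checks above, sketched from a consumer's point of view: a driver with an optional generic PHY can call the API unconditionally, and a missing (NULL) phy is treated as success rather than -ENOSYS. The wrapper below is invented:

/* sketch: "phy" may be NULL when the (optional) generic PHY is absent */
static int example_enable_phy(struct phy *phy)
{
        int ret = phy_init(phy);

        if (ret)
                return ret;
        return phy_power_on(phy);
}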
 
index 509d8f5f984e3985cffe178b8a335498f30bcf50..4f2478b4713651a7dd19e17d19d589b658f52470 100644 (file)
@@ -9,15 +9,26 @@ struct fixed_phy_status {
        int asym_pause;
 };
 
+struct device_node;
+
 #ifdef CONFIG_FIXED_PHY
 extern int fixed_phy_add(unsigned int irq, int phy_id,
                         struct fixed_phy_status *status);
+extern int fixed_phy_register(unsigned int irq,
+                             struct fixed_phy_status *status,
+                             struct device_node *np);
 #else
 static inline int fixed_phy_add(unsigned int irq, int phy_id,
                                struct fixed_phy_status *status)
 {
        return -ENODEV;
 }
+static inline int fixed_phy_register(unsigned int irq,
+                                    struct fixed_phy_status *status,
+                                    struct device_node *np)
+{
+       return -ENODEV;
+}
 #endif /* CONFIG_FIXED_PHY */
 
 /*
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
new file mode 100644 (file)
index 0000000..1730312
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Driver include for the ST21NFCA NFC chip.
+ *
+ * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ST21NFCA_HCI_H_
+#define _ST21NFCA_HCI_H_
+
+#include <linux/i2c.h>
+
+#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
+
+struct st21nfca_nfc_platform_data {
+       unsigned int gpio_irq;
+       unsigned int gpio_ena;
+       unsigned int irq_polarity;
+};
+
+#endif /* _ST21NFCA_HCI_H_ */
index e530681bea7049cfa818e66acea0bf863952d281..1a4a8c157b31a7eb8cf3e9c1bedba8ab64354542 100644 (file)
@@ -258,14 +258,14 @@ regulator_get_exclusive(struct device *dev, const char *id)
 static inline struct regulator *__must_check
 regulator_get_optional(struct device *dev, const char *id)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 
 static inline struct regulator *__must_check
 devm_regulator_get_optional(struct device *dev, const char *id)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 static inline void regulator_put(struct regulator *regulator)
index 4d09f6eab359f8e78b2c5b89b45797dd8fec17de..20bcb55498cd5714028bf7ccf9474b9fa36aa7aa 100644 (file)
  * struct rfkill_gpio_platform_data - platform data for rfkill gpio device.
  * for unused gpio's, the expected value is -1.
  * @name:              name for the gpio rf kill instance
- * @reset_gpio:                GPIO which is used for reseting rfkill switch
- * @shutdown_gpio:     GPIO which is used for shutdown of rfkill switch
- * @power_clk_name:    [optional] name of clk to turn off while blocked
- * @gpio_runtime_close:        clean up platform specific gpio configuration
- * @gpio_runtime_setup:        set up platform specific gpio configuration
  */
 
 struct rfkill_gpio_platform_data {
        char                    *name;
-       int                     reset_gpio;
-       int                     shutdown_gpio;
-       const char              *power_clk_name;
        enum rfkill_type        type;
-       void    (*gpio_runtime_close)(struct platform_device *);
-       int     (*gpio_runtime_setup)(struct platform_device *);
 };
 
 #endif /* __RFKILL_GPIO_H */
index 8e3e66ac0a5215d221042e15e631fb2fe2fb51d1..953937ea5233c770d631bf3ae59be60b72630d88 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
+#include <linux/wait.h>
 #include <uapi/linux/rtnetlink.h>
 
 extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
 extern void rtnl_unlock(void);
 extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
+
+extern wait_queue_head_t netdev_unregistering_wq;
+extern struct mutex net_mutex;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern int lockdep_rtnl_is_held(void);
 #else
index 25f54c79f75772a9f133c585e17a2d8e4a59e8ac..221b2bde372363765b5328638bf9320d9c95f5fd 100644 (file)
@@ -220,7 +220,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 #define TASK_PARKED            512
 #define TASK_STATE_MAX         1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1153,9 +1153,12 @@ struct sched_dl_entity {
         *
         * @dl_boosted tells if we are boosted due to DI. If so we are
         * outside bandwidth enforcement mechanism (but only until we
-        * exit the critical section).
+        * exit the critical section);
+        *
+        * @dl_yielded tells if task gave up the cpu before consuming
+        * all its available runtime during the last job.
         */
-       int dl_throttled, dl_new, dl_boosted;
+       int dl_throttled, dl_new, dl_boosted, dl_yielded;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
index 36aac733840afc1b71deee284cfaf78b973aa673..9f779c7a2da467c7945cff496e7609a3a16bd71a 100644 (file)
@@ -23,6 +23,7 @@ struct serio {
 
        char name[32];
        char phys[32];
+       char firmware_id[128];
 
        bool manual_bind;
 
index 08074a8101646d0c438415979124b3a5ff8283bb..7a9beeb1c458fb1b4beeb09b62a549d417187cdf 100644 (file)
@@ -426,7 +426,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *     @csum_start: Offset from skb->head where checksumming should start
  *     @csum_offset: Offset from csum_start where checksum should be stored
  *     @priority: Packet queueing priority
- *     @local_df: allow local fragmentation
+ *     @ignore_df: allow local fragmentation
  *     @cloned: Head may be cloned (check refcnt to be sure)
  *     @ip_summed: Driver fed us an IP checksum
  *     @nohdr: Payload reference only, must not modify header
@@ -514,7 +514,7 @@ struct sk_buff {
        };
        __u32                   priority;
        kmemcheck_bitfield_begin(flags1);
-       __u8                    local_df:1,
+       __u8                    ignore_df:1,
                                cloned:1,
                                ip_summed:2,
                                nohdr:1,
@@ -2741,6 +2741,99 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
               0 : __skb_checksum_complete(skb);
 }
 
+/* Check if we need to perform checksum complete validation.
+ *
+ * Returns true if checksum complete is needed, false otherwise
+ * (either checksum is unnecessary or zero checksum is allowed).
+ */
+static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
+                                                 bool zero_okay,
+                                                 __sum16 check)
+{
+       if (skb_csum_unnecessary(skb)) {
+               return false;
+       } else if (zero_okay && !check) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               return false;
+       }
+
+       return true;
+}
+
+/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
+ * in checksum_init.
+ */
+#define CHECKSUM_BREAK 76
+
+/* Validate (init) checksum based on checksum complete.
+ *
+ * Return values:
+ *   0: checksum is validated, or the check will be attempted later in
+ *     __skb_checksum_complete. In the latter case ip_summed will not be
+ *     CHECKSUM_UNNECESSARY and the pseudo checksum is stored in skb->csum
+ *     for use by __skb_checksum_complete
+ *   non-zero: value of the invalid checksum
+ *
+ */
+static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
+                                                      bool complete,
+                                                      __wsum psum)
+{
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               if (!csum_fold(csum_add(psum, skb->csum))) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       return 0;
+               }
+       }
+
+       skb->csum = psum;
+
+       if (complete || skb->len <= CHECKSUM_BREAK)
+               return __skb_checksum_complete(skb);
+
+       return 0;
+}
+
+static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
+{
+       return 0;
+}
+
+/* Perform checksum validate (init). Note that this is a macro so that the
+ * pseudo header is only computed, via the compute_pseudo input function, when
+ * it is actually needed. First we try to validate without any computation
+ * (checksum unnecessary), and then fall back to checksum complete, calling the
+ * provided function to compute the pseudo header.
+ *
+ * Return values:
+ *   0: checksum is validated or the check will be attempted later in
+ *     __skb_checksum_complete
+ *   non-zero: value of the invalid checksum
+ */
+#define __skb_checksum_validate(skb, proto, complete,                  \
+                               zero_okay, check, compute_pseudo)       \
+({                                                                     \
+       __sum16 __ret = 0;                                              \
+       if (__skb_checksum_validate_needed(skb, zero_okay, check))      \
+               __ret = __skb_checksum_validate_complete(skb,           \
+                               complete, compute_pseudo(skb, proto));  \
+       __ret;                                                          \
+})
+
+#define skb_checksum_init(skb, proto, compute_pseudo)                  \
+       __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
+
+#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)        \
+       __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
+
+#define skb_checksum_validate(skb, proto, compute_pseudo)              \
+       __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
+
+#define skb_checksum_validate_zero_check(skb, proto, check,            \
+                                        compute_pseudo)                \
+       __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
+
+#define skb_checksum_simple_validate(skb)                              \
+       __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
+
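A hedged sketch of how a protocol receive path might use the new helpers; the surrounding function is invented, and real users supply a protocol-specific compute_pseudo (the simple variant below skips the pseudo header entirely):

/* hypothetical receive fragment for a protocol without a pseudo header */
static int example_rcv(struct sk_buff *skb)
{
        if (skb_checksum_simple_validate(skb))
                goto csum_error;        /* non-zero return: checksum is invalid */

        /* ... deliver the packet ... */
        return 0;

csum_error:
        kfree_skb(skb);
        return -EINVAL;
}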
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
index f2f7398848cfed2b348222aae7573ad2c2a76ba8..d82abd40a3c061745e94385b1c43e498004a60e8 100644 (file)
@@ -101,4 +101,13 @@ struct kmem_cache {
        struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
+#ifdef CONFIG_SYSFS
+#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_remove(struct kmem_cache *);
+#else
+static inline void sysfs_slab_remove(struct kmem_cache *s)
+{
+}
+#endif
+
 #endif /* _LINUX_SLUB_DEF_H */
index 54f91d35e5fd76f13b94d8297b10be6642e07887..46cca4c06848346ca84753ac182526a4514ff277 100644 (file)
@@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
 void sock_diag_save_cookie(void *sk, __u32 *cookie);
 
 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
                             struct sk_buff *skb, int attrtype);
 
 #endif
index aa327a8105ada0d2502d697c6f66699e8f8c516b..b2b1afbb32024ebbb80be196ea276a7ff53ae43e 100644 (file)
@@ -26,20 +26,6 @@ struct at86rf230_platform_data {
        int rstn;
        int slp_tr;
        int dig2;
-
-       /* Setting the irq_type will configure the driver to request
-        * the platform irq trigger type according to the given value
-        * and configure the interrupt polarity of the device to the
-        * corresponding polarity.
-        *
-        * Allowed values are: IRQF_TRIGGER_RISING, IRQF_TRIGGER_FALLING,
-        *                     IRQF_TRIGGER_HIGH and IRQF_TRIGGER_LOW
-        *
-        * Setting it to 0, the driver does not touch the trigger type
-        * configuration of the interrupt and sets the interrupt polarity
-        * of the device to high active (the default value).
-        */
-       int irq_type;
 };
 
 #endif
index 07ef9b82b66da9088f0a8811d21d14d8f0b66db2..4568a5cc9ab851c5c53e536cfbcd5e62003d6a47 100644 (file)
@@ -33,6 +33,7 @@ struct ssb_sprom {
        u8 et1phyaddr;          /* MII address for enet1 */
        u8 et0mdcport;          /* MDIO for enet0 */
        u8 et1mdcport;          /* MDIO for enet1 */
+       u16 dev_id;             /* Device ID overriding e.g. PCI ID */
        u16 board_rev;          /* Board revision number from SPROM. */
        u16 board_num;          /* Board number from SPROM. */
        u16 board_type;         /* Board type from SPROM. */
index 239946868142cec2893e89259555d3b86884d616..a0513210798fc9027af01dccccc5ff6c677d3d7e 100644 (file)
@@ -197,7 +197,8 @@ struct tcp_sock {
        u8      do_early_retrans:1,/* Enable RFC5827 early-retransmit  */
                syn_data:1,     /* SYN includes data */
                syn_fastopen:1, /* SYN includes Fast Open option */
-               syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
+               syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+               is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
        u32     tlp_high_seq;   /* snd_nxt at the time of TLP retransmit. */
 
 /* RTT measurement */
@@ -209,6 +210,8 @@ struct tcp_sock {
 
        u32     packets_out;    /* Packets which are "in flight"        */
        u32     retrans_out;    /* Retransmitted packets out            */
+       u32     max_packets_out;  /* max packets_out in last window */
+       u32     max_packets_seq;  /* right edge of max_packets_out flight */
 
        u16     urg_data;       /* Saved octet of OOB data and control flags */
        u8      ecn_flags;      /* ECN status bits.                     */
@@ -365,11 +368,6 @@ static inline bool tcp_passive_fastopen(const struct sock *sk)
                tcp_sk(sk)->fastopen_rsk != NULL);
 }
 
-static inline bool fastopen_cookie_present(struct tcp_fastopen_cookie *foc)
-{
-       return foc->len != -1;
-}
-
 extern void tcp_sock_destruct(struct sock *sk);
 
 static inline int fastopen_init_queue(struct sock *sk, int backlog)
index 42278bbf7a882d90360d141db9c8cc1b1eca1719..247cfdcc4b08bbf377ff5819ebd02683806b0c83 100644 (file)
@@ -47,7 +47,9 @@ struct udp_sock {
 #define udp_portaddr_node      inet.sk.__sk_common.skc_portaddr_node
        int              pending;       /* Any pending frames ? */
        unsigned int     corkflag;      /* Cork is required */
-       __u16            encap_type;    /* Is this an Encapsulation socket? */
+       __u8             encap_type;    /* Is this an Encapsulation socket? */
+       unsigned char    no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
+                        no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
        /*
         * Following member retains the information to create a UDP header
         * when the socket is uncorked.
@@ -76,6 +78,26 @@ static inline struct udp_sock *udp_sk(const struct sock *sk)
        return (struct udp_sock *)sk;
 }
 
+static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
+{
+       udp_sk(sk)->no_check6_tx = val;
+}
+
+static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
+{
+       udp_sk(sk)->no_check6_rx = val;
+}
+
+static inline bool udp_get_no_check6_tx(struct sock *sk)
+{
+       return udp_sk(sk)->no_check6_tx;
+}
+
+static inline bool udp_get_no_check6_rx(struct sock *sk)
+{
+       return udp_sk(sk)->no_check6_rx;
+}
+
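A small sketch of the accessor pair in use, e.g. by a tunnel that opts out of UDPv6 checksums on a kernel socket it owns (the function name is hypothetical):

/* hypothetical: configure zero UDPv6 checksums on an encapsulation socket */
static void example_tunnel_csum_config(struct sock *sk, bool zero_csum)
{
        udp_set_no_check6_tx(sk, zero_csum);
        udp_set_no_check6_rx(sk, zero_csum);
}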
 #define udp_portaddr_for_each_entry(__sk, node, list) \
        hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
 
index 44b38b92236a5834f022185e99707ea9ac6edc39..7c9b484735c533ed4243be050d8ad2e8c67d581a 100644 (file)
 #define        CDC_NCM_NTB_MAX_SIZE_TX                 32768   /* bytes */
 #define        CDC_NCM_NTB_MAX_SIZE_RX                 32768   /* bytes */
 
+/* Initial NTB length */
+#define        CDC_NCM_NTB_DEF_SIZE_TX                 16384   /* bytes */
+#define        CDC_NCM_NTB_DEF_SIZE_RX                 16384   /* bytes */
+
 /* Minimum value for MaxDatagramSize, ch. 6.2.9 */
 #define        CDC_NCM_MIN_DATAGRAM_SIZE               1514    /* bytes */
 
 /* Restart the timer, if amount of datagrams is less than given value */
 #define        CDC_NCM_RESTART_TIMER_DATAGRAM_CNT      3
 #define        CDC_NCM_TIMER_PENDING_CNT               2
-#define CDC_NCM_TIMER_INTERVAL                 (400UL * NSEC_PER_USEC)
-
-/* The following macro defines the minimum header space */
-#define        CDC_NCM_MIN_HDR_SIZE \
-       (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
-       (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
-
-#define CDC_NCM_NDP_SIZE \
-       (sizeof(struct usb_cdc_ncm_ndp16) +                             \
-             (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
+#define CDC_NCM_TIMER_INTERVAL_USEC            400UL
+#define CDC_NCM_TIMER_INTERVAL_MIN             5UL
+#define CDC_NCM_TIMER_INTERVAL_MAX             (U32_MAX / NSEC_PER_USEC)
 
 #define cdc_ncm_comm_intf_is_mbim(x)  ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
                                       (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
@@ -107,6 +104,9 @@ struct cdc_ncm_ctx {
        spinlock_t mtx;
        atomic_t stop;
 
+       u32 timer_interval;
+       u32 max_ndp_size;
+
        u32 tx_timer_pending;
        u32 tx_curr_frame_num;
        u32 rx_max;
@@ -118,10 +118,21 @@ struct cdc_ncm_ctx {
        u16 tx_ndp_modulus;
        u16 tx_seq;
        u16 rx_seq;
-       u16 connected;
+       u16 min_tx_pkt;
+
+       /* statistics */
+       u32 tx_curr_frame_payload;
+       u32 tx_reason_ntb_full;
+       u32 tx_reason_ndp_full;
+       u32 tx_reason_timeout;
+       u32 tx_reason_max_datagram;
+       u64 tx_overhead;
+       u64 tx_ntbs;
+       u64 rx_overhead;
+       u64 rx_ntbs;
 };
 
-u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
+u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
 void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
 struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
index 933a9f22a05ff63abb4379f94cb907c95f6beac4..f679877bb6017dd4a6da7d1fe0e6ea217ba3b3e4 100644 (file)
@@ -306,11 +306,6 @@ static inline void addrconf_addr_solict_mult(const struct in6_addr *addr,
                      htonl(0xFF000000) | addr->s6_addr32[3]);
 }
 
-static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
-{
-       return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
-}
-
 static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
index f79ae2aa76d6a45fb9a1e452d53f82d05e101717..085940f7eeec0e56d5f95226034b32b906bc3380 100644 (file)
@@ -57,6 +57,14 @@ struct sockaddr_ieee802154 {
 /* get/setsockopt */
 #define SOL_IEEE802154 0
 
-#define WPAN_WANTACK   0
+#define WPAN_WANTACK           0
+#define WPAN_SECURITY          1
+#define WPAN_SECURITY_LEVEL    2
+
+#define WPAN_SECURITY_DEFAULT  0
+#define WPAN_SECURITY_OFF      1
+#define WPAN_SECURITY_ON       2
+
+#define WPAN_SECURITY_LEVEL_DEFAULT    (-1)
 
 #endif
index 7d64d3609ec97ea76751009e82d43c38a10ab5d8..4282778694006034bccca4ce3fbeba9be29537ec 100644 (file)
@@ -155,7 +155,11 @@ struct vsock_transport {
 
 /**** CORE ****/
 
-int vsock_core_init(const struct vsock_transport *t);
+int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
+static inline int vsock_core_init(const struct vsock_transport *t)
+{
+       return __vsock_core_init(t, THIS_MODULE);
+}
 void vsock_core_exit(void);
 
 /**** UTILS ****/
index be150cf8cd432298d47860268a1d4566af74481d..4261a67682c032f84da321987e30ee97f510ffa8 100644 (file)
@@ -367,6 +367,7 @@ enum {
 #define HCI_ERROR_REMOTE_POWER_OFF     0x15
 #define HCI_ERROR_LOCAL_HOST_TERM      0x16
 #define HCI_ERROR_PAIRING_NOT_ALLOWED  0x18
+#define HCI_ERROR_ADVERTISING_TIMEOUT  0x3c
 
 /* Flow control modes */
 #define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
index 5f8bc05694ac665159bb25f7a9eaa704c3784e91..d73f41855ada2cbb7ce9dfc61c72cd9b592083ca 100644 (file)
@@ -68,6 +68,11 @@ struct discovery_state {
        struct list_head        unknown;        /* Name state not known */
        struct list_head        resolve;        /* Name needs to be resolved */
        __u32                   timestamp;
+       bdaddr_t                last_adv_addr;
+       u8                      last_adv_addr_type;
+       s8                      last_adv_rssi;
+       u8                      last_adv_data[HCI_MAX_AD_LENGTH];
+       u8                      last_adv_data_len;
 };
 
 struct hci_conn_hash {
@@ -194,6 +199,7 @@ struct hci_dev {
        __u16           le_scan_window;
        __u16           le_conn_min_interval;
        __u16           le_conn_max_interval;
+       __u16           discov_interleaved_timeout;
        __u8            ssp_debug_mode;
 
        __u16           devid_source;
@@ -1204,8 +1210,8 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
  */
 #define DISCOV_LE_SCAN_WIN             0x12
 #define DISCOV_LE_SCAN_INT             0x12
-#define DISCOV_LE_TIMEOUT              msecs_to_jiffies(10240)
-#define DISCOV_INTERLEAVED_TIMEOUT     msecs_to_jiffies(5120)
+#define DISCOV_LE_TIMEOUT              10240   /* msec */
+#define DISCOV_INTERLEAVED_TIMEOUT     5120    /* msec */
 #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
 #define DISCOV_BREDR_INQUIRY_LEN       0x08
 
@@ -1265,7 +1271,8 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
                                       u8 *randomizer256, u8 status);
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
-                      u8 ssp, u8 *eir, u16 eir_len);
+                      u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
+                      u8 scan_rsp_len);
 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
 void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
index f3539a15c41103b743c0571913fbb93dc402f40a..e46c437944f73e66cb275f4adc345a1f869923c8 100644 (file)
@@ -109,6 +109,13 @@ enum ieee80211_band {
  *     channel as the control or any of the secondary channels.
  *     This may be due to the driver or due to regulatory bandwidth
  *     restrictions.
+ * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
+ * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
+ *     on this channel.
+ * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
+ *     on this channel.
+ *
  */
 enum ieee80211_channel_flags {
        IEEE80211_CHAN_DISABLED         = 1<<0,
@@ -120,6 +127,10 @@ enum ieee80211_channel_flags {
        IEEE80211_CHAN_NO_OFDM          = 1<<6,
        IEEE80211_CHAN_NO_80MHZ         = 1<<7,
        IEEE80211_CHAN_NO_160MHZ        = 1<<8,
+       IEEE80211_CHAN_INDOOR_ONLY      = 1<<9,
+       IEEE80211_CHAN_GO_CONCURRENT    = 1<<10,
+       IEEE80211_CHAN_NO_20MHZ         = 1<<11,
+       IEEE80211_CHAN_NO_10MHZ         = 1<<12,
 };
 
 #define IEEE80211_CHAN_NO_HT40 \
@@ -330,8 +341,8 @@ struct vif_params {
  * @seq_len: length of @seq.
  */
 struct key_params {
-       u8 *key;
-       u8 *seq;
+       const u8 *key;
+       const u8 *seq;
        int key_len;
        int seq_len;
        u32 cipher;
@@ -441,10 +452,13 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
  * cfg80211_chandef_dfs_required - checks if radar detection is required
  * @wiphy: the wiphy to validate against
  * @chandef: the channel definition to check
- * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
+ * @iftype: the interface type as specified in &enum nl80211_iftype
+ * Returns:
+ *     1 if radar detection is required, 0 if it is not, < 0 on error
  */
 int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
-                                 const struct cfg80211_chan_def *chandef);
+                                 const struct cfg80211_chan_def *chandef,
+                                 enum nl80211_iftype iftype);
 
 /**
  * ieee80211_chandef_rate_flags - returns rate flags for a channel
@@ -654,7 +668,6 @@ struct cfg80211_acl_data {
  * @p2p_opp_ps: P2P opportunistic PS
  * @acl: ACL configuration used by the drivers which has support for
  *     MAC address based access control
- * @radar_required: set if radar detection is required
  */
 struct cfg80211_ap_settings {
        struct cfg80211_chan_def chandef;
@@ -672,7 +685,6 @@ struct cfg80211_ap_settings {
        u8 p2p_ctwindow;
        bool p2p_opp_ps;
        const struct cfg80211_acl_data *acl;
-       bool radar_required;
 };
 
 /**
@@ -682,8 +694,10 @@ struct cfg80211_ap_settings {
  *
  * @chandef: defines the channel to use after the switch
  * @beacon_csa: beacon data while performing the switch
- * @counter_offset_beacon: offset for the counter within the beacon (tail)
- * @counter_offset_presp: offset for the counter within the probe response
+ * @counter_offsets_beacon: offsets of the counters within the beacon (tail)
+ * @counter_offsets_presp: offsets of the counters within the probe response
+ * @n_counter_offsets_beacon: number of csa counters in the beacon (tail)
+ * @n_counter_offsets_presp: number of csa counters in the probe response
  * @beacon_after: beacon data to be used on the new channel
  * @radar_required: whether radar detection is required on the new channel
  * @block_tx: whether transmissions should be blocked while changing
@@ -692,7 +706,10 @@ struct cfg80211_ap_settings {
 struct cfg80211_csa_settings {
        struct cfg80211_chan_def chandef;
        struct cfg80211_beacon_data beacon_csa;
-       u16 counter_offset_beacon, counter_offset_presp;
+       const u16 *counter_offsets_beacon;
+       const u16 *counter_offsets_presp;
+       unsigned int n_counter_offsets_beacon;
+       unsigned int n_counter_offsets_presp;
        struct cfg80211_beacon_data beacon_after;
        bool radar_required;
        bool block_tx;
@@ -856,36 +873,38 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
  * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled
  * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled
  * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled
+ * @STATION_INFO_EXPECTED_THROUGHPUT: @expected_throughput filled
  */
 enum station_info_flags {
-       STATION_INFO_INACTIVE_TIME      = 1<<0,
-       STATION_INFO_RX_BYTES           = 1<<1,
-       STATION_INFO_TX_BYTES           = 1<<2,
-       STATION_INFO_LLID               = 1<<3,
-       STATION_INFO_PLID               = 1<<4,
-       STATION_INFO_PLINK_STATE        = 1<<5,
-       STATION_INFO_SIGNAL             = 1<<6,
-       STATION_INFO_TX_BITRATE         = 1<<7,
-       STATION_INFO_RX_PACKETS         = 1<<8,
-       STATION_INFO_TX_PACKETS         = 1<<9,
-       STATION_INFO_TX_RETRIES         = 1<<10,
-       STATION_INFO_TX_FAILED          = 1<<11,
-       STATION_INFO_RX_DROP_MISC       = 1<<12,
-       STATION_INFO_SIGNAL_AVG         = 1<<13,
-       STATION_INFO_RX_BITRATE         = 1<<14,
-       STATION_INFO_BSS_PARAM          = 1<<15,
-       STATION_INFO_CONNECTED_TIME     = 1<<16,
-       STATION_INFO_ASSOC_REQ_IES      = 1<<17,
-       STATION_INFO_STA_FLAGS          = 1<<18,
-       STATION_INFO_BEACON_LOSS_COUNT  = 1<<19,
-       STATION_INFO_T_OFFSET           = 1<<20,
-       STATION_INFO_LOCAL_PM           = 1<<21,
-       STATION_INFO_PEER_PM            = 1<<22,
-       STATION_INFO_NONPEER_PM         = 1<<23,
-       STATION_INFO_RX_BYTES64         = 1<<24,
-       STATION_INFO_TX_BYTES64         = 1<<25,
-       STATION_INFO_CHAIN_SIGNAL       = 1<<26,
-       STATION_INFO_CHAIN_SIGNAL_AVG   = 1<<27,
+       STATION_INFO_INACTIVE_TIME              = BIT(0),
+       STATION_INFO_RX_BYTES                   = BIT(1),
+       STATION_INFO_TX_BYTES                   = BIT(2),
+       STATION_INFO_LLID                       = BIT(3),
+       STATION_INFO_PLID                       = BIT(4),
+       STATION_INFO_PLINK_STATE                = BIT(5),
+       STATION_INFO_SIGNAL                     = BIT(6),
+       STATION_INFO_TX_BITRATE                 = BIT(7),
+       STATION_INFO_RX_PACKETS                 = BIT(8),
+       STATION_INFO_TX_PACKETS                 = BIT(9),
+       STATION_INFO_TX_RETRIES                 = BIT(10),
+       STATION_INFO_TX_FAILED                  = BIT(11),
+       STATION_INFO_RX_DROP_MISC               = BIT(12),
+       STATION_INFO_SIGNAL_AVG                 = BIT(13),
+       STATION_INFO_RX_BITRATE                 = BIT(14),
+       STATION_INFO_BSS_PARAM                  = BIT(15),
+       STATION_INFO_CONNECTED_TIME             = BIT(16),
+       STATION_INFO_ASSOC_REQ_IES              = BIT(17),
+       STATION_INFO_STA_FLAGS                  = BIT(18),
+       STATION_INFO_BEACON_LOSS_COUNT          = BIT(19),
+       STATION_INFO_T_OFFSET                   = BIT(20),
+       STATION_INFO_LOCAL_PM                   = BIT(21),
+       STATION_INFO_PEER_PM                    = BIT(22),
+       STATION_INFO_NONPEER_PM                 = BIT(23),
+       STATION_INFO_RX_BYTES64                 = BIT(24),
+       STATION_INFO_TX_BYTES64                 = BIT(25),
+       STATION_INFO_CHAIN_SIGNAL               = BIT(26),
+       STATION_INFO_CHAIN_SIGNAL_AVG           = BIT(27),
+       STATION_INFO_EXPECTED_THROUGHPUT        = BIT(28),
 };
 
 /**
@@ -1007,6 +1026,8 @@ struct sta_bss_parameters {
  * @local_pm: local mesh STA power save mode
  * @peer_pm: peer mesh STA power save mode
  * @nonpeer_pm: non-peer mesh STA power save mode
+ * @expected_throughput: expected throughput in kbps (including 802.11 headers)
+ *     towards this station.
  */
 struct station_info {
        u32 filled;
@@ -1045,12 +1066,27 @@ struct station_info {
        enum nl80211_mesh_power_mode peer_pm;
        enum nl80211_mesh_power_mode nonpeer_pm;
 
+       u32 expected_throughput;
+
        /*
         * Note: Add a new enum station_info_flags value for each new field and
         * use it to check which fields are initialized.
         */
 };
 
+/**
+ * cfg80211_get_station - retrieve information about a given station
+ * @dev: the device where the station is supposed to be connected to
+ * @mac_addr: the mac address of the station of interest
+ * @sinfo: pointer to the structure to fill with the information
+ *
+ * Returns 0 on success and fills sinfo with the available information;
+ * otherwise it returns a negative error code and the content of sinfo has to
+ * be considered undefined.
+ */
+int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+                        struct station_info *sinfo);
+
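A hedged usage sketch for the new export (the caller and policy are invented): check the filled bitmap before trusting any field, since drivers only report what they support:

/* hypothetical consumer: fetch expected throughput towards a peer, in kbps */
static u32 example_peer_throughput(struct net_device *dev, const u8 *peer)
{
        struct station_info sinfo = {};

        if (cfg80211_get_station(dev, peer, &sinfo))
                return 0;
        if (!(sinfo.filled & STATION_INFO_EXPECTED_THROUGHPUT))
                return 0;
        return sinfo.expected_throughput;
}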
 /**
  * enum monitor_flags - monitor flags
  *
@@ -1152,7 +1188,7 @@ struct bss_parameters {
        int use_cts_prot;
        int use_short_preamble;
        int use_short_slot_time;
-       u8 *basic_rates;
+       const u8 *basic_rates;
        u8 basic_rates_len;
        int ap_isolate;
        int ht_opmode;
@@ -1682,10 +1718,10 @@ struct cfg80211_disassoc_request {
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
  */
 struct cfg80211_ibss_params {
-       u8 *ssid;
-       u8 *bssid;
+       const u8 *ssid;
+       const u8 *bssid;
        struct cfg80211_chan_def chandef;
-       u8 *ie;
+       const u8 *ie;
        u8 ssid_len, ie_len;
        u16 beacon_interval;
        u32 basic_rates;
@@ -1794,8 +1830,8 @@ struct cfg80211_bitrate_mask {
  * @pmkid: The PMK material itself.
  */
 struct cfg80211_pmksa {
-       u8 *bssid;
-       u8 *pmkid;
+       const u8 *bssid;
+       const u8 *pmkid;
 };
 
 /**
@@ -1810,7 +1846,7 @@ struct cfg80211_pmksa {
  * memory, free @mask only!
  */
 struct cfg80211_pkt_pattern {
-       u8 *mask, *pattern;
+       const u8 *mask, *pattern;
        int pattern_len;
        int pkt_offset;
 };
@@ -1974,6 +2010,8 @@ struct cfg80211_update_ft_ies_params {
  * @len: buffer length
  * @no_cck: don't use cck rates for this frame
  * @dont_wait_for_ack: tells the low level not to wait for an ack
+ * @n_csa_offsets: length of csa_offsets array
+ * @csa_offsets: array of all the csa offsets in the frame
  */
 struct cfg80211_mgmt_tx_params {
        struct ieee80211_channel *chan;
@@ -1983,6 +2021,8 @@ struct cfg80211_mgmt_tx_params {
        size_t len;
        bool no_cck;
        bool dont_wait_for_ack;
+       int n_csa_offsets;
+       const u16 *csa_offsets;
 };
 
 /**
@@ -2278,6 +2318,10 @@ struct cfg80211_qos_map {
  * @channel_switch: initiate channel-switch procedure (with CSA)
  *
  * @set_qos_map: Set QoS mapping information to the driver
+ *
+ * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the
+ *     given interface. This is used e.g. for dynamic HT 20/40 MHz channel width
+ *     changes during the lifetime of the BSS.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2320,28 +2364,29 @@ struct cfg80211_ops {
 
 
        int     (*add_station)(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *mac, struct station_parameters *params);
+                              const u8 *mac,
+                              struct station_parameters *params);
        int     (*del_station)(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *mac);
+                              const u8 *mac);
        int     (*change_station)(struct wiphy *wiphy, struct net_device *dev,
-                                 u8 *mac, struct station_parameters *params);
+                                 const u8 *mac,
+                                 struct station_parameters *params);
        int     (*get_station)(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *mac, struct station_info *sinfo);
+                              const u8 *mac, struct station_info *sinfo);
        int     (*dump_station)(struct wiphy *wiphy, struct net_device *dev,
-                              int idx, u8 *mac, struct station_info *sinfo);
+                               int idx, u8 *mac, struct station_info *sinfo);
 
        int     (*add_mpath)(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *dst, u8 *next_hop);
+                              const u8 *dst, const u8 *next_hop);
        int     (*del_mpath)(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *dst);
+                              const u8 *dst);
        int     (*change_mpath)(struct wiphy *wiphy, struct net_device *dev,
-                                 u8 *dst, u8 *next_hop);
+                                 const u8 *dst, const u8 *next_hop);
        int     (*get_mpath)(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *dst, u8 *next_hop,
-                              struct mpath_info *pinfo);
+                            u8 *dst, u8 *next_hop, struct mpath_info *pinfo);
        int     (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
-                              int idx, u8 *dst, u8 *next_hop,
-                              struct mpath_info *pinfo);
+                             int idx, u8 *dst, u8 *next_hop,
+                             struct mpath_info *pinfo);
        int     (*get_mesh_config)(struct wiphy *wiphy,
                                struct net_device *dev,
                                struct mesh_config *conf);
@@ -2471,11 +2516,11 @@ struct cfg80211_ops {
                                  struct cfg80211_gtk_rekey_data *data);
 
        int     (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
-                            u8 *peer, u8 action_code,  u8 dialog_token,
+                            const u8 *peer, u8 action_code,  u8 dialog_token,
                             u16 status_code, u32 peer_capability,
                             const u8 *buf, size_t len);
        int     (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
-                            u8 *peer, enum nl80211_tdls_operation oper);
+                            const u8 *peer, enum nl80211_tdls_operation oper);
 
        int     (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
                                const u8 *peer, u64 *cookie);
@@ -2521,9 +2566,13 @@ struct cfg80211_ops {
        int     (*channel_switch)(struct wiphy *wiphy,
                                  struct net_device *dev,
                                  struct cfg80211_csa_settings *params);
+
        int     (*set_qos_map)(struct wiphy *wiphy,
                               struct net_device *dev,
                               struct cfg80211_qos_map *qos_map);
+
+       int     (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
+                                   struct cfg80211_chan_def *chandef);
 };
 
 /*
@@ -2618,6 +2667,7 @@ struct ieee80211_iface_limit {
  *     between infrastructure and AP types must match. This is required
  *     only in special cases.
  * @radar_detect_widths: bitmap of channel widths supported for radar detection
+ * @radar_detect_regions: bitmap of regions supported for radar detection
  *
  * With this structure the driver can describe which interface
  * combinations it supports concurrently.
@@ -2675,6 +2725,7 @@ struct ieee80211_iface_combination {
        u8 n_limits;
        bool beacon_int_infra_match;
        u8 radar_detect_widths;
+       u8 radar_detect_regions;
 };
 
 struct ieee80211_txrx_stypes {
@@ -2905,6 +2956,17 @@ struct wiphy_vendor_command {
  *     (including P2P GO) or 0 to indicate no such limit is advertised. The
  *     driver is allowed to advertise a theoretical limit that it can reach in
  *     some cases, but may not always reach.
+ *
+ * @max_num_csa_counters: Number of supported csa_counters in beacons
+ *     and probe responses.  This value should be set if the driver
+ *     wishes to limit the number of csa counters. Default (0) means
+ *     infinite.
+ * @max_adj_channel_rssi_comp: max offset between the channel on which the
+ *     frame was sent and the channel on which the frame was heard for which
+ *     the reported rssi is still valid. If a driver is able to compensate the
+ *     low rssi when a frame is heard on a different channel, then it should set
+ *     this variable to the maximal offset for which it can compensate.
+ *     This value should be set in MHz.
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
@@ -3022,6 +3084,9 @@ struct wiphy {
 
        u16 max_ap_assoc_sta;
 
+       u8 max_num_csa_counters;
+       u8 max_adj_channel_rssi_comp;
+
        char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -3194,6 +3259,7 @@ struct cfg80211_cached_keys;
  * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
  * @event_list: (private) list for internal event processing
  * @event_lock: (private) lock for event list
+ * @owner_nlportid: (private) owner socket port ID
  */
 struct wireless_dev {
        struct wiphy *wiphy;
@@ -3241,13 +3307,15 @@ struct wireless_dev {
        unsigned long cac_start_time;
        unsigned int cac_time_ms;
 
+       u32 owner_nlportid;
+
 #ifdef CONFIG_CFG80211_WEXT
        /* wext data */
        struct {
                struct cfg80211_ibss_params ibss;
                struct cfg80211_connect_params connect;
                struct cfg80211_cached_keys *keys;
-               u8 *ie;
+               const u8 *ie;
                size_t ie_len;
                u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
                u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -3488,7 +3556,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
  * Return: 0 on success, or a negative error code.
  */
 int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
-                            enum nl80211_iftype iftype, u8 *bssid, bool qos);
+                            enum nl80211_iftype iftype, const u8 *bssid,
+                            bool qos);
 
 /**
  * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
@@ -3600,7 +3669,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
  * default channel settings will be disregarded. If no rule is found for a
  * channel on the regulatory domain the channel will be disabled.
  * Drivers using this for a wiphy should also set the wiphy flag
- * WIPHY_FLAG_CUSTOM_REGULATORY or cfg80211 will set it for the wiphy
+ * REGULATORY_CUSTOM_REG or cfg80211 will set it for the wiphy
  * that called this helper.
  */
 void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
@@ -3668,6 +3737,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
  */
 void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
 
+/**
+ * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason.  The driver
+ * is then called back via the sched_scan_stop operation when done.
+ * This function should be called with rtnl locked.
+ */
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
+
 /**
  * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
  *
@@ -4277,7 +4358,7 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
  * and not try to connect to any AP any more.
  */
 void cfg80211_disconnected(struct net_device *dev, u16 reason,
-                          u8 *ie, size_t ie_len, gfp_t gfp);
+                          const u8 *ie, size_t ie_len, gfp_t gfp);
 
 /**
  * cfg80211_ready_on_channel - notification of remain_on_channel start
@@ -4531,12 +4612,14 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
  * cfg80211_reg_can_beacon - check if beaconing is allowed
  * @wiphy: the wiphy
  * @chandef: the channel definition
+ * @iftype: interface type
  *
  * Return: %true if there is no secondary channel or the secondary channel(s)
  * can be used for beaconing (i.e. is not a radar channel etc.)
  */
 bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
-                            struct cfg80211_chan_def *chandef);
+                            struct cfg80211_chan_def *chandef,
+                            enum nl80211_iftype iftype);
 
 /*
  * cfg80211_ch_switch_notify - update wdev channel and notify userspace
@@ -4682,6 +4765,84 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp);
  */
 unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy);
 
+/**
+ * cfg80211_check_combinations - check interface combinations
+ *
+ * @wiphy: the wiphy
+ * @num_different_channels: the number of different channels we want
+ *     to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ *     width where radar detection is needed, as in the definition of
+ *     &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the numbers of interfaces of each interface
+ *     type.  The index is the interface type as specified in &enum
+ *     nl80211_iftype.
+ *
+ * This function can be called by the driver to check whether a
+ * combination of interfaces and their types is allowed according to
+ * the interface combinations.
+ */
+int cfg80211_check_combinations(struct wiphy *wiphy,
+                               const int num_different_channels,
+                               const u8 radar_detect,
+                               const int iftype_num[NUM_NL80211_IFTYPES]);
+
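A sketch of the intended call (values invented): a driver asks whether one AP plus one station on a single channel, without radar detection, would fit its advertised combinations:

/* hypothetical validity check before bringing up a second interface */
static int example_check_ap_sta(struct wiphy *wiphy)
{
        int iftype_num[NUM_NL80211_IFTYPES] = {};

        iftype_num[NL80211_IFTYPE_AP] = 1;
        iftype_num[NL80211_IFTYPE_STATION] = 1;

        return cfg80211_check_combinations(wiphy, 1 /* channels */,
                                           0 /* radar_detect */, iftype_num);
}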
+/**
+ * cfg80211_iter_combinations - iterate over matching combinations
+ *
+ * @wiphy: the wiphy
+ * @num_different_channels: the number of different channels we want
+ *     to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ *     width where radar detection is needed, as in the definition of
+ *     &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the numbers of interfaces of each interface
+ *     type.  The index is the interface type as specified in &enum
+ *     nl80211_iftype.
+ * @iter: function to call for each matching combination
+ * @data: pointer to pass to iter function
+ *
+ * This function can be called by the driver to check what possible
+ * combinations it fits in at a given moment, e.g. for channel switching
+ * purposes.
+ */
+int cfg80211_iter_combinations(struct wiphy *wiphy,
+                              const int num_different_channels,
+                              const u8 radar_detect,
+                              const int iftype_num[NUM_NL80211_IFTYPES],
+                              void (*iter)(const struct ieee80211_iface_combination *c,
+                                           void *data),
+                              void *data);
+
+/**
+ * cfg80211_stop_iface - trigger interface disconnection
+ *
+ * @wiphy: the wiphy
+ * @wdev: wireless device
+ * @gfp: context flags
+ *
+ * Trigger interface to be stopped as if AP was stopped, IBSS/mesh left, STA
+ * disconnected.
+ *
+ * Note: This doesn't need any locks and is asynchronous.
+ */
+void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
+                        gfp_t gfp);
+
+/**
+ * cfg80211_shutdown_all_interfaces - shut down all interfaces for a wiphy
+ * @wiphy: the wiphy to shut down
+ *
+ * This function shuts down all interfaces belonging to this wiphy by
+ * calling dev_close() (and treating non-netdev interfaces as needed).
+ * It shouldn't really be used unless there are some fatal device errors
+ * that really can't be recovered in any other way.
+ *
+ * Callers must hold the RTNL and be able to deal with callbacks into
+ * the driver while the function is running.
+ */
+void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
index a28f4e0f625193b0682932207a46578f094f52a9..87cb1903640d63ccfed890c957294af46a93cf24 100644 (file)
@@ -57,12 +57,14 @@ static __inline__ __wsum csum_and_copy_to_user
 }
 #endif
 
+#ifndef HAVE_ARCH_CSUM_ADD
 static inline __wsum csum_add(__wsum csum, __wsum addend)
 {
        u32 res = (__force u32)csum;
        res += (__force u32)addend;
        return (__force __wsum)(res + (res < (__force u32)addend));
 }
+#endif
 
 static inline __wsum csum_sub(__wsum csum, __wsum addend)
 {
index 7828ebf99ee132241b76e500681a43188143da99..6efce384451e56f16d8aab0847a6a7be4e94e0a0 100644 (file)
@@ -181,6 +181,11 @@ struct dsa_switch_driver {
 void register_switch_driver(struct dsa_switch_driver *type);
 void unregister_switch_driver(struct dsa_switch_driver *type);
 
+static inline void *ds_to_priv(struct dsa_switch *ds)
+{
+       return (void *)(ds + 1);
+}
+
 /*
  * The original DSA tag format and some other tag formats have no
  * ethertype, which means that we need to add a little hack to the
index c7ae0ac528dc1e5e1d3c2d5456c2d0e5221b6933..0aa7122e8f15390b4b6158429a245057fdd23e5d 100644 (file)
 #define IEEE802154_SCF_KEY_SHORT_INDEX         2
 #define IEEE802154_SCF_KEY_HW_INDEX            3
 
+#define IEEE802154_SCF_SECLEVEL_NONE           0
+#define IEEE802154_SCF_SECLEVEL_MIC32          1
+#define IEEE802154_SCF_SECLEVEL_MIC64          2
+#define IEEE802154_SCF_SECLEVEL_MIC128         3
+#define IEEE802154_SCF_SECLEVEL_ENC            4
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC32      5
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC64      6
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC128     7
+
 /* MAC footer size */
 #define IEEE802154_MFR_SIZE    2 /* 2 octets */
 
index 5a719ca892f41c24eb45e849a64327cd6221d407..3b53c8e405e48143f667c20850db1666c402d8fa 100644 (file)
@@ -27,6 +27,7 @@
 #ifndef IEEE802154_NETDEVICE_H
 #define IEEE802154_NETDEVICE_H
 
+#include <net/ieee802154.h>
 #include <net/af_ieee802154.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
@@ -114,6 +115,34 @@ int ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr);
 int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
                              struct ieee802154_hdr *hdr);
 
+/* parses the full 802.15.4 header of a given skb and stores it into hdr,
+ * performing pan id decompression and length checks so that the result is
+ * suitable for use in header_ops.parse
+ */
+int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
+
+int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
+
+static inline int
+ieee802154_sechdr_authtag_len(const struct ieee802154_sechdr *sec)
+{
+       switch (sec->level) {
+       case IEEE802154_SCF_SECLEVEL_MIC32:
+       case IEEE802154_SCF_SECLEVEL_ENC_MIC32:
+               return 4;
+       case IEEE802154_SCF_SECLEVEL_MIC64:
+       case IEEE802154_SCF_SECLEVEL_ENC_MIC64:
+               return 8;
+       case IEEE802154_SCF_SECLEVEL_MIC128:
+       case IEEE802154_SCF_SECLEVEL_ENC_MIC128:
+               return 16;
+       case IEEE802154_SCF_SECLEVEL_NONE:
+       case IEEE802154_SCF_SECLEVEL_ENC:
+       default:
+               return 0;
+       }
+}
+
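
ieee802154_sechdr_authtag_len() maps the security level from the auxiliary header to the size of the trailing MIC. A small sketch of stripping that tag from a received, already-parsed secured frame (the function name and error handling are illustrative):

#include <linux/skbuff.h>
#include <net/ieee802154_netdev.h>

static int example_strip_authtag(struct sk_buff *skb,
				 const struct ieee802154_sechdr *sec)
{
	int authlen = ieee802154_sechdr_authtag_len(sec);

	if (skb->len < authlen)
		return -EINVAL;

	/* drop the MIC so only the (decrypted) payload remains */
	skb_trim(skb, skb->len - authlen);
	return 0;
}
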
 static inline int ieee802154_hdr_length(struct sk_buff *skb)
 {
        struct ieee802154_hdr hdr;
@@ -193,8 +222,12 @@ static inline void ieee802154_addr_to_sa(struct ieee802154_addr_sa *sa,
  */
 struct ieee802154_mac_cb {
        u8 lqi;
-       u8 flags;
-       u8 seq;
+       u8 type;
+       bool ackreq;
+       bool secen;
+       bool secen_override;
+       u8 seclevel;
+       bool seclevel_override;
        struct ieee802154_addr source;
        struct ieee802154_addr dest;
 };
@@ -204,25 +237,96 @@ static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
        return (struct ieee802154_mac_cb *)skb->cb;
 }
 
-#define MAC_CB_FLAG_TYPEMASK           ((1 << 3) - 1)
-
-#define MAC_CB_FLAG_ACKREQ             (1 << 3)
-#define MAC_CB_FLAG_SECEN              (1 << 4)
-
-static inline bool mac_cb_is_ackreq(struct sk_buff *skb)
+static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb)
 {
-       return mac_cb(skb)->flags & MAC_CB_FLAG_ACKREQ;
-}
+       BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
 
-static inline bool mac_cb_is_secen(struct sk_buff *skb)
-{
-       return mac_cb(skb)->flags & MAC_CB_FLAG_SECEN;
+       memset(skb->cb, 0, sizeof(struct ieee802154_mac_cb));
+       return mac_cb(skb);
 }
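
With the flags byte replaced by discrete fields, receive paths are expected to zero the control block first and then fill it in. A minimal sketch, assuming the frame has already been parsed into an ieee802154_hdr by the caller and using the ieee802154_hdr_fc field names as an assumption:

static void example_fill_mac_cb(struct sk_buff *skb,
				const struct ieee802154_hdr *hdr)
{
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);

	cb->type = hdr->fc.type;
	cb->ackreq = hdr->fc.ack_request;
	cb->secen = hdr->fc.security_enabled;
	cb->source = hdr->source;
	cb->dest = hdr->dest;
	/* lqi is filled in later by the driver/PHY layer */
}
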
 
-static inline int mac_cb_type(struct sk_buff *skb)
-{
-       return mac_cb(skb)->flags & MAC_CB_FLAG_TYPEMASK;
-}
+#define IEEE802154_LLSEC_KEY_SIZE 16
+
+struct ieee802154_llsec_key_id {
+       u8 mode;
+       u8 id;
+       union {
+               struct ieee802154_addr device_addr;
+               __le32 short_source;
+               __le64 extended_source;
+       };
+};
+
+struct ieee802154_llsec_key {
+       u8 frame_types;
+       u32 cmd_frame_ids;
+       u8 key[IEEE802154_LLSEC_KEY_SIZE];
+};
+
+struct ieee802154_llsec_key_entry {
+       struct list_head list;
+
+       struct ieee802154_llsec_key_id id;
+       struct ieee802154_llsec_key *key;
+};
+
+struct ieee802154_llsec_device_key {
+       struct list_head list;
+
+       struct ieee802154_llsec_key_id key_id;
+       u32 frame_counter;
+};
+
+enum {
+       IEEE802154_LLSEC_DEVKEY_IGNORE,
+       IEEE802154_LLSEC_DEVKEY_RESTRICT,
+       IEEE802154_LLSEC_DEVKEY_RECORD,
+
+       __IEEE802154_LLSEC_DEVKEY_MAX,
+};
+
+struct ieee802154_llsec_device {
+       struct list_head list;
+
+       __le16 pan_id;
+       __le16 short_addr;
+       __le64 hwaddr;
+       u32 frame_counter;
+       bool seclevel_exempt;
+
+       u8 key_mode;
+       struct list_head keys;
+};
+
+struct ieee802154_llsec_seclevel {
+       struct list_head list;
+
+       u8 frame_type;
+       u8 cmd_frame_id;
+       bool device_override;
+       u32 sec_levels;
+};
+
+struct ieee802154_llsec_params {
+       bool enabled;
+
+       __be32 frame_counter;
+       u8 out_level;
+       struct ieee802154_llsec_key_id out_key;
+
+       __le64 default_key_source;
+
+       __le16 pan_id;
+       __le64 hwaddr;
+       __le64 coord_hwaddr;
+       __le16 coord_shortaddr;
+};
+
+struct ieee802154_llsec_table {
+       struct list_head keys;
+       struct list_head devices;
+       struct list_head security_levels;
+};
 
 #define IEEE802154_MAC_SCAN_ED         0
 #define IEEE802154_MAC_SCAN_ACTIVE     1
@@ -242,6 +346,53 @@ struct ieee802154_mac_params {
 };
 
 struct wpan_phy;
+
+enum {
+       IEEE802154_LLSEC_PARAM_ENABLED = 1 << 0,
+       IEEE802154_LLSEC_PARAM_FRAME_COUNTER = 1 << 1,
+       IEEE802154_LLSEC_PARAM_OUT_LEVEL = 1 << 2,
+       IEEE802154_LLSEC_PARAM_OUT_KEY = 1 << 3,
+       IEEE802154_LLSEC_PARAM_KEY_SOURCE = 1 << 4,
+       IEEE802154_LLSEC_PARAM_PAN_ID = 1 << 5,
+       IEEE802154_LLSEC_PARAM_HWADDR = 1 << 6,
+       IEEE802154_LLSEC_PARAM_COORD_HWADDR = 1 << 7,
+       IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = 1 << 8,
+};
+
+struct ieee802154_llsec_ops {
+       int (*get_params)(struct net_device *dev,
+                         struct ieee802154_llsec_params *params);
+       int (*set_params)(struct net_device *dev,
+                         const struct ieee802154_llsec_params *params,
+                         int changed);
+
+       int (*add_key)(struct net_device *dev,
+                      const struct ieee802154_llsec_key_id *id,
+                      const struct ieee802154_llsec_key *key);
+       int (*del_key)(struct net_device *dev,
+                      const struct ieee802154_llsec_key_id *id);
+
+       int (*add_dev)(struct net_device *dev,
+                      const struct ieee802154_llsec_device *llsec_dev);
+       int (*del_dev)(struct net_device *dev, __le64 dev_addr);
+
+       int (*add_devkey)(struct net_device *dev,
+                         __le64 device_addr,
+                         const struct ieee802154_llsec_device_key *key);
+       int (*del_devkey)(struct net_device *dev,
+                         __le64 device_addr,
+                         const struct ieee802154_llsec_device_key *key);
+
+       int (*add_seclevel)(struct net_device *dev,
+                           const struct ieee802154_llsec_seclevel *sl);
+       int (*del_seclevel)(struct net_device *dev,
+                           const struct ieee802154_llsec_seclevel *sl);
+
+       void (*lock_table)(struct net_device *dev);
+       void (*get_table)(struct net_device *dev,
+                         struct ieee802154_llsec_table **t);
+       void (*unlock_table)(struct net_device *dev);
+};
 /*
  * This should be located at net_device->ml_priv
  *
@@ -272,6 +423,8 @@ struct ieee802154_mlme_ops {
        void (*get_mac_params)(struct net_device *dev,
                               struct ieee802154_mac_params *params);
 
+       struct ieee802154_llsec_ops *llsec;
+
        /* The fields below are required. */
 
        struct wpan_phy *(*get_phy)(const struct net_device *dev);
index 3bd22795c3e259e1f1f55176c808c6fdcc994600..84b20835b736c53b55a19eac0bbe187e65626fec 100644 (file)
@@ -150,7 +150,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
 }
 
 /*
- * RFC 6080 4.2
+ * RFC 6040 4.2
  *  To decapsulate the inner header at the tunnel egress, a compliant
  *  tunnel egress MUST set the outgoing ECN field to the codepoint at the
  *  intersection of the appropriate arriving inner header (row) and outer
index 1bdb47715def0e21496ae89a59d3d9bd5f1f2c81..dd1950a7e2730e0024f1c82fac8ab20f9b533fe5 100644 (file)
@@ -292,12 +292,12 @@ static inline struct sock *inet_lookup_listener(struct net *net,
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__saddr)) << 32) | \
-                                  ((__force __u64)(__be32)(__daddr)));
+                                  ((__force __u64)(__be32)(__daddr)))
 #else /* __LITTLE_ENDIAN */
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__daddr)) << 32) | \
-                                  ((__force __u64)(__be32)(__saddr)));
+                                  ((__force __u64)(__be32)(__saddr)))
 #endif /* __BIG_ENDIAN */
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)    \
        (((__sk)->sk_portpair == (__ports))                     &&      \
@@ -306,7 +306,9 @@ static inline struct sock *inet_lookup_listener(struct net *net,
           ((__sk)->sk_bound_dev_if == (__dif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
 #else /* 32-bit arch */
-#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
+#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
+       const int __name __deprecated __attribute__((unused))
+
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
        (((__sk)->sk_portpair == (__ports))             &&              \
         ((__sk)->sk_daddr      == (__saddr))           &&              \
index 1833c3f389ee64a0c6b3862d4f2fbc6db0984b0a..b1edf17bec01130f9751747c4d092e5de50aaeac 100644 (file)
@@ -90,6 +90,7 @@ struct inet_request_sock {
        kmemcheck_bitfield_end(flags);
        struct ip_options_rcu   *opt;
        struct sk_buff          *pktopts;
+       u32                     ir_mark;
 };
 
 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -97,6 +98,15 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
        return (struct inet_request_sock *)sk;
 }
 
+static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
+{
+       if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) {
+               return skb->mark;
+       } else {
+               return sk->sk_mark;
+       }
+}
+
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
index 3ec2b0fb9d8395384373917691f49c433262a8db..2e4947895d753a606c0cd0dfc641c87c96241dff 100644 (file)
@@ -196,35 +196,31 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
-unsigned long snmp_fold_field(void __percpu *mib[], int offt);
+unsigned long snmp_fold_field(void __percpu *mib, int offt);
 #if BITS_PER_LONG==32
-u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
 #else
-static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
+static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
 {
        return snmp_fold_field(mib, offt);
 }
 #endif
-int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
-
-static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
-{
-       int i;
-
-       BUG_ON(ptr == NULL);
-       for (i = 0; i < SNMP_ARRAY_SZ; i++) {
-               free_percpu(ptr[i]);
-               ptr[i] = NULL;
-       }
-}
 
 void inet_get_local_port_range(struct net *net, int *low, int *high);
 
-extern unsigned long *sysctl_local_reserved_ports;
-static inline int inet_is_reserved_local_port(int port)
+#ifdef CONFIG_SYSCTL
+static inline int inet_is_local_reserved_port(struct net *net, int port)
 {
-       return test_bit(port, sysctl_local_reserved_ports);
+       if (!net->ipv4.sysctl_local_reserved_ports)
+               return 0;
+       return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
 }
+#else
+static inline int inet_is_local_reserved_port(struct net *net, int port)
+{
+       return 0;
+}
+#endif
 
 extern int sysctl_ip_nonlocal_bind;
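
With the reserved-ports bitmap now per network namespace, port pickers pass the netns along. A condensed, illustrative selection loop (not the real inet_csk_get_port() logic):

static int example_pick_local_port(struct net *net)
{
	int low, high, port;

	inet_get_local_port_range(net, &low, &high);

	for (port = low; port <= high; port++) {
		if (inet_is_local_reserved_port(net, port))
			continue;
		/* ... attempt to bind "port" here; on success: ... */
		return port;
	}

	return -EADDRNOTAVAIL;
}
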
 
@@ -243,6 +239,9 @@ void ipfrag_init(void);
 
 void ip_static_sysctl_init(void);
 
+#define IP4_REPLY_MARK(net, mark) \
+       ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
+
 static inline bool ip_is_fragment(const struct iphdr *iph)
 {
        return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
@@ -281,7 +280,7 @@ static inline bool ip_sk_use_pmtu(const struct sock *sk)
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
 }
 
-static inline bool ip_sk_local_df(const struct sock *sk)
+static inline bool ip_sk_ignore_df(const struct sock *sk)
 {
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
               inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
@@ -316,7 +315,7 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s
 {
        struct iphdr *iph = ip_hdr(skb);
 
-       if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+       if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
                /* This is only to work around buggy Windows95/2000
                 * VJ compression implementations.  If the ID field
                 * does not change, they drop every other packet in
@@ -332,7 +331,7 @@ static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *d
 {
        struct iphdr *iph = ip_hdr(skb);
 
-       if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+       if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
                if (sk && inet_sk(sk)->inet_daddr) {
                        iph->id = htons(inet_sk(sk)->inet_id);
                        inet_sk(sk)->inet_id += 1 + more;
@@ -342,6 +341,12 @@ static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *d
                __ip_select_ident(iph, dst, more);
 }
 
+static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
+{
+       return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+                                 skb->len, proto, 0);
+}
+
 /*
  *     Map a multicast IP onto multicast MAC for type ethernet.
  */
index 9e3c540c1b110c71b65003a6aac22cc6c333be5a..8ac5c21f84563faeda028606034f2c69d23dc2a6 100644 (file)
@@ -41,6 +41,13 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                        __wsum csum);
 #endif
 
+static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
+{
+       return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                           &ipv6_hdr(skb)->daddr,
+                                           skb->len, proto, 0));
+}
+
 static __inline__ __sum16 tcp_v6_check(int len,
                                   const struct in6_addr *saddr,
                                   const struct in6_addr *daddr,
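
inet_compute_pseudo() and its IPv6 counterpart both derive the pseudo-header sum from the skb, assuming skb->len already equals the transport-layer length. A hedged sketch of full software validation built on them (returns 0 when the checksum is valid):

#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>

static __sum16 example_validate_l4_csum(struct sk_buff *skb, int proto)
{
	__wsum pseudo;

	if (skb->protocol == htons(ETH_P_IP))
		pseudo = inet_compute_pseudo(skb, proto);
	else
		pseudo = ip6_compute_pseudo(skb, proto);

	/* sum the transport header + payload (skb->data must point at
	 * the transport header here) and fold in the pseudo header
	 */
	return csum_fold(csum_add(pseudo, skb_checksum(skb, 0, skb->len, 0)));
}
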
index 6c4f5eac98e7be133af4b868507f0d217ac05c1b..1d09b46c1e489325b95f9987327d95ca8affed08 100644 (file)
@@ -127,6 +127,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg);
 void rt6_ifdown(struct net *net, struct net_device *dev);
 void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
 
 
 /*
@@ -185,7 +186,7 @@ static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
               inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
 }
 
-static inline bool ip6_sk_local_df(const struct sock *sk)
+static inline bool ip6_sk_ignore_df(const struct sock *sk)
 {
        return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
               inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
index d640925bc4543bdfb30bf15d164bececbda7e798..ba810d0546bc636319cf896b460d4c50cfcc7092 100644 (file)
@@ -113,6 +113,9 @@ struct frag_hdr {
 #define        IP6_MF          0x0001
 #define        IP6_OFFSET      0xFFF8
 
+#define IP6_REPLY_MARK(net, mark) \
+       ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
+
 #include <net/sock.h>
 
 /* sysctls */
@@ -583,6 +586,11 @@ static inline bool ipv6_addr_orchid(const struct in6_addr *a)
        return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
 }
 
+static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
+{
+       return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
+}
+
 static inline void ipv6_addr_set_v4mapped(const __be32 addr,
                                          struct in6_addr *v4mapped)
 {
@@ -664,6 +672,20 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
 
+static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
+                                     struct dst_entry *dst)
+{
+       int hlimit;
+
+       if (ipv6_addr_is_multicast(&fl6->daddr))
+               hlimit = np->mcast_hops;
+       else
+               hlimit = np->hop_limit;
+       if (hlimit < 0)
+               hlimit = ip6_dst_hoplimit(dst);
+       return hlimit;
+}
+
 /*
  *     Header manipulation
  */
index 8248e3909fdf7d8531890e15bc3f18c5b90ac95f..2c78997bc48d33b9d63d9611f5003bcf2a256083 100644 (file)
@@ -1113,7 +1113,9 @@ enum ieee80211_vif_flags {
  * @addr: address of this interface
  * @p2p: indicates whether this AP or STA interface is a p2p
  *     interface, i.e. a GO or p2p-sta respectively
- * @csa_active: marks whether a channel switch is going on
+ * @csa_active: marks whether a channel switch is going on. Internally it is
+ *     write-protected by sdata_lock and local->mtx, so holding either is fine
+ *     for read access.
  * @driver_flags: flags/capabilities the driver has for this interface,
  *     these need to be set (or cleared) when the interface is added
  *     or, if supported by the driver, the interface type is changed
@@ -1202,14 +1204,18 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  *     fall back to software crypto. Note that this flag deals only with
  *     RX, if your crypto engine can't deal with TX you can also set the
  *     %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
+ *     driver for a CCMP key to indicate that it requires IV generation
+ *     only for management frames (MFP).
  */
 enum ieee80211_key_flags {
-       IEEE80211_KEY_FLAG_GENERATE_IV  = 1<<1,
-       IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
-       IEEE80211_KEY_FLAG_PAIRWISE     = 1<<3,
-       IEEE80211_KEY_FLAG_SW_MGMT_TX   = 1<<4,
-       IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5,
-       IEEE80211_KEY_FLAG_RX_MGMT      = 1<<6,
+       IEEE80211_KEY_FLAG_GENERATE_IV_MGMT     = BIT(0),
+       IEEE80211_KEY_FLAG_GENERATE_IV          = BIT(1),
+       IEEE80211_KEY_FLAG_GENERATE_MMIC        = BIT(2),
+       IEEE80211_KEY_FLAG_PAIRWISE             = BIT(3),
+       IEEE80211_KEY_FLAG_SW_MGMT_TX           = BIT(4),
+       IEEE80211_KEY_FLAG_PUT_IV_SPACE         = BIT(5),
+       IEEE80211_KEY_FLAG_RX_MGMT              = BIT(6),
 };
 
 /**
@@ -1370,6 +1376,7 @@ struct ieee80211_sta_rates {
  *     the station moves to associated state.
  * @smps_mode: current SMPS mode (off, static or dynamic)
  * @rates: rate control selection table
+ * @tdls: indicates whether the STA is a TDLS peer
  */
 struct ieee80211_sta {
        u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -1384,6 +1391,7 @@ struct ieee80211_sta {
        enum ieee80211_sta_rx_bandwidth bandwidth;
        enum ieee80211_smps_mode smps_mode;
        struct ieee80211_sta_rates __rcu *rates;
+       bool tdls;
 
        /* must be last */
        u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1555,6 +1563,12 @@ struct ieee80211_tx_control {
  *     for a single active channel while using channel contexts. When support
  *     is not enabled the default action is to disconnect when getting the
  *     CSA frame.
+ *
+ * @IEEE80211_HW_CHANGE_RUNNING_CHANCTX: The hardware can change a
+ *     channel context on-the-fly.  This is needed for channel switch
+ *     on single-channel hardware.  It can also be used as an
+ *     optimization in certain channel switch cases with
+ *     multi-channel.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1586,6 +1600,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_TIMING_BEACON_ONLY                 = 1<<26,
        IEEE80211_HW_SUPPORTS_HT_CCK_RATES              = 1<<27,
        IEEE80211_HW_CHANCTX_STA_CSA                    = 1<<28,
+       IEEE80211_HW_CHANGE_RUNNING_CHANCTX             = 1<<29,
 };
 
 /**
@@ -2609,6 +2624,7 @@ enum ieee80211_roc_type {
  *     of queues to flush, which is useful if different virtual interfaces
  *     use different hardware queues; it may also indicate all queues.
  *     If the parameter @drop is set to %true, pending frames may be dropped.
+ *     Note that vif can be NULL.
  *     The callback can sleep.
  *
  * @channel_switch: Drivers that need (or want) to offload the channel
@@ -2753,6 +2769,10 @@ enum ieee80211_roc_type {
  *     information in bss_conf is set up and the beacon can be retrieved. A
  *     channel context is bound before this is called.
  * @leave_ibss: Leave the IBSS again.
+ *
+ * @get_expected_throughput: extract the expected throughput towards the
+ *     specified station. The returned value is expressed in Kbps. It returns 0
+ *     if the RC algorithm does not have proper data to provide.
  */
 struct ieee80211_ops {
        void (*tx)(struct ieee80211_hw *hw,
@@ -2871,7 +2891,8 @@ struct ieee80211_ops {
                             struct netlink_callback *cb,
                             void *data, int len);
 #endif
-       void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
+       void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     u32 queues, bool drop);
        void (*channel_switch)(struct ieee80211_hw *hw,
                               struct ieee80211_channel_switch *ch_switch);
        int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
@@ -2945,6 +2966,7 @@ struct ieee80211_ops {
 
        int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
        void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+       u32 (*get_expected_throughput)(struct ieee80211_sta *sta);
 };
 
 /**
@@ -3394,6 +3416,47 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
  */
 void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
 
+#define IEEE80211_MAX_CSA_COUNTERS_NUM 2
+
+/**
+ * struct ieee80211_mutable_offsets - mutable beacon offsets
+ * @tim_offset: position of TIM element
+ * @tim_length: size of TIM element
+ * @csa_counter_offs: array of IEEE80211_MAX_CSA_COUNTERS_NUM offsets
+ *     to CSA counters.  This array can contain zero values which
+ *     should be ignored.
+ */
+struct ieee80211_mutable_offsets {
+       u16 tim_offset;
+       u16 tim_length;
+
+       u16 csa_counter_offs[IEEE80211_MAX_CSA_COUNTERS_NUM];
+};
+
+/**
+ * ieee80211_beacon_get_template - beacon template generation function
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @offs: &struct ieee80211_mutable_offsets pointer to struct that will
+ *     receive the offsets that may be updated by the driver.
+ *
+ * If the driver implements beaconing modes, it must use this function to
+ * obtain the beacon template.
+ *
+ * This function should be used if the beacon frames are generated by the
+ * device, and then the driver must use the returned beacon as the template.
+ * The driver or the device is responsible for updating the DTIM and, when
+ * applicable, the CSA count.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: The beacon template. %NULL on error.
+ */
+struct sk_buff *
+ieee80211_beacon_get_template(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_mutable_offsets *offs);
+
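
A hedged sketch of how a beacon-offload driver might consume the new template API; mydrv_upload_beacon() stands in for the device-specific firmware upload and is not a real function:

/* placeholder for the device-specific upload path */
static void mydrv_upload_beacon(struct ieee80211_hw *hw, const u8 *data,
				int len, u16 tim_off, u16 csa_off)
{
	/* firmware/hardware programming would go here */
}

static int mydrv_set_beacon(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif)
{
	struct ieee80211_mutable_offsets offs = {};
	struct sk_buff *beacon;

	beacon = ieee80211_beacon_get_template(hw, vif, &offs);
	if (!beacon)
		return -ENOMEM;

	/* hand the template plus the TIM/CSA offsets to the device so
	 * the firmware can keep those fields up to date by itself
	 */
	mydrv_upload_beacon(hw, beacon->data, beacon->len,
			    offs.tim_offset, offs.csa_counter_offs[0]);

	dev_kfree_skb(beacon);
	return 0;
}
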
 /**
  * ieee80211_beacon_get_tim - beacon generation function
  * @hw: pointer obtained from ieee80211_alloc_hw().
@@ -3405,16 +3468,12 @@ void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
  *     Set to 0 if invalid (in non-AP modes).
  *
  * If the driver implements beaconing modes, it must use this function to
- * obtain the beacon frame/template.
+ * obtain the beacon frame.
  *
  * If the beacon frames are generated by the host system (i.e., not in
  * hardware/firmware), the driver uses this function to get each beacon
- * frame from mac80211 -- it is responsible for calling this function
- * before the beacon is needed (e.g. based on hardware interrupt).
- *
- * If the beacon frames are generated by the device, then the driver
- * must use the returned beacon as the template and change the TIM IE
- * according to the current DTIM parameters/TIM bitmap.
+ * frame from mac80211 -- it is responsible for calling this function exactly
+ * once before the beacon is needed (e.g. based on hardware interrupt).
  *
  * The driver is responsible for freeing the returned skb.
  *
@@ -3439,6 +3498,20 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
        return ieee80211_beacon_get_tim(hw, vif, NULL, NULL);
 }
 
+/**
+ * ieee80211_csa_update_counter - request mac80211 to decrement the csa counter
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The csa counter should be updated after each beacon transmission.
+ * This function is called implicitly when
+ * ieee80211_beacon_get/ieee80211_beacon_get_tim are called; however, if the
+ * beacon frames are generated by the device, the driver should call this
+ * function after each beacon transmission to sync mac80211's csa counters.
+ *
+ * Return: new csa counter value
+ */
+u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif);
+
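
For devices that transmit the beacons themselves, the driver keeps mac80211's countdown in sync from its beacon-done path. A minimal sketch:

static void mydrv_beacon_tx_done(struct ieee80211_vif *vif)
{
	if (!vif->csa_active)
		return;

	/* one beacon went out, so decrement mac80211's CSA counters */
	ieee80211_csa_update_counter(vif);

	/* when the countdown completes, the driver would report it via
	 * ieee80211_csa_finish() (omitted here)
	 */
}
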
 /**
  * ieee80211_csa_finish - notify mac80211 about channel switch
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -4467,6 +4540,8 @@ struct rate_control_ops {
        void (*add_sta_debugfs)(void *priv, void *priv_sta,
                                struct dentry *dir);
        void (*remove_sta_debugfs)(void *priv, void *priv_sta);
+
+       u32 (*get_expected_throughput)(void *priv_sta);
 };
 
 static inline int rate_supported(struct ieee80211_sta *sta,
@@ -4576,7 +4651,9 @@ conf_is_ht40(struct ieee80211_conf *conf)
 static inline bool
 conf_is_ht(struct ieee80211_conf *conf)
 {
-       return conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
+       return (conf->chandef.width != NL80211_CHAN_WIDTH_5) &&
+               (conf->chandef.width != NL80211_CHAN_WIDTH_10) &&
+               (conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT);
 }
 
 static inline enum nl80211_iftype
index bc4118ede5b56d9a115caca01c211a3b94b7c8e2..361d26077196678af5bd7ce217715a2cc847f2af 100644 (file)
@@ -379,15 +379,8 @@ net_ieee802154_lowpan(struct net *net)
 {
        return &net->ieee802154_lowpan;
 }
-#else
-static inline struct netns_ieee802154_lowpan *
-net_ieee802154_lowpan(struct net *net)
-{
-       return NULL;
-}
 #endif
 
-
 /* For callers who don't really care about whether it's IPv4 or IPv6 */
 static inline void rt_genid_bump_all(struct net *net)
 {
index e6bc14d8fa9a9a4b324fac9df5a47e64277f2aa8..7ee6ce6564aecc6b98eb6247e6d6c6263a3e130c 100644 (file)
@@ -72,21 +72,23 @@ static inline void nft_data_debug(const struct nft_data *data)
  *     struct nft_ctx - nf_tables rule/set context
  *
  *     @net: net namespace
- *     @skb: netlink skb
- *     @nlh: netlink message header
  *     @afi: address family info
  *     @table: the table the chain is contained in
  *     @chain: the chain the rule is contained in
  *     @nla: netlink attributes
+ *     @portid: netlink portID of the original message
+ *     @seq: netlink sequence number
+ *     @report: notify via unicast netlink message
  */
 struct nft_ctx {
        struct net                      *net;
-       const struct sk_buff            *skb;
-       const struct nlmsghdr           *nlh;
-       const struct nft_af_info        *afi;
-       const struct nft_table          *table;
-       const struct nft_chain          *chain;
+       struct nft_af_info              *afi;
+       struct nft_table                *table;
+       struct nft_chain                *chain;
        const struct nlattr * const     *nla;
+       u32                             portid;
+       u32                             seq;
+       bool                            report;
 };
 
 struct nft_data_desc {
@@ -145,6 +147,44 @@ struct nft_set_iter {
                              const struct nft_set_elem *elem);
 };
 
+/**
+ *     struct nft_set_desc - description of set elements
+ *
+ *     @klen: key length
+ *     @dlen: data length
+ *     @size: number of set elements
+ */
+struct nft_set_desc {
+       unsigned int            klen;
+       unsigned int            dlen;
+       unsigned int            size;
+};
+
+/**
+ *     enum nft_set_class - performance class
+ *
+ *     @NFT_LOOKUP_O_1: constant, O(1)
+ *     @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
+ *     @NFT_LOOKUP_O_N: linear, O(N)
+ */
+enum nft_set_class {
+       NFT_SET_CLASS_O_1,
+       NFT_SET_CLASS_O_LOG_N,
+       NFT_SET_CLASS_O_N,
+};
+
+/**
+ *     struct nft_set_estimate - estimation of memory and performance
+ *                               characteristics
+ *
+ *     @size: required memory
+ *     @class: lookup performance class
+ */
+struct nft_set_estimate {
+       unsigned int            size;
+       enum nft_set_class      class;
+};
+
 /**
  *     struct nft_set_ops - nf_tables set operations
  *
@@ -174,7 +214,11 @@ struct nft_set_ops {
                                                struct nft_set_iter *iter);
 
        unsigned int                    (*privsize)(const struct nlattr * const nla[]);
+       bool                            (*estimate)(const struct nft_set_desc *desc,
+                                                   u32 features,
+                                                   struct nft_set_estimate *est);
        int                             (*init)(const struct nft_set *set,
+                                               const struct nft_set_desc *desc,
                                                const struct nlattr * const nla[]);
        void                            (*destroy)(const struct nft_set *set);
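
Set backends report their expected footprint and lookup class through the new ->estimate() hook so the core can pick the best implementation for a given description. A sketch of such a callback for a hypothetical hash-based backend; the size formula is purely illustrative:

static bool example_hash_estimate(const struct nft_set_desc *desc,
				  u32 features,
				  struct nft_set_estimate *est)
{
	/* fall back to a guess when userspace didn't announce a size */
	unsigned int nelems = desc->size ? desc->size : 1024;

	/* rough per-element cost: key + data + bookkeeping overhead */
	est->size = nelems * (desc->klen + desc->dlen + 16);
	est->class = NFT_SET_CLASS_O_1;

	return true;
}
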
 
@@ -194,6 +238,8 @@ void nft_unregister_set(struct nft_set_ops *ops);
  *     @name: name of the set
  *     @ktype: key type (numeric type defined by userspace, not used in the kernel)
  *     @dtype: data type (verdict or numeric type defined by userspace)
+ *     @size: maximum set size
+ *     @nelems: number of elements
  *     @ops: set ops
  *     @flags: set flags
  *     @klen: key length
@@ -206,6 +252,8 @@ struct nft_set {
        char                            name[IFNAMSIZ];
        u32                             ktype;
        u32                             dtype;
+       u32                             size;
+       u32                             nelems;
        /* runtime data below here */
        const struct nft_set_ops        *ops ____cacheline_aligned;
        u16                             flags;
@@ -222,6 +270,8 @@ static inline void *nft_set_priv(const struct nft_set *set)
 
 struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
                                     const struct nlattr *nla);
+struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
+                                         const struct nlattr *nla);
 
 /**
  *     struct nft_set_binding - nf_tables set binding
@@ -341,18 +391,75 @@ struct nft_rule {
 };
 
 /**
- *     struct nft_rule_trans - nf_tables rule update in transaction
+ *     struct nft_trans - nf_tables object update in transaction
  *
+ *     @rcu_head: rcu head to defer release of transaction data
  *     @list: used internally
- *     @ctx: rule context
- *     @rule: rule that needs to be updated
+ *     @msg_type: message type
+ *     @ctx: transaction context
+ *     @data: internal information related to the transaction
  */
-struct nft_rule_trans {
+struct nft_trans {
+       struct rcu_head                 rcu_head;
        struct list_head                list;
+       int                             msg_type;
        struct nft_ctx                  ctx;
+       char                            data[0];
+};
+
+struct nft_trans_rule {
        struct nft_rule                 *rule;
 };
 
+#define nft_trans_rule(trans)  \
+       (((struct nft_trans_rule *)trans->data)->rule)
+
+struct nft_trans_set {
+       struct nft_set  *set;
+       u32             set_id;
+};
+
+#define nft_trans_set(trans)   \
+       (((struct nft_trans_set *)trans->data)->set)
+#define nft_trans_set_id(trans)        \
+       (((struct nft_trans_set *)trans->data)->set_id)
+
+struct nft_trans_chain {
+       bool            update;
+       char            name[NFT_CHAIN_MAXNAMELEN];
+       struct nft_stats __percpu *stats;
+       u8              policy;
+};
+
+#define nft_trans_chain_update(trans)  \
+       (((struct nft_trans_chain *)trans->data)->update)
+#define nft_trans_chain_name(trans)    \
+       (((struct nft_trans_chain *)trans->data)->name)
+#define nft_trans_chain_stats(trans)   \
+       (((struct nft_trans_chain *)trans->data)->stats)
+#define nft_trans_chain_policy(trans)  \
+       (((struct nft_trans_chain *)trans->data)->policy)
+
+struct nft_trans_table {
+       bool            update;
+       bool            enable;
+};
+
+#define nft_trans_table_update(trans)  \
+       (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_table_enable(trans)  \
+       (((struct nft_trans_table *)trans->data)->enable)
+
+struct nft_trans_elem {
+       struct nft_set          *set;
+       struct nft_set_elem     elem;
+};
+
+#define nft_trans_elem_set(trans)      \
+       (((struct nft_trans_elem *)trans->data)->set)
+#define nft_trans_elem(trans)  \
+       (((struct nft_trans_elem *)trans->data)->elem)
+
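
The accessor macros above interpret trans->data according to msg_type, so a transaction is allocated with the type-specific payload appended. A sketch, modeled on (but not claiming to be) the core's allocator:

#include <linux/slab.h>
#include <net/netfilter/nf_tables.h>

static struct nft_trans *example_trans_alloc(struct nft_ctx *ctx,
					     int msg_type, u32 size)
{
	struct nft_trans *trans;

	trans = kzalloc(sizeof(*trans) + size, GFP_KERNEL);
	if (trans == NULL)
		return NULL;

	trans->msg_type = msg_type;
	trans->ctx = *ctx;	/* the context is copied into the transaction */

	return trans;
}

For an NFT_MSG_NEWRULE transaction, size would be sizeof(struct nft_trans_rule) and the rule pointer would then be stored through nft_trans_rule(trans).
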
 static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
 {
        return (struct nft_expr *)&rule->data[0];
@@ -385,6 +492,7 @@ static inline void *nft_userdata(const struct nft_rule *rule)
 
 enum nft_chain_flags {
        NFT_BASE_CHAIN                  = 0x1,
+       NFT_CHAIN_INACTIVE              = 0x2,
 };
 
 /**
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
new file mode 100644 (file)
index 0000000..0ee47c3
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _NFT_META_H_
+#define _NFT_META_H_
+
+struct nft_meta {
+       enum nft_meta_keys      key:8;
+       union {
+               enum nft_registers      dreg:8;
+               enum nft_registers      sreg:8;
+       };
+};
+
+extern const struct nla_policy nft_meta_policy[];
+
+int nft_meta_get_init(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nlattr * const tb[]);
+
+int nft_meta_set_init(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nlattr * const tb[]);
+
+int nft_meta_get_dump(struct sk_buff *skb,
+                     const struct nft_expr *expr);
+
+int nft_meta_set_dump(struct sk_buff *skb,
+                     const struct nft_expr *expr);
+
+void nft_meta_get_eval(const struct nft_expr *expr,
+                      struct nft_data data[NFT_REG_MAX + 1],
+                      const struct nft_pktinfo *pkt);
+
+void nft_meta_set_eval(const struct nft_expr *expr,
+                      struct nft_data data[NFT_REG_MAX + 1],
+                      const struct nft_pktinfo *pkt);
+
+#endif
index 80f500a29498e1fc9b8892e5c66be6bd02362eaa..aec5e12f9f19f1a6c506e47f60cc3056d7ce2a3d 100644 (file)
@@ -20,6 +20,11 @@ struct local_ports {
        int             range[2];
 };
 
+struct ping_group_range {
+       seqlock_t       lock;
+       kgid_t          range[2];
+};
+
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *forw_hdr;
@@ -66,16 +71,23 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
-       struct local_ports sysctl_local_ports;
+       struct local_ports ip_local_ports;
 
        int sysctl_tcp_ecn;
        int sysctl_ip_no_pmtu_disc;
        int sysctl_ip_fwd_use_pmtu;
 
-       kgid_t sysctl_ping_group_range[2];
+       int sysctl_fwmark_reflect;
+       int sysctl_tcp_fwmark_accept;
+
+       struct ping_group_range ping_group_range;
 
        atomic_t dev_addr_genid;
 
+#ifdef CONFIG_SYSCTL
+       unsigned long *sysctl_local_reserved_ports;
+#endif
+
 #ifdef CONFIG_IP_MROUTE
 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        struct mr_table         *mrt;
index 21edaf1f79161535af7ae1ae3ae7535ff1a236e3..19d3446e59d2555639e9553b1958d7354792af1e 100644 (file)
@@ -30,6 +30,7 @@ struct netns_sysctl_ipv6 {
        int flowlabel_consistency;
        int icmpv6_time;
        int anycast_src_echo_reply;
+       int fwmark_reflect;
 };
 
 struct netns_ipv6 {
index 7655cfe27c3465f726dc0d1eed26040e61b6b366..bdf55c3b7a19ee1756c528ba067d79cef7e89d57 100644 (file)
@@ -36,6 +36,7 @@ enum {
        NFC_DIGITAL_RF_TECH_212F,
        NFC_DIGITAL_RF_TECH_424F,
        NFC_DIGITAL_RF_TECH_ISO15693,
+       NFC_DIGITAL_RF_TECH_106B,
 
        NFC_DIGITAL_RF_TECH_LAST,
 };
@@ -62,6 +63,9 @@ enum {
        NFC_DIGITAL_FRAMING_ISO15693_INVENTORY,
        NFC_DIGITAL_FRAMING_ISO15693_T5T,
 
+       NFC_DIGITAL_FRAMING_NFCB,
+       NFC_DIGITAL_FRAMING_NFCB_T4T,
+
        NFC_DIGITAL_FRAMING_LAST,
 };
 
index 03c4650b548ca7b01c24ba0f95317139949c7cdb..61286db54388b9d03b6a49b494d1492e7cfd8e5c 100644 (file)
@@ -27,6 +27,7 @@ struct nfc_hci_dev;
 struct nfc_hci_ops {
        int (*open) (struct nfc_hci_dev *hdev);
        void (*close) (struct nfc_hci_dev *hdev);
+       int (*load_session) (struct nfc_hci_dev *hdev);
        int (*hci_ready) (struct nfc_hci_dev *hdev);
        /*
         * xmit must always send the complete buffer before
index 2e8b40c16274f73d1ef98d6ee17fa17da172354e..6c583e244de2198d41effbf8a21086296a2c7e40 100644 (file)
@@ -264,4 +264,7 @@ int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
 int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
 struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
 
+void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
+                         u8 payload_type, u8 direction);
+
 #endif /* __NET_NFC_H */
index a2441fb1428f3f2e181df63319ca2b3fdc15dc4e..6da46dcf1049789f492cefd9472d0df84d4db91d 100644 (file)
@@ -136,7 +136,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
 
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
                      struct nlattr **tb, struct nlattr *rate_tlv,
-                     struct tcf_exts *exts);
+                     struct tcf_exts *exts, bool ovr);
 void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
 void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
                     struct tcf_exts *src);
index a7e986b081474a51da5011fc19b300d358b0b871..d6fcc1fcdb5b0928a0bd89279e11819a9cc3169d 100644 (file)
@@ -86,7 +86,6 @@ struct inet_protosw {
        struct proto     *prot;
        const struct proto_ops *ops;
   
-       char             no_check;   /* checksum on rcv/xmit/none? */
        unsigned char    flags;      /* See INET_PROTOSW_* below.  */
 };
 #define INET_PROTOSW_REUSE 0x01             /* Are ports automatically reusable? */
index 75fc1f5a948d685fcfff12e04cc6b85e194cd541..259992444e80ae0b88eaec4ff345d23fd8f81c75 100644 (file)
@@ -131,6 +131,11 @@ struct regulatory_request {
  *     all country IE information processed by the regulatory core. This will
  *     override %REGULATORY_COUNTRY_IE_FOLLOW_POWER as all country IEs will
  *     be ignored.
+ * @REGULATORY_ENABLE_RELAX_NO_IR: for devices that wish to allow the
+ *      NO_IR relaxation, which enables transmissions on channels on which
+ *      otherwise initiating radiation is not allowed. This will enable the
+ *      relaxations allowed under the CFG80211_REG_RELAX_NO_IR configuration
+ *      option.
  */
 enum ieee80211_regulatory_flags {
        REGULATORY_CUSTOM_REG                   = BIT(0),
@@ -138,6 +143,7 @@ enum ieee80211_regulatory_flags {
        REGULATORY_DISABLE_BEACON_HINTS         = BIT(2),
        REGULATORY_COUNTRY_IE_FOLLOW_POWER      = BIT(3),
        REGULATORY_COUNTRY_IE_IGNORE            = BIT(4),
+       REGULATORY_ENABLE_RELAX_NO_IR           = BIT(5),
 };
 
 struct ieee80211_freq_range {
index d062f81c692f1ee3e61ba1a06bd27e3a9edb761a..624f9857c83e3d7f2987ef95ecc410ad6f8c744f 100644 (file)
@@ -199,7 +199,7 @@ struct tcf_proto_ops {
        int                     (*change)(struct net *net, struct sk_buff *,
                                        struct tcf_proto*, unsigned long,
                                        u32 handle, struct nlattr **,
-                                       unsigned long *);
+                                       unsigned long *, bool);
        int                     (*delete)(struct tcf_proto*, unsigned long);
        void                    (*walk)(struct tcf_proto*, struct tcf_walker *arg);
 
index 71596261fa997ec7014b77f0bbee9b47b6146493..f1f27fdbb0d5738d6f3f3fbb93a79d240a1129b5 100644 (file)
@@ -116,51 +116,49 @@ struct linux_xfrm_mib {
        unsigned long   mibs[LINUX_MIB_XFRMMAX];
 };
 
-#define SNMP_ARRAY_SZ 1
-
 #define DEFINE_SNMP_STAT(type, name)   \
-       __typeof__(type) __percpu *name[SNMP_ARRAY_SZ]
+       __typeof__(type) __percpu *name
 #define DEFINE_SNMP_STAT_ATOMIC(type, name)    \
        __typeof__(type) *name
 #define DECLARE_SNMP_STAT(type, name)  \
-       extern __typeof__(type) __percpu *name[SNMP_ARRAY_SZ]
+       extern __typeof__(type) __percpu *name
 
 #define SNMP_INC_STATS_BH(mib, field)  \
-                       __this_cpu_inc(mib[0]->mibs[field])
+                       __this_cpu_inc(mib->mibs[field])
 
 #define SNMP_INC_STATS_USER(mib, field)        \
-                       this_cpu_inc(mib[0]->mibs[field])
+                       this_cpu_inc(mib->mibs[field])
 
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
                        atomic_long_inc(&mib->mibs[field])
 
 #define SNMP_INC_STATS(mib, field)     \
-                       this_cpu_inc(mib[0]->mibs[field])
+                       this_cpu_inc(mib->mibs[field])
 
 #define SNMP_DEC_STATS(mib, field)     \
-                       this_cpu_dec(mib[0]->mibs[field])
+                       this_cpu_dec(mib->mibs[field])
 
 #define SNMP_ADD_STATS_BH(mib, field, addend)  \
-                       __this_cpu_add(mib[0]->mibs[field], addend)
+                       __this_cpu_add(mib->mibs[field], addend)
 
 #define SNMP_ADD_STATS_USER(mib, field, addend)        \
-                       this_cpu_add(mib[0]->mibs[field], addend)
+                       this_cpu_add(mib->mibs[field], addend)
 
 #define SNMP_ADD_STATS(mib, field, addend)     \
-                       this_cpu_add(mib[0]->mibs[field], addend)
+                       this_cpu_add(mib->mibs[field], addend)
 /*
- * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
+ * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr"
  * to make @ptr a non-percpu pointer.
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)      \
        do { \
-               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               __typeof__(*mib->mibs) *ptr = mib->mibs;        \
                this_cpu_inc(ptr[basefield##PKTS]);             \
                this_cpu_add(ptr[basefield##OCTETS], addend);   \
        } while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)   \
        do { \
-               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               __typeof__(*mib->mibs) *ptr = mib->mibs;        \
                __this_cpu_inc(ptr[basefield##PKTS]);           \
                __this_cpu_add(ptr[basefield##OCTETS], addend); \
        } while (0)
@@ -170,7 +168,7 @@ struct linux_xfrm_mib {
 
 #define SNMP_ADD_STATS64_BH(mib, field, addend)                        \
        do {                                                            \
-               __typeof__(*mib[0]) *ptr = __this_cpu_ptr((mib)[0]);    \
+               __typeof__(*mib) *ptr = __this_cpu_ptr(mib);            \
                u64_stats_update_begin(&ptr->syncp);                    \
                ptr->mibs[field] += addend;                             \
                u64_stats_update_end(&ptr->syncp);                      \
@@ -191,8 +189,8 @@ struct linux_xfrm_mib {
 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
 #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)                 \
        do {                                                            \
-               __typeof__(*mib[0]) *ptr;                               \
-               ptr = __this_cpu_ptr((mib)[0]);                         \
+               __typeof__(*mib) *ptr;                                  \
+               ptr = __this_cpu_ptr(mib);                              \
                u64_stats_update_begin(&ptr->syncp);                    \
                ptr->mibs[basefield##PKTS]++;                           \
                ptr->mibs[basefield##OCTETS] += addend;                 \
index 8338a14e48053d853a57af674f7edca1e085fdc5..07b7fcd60d808a33f9e6fff208c07fe412da8c7e 100644 (file)
@@ -243,7 +243,8 @@ struct cg_proto;
   *    @sk_sndbuf: size of send buffer in bytes
   *    @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
   *               %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
-  *    @sk_no_check: %SO_NO_CHECK setting, whether or not checkup packets
+  *    @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
+  *    @sk_no_check_rx: allow zero checksum in RX packets
   *    @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
   *    @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
   *    @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
@@ -371,7 +372,8 @@ struct sock {
        struct sk_buff_head     sk_write_queue;
        kmemcheck_bitfield_begin(flags);
        unsigned int            sk_shutdown  : 2,
-                               sk_no_check  : 2,
+                               sk_no_check_tx : 1,
+                               sk_no_check_rx : 1,
                                sk_userlocks : 4,
                                sk_protocol  : 8,
                                sk_type      : 16;
@@ -2255,6 +2257,11 @@ int sock_get_timestampns(struct sock *, struct timespec __user *);
 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
                       int type);
 
+bool sk_ns_capable(const struct sock *sk,
+                  struct user_namespace *user_ns, int cap);
+bool sk_capable(const struct sock *sk, int cap);
+bool sk_net_capable(const struct sock *sk, int cap);
+
 /*
  *     Enable debug/info messages
  */
index 163d2b467d78982be34b278411e306a49444fc31..e80abe4486cbd252eb556997978fc804d389db7e 100644 (file)
@@ -220,8 +220,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define        TFO_SERVER_ENABLE       2
 #define        TFO_CLIENT_NO_COOKIE    4       /* Data in SYN w/o cookie option */
 
-/* Process SYN data but skip cookie validation */
-#define        TFO_SERVER_COOKIE_NOT_CHKED     0x100
 /* Accept SYN data w/o any cookie option */
 #define        TFO_SERVER_COOKIE_NOT_REQD      0x200
 
@@ -230,10 +228,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
  */
 #define        TFO_SERVER_WO_SOCKOPT1  0x400
 #define        TFO_SERVER_WO_SOCKOPT2  0x800
-/* Always create TFO child sockets on a TFO listener even when
- * cookie/data not present. (For testing purpose!)
- */
-#define        TFO_SERVER_ALWAYS       0x1000
 
 extern struct inet_timewait_death_row tcp_death_row;
 
@@ -796,7 +790,7 @@ struct tcp_congestion_ops {
        /* return slow start threshold (required) */
        u32 (*ssthresh)(struct sock *sk);
        /* do new cwnd calculation (required) */
-       void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
+       void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
        /* call before changing ca_state (optional) */
        void (*set_state)(struct sock *sk, u8 new_state);
        /* call when cwnd event occurs (optional) */
@@ -828,7 +822,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 u32 tcp_reno_ssthresh(struct sock *sk);
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -974,7 +968,30 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
        return tp->snd_una + tp->snd_wnd;
 }
-bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+
+/* We follow the spirit of RFC2861 to validate cwnd but implement a more
+ * flexible approach. The RFC suggests cwnd should not be raised unless
+ * it was fully used previously. And that's exactly what we do in
+ * congestion avoidance mode. But in slow start we allow cwnd to grow
+ * as long as the application has used half the cwnd.
+ * Example :
+ *    cwnd is 10 (IW10), but application sends 9 frames.
+ *    We allow cwnd to reach 18 when all frames are ACKed.
+ * This check is safe because it's as aggressive as slow start which already
+ * risks 100% overshoot. The advantage is that we discourage applications
+ * from sending more filler packets or data to artificially blow up the cwnd
+ * usage, and allow an application-limited process to probe bw more
+ * aggressively.
+ */
+static inline bool tcp_is_cwnd_limited(const struct sock *sk)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+
+       /* If in slow start, ensure cwnd grows to twice what was ACKed. */
+       if (tp->snd_cwnd <= tp->snd_ssthresh)
+               return tp->snd_cwnd < 2 * tp->max_packets_out;
+
+       return tp->is_cwnd_limited;
+}
 
 static inline void tcp_check_probe_timer(struct sock *sk)
 {
@@ -1102,6 +1119,9 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
 }
 
+extern void tcp_openreq_init_rwin(struct request_sock *req,
+                                 struct sock *sk, struct dst_entry *dst);
+
 void tcp_enter_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1311,8 +1331,10 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
 
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
-                            struct tcp_fastopen_cookie *foc);
+bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+                     struct request_sock *req,
+                     struct tcp_fastopen_cookie *foc,
+                     struct dst_entry *dst);
 void tcp_fastopen_init_key_once(bool publish);
 #define TCP_FASTOPEN_KEY_LENGTH 16
 
diff --git a/include/net/tso.h b/include/net/tso.h
new file mode 100644 (file)
index 0000000..47e5444
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _TSO_H
+#define _TSO_H
+
+#include <net/ip.h>
+
+struct tso_t {
+       int next_frag_idx;
+       void *data;
+       size_t size;
+       u16 ip_id;
+       u32 tcp_seq;
+};
+
+int tso_count_descs(struct sk_buff *skb);
+void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+                  int size, bool is_last);
+void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
+void tso_start(struct sk_buff *skb, struct tso_t *tso);
+
+#endif /* _TSO_H */
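
The helpers above factor out the per-segment header rewriting so NIC drivers without hardware TSO can still segment in their xmit path. A condensed, hedged sketch of the intended loop; the mydrv_* queue functions and the on-stack header buffer are placeholders (a real driver would use DMA-able memory and reserve tso_count_descs(skb) descriptors up front):

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/tso.h>

struct mydrv_txq;					/* placeholder queue type */
static void mydrv_queue_hdr(struct mydrv_txq *txq, void *hdr, int len) { }
static void mydrv_queue_frag(struct mydrv_txq *txq, void *data, int size) { }

static void example_xmit_tso(struct mydrv_txq *txq, struct sk_buff *skb)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;
	struct tso_t tso;

	tso_start(skb, &tso);

	while (total_len > 0) {
		int data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		char hdr[128];

		total_len -= data_left;

		/* build this segment's Ethernet/IP/TCP header */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		mydrv_queue_hdr(txq, hdr, hdr_len);

		/* then queue the payload, possibly spanning frags */
		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			mydrv_queue_frag(txq, tso.data, size);
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
}
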
index a24f0f3e107f67c71b256cd4fb85d04b2389fb7a..5eb86874bcd68bc5b5d1c0069db35411b0573b85 100644 (file)
@@ -95,15 +95,6 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
        return &table->hash2[hash & table->mask];
 }
 
-/* Note: this must match 'valbool' in sock_setsockopt */
-#define UDP_CSUM_NOXMIT                1
-
-/* Used by SunRPC/xprt layer. */
-#define UDP_CSUM_NORCV         2
-
-/* Default, as per the RFC, is to always do csums. */
-#define UDP_CSUM_DEFAULT       0
-
 extern struct proto udp_prot;
 
 extern atomic_long_t udp_memory_allocated;
index 5deef1ae78c964608d629d29d5628dfef52fdf6e..7bb4084b1bd0c036250e002c59ef35613db3aad7 100644 (file)
@@ -33,7 +33,7 @@ void vxlan_sock_release(struct vxlan_sock *vs);
 int vxlan_xmit_skb(struct vxlan_sock *vs,
                   struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port, __be32 vni);
+                  __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
 
 __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
 
index 116e9c7e19cbbe00272bbf4adc6de7681b0c27ee..721e9c3b11bddb208927852d223a36f548a3cade 100644 (file)
@@ -691,13 +691,6 @@ struct xfrm_spi_skb_cb {
 
 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
 
-/* Audit Information */
-struct xfrm_audit {
-       u32     secid;
-       kuid_t  loginuid;
-       unsigned int sessionid;
-};
-
 #ifdef CONFIG_AUDITSYSCALL
 static inline struct audit_buffer *xfrm_audit_start(const char *op)
 {
@@ -713,30 +706,24 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
        return audit_buf;
 }
 
-static inline void xfrm_audit_helper_usrinfo(kuid_t auid, unsigned int ses, u32 secid,
+static inline void xfrm_audit_helper_usrinfo(bool task_valid,
                                             struct audit_buffer *audit_buf)
 {
-       char *secctx;
-       u32 secctx_len;
-
-       audit_log_format(audit_buf, " auid=%u ses=%u",
-                        from_kuid(&init_user_ns, auid), ses);
-       if (secid != 0 &&
-           security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
-               audit_log_format(audit_buf, " subj=%s", secctx);
-               security_release_secctx(secctx, secctx_len);
-       } else
-               audit_log_task_context(audit_buf);
-}
-
-void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, kuid_t auid,
-                          unsigned int ses, u32 secid);
-void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, kuid_t auid,
-                             unsigned int ses, u32 secid);
-void xfrm_audit_state_add(struct xfrm_state *x, int result, kuid_t auid,
-                         unsigned int ses, u32 secid);
-void xfrm_audit_state_delete(struct xfrm_state *x, int result, kuid_t auid,
-                            unsigned int ses, u32 secid);
+       const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
+                                           audit_get_loginuid(current) :
+                                           INVALID_UID);
+       const unsigned int ses = task_valid ? audit_get_sessionid(current) :
+               (unsigned int) -1;
+
+       audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
+       audit_log_task_context(audit_buf);
+}
+
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
+void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
+                             bool task_valid);
+void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
+void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
                                      struct sk_buff *skb);
 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
@@ -749,22 +736,22 @@ void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
 #else
 
 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                                 kuid_t auid, unsigned int ses, u32 secid)
+                                        bool task_valid)
 {
 }
 
 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                                 kuid_t auid, unsigned int ses, u32 secid)
+                                           bool task_valid)
 {
 }
 
 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                                kuid_t auid, unsigned int ses, u32 secid)
+                                       bool task_valid)
 {
 }
 
 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                                   kuid_t auid, unsigned int ses, u32 secid)
+                                          bool task_valid)
 {
 }
 
@@ -1508,7 +1495,7 @@ struct xfrmk_spdinfo {
 
 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
 int xfrm_state_delete(struct xfrm_state *x);
-int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
@@ -1603,7 +1590,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
                                          int *err);
 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
                                     u32 id, int delete, int *err);
-int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
+int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
 u32 xfrm_get_acqseq(void);
 int verify_spi_info(u8 proto, u32 min, u32 max);
 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
index 010ea89eeb0e407a85a052e6b8905dedb6ac5991..6a1a0245474feee8f32fe040e56f0044f53a20f4 100644 (file)
@@ -16,15 +16,6 @@ struct mpage_da_data;
 struct ext4_map_blocks;
 struct extent_status;
 
-/* shim until we merge in the xfs_collapse_range branch */
-#ifndef FALLOC_FL_COLLAPSE_RANGE
-#define FALLOC_FL_COLLAPSE_RANGE       0x08
-#endif
-
-#ifndef FALLOC_FL_ZERO_RANGE
-#define FALLOC_FL_ZERO_RANGE           0x10
-#endif
-
 #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
 
 #define show_mballoc_flags(flags) __print_flags(flags, "|",    \
index 11fd51b413de25a6a2415c1724dee458d3314ddc..7c5cbfe3fc49d6761bccc004c9475b16fc2a3dbe 100644 (file)
@@ -25,7 +25,7 @@ struct module;
        { (1UL << TAINT_OOT_MODULE),            "O" },          \
        { (1UL << TAINT_FORCED_MODULE),         "F" },          \
        { (1UL << TAINT_CRAP),                  "C" },          \
-       { (1UL << TAINT_UNSIGNED_MODULE),       "X" })
+       { (1UL << TAINT_UNSIGNED_MODULE),       "E" })
 
 TRACE_EVENT(module_load,
 
@@ -80,7 +80,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
 
        TP_fast_assign(
                __entry->ip     = ip;
-               __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
+               __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
                __assign_str(name, mod->name);
        ),
 
index a9b13f8b3595107579ca6ea20421d53a9dd15017..7543b3e51331fcb38574e3f309713b3a6a2d31c0 100644 (file)
 #endif
 
 /*
- * fd "private" POSIX locks.
+ * Open File Description Locks
  *
- * Usually POSIX locks held by a process are released on *any* close and are
+ * Usually record locks held by a process are released on *any* close and are
  * not inherited across a fork().
  *
- * These cmd values will set locks that conflict with normal POSIX locks, but
- * are "owned" by the opened file, not the process. This means that they are
- * inherited across fork() like BSD (flock) locks, and they are only released
- * automatically when the last reference to the the open file against which
- * they were acquired is put.
+ * These cmd values will set locks that conflict with process-associated
+ * record  locks, but are "owned" by the open file description, not the
+ * process. This means that they are inherited across fork() like BSD (flock)
+ * locks, and they are only released automatically when the last reference to
+ * the open file against which they were acquired is put.
  */
-#define F_GETLKP       36
-#define F_SETLKP       37
-#define F_SETLKPW      38
+#define F_OFD_GETLK    36
+#define F_OFD_SETLK    37
+#define F_OFD_SETLKW   38
 
 #define F_OWNER_TID    0
 #define F_OWNER_PID    1
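For illustration only (not part of this merge), a minimal userspace sketch of the new commands, assuming a libc that exposes F_OFD_SETLKW under _GNU_SOURCE; on older headers the numeric values above (36-38) can be used instead:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* exclusive lock */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 = to end of file */
		.l_pid    = 0,		/* must be 0 for OFD locks */
	};
	int fd = open("/tmp/example.lock", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || fcntl(fd, F_OFD_SETLKW, &fl) == -1) {
		perror("F_OFD_SETLKW");
		return 1;
	}
	/* The lock belongs to the open file description: it survives fork()
	 * and is released only when the last descriptor sharing it is closed. */
	close(fd);
	return 0;
}
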
index f863428796d532a42df39e45527da2f8c6eb9c88..c6d10af50123e1c5810aff634400e8c273949221 100644 (file)
 # define RLIM_INFINITY         (~0UL)
 #endif
 
-/*
- * RLIMIT_STACK default maximum - some architectures override it:
- */
-#ifndef _STK_LIM_MAX
-# define _STK_LIM_MAX          RLIM_INFINITY
-#endif
-
 
 #endif /* _UAPI_ASM_GENERIC_RESOURCE_H */
index 6db66783d268d9a286a86b836c9277bd89f5ac13..3336406080874bf2bd06cb1f0563aea6d8e56650 100644 (file)
@@ -697,9 +697,11 @@ __SYSCALL(__NR_finit_module, sys_finit_module)
 __SYSCALL(__NR_sched_setattr, sys_sched_setattr)
 #define __NR_sched_getattr 275
 __SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+#define __NR_renameat2 276
+__SYSCALL(__NR_renameat2, sys_renameat2)
 
 #undef __NR_syscalls
-#define __NR_syscalls 276
+#define __NR_syscalls 277
 
 /*
  * All syscalls below here should go away really,
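The generic syscall table gains renameat2. An illustrative userspace sketch calling it directly via syscall(2), assuming SYS_renameat2 is defined by the installed headers (otherwise substitute the architecture's __NR_renameat2 value); RENAME_NOREPLACE comes from linux/fs.h:

#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE (1 << 0)
#endif

int main(void)
{
	/* Fails with EEXIST instead of silently replacing "new.txt". */
	if (syscall(SYS_renameat2, AT_FDCWD, "old.txt",
		    AT_FDCWD, "new.txt", RENAME_NOREPLACE) < 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}
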
index dfa4c860ccefd1af49a3fff7a0a0328db295c647..b21ea454bd33c7d3da6024657ce54eec87dd5154 100644 (file)
@@ -331,9 +331,17 @@ enum {
 #define AUDIT_FAIL_PRINTK      1
 #define AUDIT_FAIL_PANIC       2
 
+/*
+ * These bits disambiguate different calling conventions that share an
+ * ELF machine type, bitness, and endianness
+ */
+#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
+#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
+
 /* distinguish syscall tables */
 #define __AUDIT_ARCH_64BIT 0x80000000
 #define __AUDIT_ARCH_LE           0x40000000
+
 #define AUDIT_ARCH_ALPHA       (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARM         (EM_ARM|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARMEB       (EM_ARM)
@@ -346,7 +354,11 @@ enum {
 #define AUDIT_ARCH_MIPS                (EM_MIPS)
 #define AUDIT_ARCH_MIPSEL      (EM_MIPS|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_MIPS64      (EM_MIPS|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_MIPS64N32   (EM_MIPS|__AUDIT_ARCH_64BIT|\
+                                __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_MIPSEL64    (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\
+                                __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_OPENRISC    (EM_OPENRISC)
 #define AUDIT_ARCH_PARISC      (EM_PARISC)
 #define AUDIT_ARCH_PARISC64    (EM_PARISC|__AUDIT_ARCH_64BIT)
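A hypothetical consumer-side helper (e.g. in a seccomp filter or audit userspace) showing what the new convention bits are for; the constants mirror the definitions added above:

#include <stdbool.h>
#include <stdint.h>

#define __AUDIT_ARCH_CONVENTION_MASK       0x30000000
#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000

/* MIPS64 n32 and n64 share EM_MIPS plus the 64BIT/LE bits; only the
 * convention field tells the two syscall tables apart. */
static inline bool audit_arch_is_mips_n32(uint32_t arch)
{
	return (arch & __AUDIT_ARCH_CONVENTION_MASK) ==
	       __AUDIT_ARCH_CONVENTION_MIPS64_N32;
}
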
index 5d9d1d1407180a9291c0f986945e3a34f2ccf51e..41892f720057df2cc23f96c7d44d6ab2808fdfdd 100644 (file)
@@ -42,8 +42,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_H
-#define CAN_H
+#ifndef _UAPI_CAN_H
+#define _UAPI_CAN_H
 
 #include <linux/types.h>
 #include <linux/socket.h>
@@ -191,4 +191,4 @@ struct can_filter {
 
 #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
 
-#endif /* CAN_H */
+#endif /* !_UAPI_CAN_H */
index 382251a1d21403acd817577d83c21f47d0389865..89ddb9dc9bdf7ca8bd191c9dedf7019f24573931 100644 (file)
@@ -41,8 +41,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_BCM_H
-#define CAN_BCM_H
+#ifndef _UAPI_CAN_BCM_H
+#define _UAPI_CAN_BCM_H
 
 #include <linux/types.h>
 #include <linux/can.h>
@@ -95,4 +95,4 @@ enum {
 #define TX_RESET_MULTI_IDX  0x0200
 #define RX_RTR_FRAME        0x0400
 
-#endif /* CAN_BCM_H */
+#endif /* !_UAPI_CAN_BCM_H */
index b632045453202074ada263866052bc2a806e85bc..c247446ab25a4e564a068ae97e154cf7972f0a1d 100644 (file)
@@ -41,8 +41,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_ERROR_H
-#define CAN_ERROR_H
+#ifndef _UAPI_CAN_ERROR_H
+#define _UAPI_CAN_ERROR_H
 
 #define CAN_ERR_DLC 8 /* dlc for error message frames */
 
 
 /* controller specific additional information / data[5..7] */
 
-#endif /* CAN_ERROR_H */
+#endif /* _UAPI_CAN_ERROR_H */
index 844c8964bdfee3a3f4a7308bf0fd832e82754a89..3e6184cf2f6dc5b2318f87f49db09ef119182683 100644 (file)
@@ -41,8 +41,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_GW_H
-#define CAN_GW_H
+#ifndef _UAPI_CAN_GW_H
+#define _UAPI_CAN_GW_H
 
 #include <linux/types.h>
 #include <linux/can.h>
@@ -200,4 +200,4 @@ enum {
  *         Beware of sending unpacked or aligned structs!
  */
 
-#endif
+#endif /* !_UAPI_CAN_GW_H */
index 7e2e1863db16e02fa15e1edc109adefda8236ba7..813d11f549774aadf5f3d87ba28be840e7f6e399 100644 (file)
@@ -15,8 +15,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef CAN_NETLINK_H
-#define CAN_NETLINK_H
+#ifndef _UAPI_CAN_NETLINK_H
+#define _UAPI_CAN_NETLINK_H
 
 #include <linux/types.h>
 
@@ -130,4 +130,4 @@ enum {
 
 #define IFLA_CAN_MAX   (__IFLA_CAN_MAX - 1)
 
-#endif /* CAN_NETLINK_H */
+#endif /* !_UAPI_CAN_NETLINK_H */
index c7d8c334e0ce26838c7cc611bd3ad1eb5a31a6c4..78ec76fd89a6ce4fe70161576ec0c01e5d6156d3 100644 (file)
@@ -42,8 +42,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_RAW_H
-#define CAN_RAW_H
+#ifndef _UAPI_CAN_RAW_H
+#define _UAPI_CAN_RAW_H
 
 #include <linux/can.h>
 
@@ -59,4 +59,4 @@ enum {
        CAN_RAW_FD_FRAMES,      /* allow CAN FD frames (default:off) */
 };
 
-#endif
+#endif /* !_UAPI_CAN_RAW_H */
index cf4750e1bb4971d03a0e870d323aa3f53c683ba9..40b5ca8a1b1f3028e5e03c5b3372f98e39437422 100644 (file)
  *
  * 7.23
  *  - add FUSE_WRITEBACK_CACHE
+ *  - add time_gran to fuse_init_out
+ *  - add reserved space to fuse_init_out
+ *  - add FATTR_CTIME
+ *  - add ctime and ctimensec to fuse_setattr_in
+ *  - add FUSE_RENAME2 request
  */
 
 #ifndef _LINUX_FUSE_H
@@ -191,6 +196,7 @@ struct fuse_file_lock {
 #define FATTR_ATIME_NOW        (1 << 7)
 #define FATTR_MTIME_NOW        (1 << 8)
 #define FATTR_LOCKOWNER        (1 << 9)
+#define FATTR_CTIME    (1 << 10)
 
 /**
  * Flags returned by the OPEN request
@@ -348,6 +354,7 @@ enum fuse_opcode {
        FUSE_BATCH_FORGET  = 42,
        FUSE_FALLOCATE     = 43,
        FUSE_READDIRPLUS   = 44,
+       FUSE_RENAME2       = 45,
 
        /* CUSE specific operations */
        CUSE_INIT          = 4096,
@@ -426,6 +433,12 @@ struct fuse_rename_in {
        uint64_t        newdir;
 };
 
+struct fuse_rename2_in {
+       uint64_t        newdir;
+       uint32_t        flags;
+       uint32_t        padding;
+};
+
 struct fuse_link_in {
        uint64_t        oldnodeid;
 };
@@ -438,10 +451,10 @@ struct fuse_setattr_in {
        uint64_t        lock_owner;
        uint64_t        atime;
        uint64_t        mtime;
-       uint64_t        unused2;
+       uint64_t        ctime;
        uint32_t        atimensec;
        uint32_t        mtimensec;
-       uint32_t        unused3;
+       uint32_t        ctimensec;
        uint32_t        mode;
        uint32_t        unused4;
        uint32_t        uid;
@@ -559,6 +572,9 @@ struct fuse_init_in {
        uint32_t        flags;
 };
 
+#define FUSE_COMPAT_INIT_OUT_SIZE 8
+#define FUSE_COMPAT_22_INIT_OUT_SIZE 24
+
 struct fuse_init_out {
        uint32_t        major;
        uint32_t        minor;
@@ -567,6 +583,8 @@ struct fuse_init_out {
        uint16_t        max_background;
        uint16_t        congestion_threshold;
        uint32_t        max_write;
+       uint32_t        time_gran;
+       uint32_t        unused[9];
 };
 
 #define CUSE_INIT_INFO_MAX 4096
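A server-side sketch (hypothetical helper, not from this diff) of how a userspace filesystem might validate the new FUSE_RENAME2 request; RENAME_NOREPLACE and RENAME_EXCHANGE are the linux/fs.h flag values:

#include <errno.h>
#include <stdint.h>

#define RENAME_NOREPLACE (1 << 0)
#define RENAME_EXCHANGE  (1 << 1)

struct fuse_rename2_in {
	uint64_t newdir;
	uint32_t flags;
	uint32_t padding;
};

static int handle_rename2(const struct fuse_rename2_in *arg)
{
	/* Refuse flags this filesystem does not implement rather than
	 * silently ignoring them. */
	if (arg->flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
		return -EINVAL;
	/* ... perform the rename with the requested semantics ... */
	return 0;
}
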
index 0d36909c3aefa27f26aaf60d1d356d462ae94d0e..1086cd9f675473b21f2c316eac16d591743af777 100644 (file)
  *  Define max and min legal sizes.  The frame sizes do not include
  *  4 byte FCS/CRC (frame check sequence).
  */
-#define FDDI_K_ALEN                    6               /* Octets in one FDDI address */
-#define FDDI_K_8022_HLEN       16              /* Total octets in 802.2 header */
-#define FDDI_K_SNAP_HLEN       21              /* Total octets in 802.2 SNAP header */
-#define FDDI_K_8022_ZLEN       16              /* Min octets in 802.2 frame sans FCS */
-#define FDDI_K_SNAP_ZLEN       21              /* Min octets in 802.2 SNAP frame sans FCS */
+#define FDDI_K_ALEN            6       /* Octets in one FDDI address */
+#define FDDI_K_8022_HLEN       16      /* Total octets in 802.2 header */
+#define FDDI_K_SNAP_HLEN       21      /* Total octets in 802.2 SNAP header */
+#define FDDI_K_8022_ZLEN       16      /* Min octets in 802.2 frame sans
+                                          FCS */
+#define FDDI_K_SNAP_ZLEN       21      /* Min octets in 802.2 SNAP frame sans
+                                          FCS */
 #define FDDI_K_8022_DLEN       4475    /* Max octets in 802.2 payload */
 #define FDDI_K_SNAP_DLEN       4470    /* Max octets in 802.2 SNAP payload */
-#define FDDI_K_LLC_ZLEN                13              /* Min octets in LLC frame sans FCS */
+#define FDDI_K_LLC_ZLEN                13      /* Min octets in LLC frame sans FCS */
 #define FDDI_K_LLC_LEN         4491    /* Max octets in LLC frame sans FCS */
+#define FDDI_K_OUI_LEN         3       /* Octets in OUI in 802.2 SNAP
+                                          header */
 
 /* Define FDDI Frame Control (FC) Byte values */
-#define FDDI_FC_K_VOID                                 0x00    
-#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80    
-#define FDDI_FC_K_RESTRICTED_TOKEN             0xC0    
-#define FDDI_FC_K_SMT_MIN                              0x41
-#define FDDI_FC_K_SMT_MAX                              0x4F
-#define FDDI_FC_K_MAC_MIN                              0xC1
-#define FDDI_FC_K_MAC_MAX                              0xCF    
-#define FDDI_FC_K_ASYNC_LLC_MIN                        0x50
-#define FDDI_FC_K_ASYNC_LLC_DEF                        0x54
-#define FDDI_FC_K_ASYNC_LLC_MAX                        0x5F
-#define FDDI_FC_K_SYNC_LLC_MIN                 0xD0
-#define FDDI_FC_K_SYNC_LLC_MAX                 0xD7
-#define FDDI_FC_K_IMPLEMENTOR_MIN              0x60
-#define FDDI_FC_K_IMPLEMENTOR_MAX              0x6F
-#define FDDI_FC_K_RESERVED_MIN                 0x70
-#define FDDI_FC_K_RESERVED_MAX                 0x7F
+#define FDDI_FC_K_VOID                 0x00
+#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80
+#define FDDI_FC_K_RESTRICTED_TOKEN     0xC0
+#define FDDI_FC_K_SMT_MIN              0x41
+#define FDDI_FC_K_SMT_MAX              0x4F
+#define FDDI_FC_K_MAC_MIN              0xC1
+#define FDDI_FC_K_MAC_MAX              0xCF
+#define FDDI_FC_K_ASYNC_LLC_MIN                0x50
+#define FDDI_FC_K_ASYNC_LLC_DEF                0x54
+#define FDDI_FC_K_ASYNC_LLC_MAX                0x5F
+#define FDDI_FC_K_SYNC_LLC_MIN         0xD0
+#define FDDI_FC_K_SYNC_LLC_MAX         0xD7
+#define FDDI_FC_K_IMPLEMENTOR_MIN      0x60
+#define FDDI_FC_K_IMPLEMENTOR_MAX      0x6F
+#define FDDI_FC_K_RESERVED_MIN         0x70
+#define FDDI_FC_K_RESERVED_MAX         0x7F
 
 /* Define LLC and SNAP constants */
-#define FDDI_EXTENDED_SAP      0xAA
+#define FDDI_EXTENDED_SAP              0xAA
 #define FDDI_UI_CMD                    0x03
 
 /* Define 802.2 Type 1 header */
 struct fddi_8022_1_hdr {
-       __u8    dsap;                                   /* destination service access point */
-       __u8    ssap;                                   /* source service access point */
-       __u8    ctrl;                                   /* control byte #1 */
+       __u8    dsap;                   /* destination service access point */
+       __u8    ssap;                   /* source service access point */
+       __u8    ctrl;                   /* control byte #1 */
 } __attribute__((packed));
 
 /* Define 802.2 Type 2 header */
 struct fddi_8022_2_hdr {
-       __u8    dsap;                                   /* destination service access point */
-       __u8    ssap;                                   /* source service access point */
-       __u8    ctrl_1;                                 /* control byte #1 */
-       __u8    ctrl_2;                                 /* control byte #2 */
+       __u8    dsap;                   /* destination service access point */
+       __u8    ssap;                   /* source service access point */
+       __u8    ctrl_1;                 /* control byte #1 */
+       __u8    ctrl_2;                 /* control byte #2 */
 } __attribute__((packed));
 
 /* Define 802.2 SNAP header */
-#define FDDI_K_OUI_LEN 3
 struct fddi_snap_hdr {
-       __u8    dsap;                                   /* always 0xAA */
-       __u8    ssap;                                   /* always 0xAA */
-       __u8    ctrl;                                   /* always 0x03 */
+       __u8    dsap;                   /* always 0xAA */
+       __u8    ssap;                   /* always 0xAA */
+       __u8    ctrl;                   /* always 0x03 */
        __u8    oui[FDDI_K_OUI_LEN];    /* organizational universal id */
-       __be16  ethertype;                              /* packet type ID field */
+       __be16  ethertype;              /* packet type ID field */
 } __attribute__((packed));
 
 /* Define FDDI LLC frame header */
 struct fddihdr {
-       __u8    fc;                                             /* frame control */
-       __u8    daddr[FDDI_K_ALEN];             /* destination address */
-       __u8    saddr[FDDI_K_ALEN];             /* source address */
-       union
-               {
-               struct fddi_8022_1_hdr          llc_8022_1;
-               struct fddi_8022_2_hdr          llc_8022_2;
-               struct fddi_snap_hdr            llc_snap;
-               } hdr;
+       __u8    fc;                     /* frame control */
+       __u8    daddr[FDDI_K_ALEN];     /* destination address */
+       __u8    saddr[FDDI_K_ALEN];     /* source address */
+       union {
+               struct fddi_8022_1_hdr  llc_8022_1;
+               struct fddi_8022_2_hdr  llc_8022_2;
+               struct fddi_snap_hdr    llc_snap;
+       } hdr;
 } __attribute__((packed));
 
 
index 9a7f7ace66494e144c55c08d5bbba085de3a2b7c..622e7910b8cc740c75aba15e375e3c9550736502 100644 (file)
@@ -399,9 +399,10 @@ enum {
        IFLA_VF_UNSPEC,
        IFLA_VF_MAC,            /* Hardware queue specific attributes */
        IFLA_VF_VLAN,
-       IFLA_VF_TX_RATE,        /* TX Bandwidth Allocation */
+       IFLA_VF_TX_RATE,        /* Max TX Bandwidth Allocation */
        IFLA_VF_SPOOFCHK,       /* Spoof Checking on/off switch */
        IFLA_VF_LINK_STATE,     /* link state enable/disable/auto switch */
+       IFLA_VF_RATE,           /* Min and Max TX Bandwidth Allocation */
        __IFLA_VF_MAX,
 };
 
@@ -423,6 +424,12 @@ struct ifla_vf_tx_rate {
        __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
 };
 
+struct ifla_vf_rate {
+       __u32 vf;
+       __u32 min_tx_rate; /* Min Bandwidth in Mbps */
+       __u32 max_tx_rate; /* Max Bandwidth in Mbps */
+};
+
 struct ifla_vf_spoofchk {
        __u32 vf;
        __u32 setting;
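Illustrative only: the payload a management tool would place in an IFLA_VF_RATE attribute of an RTM_SETLINK request (netlink framing omitted); following the IFLA_VF_TX_RATE convention above, 0 is assumed to mean "no limit":

#include <linux/if_link.h>	/* needs headers from a kernel with this change */

static const struct ifla_vf_rate example_rate = {
	.vf          = 0,	/* first virtual function */
	.min_tx_rate = 100,	/* guarantee 100 Mbps */
	.max_tx_rate = 1000,	/* cap at 1 Gbps */
};
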
index bd24470d24a2c7a3145af21094e03f67b7ce0761..f4849525519c7a301f4202f68a8eea9fb0e9d296 100644 (file)
@@ -164,6 +164,7 @@ struct input_keymap_entry {
 #define INPUT_PROP_DIRECT              0x01    /* direct input devices */
 #define INPUT_PROP_BUTTONPAD           0x02    /* has button(s) under pad */
 #define INPUT_PROP_SEMI_MT             0x03    /* touch rectangle only */
+#define INPUT_PROP_TOPBUTTONPAD                0x04    /* softbuttons at top of pad */
 
 #define INPUT_PROP_MAX                 0x1f
 #define INPUT_PROP_CNT                 (INPUT_PROP_MAX + 1)
index 8adb681603273c287f25be055c0258c8a2a73c65..21caa2631c209fdc0f0c7e21591e0507fc3e0b9a 100644 (file)
@@ -124,6 +124,8 @@ enum {
        L2TP_ATTR_STATS,                /* nested */
        L2TP_ATTR_IP6_SADDR,            /* struct in6_addr */
        L2TP_ATTR_IP6_DADDR,            /* struct in6_addr */
+       L2TP_ATTR_UDP_ZERO_CSUM6_TX,    /* u8 */
+       L2TP_ATTR_UDP_ZERO_CSUM6_RX,    /* u8 */
        __L2TP_ATTR_MAX,
 };
 
index c88ccbfda5f1b111a5fa43e1d1803bcccf95b521..2a88f645a5d821c47d7a53a05dc7a0e083a72342 100644 (file)
@@ -211,6 +211,29 @@ enum nft_set_flags {
        NFT_SET_MAP                     = 0x8,
 };
 
+/**
+ * enum nft_set_policies - set selection policy
+ *
+ * @NFT_SET_POL_PERFORMANCE: prefer high performance over low memory use
+ * @NFT_SET_POL_MEMORY: prefer low memory use over high performance
+ */
+enum nft_set_policies {
+       NFT_SET_POL_PERFORMANCE,
+       NFT_SET_POL_MEMORY,
+};
+
+/**
+ * enum nft_set_desc_attributes - set element description
+ *
+ * @NFTA_SET_DESC_SIZE: number of elements in set (NLA_U32)
+ */
+enum nft_set_desc_attributes {
+       NFTA_SET_DESC_UNSPEC,
+       NFTA_SET_DESC_SIZE,
+       __NFTA_SET_DESC_MAX
+};
+#define NFTA_SET_DESC_MAX      (__NFTA_SET_DESC_MAX - 1)
+
 /**
  * enum nft_set_attributes - nf_tables set netlink attributes
  *
@@ -221,6 +244,9 @@ enum nft_set_flags {
  * @NFTA_SET_KEY_LEN: key data length (NLA_U32)
  * @NFTA_SET_DATA_TYPE: mapping data type (NLA_U32)
  * @NFTA_SET_DATA_LEN: mapping data length (NLA_U32)
+ * @NFTA_SET_POLICY: selection policy (NLA_U32)
+ * @NFTA_SET_DESC: set description (NLA_NESTED)
+ * @NFTA_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
  */
 enum nft_set_attributes {
        NFTA_SET_UNSPEC,
@@ -231,6 +257,9 @@ enum nft_set_attributes {
        NFTA_SET_KEY_LEN,
        NFTA_SET_DATA_TYPE,
        NFTA_SET_DATA_LEN,
+       NFTA_SET_POLICY,
+       NFTA_SET_DESC,
+       NFTA_SET_ID,
        __NFTA_SET_MAX
 };
 #define NFTA_SET_MAX           (__NFTA_SET_MAX - 1)
@@ -266,12 +295,14 @@ enum nft_set_elem_attributes {
  * @NFTA_SET_ELEM_LIST_TABLE: table of the set to be changed (NLA_STRING)
  * @NFTA_SET_ELEM_LIST_SET: name of the set to be changed (NLA_STRING)
  * @NFTA_SET_ELEM_LIST_ELEMENTS: list of set elements (NLA_NESTED: nft_set_elem_attributes)
+ * @NFTA_SET_ELEM_LIST_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
  */
 enum nft_set_elem_list_attributes {
        NFTA_SET_ELEM_LIST_UNSPEC,
        NFTA_SET_ELEM_LIST_TABLE,
        NFTA_SET_ELEM_LIST_SET,
        NFTA_SET_ELEM_LIST_ELEMENTS,
+       NFTA_SET_ELEM_LIST_SET_ID,
        __NFTA_SET_ELEM_LIST_MAX
 };
 #define NFTA_SET_ELEM_LIST_MAX (__NFTA_SET_ELEM_LIST_MAX - 1)
@@ -457,12 +488,14 @@ enum nft_cmp_attributes {
  * @NFTA_LOOKUP_SET: name of the set where to look for (NLA_STRING)
  * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers)
  * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_LOOKUP_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
  */
 enum nft_lookup_attributes {
        NFTA_LOOKUP_UNSPEC,
        NFTA_LOOKUP_SET,
        NFTA_LOOKUP_SREG,
        NFTA_LOOKUP_DREG,
+       NFTA_LOOKUP_SET_ID,
        __NFTA_LOOKUP_MAX
 };
 #define NFTA_LOOKUP_MAX                (__NFTA_LOOKUP_MAX - 1)
@@ -536,6 +569,8 @@ enum nft_exthdr_attributes {
  * @NFT_META_SECMARK: packet secmark (skb->secmark)
  * @NFT_META_NFPROTO: netfilter protocol
  * @NFT_META_L4PROTO: layer 4 protocol number
+ * @NFT_META_BRI_IIFNAME: packet input bridge interface name
+ * @NFT_META_BRI_OIFNAME: packet output bridge interface name
  */
 enum nft_meta_keys {
        NFT_META_LEN,
@@ -555,6 +590,8 @@ enum nft_meta_keys {
        NFT_META_SECMARK,
        NFT_META_NFPROTO,
        NFT_META_L4PROTO,
+       NFT_META_BRI_IIFNAME,
+       NFT_META_BRI_OIFNAME,
 };
 
 /**
index 9789dc95b6a8fb775a16e4fff1612101cae879c7..9b19b44619286616b04f1aa9f04d026878730744 100644 (file)
@@ -273,11 +273,19 @@ struct sockaddr_nfc_llcp {
  * First byte is the adapter index
  * Second byte contains flags
  *  - 0x01 - Direction (0=RX, 1=TX)
- *  - 0x02-0x80 - Reserved
+ *  - 0x02-0x04 - Payload type (000=LLCP, 001=NCI, 010=HCI, 011=Digital,
+ *                              100=Proprietary)
+ *  - 0x05-0x80 - Reserved
  **/
-#define NFC_LLCP_RAW_HEADER_SIZE       2
-#define NFC_LLCP_DIRECTION_RX          0x00
-#define NFC_LLCP_DIRECTION_TX          0x01
+#define NFC_RAW_HEADER_SIZE    2
+#define NFC_DIRECTION_RX               0x00
+#define NFC_DIRECTION_TX               0x01
+
+#define RAW_PAYLOAD_LLCP 0
+#define RAW_PAYLOAD_NCI        1
+#define RAW_PAYLOAD_HCI        2
+#define RAW_PAYLOAD_DIGITAL    3
+#define RAW_PAYLOAD_PROPRIETARY        4
 
 /* socket option names */
 #define NFC_LLCP_RW            0
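A hypothetical decoder for the two-byte raw-socket header described above, assuming (as the kernel's raw socket code does) that the payload type sits in the bits directly above the direction flag:

#include <stdint.h>

struct nfc_raw_hdr {
	uint8_t adapter_idx;	/* first header byte */
	uint8_t direction;	/* NFC_DIRECTION_RX / NFC_DIRECTION_TX */
	uint8_t payload;	/* RAW_PAYLOAD_LLCP ... RAW_PAYLOAD_PROPRIETARY */
};

static struct nfc_raw_hdr nfc_parse_raw_header(const uint8_t hdr[2])
{
	struct nfc_raw_hdr h = {
		.adapter_idx = hdr[0],
		.direction   = hdr[1] & 0x01,
		.payload     = (hdr[1] >> 1) & 0x07,	/* values 0..4 fit in 3 bits */
	};
	return h;
}
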
index 1ba9d626aa833db91c462560f27054b30e91939d..be9519b52bb10edef5e5be12ddd3ff2065d706ac 100644 (file)
  *     TX status event pertaining to the TX request.
  *     %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
  *     management frames at CCK rate or not in 2GHz band.
+ *     %NL80211_ATTR_CSA_C_OFFSETS_TX is an array of offsets to CSA
+ *     counters which will be updated to the current value. This attribute
+ *     is used during CSA period.
  * @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this
  *     command may be used with the corresponding cookie to cancel the wait
  *     time if it is known that it is no longer necessary.
@@ -1525,10 +1528,10 @@ enum nl80211_commands {
  *     operation).
  * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
  *     for the time while performing a channel switch.
- * @NL80211_ATTR_CSA_C_OFF_BEACON: Offset of the channel switch counter
- *     field in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
- * @NL80211_ATTR_CSA_C_OFF_PRESP: Offset of the channel switch counter
- *     field in the probe response (%NL80211_ATTR_PROBE_RESP).
+ * @NL80211_ATTR_CSA_C_OFF_BEACON: An array of offsets (u16) to the channel
+ *     switch counters in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
+ * @NL80211_ATTR_CSA_C_OFF_PRESP: An array of offsets (u16) to the channel
+ *     switch counters in the probe response (%NL80211_ATTR_PROBE_RESP).
  *
  * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
  *     As specified in the &enum nl80211_rxmgmt_flags.
@@ -1576,9 +1579,18 @@ enum nl80211_commands {
  *     advertise values that cannot always be met. In such cases, an attempt
  *     to add a new station entry with @NL80211_CMD_NEW_STATION may fail.
  *
+ * @NL80211_ATTR_CSA_C_OFFSETS_TX: An array of csa counter offsets (u16) which
+ *     should be updated when the frame is transmitted.
+ * @NL80211_ATTR_MAX_CSA_COUNTERS: U8 attribute used to advertise the maximum
+ *     supported number of csa counters.
+ *
  * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
  *     As specified in the &enum nl80211_tdls_peer_capability.
  *
+ * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface
+ *     creation then the new interface will be owned by the netlink socket
+ *     that created it and will be destroyed when the socket is closed
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1914,6 +1926,11 @@ enum nl80211_attrs {
 
        NL80211_ATTR_TDLS_PEER_CAPABILITY,
 
+       NL80211_ATTR_IFACE_SOCKET_OWNER,
+
+       NL80211_ATTR_CSA_C_OFFSETS_TX,
+       NL80211_ATTR_MAX_CSA_COUNTERS,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -2182,6 +2199,8 @@ enum nl80211_sta_bss_param {
  *     Contains a nested array of signal strength attributes (u8, dBm)
  * @NL80211_STA_INFO_CHAIN_SIGNAL_AVG: per-chain signal strength average
  *     Same format as NL80211_STA_INFO_CHAIN_SIGNAL.
+ * @NL80211_STA_INFO_EXPECTED_THROUGHPUT: expected throughput considering also the
+ *     802.11 header (u32, kbps)
  * @__NL80211_STA_INFO_AFTER_LAST: internal
  * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
@@ -2213,6 +2232,7 @@ enum nl80211_sta_info {
        NL80211_STA_INFO_TX_BYTES64,
        NL80211_STA_INFO_CHAIN_SIGNAL,
        NL80211_STA_INFO_CHAIN_SIGNAL_AVG,
+       NL80211_STA_INFO_EXPECTED_THROUGHPUT,
 
        /* keep last */
        __NL80211_STA_INFO_AFTER_LAST,
@@ -2336,9 +2356,34 @@ enum nl80211_band_attr {
  *     using this channel as the primary or any of the secondary channels
  *     isn't possible
  * @NL80211_FREQUENCY_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
+ * @NL80211_FREQUENCY_ATTR_INDOOR_ONLY: Only indoor use is permitted on this
+ *     channel. A channel that has the INDOOR_ONLY attribute can only be
+ *     used when there is a clear assessment that the device is operating in
+ *     indoor surroundings, i.e., it is connected to AC power (and not
+ *     through portable DC inverters) or is under the control of a master
+ *     that is acting as an AP and is connected to AC power.
+ * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
+ *     channel if it's connected concurrently to a BSS on the same channel on
+ *     the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
+ *     band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
+ *     channel that has the GO_CONCURRENT attribute set can be done when there
+ *     is a clear assessment that the device is operating under the guidance of
+ *     an authorized master, i.e., setting up a GO while the device is also
+ *     connected to an AP with DFS and radar detection on the UNII band (it is
+ *     up to user-space, i.e., wpa_supplicant to perform the required
+ *     verifications)
+ * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
+ *     on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
+ *     on this channel in current regulatory domain.
  * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
  *     currently defined
  * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
+ *
+ * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
+ * for more information on the FCC description of the relaxations allowed
+ * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
+ * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
  */
 enum nl80211_frequency_attr {
        __NL80211_FREQUENCY_ATTR_INVALID,
@@ -2355,6 +2400,10 @@ enum nl80211_frequency_attr {
        NL80211_FREQUENCY_ATTR_NO_80MHZ,
        NL80211_FREQUENCY_ATTR_NO_160MHZ,
        NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
+       NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
+       NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
+       NL80211_FREQUENCY_ATTR_NO_20MHZ,
+       NL80211_FREQUENCY_ATTR_NO_10MHZ,
 
        /* keep last */
        __NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -2573,10 +2622,13 @@ enum nl80211_dfs_regions {
  *     present has been registered with the wireless core that
  *     has listed NL80211_FEATURE_CELL_BASE_REG_HINTS as a
  *     supported feature.
+ * @NL80211_USER_REG_HINT_INDOOR: a user sent a hint indicating that the
+ *     platform is operating in an indoor environment.
  */
 enum nl80211_user_reg_hint_type {
        NL80211_USER_REG_HINT_USER      = 0,
        NL80211_USER_REG_HINT_CELL_BASE = 1,
+       NL80211_USER_REG_HINT_INDOOR    = 2,
 };
 
 /**
@@ -3650,6 +3702,8 @@ enum nl80211_iface_limit_attrs {
  *     different channels may be used within this group.
  * @NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS: u32 attribute containing the bitmap
  *     of supported channel widths for radar detection.
+ * @NL80211_IFACE_COMB_RADAR_DETECT_REGIONS: u32 attribute containing the bitmap
+ *     of supported regulatory regions for radar detection.
  * @NUM_NL80211_IFACE_COMB: number of attributes
  * @MAX_NL80211_IFACE_COMB: highest attribute number
  *
@@ -3683,6 +3737,7 @@ enum nl80211_if_combination_attrs {
        NL80211_IFACE_COMB_STA_AP_BI_MATCH,
        NL80211_IFACE_COMB_NUM_CHANNELS,
        NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
+       NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
 
        /* keep last */
        NUM_NL80211_IFACE_COMB,
@@ -3856,6 +3911,8 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
 *     to work properly to support receiving regulatory hints from
  *     cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
+ *     here to reserve the value for API/ABI compatibility)
  * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
  *     equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
  *     mode
@@ -3891,13 +3948,16 @@ enum nl80211_ap_sme_features {
  *     interface. An active monitor interface behaves like a normal monitor
  *     interface, but gets added to the driver. It ensures that incoming
  *     unicast packets directed at the configured interface address get ACKed.
+ * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic
+ *     channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the
+ *     lifetime of a BSS.
  */
 enum nl80211_feature_flags {
        NL80211_FEATURE_SK_TX_STATUS                    = 1 << 0,
        NL80211_FEATURE_HT_IBSS                         = 1 << 1,
        NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
        NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
-       /* bit 4 is reserved - don't use */
+       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
        NL80211_FEATURE_SAE                             = 1 << 5,
        NL80211_FEATURE_LOW_PRIORITY_SCAN               = 1 << 6,
        NL80211_FEATURE_SCAN_FLUSH                      = 1 << 7,
@@ -3911,6 +3971,7 @@ enum nl80211_feature_flags {
        NL80211_FEATURE_FULL_AP_CLIENT_STATE            = 1 << 15,
        NL80211_FEATURE_USERSPACE_MPM                   = 1 << 16,
        NL80211_FEATURE_ACTIVE_MONITOR                  = 1 << 17,
+       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE       = 1 << 18,
 };
 
 /**
index 970553cbbc8e9d69dca1167aedfbfee20947e257..0b979ee4bfc0dea7e7d3f2f743708c997b2138cb 100644 (file)
@@ -395,7 +395,9 @@ struct ovs_key_nd {
  * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying
  * the actions to take for packets that match the key.  Always present in
  * notifications.  Required for %OVS_FLOW_CMD_NEW requests, optional for
- * %OVS_FLOW_CMD_SET requests.
+ * %OVS_FLOW_CMD_SET requests.  An %OVS_FLOW_CMD_SET without
+ * %OVS_FLOW_ATTR_ACTIONS will not modify the actions.  To clear the actions,
+ * an %OVS_FLOW_ATTR_ACTIONS without any nested attributes must be given.
  * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this
  * flow.  Present in notifications if the stats would be nonzero.  Ignored in
  * requests.
index 852373d27dbb2bdd2016bf6a9d2ba00cd68e2954..6f71b9b4159581eac01241f9c443948584f30f6e 100644 (file)
@@ -38,6 +38,7 @@
 #define _LINUX_TIPC_H_
 
 #include <linux/types.h>
+#include <linux/sockios.h>
 
 /*
  * TIPC addressing primitives
@@ -87,6 +88,7 @@ static inline unsigned int tipc_node(__u32 addr)
 
 #define TIPC_CFG_SRV           0       /* configuration service name type */
 #define TIPC_TOP_SRV           1       /* topology service name type */
+#define TIPC_LINK_STATE                2       /* link state name type */
 #define TIPC_RESERVED_TYPES    64      /* lowest user-publishable name type */
 
 /*
@@ -206,4 +208,25 @@ struct sockaddr_tipc {
 #define TIPC_NODE_RECVQ_DEPTH  131     /* Default: none (read only) */
 #define TIPC_SOCK_RECVQ_DEPTH  132     /* Default: none (read only) */
 
+/*
+ * Maximum sizes of TIPC bearer-related names (including terminating NULL)
+ * The string formatting for each name element is:
+ * media: media
+ * interface: media:interface name
+ * link: Z.C.N:interface-Z.C.N:interface
+ *
+ */
+
+#define TIPC_MAX_MEDIA_NAME    16
+#define TIPC_MAX_IF_NAME       16
+#define TIPC_MAX_BEARER_NAME   32
+#define TIPC_MAX_LINK_NAME     60
+
+#define SIOCGETLINKNAME                SIOCPROTOPRIVATE
+
+struct tipc_sioc_ln_req {
+       __u32 peer;
+       __u32 bearer_id;
+       char linkname[TIPC_MAX_LINK_NAME];
+};
 #endif
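An illustrative userspace sketch of the new ioctl, assuming a kernel and headers that carry this change; the peer address <1.1.1> is an arbitrary example:

#include <linux/tipc.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct tipc_sioc_ln_req req;
	int sd = socket(AF_TIPC, SOCK_RDM, 0);

	if (sd < 0) {
		perror("socket");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.peer = (1 << 24) | (1 << 12) | 1;	/* TIPC address <1.1.1> */
	req.bearer_id = 0;

	if (ioctl(sd, SIOCGETLINKNAME, &req) < 0)
		perror("SIOCGETLINKNAME");
	else
		printf("link name: %s\n", req.linkname);

	close(sd);
	return 0;
}
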
index 6b0bff09b3a7ced5dc7cf2c1a07dd4f82112b088..41a76acbb305f85cb4cb0ec6dfab9cab1e20e1d4 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/tipc.h>
 #include <asm/byteorder.h>
 
 #ifndef __KERNEL__
 #define TIPC_TLV_NAME_TBL_QUERY        25      /* struct tipc_name_table_query */
 #define TIPC_TLV_PORT_REF      26      /* 32-bit port reference */
 
-/*
- * Maximum sizes of TIPC bearer-related names (including terminating NUL)
- */
-
-#define TIPC_MAX_MEDIA_NAME    16      /* format = media */
-#define TIPC_MAX_IF_NAME       16      /* format = interface */
-#define TIPC_MAX_BEARER_NAME   32      /* format = media:interface */
-#define TIPC_MAX_LINK_NAME     60      /* format = Z.C.N:interface-Z.C.N:interface */
-
 /*
  * Link priority limits (min, default, max, media default)
  */
index e2bcfd75a30d38d37475cef6e0382e1081d5a3b0..16574ea18f0cf62d743287779ffaed6205d120d0 100644 (file)
@@ -29,6 +29,8 @@ struct udphdr {
 /* UDP socket options */
 #define UDP_CORK       1       /* Never send partially complete segments */
 #define UDP_ENCAP      100     /* Set the socket to accept encapsulated packets */
+#define UDP_NO_CHECK6_TX 101   /* Disable sending checksum for UDP6 */
+#define UDP_NO_CHECK6_RX 102   /* Disable accepting checksum for UDP6 */
 
 /* UDP encapsulation types */
 #define UDP_ENCAP_ESPINUDP_NON_IKE     1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
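For illustration (not part of this diff), enabling zero UDPv6 checksums on a socket, e.g. for tunnel encapsulations that carry their own integrity checks; the fallback defines mirror the values above for older userspace headers:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef UDP_NO_CHECK6_TX
#define UDP_NO_CHECK6_TX 101
#define UDP_NO_CHECK6_RX 102
#endif

int main(void)
{
	int one = 1;
	int sd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (sd < 0 ||
	    setsockopt(sd, IPPROTO_UDP, UDP_NO_CHECK6_TX, &one, sizeof(one)) < 0 ||
	    setsockopt(sd, IPPROTO_UDP, UDP_NO_CHECK6_RX, &one, sizeof(one)) < 0) {
		perror("UDP_NO_CHECK6");
		return 1;
	}
	return 0;
}
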
index 9c7fd4c9249f2c72395fcaf2ac953f782a3e2b59..48655ceb66f45cdf27cf8bf769d9de296175a21c 100644 (file)
@@ -476,7 +476,7 @@ static void __init mm_init(void)
        vmalloc_init();
 }
 
-asmlinkage void __init start_kernel(void)
+asmlinkage __visible void __init start_kernel(void)
 {
        char * command_line;
        extern const struct kernel_param __start___param[], __stop___param[];
index 33531d72e4a21014a68a98821bbef232edb9a2fb..81f5f49479da7c64625007a0eb086f720959f728 100644 (file)
@@ -675,13 +675,13 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
                if ((task_active_pid_ns(current) != &init_pid_ns))
                        return -EPERM;
 
-               if (!capable(CAP_AUDIT_CONTROL))
+               if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
                        err = -EPERM;
                break;
        case AUDIT_USER:
        case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
        case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
-               if (!capable(CAP_AUDIT_WRITE))
+               if (!netlink_capable(skb, CAP_AUDIT_WRITE))
                        err = -EPERM;
                break;
        default:  /* bad msg */
index 9fcdaa705b6cb7442b4babcb3a9ab40564afa27b..3f1ca934a2378495e5129dbe807bfc7111f53e8b 100644 (file)
@@ -348,7 +348,7 @@ struct cgrp_cset_link {
  * reference-counted, to improve performance when child cgroups
  * haven't been created.
  */
-static struct css_set init_css_set = {
+struct css_set init_css_set = {
        .refcount               = ATOMIC_INIT(1),
        .cgrp_links             = LIST_HEAD_INIT(init_css_set.cgrp_links),
        .tasks                  = LIST_HEAD_INIT(init_css_set.tasks),
@@ -1495,7 +1495,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
         */
        if (!use_task_css_set_links)
                cgroup_enable_task_cg_lists();
-retry:
+
        mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
@@ -1503,7 +1503,7 @@ retry:
        ret = parse_cgroupfs_options(data, &opts);
        if (ret)
                goto out_unlock;
-
+retry:
        /* look for a matching existing root */
        if (!opts.subsys_mask && !opts.none && !opts.name) {
                cgrp_dfl_root_visible = true;
@@ -1562,9 +1562,9 @@ retry:
                if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
                        mutex_unlock(&cgroup_tree_mutex);
-                       kfree(opts.release_agent);
-                       kfree(opts.name);
                        msleep(10);
+                       mutex_lock(&cgroup_tree_mutex);
+                       mutex_lock(&cgroup_mutex);
                        goto retry;
                }
 
index 2bc4a2256444ebf500269c5dc276871e48e0efa8..345628c78b5b3779460038ec6f036f9e8b7c1a32 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
 
 /*
  * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
@@ -42,9 +43,10 @@ enum freezer_state_flags {
 struct freezer {
        struct cgroup_subsys_state      css;
        unsigned int                    state;
-       spinlock_t                      lock;
 };
 
+static DEFINE_MUTEX(freezer_mutex);
+
 static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
 {
        return css ? container_of(css, struct freezer, css) : NULL;
@@ -93,7 +95,6 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css)
        if (!freezer)
                return ERR_PTR(-ENOMEM);
 
-       spin_lock_init(&freezer->lock);
        return &freezer->css;
 }
 
@@ -110,14 +111,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
        struct freezer *freezer = css_freezer(css);
        struct freezer *parent = parent_freezer(freezer);
 
-       /*
-        * The following double locking and freezing state inheritance
-        * guarantee that @cgroup can never escape ancestors' freezing
-        * states.  See css_for_each_descendant_pre() for details.
-        */
-       if (parent)
-               spin_lock_irq(&parent->lock);
-       spin_lock_nested(&freezer->lock, SINGLE_DEPTH_NESTING);
+       mutex_lock(&freezer_mutex);
 
        freezer->state |= CGROUP_FREEZER_ONLINE;
 
@@ -126,10 +120,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
                atomic_inc(&system_freezing_cnt);
        }
 
-       spin_unlock(&freezer->lock);
-       if (parent)
-               spin_unlock_irq(&parent->lock);
-
+       mutex_unlock(&freezer_mutex);
        return 0;
 }
 
@@ -144,14 +135,14 @@ static void freezer_css_offline(struct cgroup_subsys_state *css)
 {
        struct freezer *freezer = css_freezer(css);
 
-       spin_lock_irq(&freezer->lock);
+       mutex_lock(&freezer_mutex);
 
        if (freezer->state & CGROUP_FREEZING)
                atomic_dec(&system_freezing_cnt);
 
        freezer->state = 0;
 
-       spin_unlock_irq(&freezer->lock);
+       mutex_unlock(&freezer_mutex);
 }
 
 static void freezer_css_free(struct cgroup_subsys_state *css)
@@ -175,7 +166,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
        struct task_struct *task;
        bool clear_frozen = false;
 
-       spin_lock_irq(&freezer->lock);
+       mutex_lock(&freezer_mutex);
 
        /*
         * Make the new tasks conform to the current state of @new_css.
@@ -197,21 +188,13 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
                }
        }
 
-       spin_unlock_irq(&freezer->lock);
-
-       /*
-        * Propagate FROZEN clearing upwards.  We may race with
-        * update_if_frozen(), but as long as both work bottom-up, either
-        * update_if_frozen() sees child's FROZEN cleared or we clear the
-        * parent's FROZEN later.  No parent w/ !FROZEN children can be
-        * left FROZEN.
-        */
+       /* propagate FROZEN clearing upwards */
        while (clear_frozen && (freezer = parent_freezer(freezer))) {
-               spin_lock_irq(&freezer->lock);
                freezer->state &= ~CGROUP_FROZEN;
                clear_frozen = freezer->state & CGROUP_FREEZING;
-               spin_unlock_irq(&freezer->lock);
        }
+
+       mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -228,9 +211,6 @@ static void freezer_fork(struct task_struct *task)
 {
        struct freezer *freezer;
 
-       rcu_read_lock();
-       freezer = task_freezer(task);
-
        /*
         * The root cgroup is non-freezable, so we can skip locking the
         * freezer.  This is safe regardless of race with task migration.
@@ -238,24 +218,18 @@ static void freezer_fork(struct task_struct *task)
         * to do.  If we lost and root is the new cgroup, noop is still the
         * right thing to do.
         */
-       if (!parent_freezer(freezer))
-               goto out;
+       if (task_css_is_root(task, freezer_cgrp_id))
+               return;
 
-       /*
-        * Grab @freezer->lock and freeze @task after verifying @task still
-        * belongs to @freezer and it's freezing.  The former is for the
-        * case where we have raced against task migration and lost and
-        * @task is already in a different cgroup which may not be frozen.
-        * This isn't strictly necessary as freeze_task() is allowed to be
-        * called spuriously but let's do it anyway for, if nothing else,
-        * documentation.
-        */
-       spin_lock_irq(&freezer->lock);
-       if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING))
+       mutex_lock(&freezer_mutex);
+       rcu_read_lock();
+
+       freezer = task_freezer(task);
+       if (freezer->state & CGROUP_FREEZING)
                freeze_task(task);
-       spin_unlock_irq(&freezer->lock);
-out:
+
        rcu_read_unlock();
+       mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -281,22 +255,24 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
        struct css_task_iter it;
        struct task_struct *task;
 
-       WARN_ON_ONCE(!rcu_read_lock_held());
-
-       spin_lock_irq(&freezer->lock);
+       lockdep_assert_held(&freezer_mutex);
 
        if (!(freezer->state & CGROUP_FREEZING) ||
            (freezer->state & CGROUP_FROZEN))
-               goto out_unlock;
+               return;
 
        /* are all (live) children frozen? */
+       rcu_read_lock();
        css_for_each_child(pos, css) {
                struct freezer *child = css_freezer(pos);
 
                if ((child->state & CGROUP_FREEZER_ONLINE) &&
-                   !(child->state & CGROUP_FROZEN))
-                       goto out_unlock;
+                   !(child->state & CGROUP_FROZEN)) {
+                       rcu_read_unlock();
+                       return;
+               }
        }
+       rcu_read_unlock();
 
        /* are all tasks frozen? */
        css_task_iter_start(css, &it);
@@ -317,21 +293,29 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
        freezer->state |= CGROUP_FROZEN;
 out_iter_end:
        css_task_iter_end(&it);
-out_unlock:
-       spin_unlock_irq(&freezer->lock);
 }
 
 static int freezer_read(struct seq_file *m, void *v)
 {
        struct cgroup_subsys_state *css = seq_css(m), *pos;
 
+       mutex_lock(&freezer_mutex);
        rcu_read_lock();
 
        /* update states bottom-up */
-       css_for_each_descendant_post(pos, css)
+       css_for_each_descendant_post(pos, css) {
+               if (!css_tryget(pos))
+                       continue;
+               rcu_read_unlock();
+
                update_if_frozen(pos);
 
+               rcu_read_lock();
+               css_put(pos);
+       }
+
        rcu_read_unlock();
+       mutex_unlock(&freezer_mutex);
 
        seq_puts(m, freezer_state_strs(css_freezer(css)->state));
        seq_putc(m, '\n');
@@ -373,7 +357,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
                                unsigned int state)
 {
        /* also synchronizes against task migration, see freezer_attach() */
-       lockdep_assert_held(&freezer->lock);
+       lockdep_assert_held(&freezer_mutex);
 
        if (!(freezer->state & CGROUP_FREEZER_ONLINE))
                return;
@@ -414,31 +398,29 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
         * descendant will try to inherit its parent's FREEZING state as
         * CGROUP_FREEZING_PARENT.
         */
+       mutex_lock(&freezer_mutex);
        rcu_read_lock();
        css_for_each_descendant_pre(pos, &freezer->css) {
                struct freezer *pos_f = css_freezer(pos);
                struct freezer *parent = parent_freezer(pos_f);
 
-               spin_lock_irq(&pos_f->lock);
+               if (!css_tryget(pos))
+                       continue;
+               rcu_read_unlock();
 
-               if (pos_f == freezer) {
+               if (pos_f == freezer)
                        freezer_apply_state(pos_f, freeze,
                                            CGROUP_FREEZING_SELF);
-               } else {
-                       /*
-                        * Our update to @parent->state is already visible
-                        * which is all we need.  No need to lock @parent.
-                        * For more info on synchronization, see
-                        * freezer_post_create().
-                        */
+               else
                        freezer_apply_state(pos_f,
                                            parent->state & CGROUP_FREEZING,
                                            CGROUP_FREEZING_PARENT);
-               }
 
-               spin_unlock_irq(&pos_f->lock);
+               rcu_read_lock();
+               css_put(pos);
        }
        rcu_read_unlock();
+       mutex_unlock(&freezer_mutex);
 }
 
 static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
index 6cb20d2e7ee0d28b0d6399a8cd1f3e23f9366899..019d45008448cc54160fd25edd5bbde1cc279e50 100644 (file)
@@ -120,7 +120,7 @@ void context_tracking_user_enter(void)
  * instead of preempt_schedule() to exit user context if needed before
  * calling the scheduler.
  */
-asmlinkage void __sched notrace preempt_schedule_context(void)
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 {
        enum ctx_state prev_ctx;
 
index f83a71a3e46d75547e540ed317f99531838f408b..440eefc67397e48f15b58bc8cf31712bec91286b 100644 (file)
@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
                cpuctx->exclusive = 0;
 }
 
+struct remove_event {
+       struct perf_event *event;
+       bool detach_group;
+};
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
  */
 static int __perf_remove_from_context(void *info)
 {
-       struct perf_event *event = info;
+       struct remove_event *re = info;
+       struct perf_event *event = re->event;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        raw_spin_lock(&ctx->lock);
        event_sched_out(event, cpuctx, ctx);
+       if (re->detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
                ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;
+       struct remove_event re = {
+               .event = event,
+               .detach_group = detach_group,
+       };
 
        lockdep_assert_held(&ctx->mutex);
 
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
                 * Per cpu events are removed via an smp call and
                 * the removal is always successful.
                 */
-               cpu_function_call(event->cpu, __perf_remove_from_context, event);
+               cpu_function_call(event->cpu, __perf_remove_from_context, &re);
                return;
        }
 
 retry:
-       if (!task_function_call(task, __perf_remove_from_context, event))
+       if (!task_function_call(task, __perf_remove_from_context, &re))
                return;
 
        raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
         * Since the task isn't running, its safe to remove the event, us
         * holding the ctx->lock ensures the task won't get scheduled in.
         */
+       if (detach_group)
+               perf_group_detach(event);
        list_del_event(event, ctx);
        raw_spin_unlock_irq(&ctx->lock);
 }
@@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
 
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
@@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event)
        unaccount_event(event);
 
        if (event->rb) {
-               struct ring_buffer *rb;
-
                /*
                 * Can happen when we close an event with re-directed output.
                 *
@@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event)
                 * over us; possibly making our ring_buffer_put() the last.
                 */
                mutex_lock(&event->mmap_mutex);
-               rb = event->rb;
-               if (rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* could be last */
-               }
+               ring_buffer_attach(event, NULL);
                mutex_unlock(&event->mmap_mutex);
        }
 
@@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event)
         *     to trigger the AB-BA case.
         */
        mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
-       raw_spin_lock_irq(&ctx->lock);
-       perf_group_detach(event);
-       raw_spin_unlock_irq(&ctx->lock);
-       perf_remove_from_context(event);
+       perf_remove_from_context(event, true);
        mutex_unlock(&ctx->mutex);
 
        free_event(event);
@@ -3839,28 +3844,47 @@ unlock:
 static void ring_buffer_attach(struct perf_event *event,
                               struct ring_buffer *rb)
 {
+       struct ring_buffer *old_rb = NULL;
        unsigned long flags;
 
-       if (!list_empty(&event->rb_entry))
-               return;
+       if (event->rb) {
+               /*
+                * Should be impossible, we set this when removing
+                * event->rb_entry and wait/clear when adding event->rb_entry.
+                */
+               WARN_ON_ONCE(event->rcu_pending);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       if (list_empty(&event->rb_entry))
-               list_add(&event->rb_entry, &rb->event_list);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+               old_rb = event->rb;
+               event->rcu_batches = get_state_synchronize_rcu();
+               event->rcu_pending = 1;
 
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
-       unsigned long flags;
+               spin_lock_irqsave(&old_rb->event_lock, flags);
+               list_del_rcu(&event->rb_entry);
+               spin_unlock_irqrestore(&old_rb->event_lock, flags);
+       }
 
-       if (list_empty(&event->rb_entry))
-               return;
+       if (event->rcu_pending && rb) {
+               cond_synchronize_rcu(event->rcu_batches);
+               event->rcu_pending = 0;
+       }
+
+       if (rb) {
+               spin_lock_irqsave(&rb->event_lock, flags);
+               list_add_rcu(&event->rb_entry, &rb->event_list);
+               spin_unlock_irqrestore(&rb->event_lock, flags);
+       }
+
+       rcu_assign_pointer(event->rb, rb);
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       list_del_init(&event->rb_entry);
-       wake_up_all(&event->waitq);
-       spin_unlock_irqrestore(&rb->event_lock, flags);
+       if (old_rb) {
+               ring_buffer_put(old_rb);
+               /*
+                * Since we detached before setting the new rb, so that we
+                * could attach the new rb, we could have missed a wakeup.
+                * Provide it now.
+                */
+               wake_up_all(&event->waitq);
+       }
 }
 
 static void ring_buffer_wakeup(struct perf_event *event)
@@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
 
-       struct ring_buffer *rb = event->rb;
+       struct ring_buffer *rb = ring_buffer_get(event);
        struct user_struct *mmap_user = rb->mmap_user;
        int mmap_locked = rb->mmap_locked;
        unsigned long size = perf_data_size(rb);
@@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        atomic_dec(&rb->mmap_count);
 
        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
-               return;
+               goto out_put;
 
-       /* Detach current event from the buffer. */
-       rcu_assign_pointer(event->rb, NULL);
-       ring_buffer_detach(event, rb);
+       ring_buffer_attach(event, NULL);
        mutex_unlock(&event->mmap_mutex);
 
        /* If there's still other mmap()s of this buffer, we're done. */
-       if (atomic_read(&rb->mmap_count)) {
-               ring_buffer_put(rb); /* can't be last */
-               return;
-       }
+       if (atomic_read(&rb->mmap_count))
+               goto out_put;
 
        /*
         * No other mmap()s, detach from all other events that might redirect
@@ -3978,11 +3998,9 @@ again:
                 * still restart the iteration to make sure we're not now
                 * iterating the wrong list.
                 */
-               if (event->rb == rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* can't be last, we still have one */
-               }
+               if (event->rb == rb)
+                       ring_buffer_attach(event, NULL);
+
                mutex_unlock(&event->mmap_mutex);
                put_event(event);
 
@@ -4007,6 +4025,7 @@ again:
        vma->vm_mm->pinned_vm -= mmap_locked;
        free_uid(mmap_user);
 
+out_put:
        ring_buffer_put(rb); /* could be last */
 }
 
@@ -4124,7 +4143,6 @@ again:
        vma->vm_mm->pinned_vm += extra;
 
        ring_buffer_attach(event, rb);
-       rcu_assign_pointer(event->rb, rb);
 
        perf_event_init_userpage(event);
        perf_event_update_userpage(event);
@@ -5408,6 +5426,9 @@ struct swevent_htable {
 
        /* Recursion avoidance in each contexts */
        int                             recursion[PERF_NR_CONTEXTS];
+
+       /* Keeps track of cpu being initialized/exited */
+       bool                            online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
        hwc->state = !(flags & PERF_EF_START);
 
        head = find_swevent_head(swhash, event);
-       if (WARN_ON_ONCE(!head))
+       if (!head) {
+               /*
+                * We can race with cpu hotplug code. Do not
+                * WARN if the cpu just got unplugged.
+                */
+               WARN_ON_ONCE(swhash->online);
                return -EINVAL;
+       }
 
        hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -6914,7 +6941,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-       struct ring_buffer *rb = NULL, *old_rb = NULL;
+       struct ring_buffer *rb = NULL;
        int ret = -EINVAL;
 
        if (!output_event)
@@ -6942,8 +6969,6 @@ set:
        if (atomic_read(&event->mmap_count))
                goto unlock;
 
-       old_rb = event->rb;
-
        if (output_event) {
                /* get the rb we want to redirect to */
                rb = ring_buffer_get(output_event);
@@ -6951,23 +6976,7 @@ set:
                        goto unlock;
        }
 
-       if (old_rb)
-               ring_buffer_detach(event, old_rb);
-
-       if (rb)
-               ring_buffer_attach(event, rb);
-
-       rcu_assign_pointer(event->rb, rb);
-
-       if (old_rb) {
-               ring_buffer_put(old_rb);
-               /*
-                * Since we detached before setting the new rb, so that we
-                * could attach the new rb, we could have missed a wakeup.
-                * Provide it now.
-                */
-               wake_up_all(&event->waitq);
-       }
+       ring_buffer_attach(event, rb);
 
        ret = 0;
 unlock:
@@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open,
        if (attr.freq) {
                if (attr.sample_freq > sysctl_perf_event_sample_rate)
                        return -EINVAL;
+       } else {
+               if (attr.sample_period & (1ULL << 63))
+                       return -EINVAL;
        }
 
        /*
@@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_context *gctx = group_leader->ctx;
 
                mutex_lock(&gctx->mutex);
-               perf_remove_from_context(group_leader);
+               perf_remove_from_context(group_leader, false);
 
                /*
                 * Removing from the context ends up with disabled
@@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_event__state_init(group_leader);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_remove_from_context(sibling);
+                       perf_remove_from_context(sibling, false);
                        perf_event__state_init(sibling);
                        put_ctx(gctx);
                }
@@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
        mutex_lock(&src_ctx->mutex);
        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
                                 event_entry) {
-               perf_remove_from_context(event);
+               perf_remove_from_context(event, false);
                unaccount_event_cpu(event, src_cpu);
                put_ctx(src_ctx);
                list_add(&event->migrate_entry, &events);
@@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       if (child_event->parent) {
-               raw_spin_lock_irq(&child_ctx->lock);
-               perf_group_detach(child_event);
-               raw_spin_unlock_irq(&child_ctx->lock);
-       }
-
-       perf_remove_from_context(child_event);
+       perf_remove_from_context(child_event, !!child_event->parent);
 
        /*
         * It can happen that the parent exits first, and has events
@@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent, ctxn);
+       if (!parent_ctx)
+               return 0;
 
        /*
         * No need to check if parent_ctx != NULL here; since we saw
@@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu)
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = true;
        if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
 
@@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
+       struct remove_event re = { .detach_group = false };
        struct perf_event_context *ctx = __info;
-       struct perf_event *event;
 
        perf_pmu_rotate_stop(ctx->pmu);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
-               __perf_remove_from_context(event);
+       list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+               __perf_remove_from_context(&re);
        rcu_read_unlock();
 }
 
@@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu)
        perf_event_exit_cpu_context(cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = false;
        swevent_hlist_release(swhash);
        mutex_unlock(&swhash->hlist_mutex);
 }
index d55092ceee2975c204bcb90e856f9b6504d577ac..e0501fe7140d7c97daba3f3438b51272cf6d9932 100644 (file)
@@ -234,6 +234,11 @@ again:
                        goto again;
                }
                timer->base = new_base;
+       } else {
+               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+                       cpu = this_cpu;
+                       goto again;
+               }
        }
        return new_base;
 }
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
        cpu_base->expires_next.tv64 = expires_next.tv64;
 
+       /*
+        * If a hang was detected in the last timer interrupt then we
+        * leave the hang delay active in the hardware. We want the
+        * system to make progress. That also prevents the following
+        * scenario:
+        * T1 expires 50ms from now
+        * T2 expires 5s from now
+        *
+        * T1 is removed, so this code is called and would reprogram
+        * the hardware to 5s from now. Any hrtimer_start after that
+        * will not reprogram the hardware due to hang_detected being
+        * set. So we'd effectivly block all timers until the T2 event
+        * fires.
+        */
+       if (cpu_base->hang_detected)
+               return;
+
        if (cpu_base->expires_next.tv64 != KTIME_MAX)
                tick_program_event(cpu_base->expires_next, 1);
 }
@@ -968,11 +990,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);
 
-       /* Switch the timer base, if necessary: */
-       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
        if (mode & HRTIMER_MODE_REL) {
-               tim = ktime_add_safe(tim, new_base->get_time());
+               tim = ktime_add_safe(tim, base->get_time());
                /*
                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
                 * to signal that they simply return xtime in
@@ -987,6 +1006,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
+       /* Switch the timer base, if necessary: */
+       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
        timer_stats_hrtimer_set_start_info(timer);
 
        leftmost = enqueue_hrtimer(timer, new_base);
index a7174617616ba6b8f404a1c3f01cf8b7dd90cb4d..bb07f2928f4b9c2ca33803f712c8889ca5823907 100644 (file)
@@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                if (from > irq)
                        return -EINVAL;
                from = irq;
+       } else {
+               /*
+                * For interrupts which are freely allocated the
+                * architecture can force a lower bound to the @from
+                * argument. x86 uses this to exclude the GSI space.
+                */
+               from = arch_dynirq_lower_bound(from);
        }
 
        mutex_lock(&sparse_irq_lock);
index 2486a4c1a710ba057c7f884faae19bff1fc6d31c..d34131ca372baee79aa17ca2670cde5ff32a2cc8 100644 (file)
@@ -180,7 +180,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        int ret;
 
-       ret = chip->irq_set_affinity(data, mask, false);
+       ret = chip->irq_set_affinity(data, mask, force);
        switch (ret) {
        case IRQ_SET_MASK_OK:
                cpumask_copy(data->affinity, mask);
@@ -192,7 +192,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        return ret;
 }
 
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+                           bool force)
 {
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
@@ -202,7 +203,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
                return -EINVAL;
 
        if (irq_can_move_pcntxt(data)) {
-               ret = irq_do_set_affinity(data, mask, false);
+               ret = irq_do_set_affinity(data, mask, force);
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
@@ -217,13 +218,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
        return ret;
 }
 
-/**
- *     irq_set_affinity - Set the irq affinity of a given irq
- *     @irq:           Interrupt to set affinity
- *     @mask:          cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
@@ -233,7 +228,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
                return -EINVAL;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       ret =  __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+       ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
 }
index b0e9467922e1a476bfe1d4d8503ac7623affcaea..d24e4339b46d3c84d03f6998c44bc14035054aa6 100644 (file)
@@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-asmlinkage void lockdep_sys_exit(void)
+asmlinkage __visible void lockdep_sys_exit(void)
 {
        struct task_struct *curr = current;
 
index 11869408f79b86abe33e5194d0f5c705b44e9d81..079c4615607d6ed266330a5416529bfcc37e4db0 100644 (file)
@@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                return -EFAULT;
        name[MODULE_NAME_LEN-1] = '\0';
 
-       if (!(flags & O_NONBLOCK))
-               pr_warn("waiting module removal not supported: please upgrade\n");
-
        if (mutex_lock_interruptible(&module_mutex) != 0)
                return -EINTR;
 
@@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
        dynamic_debug_setup(info->debug, info->num_debug);
 
+       /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+       ftrace_module_init(mod);
+
        /* Finally it's fully formed, ready to start executing. */
        err = complete_formation(mod, info);
        if (err)
index 18fb7a2fb14b315cf3f7f9a7bdaf0173438d57b5..1ea328aafdc9a7437d01e19b46033f7fd7fe5412 100644 (file)
@@ -1586,7 +1586,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
        return -ENOMEM;
 }
 
-asmlinkage int swsusp_save(void)
+asmlinkage __visible int swsusp_save(void)
 {
        unsigned int nr_pages, nr_highmem;
 
index c3ad9cafe930e550a6400dc1994f8ee86570d885..8233cd4047d776c311ef71800479f1e2b637e5da 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/cpuidle.h>
 #include <linux/syscalls.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -53,7 +54,9 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
+       cpuidle_resume();
        wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
+       cpuidle_pause();
 }
 
 void freeze_wake(void)
index a45b509622952a751fda8f962c143f9ef684aedf..7228258b85eca19e105df60bf3101bbc8a5e30b4 100644 (file)
@@ -1674,7 +1674,7 @@ EXPORT_SYMBOL(printk_emit);
  *
  * See the vsnprintf() documentation for format string extensions over C99.
  */
-asmlinkage int printk(const char *fmt, ...)
+asmlinkage __visible int printk(const char *fmt, ...)
 {
        va_list args;
        int r;
@@ -1737,7 +1737,7 @@ void early_vprintk(const char *fmt, va_list ap)
        }
 }
 
-asmlinkage void early_printk(const char *fmt, ...)
+asmlinkage __visible void early_printk(const char *fmt, ...)
 {
        va_list ap;
 
index 268a45ea238cc84f51ae7612bf0ba3c531b9887f..204d3d281809aa90686b8d4f0199c7d933710908 100644 (file)
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
 {
        struct rq *rq = this_rq();
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
        if (likely(prev->sched_class == class &&
                   rq->nr_running == rq->cfs.h_nr_running)) {
                p = fair_sched_class.pick_next_task(rq, prev);
-               if (likely(p && p != RETRY_TASK))
-                       return p;
+               if (unlikely(p == RETRY_TASK))
+                       goto again;
+
+               /* assumes fair_sched_class->next == idle_sched_class */
+               if (unlikely(!p))
+                       p = idle_sched_class.pick_next_task(rq, prev);
+
+               return p;
        }
 
 again:
@@ -2741,7 +2747,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
                blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
        struct task_struct *tsk = current;
 
@@ -2751,7 +2757,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
        /*
         * If we come here after a random call to set_need_resched(),
@@ -2783,7 +2789,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
        /*
         * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2819,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
        enum ctx_state prev_state;
 
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        dl_se->dl_throttled = 0;
        dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
 }
 
 static void __setscheduler_params(struct task_struct *p,
@@ -3639,6 +3646,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * sys_sched_setattr - same as above, but with extended sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
                               unsigned int, flags)
@@ -3783,6 +3791,7 @@ err_size:
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
                unsigned int, size, unsigned int, flags)
@@ -6017,6 +6026,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
                                        ,
                .last_balance           = jiffies,
                .balance_interval       = sd_weight,
+               .max_newidle_lb_cost    = 0,
+               .next_decay_max_lb_cost = jiffies,
        };
        SD_INIT_NAME(sd, NUMA);
        sd->private = &tl->data;
index 5b9bb42b2d47e760f3a7cd17af24ba37d2cf07ea..ab001b5d50487815da984947b592b26038cce794 100644 (file)
@@ -210,7 +210,5 @@ int cpudl_init(struct cpudl *cp)
  */
 void cpudl_cleanup(struct cpudl *cp)
 {
-       /*
-        * nothing to do for the moment
-        */
+       free_cpumask_var(cp->free_cpus);
 }
index 8b836b376d9129760066326eabf5040f72b2e4f3..3031bac8aa3ea990bc7425e835675c7a5a386ab5 100644 (file)
@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
        int idx = 0;
        int task_pri = convert_prio(p->prio);
 
-       if (task_pri >= MAX_RT_PRIO)
-               return 0;
+       BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
        for (idx = 0; idx < task_pri; idx++) {
                struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
index a95097cb4591b5bfa2466adb5600895e782fc661..72fdf06ef8652d5cb443b080f53ac117bd5517ba 100644 (file)
@@ -332,50 +332,50 @@ out:
  * softirq as those do not count in task exec_runtime any more.
  */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq)
+                                        struct rq *rq, int ticks)
 {
-       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+       cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+       u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
        if (steal_account_process_tick())
                return;
 
+       cputime *= ticks;
+       scaled *= ticks;
+
        if (irqtime_account_hi_update()) {
-               cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
-               cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+               cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time do not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SOFTIRQ);
+               __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
-               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
-               account_idle_time(cputime_one_jiffy);
+               account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
-               account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_guest_time(p, cputime, scaled);
        } else {
-               __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                                       CPUTIME_SYSTEM);
+               __account_system_time(p, cputime, scaled,       CPUTIME_SYSTEM);
        }
 }
 
 static void irqtime_account_idle_ticks(int ticks)
 {
-       int i;
        struct rq *rq = this_rq();
 
-       for (i = 0; i < ticks; i++)
-               irqtime_account_process_tick(current, 0, rq);
+       irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static inline void irqtime_account_idle_ticks(int ticks) {}
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq) {}
+                                               struct rq *rq, int nr_ticks) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
                return;
 
        if (sched_clock_irqtime) {
-               irqtime_account_process_tick(p, user_tick, rq);
+               irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }
 
index b08095786cb8fff0c96773eb5f6a582da503bcb8..800e99b99075141421d82f0bdc07e42f09baea9d 100644 (file)
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
        sched_clock_tick();
        update_rq_clock(rq);
        dl_se->dl_throttled = 0;
+       dl_se->dl_yielded = 0;
        if (p->on_rq) {
                enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
                if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
-        * new scheduling parameters (thanks to dl_new=1).
+        * new scheduling parameters (thanks to dl_yielded=1).
         */
        if (p->dl.runtime > 0) {
-               rq->curr->dl.dl_new = 1;
+               rq->curr->dl.dl_yielded = 1;
                p->dl.runtime = 0;
        }
        update_curr_dl(rq);
index 7570dd969c2838e9aab12ba9cb2c24cb87e21855..0fdb96de81a5b8a92c302961769cdefdb5cad915 100644 (file)
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
        int this_cpu = this_rq->cpu;
 
        idle_enter_fair(this_rq);
+
        /*
         * We must set idle_stamp _before_ calling idle_balance(), such that we
         * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
 
        raw_spin_lock(&this_rq->lock);
 
+       if (curr_cost > this_rq->max_idle_balance_cost)
+               this_rq->max_idle_balance_cost = curr_cost;
+
        /*
-        * While browsing the domains, we released the rq lock.
-        * A task could have be enqueued in the meantime
+        * While browsing the domains, we released the rq lock, a task could
+        * have been enqueued in the meantime. Since we're not going idle,
+        * pretend we pulled a task.
         */
-       if (this_rq->cfs.h_nr_running && !pulled_task) {
+       if (this_rq->cfs.h_nr_running && !pulled_task)
                pulled_task = 1;
-               goto out;
-       }
 
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
                this_rq->next_balance = next_balance;
        }
 
-       if (curr_cost > this_rq->max_idle_balance_cost)
-               this_rq->max_idle_balance_cost = curr_cost;
-
 out:
        /* Is there a task of a high priority class? */
        if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
index b35c21503a36d6e63160f7f46a2eb7bf59d371ce..1036b6f2fdedaaa905284ecf7c46a8ec4286b05e 100644 (file)
@@ -54,8 +54,7 @@
 struct seccomp_filter {
        atomic_t usage;
        struct seccomp_filter *prev;
-       unsigned short len;  /* Instruction count */
-       struct sock_filter_int insnsi[];
+       struct sk_filter *prog;
 };
 
 /* Limit any path through the tree to 256KB worth of instructions. */
@@ -189,7 +188,8 @@ static u32 seccomp_run_filters(int syscall)
         * value always takes priority (ignoring the DATA).
         */
        for (f = current->seccomp.filter; f; f = f->prev) {
-               u32 cur_ret = sk_run_filter_int_seccomp(&sd, f->insnsi);
+               u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
+
                if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
                        ret = cur_ret;
        }
@@ -215,7 +215,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
                return -EINVAL;
 
        for (filter = current->seccomp.filter; filter; filter = filter->prev)
-               total_insns += filter->len + 4;  /* include a 4 instr penalty */
+               total_insns += filter->prog->len + 4;  /* include a 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return -ENOMEM;
 
@@ -256,19 +256,25 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 
        /* Allocate a new seccomp_filter */
        ret = -ENOMEM;
-       filter = kzalloc(sizeof(struct seccomp_filter) +
-                        sizeof(struct sock_filter_int) * new_len,
+       filter = kzalloc(sizeof(struct seccomp_filter),
                         GFP_KERNEL|__GFP_NOWARN);
        if (!filter)
                goto free_prog;
 
-       ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len);
-       if (ret)
+       filter->prog = kzalloc(sk_filter_size(new_len),
+                              GFP_KERNEL|__GFP_NOWARN);
+       if (!filter->prog)
                goto free_filter;
+
+       ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
+       if (ret)
+               goto free_filter_prog;
        kfree(fp);
 
        atomic_set(&filter->usage, 1);
-       filter->len = new_len;
+       filter->prog->len = new_len;
+
+       sk_filter_select_runtime(filter->prog);
 
        /*
         * If there is an existing filter, make it the prev and don't drop its
@@ -278,6 +284,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
        current->seccomp.filter = filter;
        return 0;
 
+free_filter_prog:
+       kfree(filter->prog);
 free_filter:
        kfree(filter);
 free_prog:
@@ -330,6 +338,7 @@ void put_seccomp_filter(struct task_struct *tsk)
        while (orig && atomic_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
                orig = orig->prev;
+               sk_filter_free(freeme->prog);
                kfree(freeme);
        }
 }
index b50990a5bea0220df9034f0bcc71d92e452edc78..92f24f5e8d5281aa03cf51e77d26297b91643769 100644 (file)
@@ -223,7 +223,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage void __do_softirq(void)
+asmlinkage __visible void __do_softirq(void)
 {
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
@@ -299,7 +299,7 @@ restart:
        tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-asmlinkage void do_softirq(void)
+asmlinkage __visible void do_softirq(void)
 {
        __u32 pending;
        unsigned long flags;
@@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void)
 {
        return 0;
 }
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+       return from;
+}
index 74f5b580fe34904fa4d9bcb2790545a323367773..e36ae4b15726041337e74b26b0a09dcec6ff73b8 100644 (file)
@@ -2501,11 +2501,11 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
        bool first = 1;
        size_t left = *lenp;
        unsigned long bitmap_len = table->maxlen;
-       unsigned long *bitmap = (unsigned long *) table->data;
+       unsigned long *bitmap = *(unsigned long **) table->data;
        unsigned long *tmp_bitmap = NULL;
        char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c;
 
-       if (!bitmap_len || !left || (*ppos && !write)) {
+       if (!bitmap || !bitmap_len || !left || (*ppos && !write)) {
                *lenp = 0;
                return 0;
        }
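For readers following the proc_do_large_bitmap() change above: table->data stores the address of the bitmap pointer, not the bitmap itself, so the old single cast landed on the pointer variable rather than the bitmap words. A minimal userspace sketch of the difference (variable names here are made up for illustration and are not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long bitmap[2] = { 0xff, 0 };
	unsigned long *bitmap_ptr = bitmap;	/* the sysctl's backing pointer */
	void *data = &bitmap_ptr;		/* what table->data would hold */

	/* Cast only: points at the pointer variable, i.e. the bug. */
	unsigned long *wrong = (unsigned long *) data;
	/* Dereference once more: points at the bitmap itself, i.e. the fix. */
	unsigned long *right = *(unsigned long **) data;

	printf("cast only   : %p (address of the pointer variable)\n", (void *) wrong);
	printf("dereferenced: %p (the bitmap itself)\n", (void *) right);
	printf("right[0]    = %#lx\n", right[0]);
	return 0;
}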
index 87bd529879c23bb12705fa0144cff354064f91dc..3bb01a323b2a3e0ae9291271f4dc0322f01bfd80 100644 (file)
@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 
        bit = find_last_bit(&mask, BITS_PER_LONG);
 
-       mask = (1 << bit) - 1;
+       mask = (1UL << bit) - 1;
 
        expires_limit = expires_limit & ~(mask);
 
index 1fd4b9479210183762293944be777abb5435f8e3..4a54a25afa2fe67165cb6f65cd0b63c42eb724eb 100644 (file)
@@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod,
        ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify_enter(struct notifier_block *self,
-                                     unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
 {
-       struct module *mod = data;
-
-       if (val == MODULE_STATE_COMING)
-               ftrace_init_module(mod, mod->ftrace_callsites,
-                                  mod->ftrace_callsites +
-                                  mod->num_ftrace_callsites);
-       return 0;
+       ftrace_init_module(mod, mod->ftrace_callsites,
+                          mod->ftrace_callsites +
+                          mod->num_ftrace_callsites);
 }
 
 static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
        return 0;
 }
 #else
-static int ftrace_module_notify_enter(struct notifier_block *self,
-                                     unsigned long val, void *data)
-{
-       return 0;
-}
 static int ftrace_module_notify_exit(struct notifier_block *self,
                                     unsigned long val, void *data)
 {
@@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_enter_nb = {
-       .notifier_call = ftrace_module_notify_enter,
-       .priority = INT_MAX,    /* Run before anything that can use kprobes */
-};
-
 struct notifier_block ftrace_module_exit_nb = {
        .notifier_call = ftrace_module_notify_exit,
        .priority = INT_MIN,    /* Run after anything that can remove kprobes */
@@ -4403,10 +4388,6 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
-       ret = register_module_notifier(&ftrace_module_enter_nb);
-       if (ret)
-               pr_warning("Failed to register trace ftrace module enter notifier\n");
-
        ret = register_module_notifier(&ftrace_module_exit_nb);
        if (ret)
                pr_warning("Failed to register trace ftrace module exit notifier\n");
index 925f537f07d17db7caae363dd39a20bd2296d2ee..4747b476a0300bc3c08ad82f97d6a7a10fb638c4 100644 (file)
@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
                        data->ops->func(data);
                        continue;
                }
-               filter = rcu_dereference(data->filter);
+               filter = rcu_dereference_sched(data->filter);
                if (filter && !filter_match_preds(filter, rec))
                        continue;
                if (data->cmd_ops->post_trigger) {
index ac5b23cf7212c6ebb0045bedce2f13ee8d0a19b3..6620e5837ce2e361e6014caca35632ae48ab67cb 100644 (file)
@@ -188,7 +188,6 @@ static int tracepoint_add_func(struct tracepoint *tp,
                WARN_ON_ONCE(1);
                return PTR_ERR(old);
        }
-       release_probes(old);
 
        /*
         * rcu_assign_pointer has a smp_wmb() which makes sure that the new
@@ -200,6 +199,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
        rcu_assign_pointer(tp->funcs, tp_funcs);
        if (!static_key_enabled(&tp->key))
                static_key_slow_inc(&tp->key);
+       release_probes(old);
        return 0;
 }
 
@@ -221,7 +221,6 @@ static int tracepoint_remove_func(struct tracepoint *tp,
                WARN_ON_ONCE(1);
                return PTR_ERR(old);
        }
-       release_probes(old);
 
        if (!tp_funcs) {
                /* Removed last function */
@@ -232,6 +231,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
                        static_key_slow_dec(&tp->key);
        }
        rcu_assign_pointer(tp->funcs, tp_funcs);
+       release_probes(old);
        return 0;
 }
 
index 0ee63af30bd14a4ad7f4b8f846d19b100fd596b3..8edc87185427cb17fa02ed93498fcf6f8301cb7e 100644 (file)
@@ -1916,6 +1916,12 @@ static void send_mayday(struct work_struct *work)
 
        /* mayday mayday mayday */
        if (list_empty(&pwq->mayday_node)) {
+               /*
+                * If @pwq is for an unbound wq, its base ref may be put at
+                * any time due to an attribute change.  Pin @pwq until the
+                * rescuer is done with it.
+                */
+               get_pwq(pwq);
                list_add_tail(&pwq->mayday_node, &wq->maydays);
                wake_up_process(wq->rescuer->task);
        }
@@ -2398,6 +2404,7 @@ static int rescuer_thread(void *__rescuer)
        struct worker *rescuer = __rescuer;
        struct workqueue_struct *wq = rescuer->rescue_wq;
        struct list_head *scheduled = &rescuer->scheduled;
+       bool should_stop;
 
        set_user_nice(current, RESCUER_NICE_LEVEL);
 
@@ -2409,11 +2416,15 @@ static int rescuer_thread(void *__rescuer)
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
 
-       if (kthread_should_stop()) {
-               __set_current_state(TASK_RUNNING);
-               rescuer->task->flags &= ~PF_WQ_WORKER;
-               return 0;
-       }
+       /*
+        * By the time the rescuer is requested to stop, the workqueue
+        * shouldn't have any work pending, but @wq->maydays may still have
+        * pwq(s) queued.  This can happen by non-rescuer workers consuming
+        * all the work items before the rescuer got to them.  Go through
+        * @wq->maydays processing before acting on should_stop so that the
+        * list is always empty on exit.
+        */
+       should_stop = kthread_should_stop();
 
        /* see whether any pwq is asking for help */
        spin_lock_irq(&wq_mayday_lock);
@@ -2444,6 +2455,12 @@ repeat:
 
                process_scheduled_works(rescuer);
 
+               /*
+                * Put the reference grabbed by send_mayday().  @pool won't
+                * go away while we're holding its lock.
+                */
+               put_pwq(pwq);
+
                /*
                 * Leave this pool.  If keep_working() is %true, notify a
                 * regular worker; otherwise, we end up with 0 concurrency
@@ -2459,6 +2476,12 @@ repeat:
 
        spin_unlock_irq(&wq_mayday_lock);
 
+       if (should_stop) {
+               __set_current_state(TASK_RUNNING);
+               rescuer->task->flags &= ~PF_WQ_WORKER;
+               return 0;
+       }
+
        /* rescuers should never participate in concurrency management */
        WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
        schedule();
@@ -4100,7 +4123,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
        if (!pwq) {
                pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
                           wq->name);
-               goto out_unlock;
+               mutex_lock(&wq->mutex);
+               goto use_dfl_pwq;
        }
 
        /*
index 819ac51202c01006e105f91355d492db6bdd6eb6..d1b7bdfb8f8e174bd584a6ad616d881fcc6635b1 100644 (file)
@@ -1620,6 +1620,19 @@ config TEST_USER_COPY
 
          If unsure, say N.
 
+config TEST_BPF
+       tristate "Test BPF filter functionality"
+       default n
+       depends on m && NET
+       help
+         This builds the "test_bpf" module that runs various test vectors
+         against the BPF interpreter or BPF JIT compiler depending on the
+         current setting. This is in particular useful for BPF JIT compiler
+         development, but also to run regression tests against changes in
+         the interpreter code.
+
+         If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
index 0cd7b68e1382dee93301898da70a6ca51c3d764e..b2be1ef1e8ece2939550b3ffb029d5bbba24dac4 100644 (file)
@@ -33,6 +33,7 @@ obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_MODULE) += test_module.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_BPF) += test_bpf.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
index f1c3a144cec123d73bee9897922bca0fa4dace48..bf6255e239193159afb1fc6fef402b6b011f1b22 100644 (file)
 #include <linux/crc7.h>
 
 
-/* Table for CRC-7 (polynomial x^7 + x^3 + 1) */
-const u8 crc7_syndrome_table[256] = {
-       0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
-       0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
-       0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26,
-       0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e,
-       0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d,
-       0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45,
-       0x2b, 0x22, 0x39, 0x30, 0x0f, 0x06, 0x1d, 0x14,
-       0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c,
-       0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b,
-       0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13,
-       0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42,
-       0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a,
-       0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69,
-       0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21,
-       0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70,
-       0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38,
-       0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e,
-       0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36,
-       0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67,
-       0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f,
-       0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
-       0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04,
-       0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55,
-       0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d,
-       0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a,
-       0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52,
-       0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03,
-       0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b,
-       0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28,
-       0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60,
-       0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31,
-       0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79
+/*
+ * Table for CRC-7 (polynomial x^7 + x^3 + 1).
+ * This is a big-endian CRC (msbit is highest power of x),
+ * aligned so the msbit of the byte is the x^6 coefficient
+ * and the lsbit is not used.
+ */
+const u8 crc7_be_syndrome_table[256] = {
+       0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
+       0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
+       0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
+       0xa2, 0xb0, 0x86, 0x94, 0xea, 0xf8, 0xce, 0xdc,
+       0x64, 0x76, 0x40, 0x52, 0x2c, 0x3e, 0x08, 0x1a,
+       0xf4, 0xe6, 0xd0, 0xc2, 0xbc, 0xae, 0x98, 0x8a,
+       0x56, 0x44, 0x72, 0x60, 0x1e, 0x0c, 0x3a, 0x28,
+       0xc6, 0xd4, 0xe2, 0xf0, 0x8e, 0x9c, 0xaa, 0xb8,
+       0xc8, 0xda, 0xec, 0xfe, 0x80, 0x92, 0xa4, 0xb6,
+       0x58, 0x4a, 0x7c, 0x6e, 0x10, 0x02, 0x34, 0x26,
+       0xfa, 0xe8, 0xde, 0xcc, 0xb2, 0xa0, 0x96, 0x84,
+       0x6a, 0x78, 0x4e, 0x5c, 0x22, 0x30, 0x06, 0x14,
+       0xac, 0xbe, 0x88, 0x9a, 0xe4, 0xf6, 0xc0, 0xd2,
+       0x3c, 0x2e, 0x18, 0x0a, 0x74, 0x66, 0x50, 0x42,
+       0x9e, 0x8c, 0xba, 0xa8, 0xd6, 0xc4, 0xf2, 0xe0,
+       0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x70,
+       0x82, 0x90, 0xa6, 0xb4, 0xca, 0xd8, 0xee, 0xfc,
+       0x12, 0x00, 0x36, 0x24, 0x5a, 0x48, 0x7e, 0x6c,
+       0xb0, 0xa2, 0x94, 0x86, 0xf8, 0xea, 0xdc, 0xce,
+       0x20, 0x32, 0x04, 0x16, 0x68, 0x7a, 0x4c, 0x5e,
+       0xe6, 0xf4, 0xc2, 0xd0, 0xae, 0xbc, 0x8a, 0x98,
+       0x76, 0x64, 0x52, 0x40, 0x3e, 0x2c, 0x1a, 0x08,
+       0xd4, 0xc6, 0xf0, 0xe2, 0x9c, 0x8e, 0xb8, 0xaa,
+       0x44, 0x56, 0x60, 0x72, 0x0c, 0x1e, 0x28, 0x3a,
+       0x4a, 0x58, 0x6e, 0x7c, 0x02, 0x10, 0x26, 0x34,
+       0xda, 0xc8, 0xfe, 0xec, 0x92, 0x80, 0xb6, 0xa4,
+       0x78, 0x6a, 0x5c, 0x4e, 0x30, 0x22, 0x14, 0x06,
+       0xe8, 0xfa, 0xcc, 0xde, 0xa0, 0xb2, 0x84, 0x96,
+       0x2e, 0x3c, 0x0a, 0x18, 0x66, 0x74, 0x42, 0x50,
+       0xbe, 0xac, 0x9a, 0x88, 0xf6, 0xe4, 0xd2, 0xc0,
+       0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
+       0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
 };
-EXPORT_SYMBOL(crc7_syndrome_table);
+EXPORT_SYMBOL(crc7_be_syndrome_table);
 
 /**
  * crc7 - update the CRC7 for the data buffer
@@ -55,14 +60,17 @@ EXPORT_SYMBOL(crc7_syndrome_table);
  * Context: any
  *
  * Returns the updated CRC7 value.
+ * The CRC7 is left-aligned in the byte (the lsbit is always 0), as that
+ * makes the computation easier, and all callers want it in that form.
+ *
  */
-u8 crc7(u8 crc, const u8 *buffer, size_t len)
+u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
 {
        while (len--)
-               crc = crc7_byte(crc, *buffer++);
+               crc = crc7_be_byte(crc, *buffer++);
        return crc;
 }
-EXPORT_SYMBOL(crc7);
+EXPORT_SYMBOL(crc7_be);
 
 MODULE_DESCRIPTION("CRC7 calculations");
 MODULE_LICENSE("GPL");
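To see what the reworked table computes, here is a standalone bit-at-a-time sketch that matches the new crc7_be() (a userspace illustration, not the kernel implementation): because the 7-bit register is left-aligned, the polynomial x^7 + x^3 + 1 (0x09) is applied shifted left once, as 0x12, and MMC/SD callers can form the wire byte by simply OR-ing in the end bit.

#include <stdint.h>
#include <stdio.h>

/* Bit-at-a-time CRC-7, left-aligned like crc7_be_syndrome_table[]. */
static uint8_t crc7_be_bitwise(uint8_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x12)
					   : (uint8_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	/* SD/MMC CMD0: 0x40 followed by a zero argument. */
	const uint8_t cmd0[5] = { 0x40, 0x00, 0x00, 0x00, 0x00 };
	uint8_t crc = crc7_be_bitwise(0, cmd0, sizeof(cmd0));

	/* Prints crc7 = 0x4a, wire byte = 0x95 (the well-known CMD0 CRC). */
	printf("crc7 = 0x%02x, wire byte = 0x%02x\n", crc >> 1, crc | 0x01);
	return 0;
}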
index f23b63f0a1c391303e6a13e7fb6495a6354aa9c0..6745c6230db3403629048256968443f51b777655 100644 (file)
@@ -23,7 +23,7 @@ static void __dump_stack(void)
 #ifdef CONFIG_SMP
 static atomic_t dump_lock = ATOMIC_INIT(-1);
 
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
 {
        int was_locked;
        int old;
@@ -55,7 +55,7 @@ retry:
        preempt_enable();
 }
 #else
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
 {
        __dump_stack();
 }
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
new file mode 100644 (file)
index 0000000..3c4a1e3
--- /dev/null
@@ -0,0 +1,1815 @@
+/*
+ * Testsuite for BPF interpreter and BPF JIT compiler
+ *
+ * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/filter.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+/* General test specific settings */
+#define MAX_SUBTESTS   3
+#define MAX_TESTRUNS   10000
+#define MAX_DATA       128
+#define MAX_INSNS      512
+#define MAX_K          0xffffFFFF
+
+/* Few constants used to init test 'skb' */
+#define SKB_TYPE       3
+#define SKB_MARK       0x1234aaaa
+#define SKB_HASH       0x1234aaab
+#define SKB_QUEUE_MAP  123
+#define SKB_VLAN_TCI   0xffff
+#define SKB_DEV_IFINDEX        577
+#define SKB_DEV_TYPE   588
+
+/* Redefine REGs to make tests less verbose */
+#define R0             BPF_REG_0
+#define R1             BPF_REG_1
+#define R2             BPF_REG_2
+#define R3             BPF_REG_3
+#define R4             BPF_REG_4
+#define R5             BPF_REG_5
+#define R6             BPF_REG_6
+#define R7             BPF_REG_7
+#define R8             BPF_REG_8
+#define R9             BPF_REG_9
+#define R10            BPF_REG_10
+
+/* Flags that can be passed to test cases */
+#define FLAG_NO_DATA           BIT(0)
+#define FLAG_EXPECTED_FAIL     BIT(1)
+
+enum {
+       CLASSIC  = BIT(6),      /* Old BPF instructions only. */
+       INTERNAL = BIT(7),      /* Extended instruction set.  */
+};
+
+#define TEST_TYPE_MASK         (CLASSIC | INTERNAL)
+
+struct bpf_test {
+       const char *descr;
+       union {
+               struct sock_filter insns[MAX_INSNS];
+               struct sock_filter_int insns_int[MAX_INSNS];
+       } u;
+       __u8 aux;
+       __u8 data[MAX_DATA];
+       struct {
+               int data_size;
+               __u32 result;
+       } test[MAX_SUBTESTS];
+};
+
+static struct bpf_test tests[] = {
+       {
+               "TAX",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 2),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_LEN, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
+                       BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { 10, 20, 30, 40, 50 },
+               { { 2, 10 }, { 3, 20 }, { 4, 30 } },
+       },
+       {
+               "TXA",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
+               },
+               CLASSIC,
+               { 10, 20, 30, 40, 50 },
+               { { 1, 2 }, { 3, 6 }, { 4, 8 } },
+       },
+       {
+               "ADD_SUB_MUL_K",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
+                       BPF_STMT(BPF_LDX | BPF_IMM, 3),
+                       BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
+                       BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0xfffffffd } }
+       },
+       {
+               "DIV_KX",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 8),
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0x40000001 } }
+       },
+       {
+               "AND_OR_LSH_K",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xff),
+                       BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+                       BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xf),
+                       BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0x800000ff }, { 1, 0x800000ff } },
+       },
+       {
+               "LD_IND",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
+                       BPF_STMT(BPF_RET | BPF_K, 1)
+               },
+               CLASSIC,
+               { },
+               { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+       },
+       {
+               "LD_ABS",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
+                       BPF_STMT(BPF_RET | BPF_K, 1)
+               },
+               CLASSIC,
+               { },
+               { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+       },
+       {
+               "LD_ABS_LL",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { 1, 2, 3 },
+               { { 1, 0 }, { 2, 3 } },
+       },
+       {
+               "LD_IND_LL",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { 1, 2, 3, 0xff },
+               { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
+       },
+       {
+               "LD_ABS_NET",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+               { { 15, 0 }, { 16, 3 } },
+       },
+       {
+               "LD_IND_NET",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+               { { 14, 0 }, { 15, 1 }, { 17, 3 } },
+       },
+       {
+               "LD_PKTTYPE",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PKTTYPE),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PKTTYPE),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PKTTYPE),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 1, 3 }, { 10, 3 } },
+       },
+       {
+               "LD_MARK",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_MARK),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 1, SKB_MARK}, { 10, SKB_MARK} },
+       },
+       {
+               "LD_RXHASH",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_RXHASH),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 1, SKB_HASH}, { 10, SKB_HASH} },
+       },
+       {
+               "LD_QUEUE",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_QUEUE),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
+       },
+       {
+               "LD_PROTOCOL",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PROTOCOL),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { 10, 20, 30 },
+               { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
+       },
+       {
+               "LD_VLAN_TAG",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_VLAN_TAG),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               {
+                       { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
+                       { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
+               },
+       },
+       {
+               "LD_VLAN_TAG_PRESENT",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               {
+                       { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
+                       { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+               },
+       },
+       {
+               "LD_IFINDEX",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_IFINDEX),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
+       },
+       {
+               "LD_HATYPE",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_HATYPE),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
+       },
+       {
+               "LD_CPU",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_CPU),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_CPU),
+                       BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 1, 0 }, { 10, 0 } },
+       },
+       {
+               "LD_NLATTR",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 1),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_LDX | BPF_IMM, 3),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
+               { { 4, 0 }, { 20, 5 } },
+       },
+       {
+               "LD_NLATTR_NEST",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LDX | BPF_IMM, 3),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
+               { { 4, 0 }, { 20, 9 } },
+       },
+       {
+               "LD_PAYLOAD_OFF",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethertype IPv4 (0x0800),
+                * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
+                * id 9737, seq 1, length 64
+                */
+               { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                 0x08, 0x00,
+                 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
+                 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
+               { { 30, 0 }, { 100, 42 } },
+       },
+       {
+               "LD_ANC_XOR",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 10),
+                       BPF_STMT(BPF_LDX | BPF_IMM, 300),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_ALU_XOR_X),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
+       },
+       {
+               "SPILL_FILL",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 2),
+                       BPF_STMT(BPF_ALU | BPF_RSH, 1),
+                       BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+                       BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
+                       BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
+                       BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
+                       BPF_STMT(BPF_STX, 15), /* M3 = len */
+                       BPF_STMT(BPF_LDX | BPF_MEM, 1),
+                       BPF_STMT(BPF_LD | BPF_MEM, 2),
+                       BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 15),
+                       BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
+       },
+       {
+               "JEQ",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               CLASSIC,
+               { 3, 3, 3, 3, 3 },
+               { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
+       },
+       {
+               "JGT",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+                       BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               CLASSIC,
+               { 4, 4, 4, 3, 3 },
+               { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
+       },
+       {
+               "JGE",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 10),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 20),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 40),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               CLASSIC,
+               { 1, 2, 3, 4, 5 },
+               { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
+       },
+       {
+               "JSET",
+               .u.insns = {
+                       BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
+                       BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 10),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 20),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               CLASSIC,
+               { 0, 0xAA, 0x55, 1 },
+               { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
+       },
+       {
+               "tcpdump port 22",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 0xffff),
+                       BPF_STMT(BPF_RET | BPF_K, 0),
+               },
+               CLASSIC,
+               /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4 (0x0800),
+                * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
+                * seq 1305692979:1305693027, ack 3650467037, win 65535,
+                * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
+                */
+               { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+                 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+                 0x08, 0x00,
+                 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+                 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+                 0x0a, 0x01, 0x01, 0x95, /* ip src */
+                 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+                 0xc2, 0x24,
+                 0x00, 0x16 /* dst port */ },
+               { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+       },
+       {
+               "tcpdump complex",
+               .u.insns = {
+                       /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
+                        * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
+                        * (len > 115 or len < 30000000000)' -d
+                        */
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
+                       BPF_STMT(BPF_ST, 1),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
+                       BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
+                       BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
+                       BPF_STMT(BPF_LD | BPF_MEM, 1),
+                       BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+                       BPF_STMT(BPF_ST, 5),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
+                       BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+                       BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
+                       BPF_STMT(BPF_LD | BPF_MEM, 5),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
+                       BPF_STMT(BPF_LD | BPF_LEN, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0xffff),
+                       BPF_STMT(BPF_RET | BPF_K, 0),
+               },
+               CLASSIC,
+               { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+                 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+                 0x08, 0x00,
+                 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+                 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+                 0x0a, 0x01, 0x01, 0x95, /* ip src */
+                 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+                 0xc2, 0x24,
+                 0x00, 0x16 /* dst port */ },
+               { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+       },
+       {
+               "RET_A",
+               .u.insns = {
+                       /* check that uninitialized X and A contain zeros */
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               CLASSIC,
+               { },
+               { { 1, 0 }, { 2, 0 } },
+       },
+       {
+               "INT: ADD trivial",
+               .u.insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 3),
+                       BPF_ALU64_REG(BPF_SUB, R1, R2),
+                       BPF_ALU64_IMM(BPF_ADD, R1, -1),
+                       BPF_ALU64_IMM(BPF_MUL, R1, 3),
+                       BPF_ALU64_REG(BPF_MOV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xfffffffd } }
+       },
+       {
+               "INT: MUL_X",
+               .u.insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, -1),
+                       BPF_ALU64_IMM(BPF_MOV, R1, -1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 3),
+                       BPF_ALU64_REG(BPF_MUL, R1, R2),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } }
+       },
+       {
+               "INT: MUL_X2",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, -1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, -1),
+                       BPF_ALU32_IMM(BPF_MOV, R2, 3),
+                       BPF_ALU64_REG(BPF_MUL, R1, R2),
+                       BPF_ALU64_IMM(BPF_RSH, R1, 8),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } }
+       },
+       {
+               "INT: MUL32_X",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, -1),
+                       BPF_ALU64_IMM(BPF_MOV, R1, -1),
+                       BPF_ALU32_IMM(BPF_MOV, R2, 3),
+                       BPF_ALU32_REG(BPF_MUL, R1, R2),
+                       BPF_ALU64_IMM(BPF_RSH, R1, 8),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } }
+       },
+       {
+               /* Have to test all register combinations, since
+                * JITing of different registers will produce
+                * different asm code.
+                */
+               "INT: ADD 64-bit",
+               .u.insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 0),
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU64_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU64_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU64_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU64_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU64_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU64_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_IMM(BPF_ADD, R0, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R3, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R4, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R5, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R6, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R7, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R8, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R9, 20),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R1, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R2, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R3, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R4, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R5, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R6, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R7, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R8, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R9, 10),
+                       BPF_ALU64_REG(BPF_ADD, R0, R0),
+                       BPF_ALU64_REG(BPF_ADD, R0, R1),
+                       BPF_ALU64_REG(BPF_ADD, R0, R2),
+                       BPF_ALU64_REG(BPF_ADD, R0, R3),
+                       BPF_ALU64_REG(BPF_ADD, R0, R4),
+                       BPF_ALU64_REG(BPF_ADD, R0, R5),
+                       BPF_ALU64_REG(BPF_ADD, R0, R6),
+                       BPF_ALU64_REG(BPF_ADD, R0, R7),
+                       BPF_ALU64_REG(BPF_ADD, R0, R8),
+                       BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+                       BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R1, R0),
+                       BPF_ALU64_REG(BPF_ADD, R1, R1),
+                       BPF_ALU64_REG(BPF_ADD, R1, R2),
+                       BPF_ALU64_REG(BPF_ADD, R1, R3),
+                       BPF_ALU64_REG(BPF_ADD, R1, R4),
+                       BPF_ALU64_REG(BPF_ADD, R1, R5),
+                       BPF_ALU64_REG(BPF_ADD, R1, R6),
+                       BPF_ALU64_REG(BPF_ADD, R1, R7),
+                       BPF_ALU64_REG(BPF_ADD, R1, R8),
+                       BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+                       BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R2, R0),
+                       BPF_ALU64_REG(BPF_ADD, R2, R1),
+                       BPF_ALU64_REG(BPF_ADD, R2, R2),
+                       BPF_ALU64_REG(BPF_ADD, R2, R3),
+                       BPF_ALU64_REG(BPF_ADD, R2, R4),
+                       BPF_ALU64_REG(BPF_ADD, R2, R5),
+                       BPF_ALU64_REG(BPF_ADD, R2, R6),
+                       BPF_ALU64_REG(BPF_ADD, R2, R7),
+                       BPF_ALU64_REG(BPF_ADD, R2, R8),
+                       BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+                       BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R3, R0),
+                       BPF_ALU64_REG(BPF_ADD, R3, R1),
+                       BPF_ALU64_REG(BPF_ADD, R3, R2),
+                       BPF_ALU64_REG(BPF_ADD, R3, R3),
+                       BPF_ALU64_REG(BPF_ADD, R3, R4),
+                       BPF_ALU64_REG(BPF_ADD, R3, R5),
+                       BPF_ALU64_REG(BPF_ADD, R3, R6),
+                       BPF_ALU64_REG(BPF_ADD, R3, R7),
+                       BPF_ALU64_REG(BPF_ADD, R3, R8),
+                       BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+                       BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R4, R0),
+                       BPF_ALU64_REG(BPF_ADD, R4, R1),
+                       BPF_ALU64_REG(BPF_ADD, R4, R2),
+                       BPF_ALU64_REG(BPF_ADD, R4, R3),
+                       BPF_ALU64_REG(BPF_ADD, R4, R4),
+                       BPF_ALU64_REG(BPF_ADD, R4, R5),
+                       BPF_ALU64_REG(BPF_ADD, R4, R6),
+                       BPF_ALU64_REG(BPF_ADD, R4, R7),
+                       BPF_ALU64_REG(BPF_ADD, R4, R8),
+                       BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+                       BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R5, R0),
+                       BPF_ALU64_REG(BPF_ADD, R5, R1),
+                       BPF_ALU64_REG(BPF_ADD, R5, R2),
+                       BPF_ALU64_REG(BPF_ADD, R5, R3),
+                       BPF_ALU64_REG(BPF_ADD, R5, R4),
+                       BPF_ALU64_REG(BPF_ADD, R5, R5),
+                       BPF_ALU64_REG(BPF_ADD, R5, R6),
+                       BPF_ALU64_REG(BPF_ADD, R5, R7),
+                       BPF_ALU64_REG(BPF_ADD, R5, R8),
+                       BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+                       BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R6, R0),
+                       BPF_ALU64_REG(BPF_ADD, R6, R1),
+                       BPF_ALU64_REG(BPF_ADD, R6, R2),
+                       BPF_ALU64_REG(BPF_ADD, R6, R3),
+                       BPF_ALU64_REG(BPF_ADD, R6, R4),
+                       BPF_ALU64_REG(BPF_ADD, R6, R5),
+                       BPF_ALU64_REG(BPF_ADD, R6, R6),
+                       BPF_ALU64_REG(BPF_ADD, R6, R7),
+                       BPF_ALU64_REG(BPF_ADD, R6, R8),
+                       BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+                       BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R7, R0),
+                       BPF_ALU64_REG(BPF_ADD, R7, R1),
+                       BPF_ALU64_REG(BPF_ADD, R7, R2),
+                       BPF_ALU64_REG(BPF_ADD, R7, R3),
+                       BPF_ALU64_REG(BPF_ADD, R7, R4),
+                       BPF_ALU64_REG(BPF_ADD, R7, R5),
+                       BPF_ALU64_REG(BPF_ADD, R7, R6),
+                       BPF_ALU64_REG(BPF_ADD, R7, R7),
+                       BPF_ALU64_REG(BPF_ADD, R7, R8),
+                       BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+                       BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R8, R0),
+                       BPF_ALU64_REG(BPF_ADD, R8, R1),
+                       BPF_ALU64_REG(BPF_ADD, R8, R2),
+                       BPF_ALU64_REG(BPF_ADD, R8, R3),
+                       BPF_ALU64_REG(BPF_ADD, R8, R4),
+                       BPF_ALU64_REG(BPF_ADD, R8, R5),
+                       BPF_ALU64_REG(BPF_ADD, R8, R6),
+                       BPF_ALU64_REG(BPF_ADD, R8, R7),
+                       BPF_ALU64_REG(BPF_ADD, R8, R8),
+                       BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+                       BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R9, R0),
+                       BPF_ALU64_REG(BPF_ADD, R9, R1),
+                       BPF_ALU64_REG(BPF_ADD, R9, R2),
+                       BPF_ALU64_REG(BPF_ADD, R9, R3),
+                       BPF_ALU64_REG(BPF_ADD, R9, R4),
+                       BPF_ALU64_REG(BPF_ADD, R9, R5),
+                       BPF_ALU64_REG(BPF_ADD, R9, R6),
+                       BPF_ALU64_REG(BPF_ADD, R9, R7),
+                       BPF_ALU64_REG(BPF_ADD, R9, R8),
+                       BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+                       BPF_ALU64_REG(BPF_MOV, R0, R9),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2957380 } }
+       },
+       {
+               "INT: ADD 32-bit",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 20),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU32_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU32_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU32_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU32_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU32_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R3, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R4, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R5, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R6, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R7, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R8, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R9, 10),
+                       BPF_ALU32_REG(BPF_ADD, R0, R1),
+                       BPF_ALU32_REG(BPF_ADD, R0, R2),
+                       BPF_ALU32_REG(BPF_ADD, R0, R3),
+                       BPF_ALU32_REG(BPF_ADD, R0, R4),
+                       BPF_ALU32_REG(BPF_ADD, R0, R5),
+                       BPF_ALU32_REG(BPF_ADD, R0, R6),
+                       BPF_ALU32_REG(BPF_ADD, R0, R7),
+                       BPF_ALU32_REG(BPF_ADD, R0, R8),
+                       BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+                       BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R1, R0),
+                       BPF_ALU32_REG(BPF_ADD, R1, R1),
+                       BPF_ALU32_REG(BPF_ADD, R1, R2),
+                       BPF_ALU32_REG(BPF_ADD, R1, R3),
+                       BPF_ALU32_REG(BPF_ADD, R1, R4),
+                       BPF_ALU32_REG(BPF_ADD, R1, R5),
+                       BPF_ALU32_REG(BPF_ADD, R1, R6),
+                       BPF_ALU32_REG(BPF_ADD, R1, R7),
+                       BPF_ALU32_REG(BPF_ADD, R1, R8),
+                       BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+                       BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R2, R0),
+                       BPF_ALU32_REG(BPF_ADD, R2, R1),
+                       BPF_ALU32_REG(BPF_ADD, R2, R2),
+                       BPF_ALU32_REG(BPF_ADD, R2, R3),
+                       BPF_ALU32_REG(BPF_ADD, R2, R4),
+                       BPF_ALU32_REG(BPF_ADD, R2, R5),
+                       BPF_ALU32_REG(BPF_ADD, R2, R6),
+                       BPF_ALU32_REG(BPF_ADD, R2, R7),
+                       BPF_ALU32_REG(BPF_ADD, R2, R8),
+                       BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+                       BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R3, R0),
+                       BPF_ALU32_REG(BPF_ADD, R3, R1),
+                       BPF_ALU32_REG(BPF_ADD, R3, R2),
+                       BPF_ALU32_REG(BPF_ADD, R3, R3),
+                       BPF_ALU32_REG(BPF_ADD, R3, R4),
+                       BPF_ALU32_REG(BPF_ADD, R3, R5),
+                       BPF_ALU32_REG(BPF_ADD, R3, R6),
+                       BPF_ALU32_REG(BPF_ADD, R3, R7),
+                       BPF_ALU32_REG(BPF_ADD, R3, R8),
+                       BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+                       BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R4, R0),
+                       BPF_ALU32_REG(BPF_ADD, R4, R1),
+                       BPF_ALU32_REG(BPF_ADD, R4, R2),
+                       BPF_ALU32_REG(BPF_ADD, R4, R3),
+                       BPF_ALU32_REG(BPF_ADD, R4, R4),
+                       BPF_ALU32_REG(BPF_ADD, R4, R5),
+                       BPF_ALU32_REG(BPF_ADD, R4, R6),
+                       BPF_ALU32_REG(BPF_ADD, R4, R7),
+                       BPF_ALU32_REG(BPF_ADD, R4, R8),
+                       BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+                       BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R5, R0),
+                       BPF_ALU32_REG(BPF_ADD, R5, R1),
+                       BPF_ALU32_REG(BPF_ADD, R5, R2),
+                       BPF_ALU32_REG(BPF_ADD, R5, R3),
+                       BPF_ALU32_REG(BPF_ADD, R5, R4),
+                       BPF_ALU32_REG(BPF_ADD, R5, R5),
+                       BPF_ALU32_REG(BPF_ADD, R5, R6),
+                       BPF_ALU32_REG(BPF_ADD, R5, R7),
+                       BPF_ALU32_REG(BPF_ADD, R5, R8),
+                       BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+                       BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R6, R0),
+                       BPF_ALU32_REG(BPF_ADD, R6, R1),
+                       BPF_ALU32_REG(BPF_ADD, R6, R2),
+                       BPF_ALU32_REG(BPF_ADD, R6, R3),
+                       BPF_ALU32_REG(BPF_ADD, R6, R4),
+                       BPF_ALU32_REG(BPF_ADD, R6, R5),
+                       BPF_ALU32_REG(BPF_ADD, R6, R6),
+                       BPF_ALU32_REG(BPF_ADD, R6, R7),
+                       BPF_ALU32_REG(BPF_ADD, R6, R8),
+                       BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+                       BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R7, R0),
+                       BPF_ALU32_REG(BPF_ADD, R7, R1),
+                       BPF_ALU32_REG(BPF_ADD, R7, R2),
+                       BPF_ALU32_REG(BPF_ADD, R7, R3),
+                       BPF_ALU32_REG(BPF_ADD, R7, R4),
+                       BPF_ALU32_REG(BPF_ADD, R7, R5),
+                       BPF_ALU32_REG(BPF_ADD, R7, R6),
+                       BPF_ALU32_REG(BPF_ADD, R7, R7),
+                       BPF_ALU32_REG(BPF_ADD, R7, R8),
+                       BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+                       BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R8, R0),
+                       BPF_ALU32_REG(BPF_ADD, R8, R1),
+                       BPF_ALU32_REG(BPF_ADD, R8, R2),
+                       BPF_ALU32_REG(BPF_ADD, R8, R3),
+                       BPF_ALU32_REG(BPF_ADD, R8, R4),
+                       BPF_ALU32_REG(BPF_ADD, R8, R5),
+                       BPF_ALU32_REG(BPF_ADD, R8, R6),
+                       BPF_ALU32_REG(BPF_ADD, R8, R7),
+                       BPF_ALU32_REG(BPF_ADD, R8, R8),
+                       BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+                       BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R9, R0),
+                       BPF_ALU32_REG(BPF_ADD, R9, R1),
+                       BPF_ALU32_REG(BPF_ADD, R9, R2),
+                       BPF_ALU32_REG(BPF_ADD, R9, R3),
+                       BPF_ALU32_REG(BPF_ADD, R9, R4),
+                       BPF_ALU32_REG(BPF_ADD, R9, R5),
+                       BPF_ALU32_REG(BPF_ADD, R9, R6),
+                       BPF_ALU32_REG(BPF_ADD, R9, R7),
+                       BPF_ALU32_REG(BPF_ADD, R9, R8),
+                       BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+                       BPF_ALU32_REG(BPF_MOV, R0, R9),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2957380 } }
+       },
+       {       /* Mainly checking JIT here. */
+               "INT: SUB",
+               .u.insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 0),
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU64_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU64_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU64_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU64_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU64_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU64_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_REG(BPF_SUB, R0, R0),
+                       BPF_ALU64_REG(BPF_SUB, R0, R1),
+                       BPF_ALU64_REG(BPF_SUB, R0, R2),
+                       BPF_ALU64_REG(BPF_SUB, R0, R3),
+                       BPF_ALU64_REG(BPF_SUB, R0, R4),
+                       BPF_ALU64_REG(BPF_SUB, R0, R5),
+                       BPF_ALU64_REG(BPF_SUB, R0, R6),
+                       BPF_ALU64_REG(BPF_SUB, R0, R7),
+                       BPF_ALU64_REG(BPF_SUB, R0, R8),
+                       BPF_ALU64_REG(BPF_SUB, R0, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 10),
+                       BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R1, R0),
+                       BPF_ALU64_REG(BPF_SUB, R1, R2),
+                       BPF_ALU64_REG(BPF_SUB, R1, R3),
+                       BPF_ALU64_REG(BPF_SUB, R1, R4),
+                       BPF_ALU64_REG(BPF_SUB, R1, R5),
+                       BPF_ALU64_REG(BPF_SUB, R1, R6),
+                       BPF_ALU64_REG(BPF_SUB, R1, R7),
+                       BPF_ALU64_REG(BPF_SUB, R1, R8),
+                       BPF_ALU64_REG(BPF_SUB, R1, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R1, 10),
+                       BPF_ALU64_REG(BPF_SUB, R2, R0),
+                       BPF_ALU64_REG(BPF_SUB, R2, R1),
+                       BPF_ALU64_REG(BPF_SUB, R2, R3),
+                       BPF_ALU64_REG(BPF_SUB, R2, R4),
+                       BPF_ALU64_REG(BPF_SUB, R2, R5),
+                       BPF_ALU64_REG(BPF_SUB, R2, R6),
+                       BPF_ALU64_REG(BPF_SUB, R2, R7),
+                       BPF_ALU64_REG(BPF_SUB, R2, R8),
+                       BPF_ALU64_REG(BPF_SUB, R2, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R2, 10),
+                       BPF_ALU64_REG(BPF_SUB, R3, R0),
+                       BPF_ALU64_REG(BPF_SUB, R3, R1),
+                       BPF_ALU64_REG(BPF_SUB, R3, R2),
+                       BPF_ALU64_REG(BPF_SUB, R3, R4),
+                       BPF_ALU64_REG(BPF_SUB, R3, R5),
+                       BPF_ALU64_REG(BPF_SUB, R3, R6),
+                       BPF_ALU64_REG(BPF_SUB, R3, R7),
+                       BPF_ALU64_REG(BPF_SUB, R3, R8),
+                       BPF_ALU64_REG(BPF_SUB, R3, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R3, 10),
+                       BPF_ALU64_REG(BPF_SUB, R4, R0),
+                       BPF_ALU64_REG(BPF_SUB, R4, R1),
+                       BPF_ALU64_REG(BPF_SUB, R4, R2),
+                       BPF_ALU64_REG(BPF_SUB, R4, R3),
+                       BPF_ALU64_REG(BPF_SUB, R4, R5),
+                       BPF_ALU64_REG(BPF_SUB, R4, R6),
+                       BPF_ALU64_REG(BPF_SUB, R4, R7),
+                       BPF_ALU64_REG(BPF_SUB, R4, R8),
+                       BPF_ALU64_REG(BPF_SUB, R4, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R4, 10),
+                       BPF_ALU64_REG(BPF_SUB, R5, R0),
+                       BPF_ALU64_REG(BPF_SUB, R5, R1),
+                       BPF_ALU64_REG(BPF_SUB, R5, R2),
+                       BPF_ALU64_REG(BPF_SUB, R5, R3),
+                       BPF_ALU64_REG(BPF_SUB, R5, R4),
+                       BPF_ALU64_REG(BPF_SUB, R5, R6),
+                       BPF_ALU64_REG(BPF_SUB, R5, R7),
+                       BPF_ALU64_REG(BPF_SUB, R5, R8),
+                       BPF_ALU64_REG(BPF_SUB, R5, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R5, 10),
+                       BPF_ALU64_REG(BPF_SUB, R6, R0),
+                       BPF_ALU64_REG(BPF_SUB, R6, R1),
+                       BPF_ALU64_REG(BPF_SUB, R6, R2),
+                       BPF_ALU64_REG(BPF_SUB, R6, R3),
+                       BPF_ALU64_REG(BPF_SUB, R6, R4),
+                       BPF_ALU64_REG(BPF_SUB, R6, R5),
+                       BPF_ALU64_REG(BPF_SUB, R6, R7),
+                       BPF_ALU64_REG(BPF_SUB, R6, R8),
+                       BPF_ALU64_REG(BPF_SUB, R6, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R6, 10),
+                       BPF_ALU64_REG(BPF_SUB, R7, R0),
+                       BPF_ALU64_REG(BPF_SUB, R7, R1),
+                       BPF_ALU64_REG(BPF_SUB, R7, R2),
+                       BPF_ALU64_REG(BPF_SUB, R7, R3),
+                       BPF_ALU64_REG(BPF_SUB, R7, R4),
+                       BPF_ALU64_REG(BPF_SUB, R7, R5),
+                       BPF_ALU64_REG(BPF_SUB, R7, R6),
+                       BPF_ALU64_REG(BPF_SUB, R7, R8),
+                       BPF_ALU64_REG(BPF_SUB, R7, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R7, 10),
+                       BPF_ALU64_REG(BPF_SUB, R8, R0),
+                       BPF_ALU64_REG(BPF_SUB, R8, R1),
+                       BPF_ALU64_REG(BPF_SUB, R8, R2),
+                       BPF_ALU64_REG(BPF_SUB, R8, R3),
+                       BPF_ALU64_REG(BPF_SUB, R8, R4),
+                       BPF_ALU64_REG(BPF_SUB, R8, R5),
+                       BPF_ALU64_REG(BPF_SUB, R8, R6),
+                       BPF_ALU64_REG(BPF_SUB, R8, R7),
+                       BPF_ALU64_REG(BPF_SUB, R8, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R8, 10),
+                       BPF_ALU64_REG(BPF_SUB, R9, R0),
+                       BPF_ALU64_REG(BPF_SUB, R9, R1),
+                       BPF_ALU64_REG(BPF_SUB, R9, R2),
+                       BPF_ALU64_REG(BPF_SUB, R9, R3),
+                       BPF_ALU64_REG(BPF_SUB, R9, R4),
+                       BPF_ALU64_REG(BPF_SUB, R9, R5),
+                       BPF_ALU64_REG(BPF_SUB, R9, R6),
+                       BPF_ALU64_REG(BPF_SUB, R9, R7),
+                       BPF_ALU64_REG(BPF_SUB, R9, R8),
+                       BPF_ALU64_IMM(BPF_SUB, R9, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 10),
+                       BPF_ALU64_IMM(BPF_NEG, R0, 0),
+                       BPF_ALU64_REG(BPF_SUB, R0, R1),
+                       BPF_ALU64_REG(BPF_SUB, R0, R2),
+                       BPF_ALU64_REG(BPF_SUB, R0, R3),
+                       BPF_ALU64_REG(BPF_SUB, R0, R4),
+                       BPF_ALU64_REG(BPF_SUB, R0, R5),
+                       BPF_ALU64_REG(BPF_SUB, R0, R6),
+                       BPF_ALU64_REG(BPF_SUB, R0, R7),
+                       BPF_ALU64_REG(BPF_SUB, R0, R8),
+                       BPF_ALU64_REG(BPF_SUB, R0, R9),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 11 } }
+       },
+       {       /* Mainly checking JIT here. */
+               "INT: XOR",
+               .u.insns_int = {
+                       BPF_ALU64_REG(BPF_SUB, R0, R0),
+                       BPF_ALU64_REG(BPF_XOR, R1, R1),
+                       BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 10),
+                       BPF_ALU64_IMM(BPF_MOV, R1, -1),
+                       BPF_ALU64_REG(BPF_SUB, R1, R1),
+                       BPF_ALU64_REG(BPF_XOR, R2, R2),
+                       BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R2, R2),
+                       BPF_ALU64_REG(BPF_XOR, R3, R3),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 10),
+                       BPF_ALU64_IMM(BPF_MOV, R1, -1),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R3, R3),
+                       BPF_ALU64_REG(BPF_XOR, R4, R4),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R5, -1),
+                       BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R4, R4),
+                       BPF_ALU64_REG(BPF_XOR, R5, R5),
+                       BPF_ALU64_IMM(BPF_MOV, R3, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R7, -1),
+                       BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R5, 1),
+                       BPF_ALU64_REG(BPF_SUB, R5, R5),
+                       BPF_ALU64_REG(BPF_XOR, R6, R6),
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R8, -1),
+                       BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R6, R6),
+                       BPF_ALU64_REG(BPF_XOR, R7, R7),
+                       BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R7, R7),
+                       BPF_ALU64_REG(BPF_XOR, R8, R8),
+                       BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R8, R8),
+                       BPF_ALU64_REG(BPF_XOR, R9, R9),
+                       BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R9, R9),
+                       BPF_ALU64_REG(BPF_XOR, R0, R0),
+                       BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R1, R1),
+                       BPF_ALU64_REG(BPF_XOR, R0, R0),
+                       BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } }
+       },
+       {       /* Mainly checking JIT here. */
+               "INT: MUL",
+               .u.insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 11),
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU64_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU64_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU64_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU64_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU64_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU64_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_REG(BPF_MUL, R0, R0),
+                       BPF_ALU64_REG(BPF_MUL, R0, R1),
+                       BPF_ALU64_REG(BPF_MUL, R0, R2),
+                       BPF_ALU64_REG(BPF_MUL, R0, R3),
+                       BPF_ALU64_REG(BPF_MUL, R0, R4),
+                       BPF_ALU64_REG(BPF_MUL, R0, R5),
+                       BPF_ALU64_REG(BPF_MUL, R0, R6),
+                       BPF_ALU64_REG(BPF_MUL, R0, R7),
+                       BPF_ALU64_REG(BPF_MUL, R0, R8),
+                       BPF_ALU64_REG(BPF_MUL, R0, R9),
+                       BPF_ALU64_IMM(BPF_MUL, R0, 10),
+                       BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_MUL, R1, R0),
+                       BPF_ALU64_REG(BPF_MUL, R1, R2),
+                       BPF_ALU64_REG(BPF_MUL, R1, R3),
+                       BPF_ALU64_REG(BPF_MUL, R1, R4),
+                       BPF_ALU64_REG(BPF_MUL, R1, R5),
+                       BPF_ALU64_REG(BPF_MUL, R1, R6),
+                       BPF_ALU64_REG(BPF_MUL, R1, R7),
+                       BPF_ALU64_REG(BPF_MUL, R1, R8),
+                       BPF_ALU64_REG(BPF_MUL, R1, R9),
+                       BPF_ALU64_IMM(BPF_MUL, R1, 10),
+                       BPF_ALU64_REG(BPF_MOV, R2, R1),
+                       BPF_ALU64_IMM(BPF_RSH, R2, 32),
+                       BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_LSH, R1, 32),
+                       BPF_ALU64_IMM(BPF_ARSH, R1, 32),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_MUL, R2, R0),
+                       BPF_ALU64_REG(BPF_MUL, R2, R1),
+                       BPF_ALU64_REG(BPF_MUL, R2, R3),
+                       BPF_ALU64_REG(BPF_MUL, R2, R4),
+                       BPF_ALU64_REG(BPF_MUL, R2, R5),
+                       BPF_ALU64_REG(BPF_MUL, R2, R6),
+                       BPF_ALU64_REG(BPF_MUL, R2, R7),
+                       BPF_ALU64_REG(BPF_MUL, R2, R8),
+                       BPF_ALU64_REG(BPF_MUL, R2, R9),
+                       BPF_ALU64_IMM(BPF_MUL, R2, 10),
+                       BPF_ALU64_IMM(BPF_RSH, R2, 32),
+                       BPF_ALU64_REG(BPF_MOV, R0, R2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x35d97ef2 } }
+       },
+       {
+               "INT: ALU MIX",
+               .u.insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 11),
+                       BPF_ALU64_IMM(BPF_ADD, R0, -1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU64_IMM(BPF_XOR, R2, 3),
+                       BPF_ALU64_REG(BPF_DIV, R0, R2),
+                       BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOD, R0, 3),
+                       BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R0, -1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, -1 } }
+       },
+       {
+               "INT: DIV + ABS",
+               .u.insns_int = {
+                       BPF_ALU64_REG(BPF_MOV, R6, R1),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU32_REG(BPF_DIV, R0, R2),
+                       BPF_ALU64_REG(BPF_MOV, R8, R0),
+                       BPF_LD_ABS(BPF_B, 4),
+                       BPF_ALU64_REG(BPF_ADD, R8, R0),
+                       BPF_LD_IND(BPF_B, R8, -70),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { 10, 20, 30, 40, 50 },
+               { { 4, 0 }, { 5, 10 } }
+       },
+       {
+               "INT: DIV by zero",
+               .u.insns_int = {
+                       BPF_ALU64_REG(BPF_MOV, R6, R1),
+                       BPF_ALU64_IMM(BPF_MOV, R7, 0),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU32_REG(BPF_DIV, R0, R7),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { 10, 20, 30, 40, 50 },
+               { { 3, 0 }, { 4, 0 } }
+       },
+       {
+               "check: missing ret",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+               },
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+               { },
+               { }
+       },
+       {
+               "check: div_k_0",
+               .u.insns = {
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0)
+               },
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+               { },
+               { }
+       },
+       {
+               "check: unknown insn",
+               .u.insns = {
+                       /* seccomp insn, rejected in socket filter */
+                       BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0)
+               },
+               CLASSIC | FLAG_EXPECTED_FAIL,
+               { },
+               { }
+       },
+       {
+               "check: out of range spill/fill",
+               .u.insns = {
+                       BPF_STMT(BPF_STX, 16),
+                       BPF_STMT(BPF_RET | BPF_K, 0)
+               },
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+               { },
+               { }
+       },
+       {
+               "JUMPS + HOLES",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+                       BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+                       BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0),
+               },
+               CLASSIC,
+               { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
+                 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
+                 0x08, 0x00,
+                 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
+                 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
+                 0xc0, 0xa8, 0x33, 0x01,
+                 0xc0, 0xa8, 0x33, 0x02,
+                 0xbb, 0xb6,
+                 0xa9, 0xfa,
+                 0x00, 0x14, 0x00, 0x00,
+                 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+                 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+                 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+                 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+                 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+                 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+                 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+                 0xcc, 0xcc, 0xcc, 0xcc },
+               { { 88, 0x001b } }
+       },
+       {
+               "check: RET X",
+               .u.insns = {
+                       BPF_STMT(BPF_RET | BPF_X, 0),
+               },
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+               { },
+               { },
+       },
+       {
+               "check: LDX + RET X",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 42),
+                       BPF_STMT(BPF_RET | BPF_X, 0),
+               },
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+               { },
+               { },
+       },
+       {       /* Mainly checking JIT here. */
+               "M[]: STX + LDX",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 100),
+                       BPF_STMT(BPF_STX, 0),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 0),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 1),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 1),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 2),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 2),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 3),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 3),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 4),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 4),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 5),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 5),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 6),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 6),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 7),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 7),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 8),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 8),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 9),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 9),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 10),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 10),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 11),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 11),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 12),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 12),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 13),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 13),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 14),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 14),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_STX, 15),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 15),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 116 } },
+       },
+};
+
+static struct net_device dev;
+
+static struct sk_buff *populate_skb(char *buf, int size)
+{
+       struct sk_buff *skb;
+
+       if (size >= MAX_DATA)
+               return NULL;
+
+       skb = alloc_skb(MAX_DATA, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+
+       memcpy(__skb_put(skb, size), buf, size);
+
+       /* Initialize a fake skb with test pattern. */
+       skb_reset_mac_header(skb);
+       skb->protocol = htons(ETH_P_IP);
+       skb->pkt_type = SKB_TYPE;
+       skb->mark = SKB_MARK;
+       skb->hash = SKB_HASH;
+       skb->queue_mapping = SKB_QUEUE_MAP;
+       skb->vlan_tci = SKB_VLAN_TCI;
+       skb->dev = &dev;
+       skb->dev->ifindex = SKB_DEV_IFINDEX;
+       skb->dev->type = SKB_DEV_TYPE;
+       skb_set_network_header(skb, min(size, ETH_HLEN));
+
+       return skb;
+}
+
+static void *generate_test_data(struct bpf_test *test, int sub)
+{
+       if (test->aux & FLAG_NO_DATA)
+               return NULL;
+
+       /* Test case expects an skb, so populate one. Various
+        * subtests generate skbs of different sizes based on
+        * the same data.
+        */
+       return populate_skb(test->data, test->test[sub].data_size);
+}
+
+static void release_test_data(const struct bpf_test *test, void *data)
+{
+       if (test->aux & FLAG_NO_DATA)
+               return;
+
+       kfree_skb(data);
+}
+
+static int probe_filter_length(struct sock_filter *fp)
+{
+       int len = 0;
+
+       while (fp->code != 0 || fp->k != 0) {
+               fp++;
+               len++;
+       }
+
+       return len;
+}
+
+static struct sk_filter *generate_filter(int which, int *err)
+{
+       struct sk_filter *fp;
+       struct sock_fprog_kern fprog;
+       unsigned int flen = probe_filter_length(tests[which].u.insns);
+       __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+
+       switch (test_type) {
+       case CLASSIC:
+               fprog.filter = tests[which].u.insns;
+               fprog.len = flen;
+
+               *err = sk_unattached_filter_create(&fp, &fprog);
+               if (tests[which].aux & FLAG_EXPECTED_FAIL) {
+                       if (*err == -EINVAL) {
+                               pr_cont("PASS\n");
+                               /* Verifier rejected filter as expected. */
+                               *err = 0;
+                               return NULL;
+                       } else {
+                               pr_cont("UNEXPECTED_PASS\n");
+                               /* Verifier didn't reject the test; that's
+                                * bad enough, just return!
+                                */
+                               *err = -EINVAL;
+                               return NULL;
+                       }
+               }
+               /* We don't expect to fail. */
+               if (*err) {
+                       pr_cont("FAIL to attach err=%d len=%d\n",
+                               *err, fprog.len);
+                       return NULL;
+               }
+               break;
+
+       case INTERNAL:
+               fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+               if (fp == NULL) {
+                       pr_cont("UNEXPECTED_FAIL no memory left\n");
+                       *err = -ENOMEM;
+                       return NULL;
+               }
+
+               fp->len = flen;
+               memcpy(fp->insnsi, tests[which].u.insns_int,
+                      fp->len * sizeof(struct sock_filter_int));
+
+               sk_filter_select_runtime(fp);
+               break;
+       }
+
+       *err = 0;
+       return fp;
+}
+
+static void release_filter(struct sk_filter *fp, int which)
+{
+       __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+
+       switch (test_type) {
+       case CLASSIC:
+               sk_unattached_filter_destroy(fp);
+               break;
+       case INTERNAL:
+               sk_filter_free(fp);
+               break;
+       }
+}
+
+static int __run_one(const struct sk_filter *fp, const void *data,
+                    int runs, u64 *duration)
+{
+       u64 start, finish;
+       int ret, i;
+
+       start = ktime_to_us(ktime_get());
+
+       for (i = 0; i < runs; i++)
+               ret = SK_RUN_FILTER(fp, data);
+
+       finish = ktime_to_us(ktime_get());
+
+       *duration = (finish - start) * 1000ULL;
+       do_div(*duration, runs);
+
+       return ret;
+}
+
+static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+{
+       int err_cnt = 0, i, runs = MAX_TESTRUNS;
+
+       for (i = 0; i < MAX_SUBTESTS; i++) {
+               void *data;
+               u64 duration;
+               u32 ret;
+
+               if (test->test[i].data_size == 0 &&
+                   test->test[i].result == 0)
+                       break;
+
+               data = generate_test_data(test, i);
+               ret = __run_one(fp, data, runs, &duration);
+               release_test_data(test, data);
+
+               if (ret == test->test[i].result) {
+                       pr_cont("%lld ", duration);
+               } else {
+                       pr_cont("ret %d != %d ", ret,
+                               test->test[i].result);
+                       err_cnt++;
+               }
+       }
+
+       return err_cnt;
+}
+
+static __init int test_bpf(void)
+{
+       int i, err_cnt = 0, pass_cnt = 0;
+
+       for (i = 0; i < ARRAY_SIZE(tests); i++) {
+               struct sk_filter *fp;
+               int err;
+
+               pr_info("#%d %s ", i, tests[i].descr);
+
+               fp = generate_filter(i, &err);
+               if (fp == NULL) {
+                       if (err == 0) {
+                               pass_cnt++;
+                               continue;
+                       }
+
+                       return err;
+               }
+               err = run_one(fp, &tests[i]);
+               release_filter(fp, i);
+
+               if (err) {
+                       pr_cont("FAIL (%d times)\n", err);
+                       err_cnt++;
+               } else {
+                       pr_cont("PASS\n");
+                       pass_cnt++;
+               }
+       }
+
+       pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
+       return err_cnt ? -EINVAL : 0;
+}
+
+static int __init test_bpf_init(void)
+{
+       return test_bpf();
+}
+
+static void __exit test_bpf_exit(void)
+{
+}
+
+module_init(test_bpf_init);
+module_exit(test_bpf_exit);
+
+MODULE_LICENSE("GPL");
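+
+For orientation, each entry in the test table above follows the same pattern: a
+descriptive string, a classic or internal instruction array, the aux flags, the
+optional packet data, and the expected { data length, result } pairs. A minimal
+hypothetical entry in that style (a sketch only, not part of this patch) could
+look like:
+
+	{
+		"RET: constant via A",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 42),	/* A = 42 */
+			BPF_STMT(BPF_RET | BPF_A, 0),	/* return A */
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 42 } },	/* no packet data needed, expect 42 */
+	},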
index ebe5880c29d6cbe2306054f19bda87ffb3daa59a..1b5a95f0fa013ca428e877ae39d6f0148a49cd61 100644 (file)
@@ -581,3 +581,18 @@ config PGTABLE_MAPPING
 
 config GENERIC_EARLY_IOREMAP
        bool
+
+config MAX_STACK_SIZE_MB
+       int "Maximum user stack size for 32-bit processes (MB)"
+       default 80
+       range 8 256 if METAG
+       range 8 2048
+       depends on STACK_GROWSUP && (!64BIT || COMPAT)
+       help
+         This is the maximum stack size in megabytes in the VM layout of 32-bit
+         user processes when the stack grows upwards (currently only on the
+         parisc and metag architectures). The stack will be located at the
+         highest memory address minus the given value, unless the RLIMIT_STACK
+         hard limit is changed to a smaller value, in which case that value is
+         used.
+
+         A sane initial value is 80 MB.
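+
+As a usage illustration (not part of the patch): on an upward-growing-stack
+configuration the option ends up in the generated .config as a plain integer,
+for example:
+
+	CONFIG_MAX_STACK_SIZE_MB=80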
index 37f976287068cef4ed185836059da2cc12cdceda..627dc2e4320fdf976895e9b96c2c54d91b2b2760 100644 (file)
@@ -671,16 +671,20 @@ static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
 {
        struct page *page;
-       unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
+       unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;
 
        /*
         * Initialise the free scanner. The starting point is where we last
-        * scanned from (or the end of the zone if starting). The low point
-        * is the end of the pageblock the migration scanner is using.
+        * successfully isolated from, zone-cached value, or the end of the
+        * zone when isolating for the first time. We need this aligned to
+        * the pageblock boundary, because we do pfn -= pageblock_nr_pages
+        * in the for loop.
+        * The low boundary is the end of the pageblock the migration scanner
+        * is using.
         */
-       pfn = cc->free_pfn;
+       pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
        low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
        /*
@@ -700,6 +704,7 @@ static void isolate_freepages(struct zone *zone,
        for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
+               unsigned long end_pfn;
 
                /*
                 * This can iterate a massively long zone without finding any
@@ -734,13 +739,10 @@ static void isolate_freepages(struct zone *zone,
                isolated = 0;
 
                /*
-                * As pfn may not start aligned, pfn+pageblock_nr_page
-                * may cross a MAX_ORDER_NR_PAGES boundary and miss
-                * a pfn_valid check. Ensure isolate_freepages_block()
-                * only scans within a pageblock
+                * Take care when isolating in the last pageblock of a zone
+                * which ends in the middle of a pageblock.
                 */
-               end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-               end_pfn = min(end_pfn, z_end_pfn);
+               end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
                isolated = isolate_freepages_block(cc, pfn, end_pfn,
                                                   freelist, false);
                nr_freepages += isolated;
index 5020b280a771a4929bee7dbf0c5aee3fab1dea8b..088358c8006bb9c109da188c2b5ccf4a91614114 100644 (file)
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
 {
        int ret = 0;
        /* Check for outstanding write errors */
-       if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+       if (test_bit(AS_ENOSPC, &mapping->flags) &&
+           test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
-       if (test_and_clear_bit(AS_EIO, &mapping->flags))
+       if (test_bit(AS_EIO, &mapping->flags) &&
+           test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
 }
@@ -906,8 +908,8 @@ EXPORT_SYMBOL(page_cache_prev_hole);
  * Looks up the page cache slot at @mapping & @offset.  If there is a
  * page cache page, it is returned with an increased refcount.
  *
- * If the slot holds a shadow entry of a previously evicted page, it
- * is returned.
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
  *
  * Otherwise, %NULL is returned.
  */
@@ -928,9 +930,9 @@ repeat:
                        if (radix_tree_deref_retry(page))
                                goto repeat;
                        /*
-                        * Otherwise, shmem/tmpfs must be storing a swap entry
-                        * here as an exceptional entry: so return it without
-                        * attempting to raise page count.
+                        * A shadow entry of a recently evicted page,
+                        * or a swap entry from shmem/tmpfs.  Return
+                        * it without attempting to raise page count.
                         */
                        goto out;
                }
@@ -983,8 +985,8 @@ EXPORT_SYMBOL(find_get_page);
  * page cache page, it is returned locked and with an increased
  * refcount.
  *
- * If the slot holds a shadow entry of a previously evicted page, it
- * is returned.
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
  *
  * Otherwise, %NULL is returned.
  *
@@ -1099,8 +1101,8 @@ EXPORT_SYMBOL(find_or_create_page);
  * with ascending indexes.  There may be holes in the indices due to
  * not-present pages.
  *
- * Any shadow entries of evicted pages are included in the returned
- * array.
+ * Any shadow entries of evicted pages, or swap entries from
+ * shmem/tmpfs, are included in the returned array.
  *
  * find_get_entries() returns the number of pages and shadow entries
  * which were found.
@@ -1128,9 +1130,9 @@ repeat:
                        if (radix_tree_deref_retry(page))
                                goto restart;
                        /*
-                        * Otherwise, we must be storing a swap entry
-                        * here as an exceptional entry: so return it
-                        * without attempting to raise page count.
+                        * A shadow entry of a recently evicted page,
+                        * or a swap entry from shmem/tmpfs.  Return
+                        * it without attempting to raise page count.
                         */
                        goto export;
                }
@@ -1198,9 +1200,9 @@ repeat:
                                goto restart;
                        }
                        /*
-                        * Otherwise, shmem/tmpfs must be storing a swap entry
-                        * here as an exceptional entry: so skip over it -
-                        * we only reach this from invalidate_mapping_pages().
+                        * A shadow entry of a recently evicted page,
+                        * or a swap entry from shmem/tmpfs.  Skip
+                        * over it.
                         */
                        continue;
                }
@@ -1265,9 +1267,9 @@ repeat:
                                goto restart;
                        }
                        /*
-                        * Otherwise, shmem/tmpfs must be storing a swap entry
-                        * here as an exceptional entry: so stop looking for
-                        * contiguous pages.
+                        * A shadow entry of a recently evicted page,
+                        * or a swap entry from shmem/tmpfs.  Stop
+                        * looking for contiguous pages.
                         */
                        break;
                }
@@ -1341,10 +1343,17 @@ repeat:
                                goto restart;
                        }
                        /*
-                        * This function is never used on a shmem/tmpfs
-                        * mapping, so a swap entry won't be found here.
+                        * A shadow entry of a recently evicted page.
+                        *
+                        * Those entries should never be tagged, but
+                        * this tree walk is lockless and the tags are
+                        * looked up in bulk, one radix tree node at a
+                        * time, so there is a sizable window for page
+                        * reclaim to evict a page we saw tagged.
+                        *
+                        * Skip over it.
                         */
-                       BUG();
+                       continue;
                }
 
                if (!page_cache_get_speculative(page))
index 246192929a2d5c4c4aa424e15c6a7b88e929e785..c82290b9c1fcdb1b1dfdffcad699d6eb83ee8a43 100644 (file)
@@ -1981,11 +1981,7 @@ static int __init hugetlb_init(void)
 {
        int i;
 
-       /* Some platform decide whether they support huge pages at boot
-        * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
-        * there is no such support
-        */
-       if (HPAGE_SHIFT == 0)
+       if (!hugepages_supported())
                return 0;
 
        if (!size_to_hstate(default_hstate_size)) {
@@ -2112,6 +2108,9 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
        unsigned long tmp;
        int ret;
 
+       if (!hugepages_supported())
+               return -ENOTSUPP;
+
        tmp = h->max_huge_pages;
 
        if (write && h->order >= MAX_ORDER)
@@ -2165,6 +2164,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
        unsigned long tmp;
        int ret;
 
+       if (!hugepages_supported())
+               return -ENOTSUPP;
+
        tmp = h->nr_overcommit_huge_pages;
 
        if (write && h->order >= MAX_ORDER)
@@ -2190,6 +2192,8 @@ out:
 void hugetlb_report_meminfo(struct seq_file *m)
 {
        struct hstate *h = &default_hstate;
+       if (!hugepages_supported())
+               return;
        seq_printf(m,
                        "HugePages_Total:   %5lu\n"
                        "HugePages_Free:    %5lu\n"
@@ -2206,6 +2210,8 @@ void hugetlb_report_meminfo(struct seq_file *m)
 int hugetlb_report_node_meminfo(int nid, char *buf)
 {
        struct hstate *h = &default_hstate;
+       if (!hugepages_supported())
+               return 0;
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n"
@@ -2220,6 +2226,9 @@ void hugetlb_show_meminfo(void)
        struct hstate *h;
        int nid;
 
+       if (!hugepages_supported())
+               return;
+
        for_each_node_state(nid, N_MEMORY)
                for_each_hstate(h)
                        pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
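
The hugepages_supported() helper used throughout these hunks is defined
elsewhere in the series and is not shown here; judging by the comment removed
above (HPAGE_SHIFT is set to 0 on platforms without huge page support), it
presumably amounts to something like the following sketch (an assumption, not
the actual definition):

	/* hypothetical stand-in for the helper referenced above */
	static inline bool hugepages_supported(void)
	{
		return HPAGE_SHIFT != 0;
	}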
index 91d67eaee0500796c9e5569fedc7cc5775002dda..8d2fcdfeff7fdb319f58c838cd8b94a6cc59121e 100644 (file)
@@ -1775,10 +1775,9 @@ void __init kmemleak_init(void)
        int i;
        unsigned long flags;
 
-       kmemleak_early_log = 0;
-
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
        if (!kmemleak_skip_disable) {
+               kmemleak_early_log = 0;
                kmemleak_disable();
                return;
        }
@@ -1796,6 +1795,7 @@ void __init kmemleak_init(void)
 
        /* the kernel is still in UP mode, so disabling the IRQs is enough */
        local_irq_save(flags);
+       kmemleak_early_log = 0;
        if (kmemleak_error) {
                local_irq_restore(flags);
                return;
index 539eeb96b323bf649f83783e0dddcb4f907e1d6e..a402f8fdc68e94888ea177104524085c9f490fd5 100644 (file)
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
        for (; start < end; start += PAGE_SIZE) {
                index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
-               page = find_get_page(mapping, index);
+               page = find_get_entry(mapping, index);
                if (!radix_tree_exceptional_entry(page)) {
                        if (page)
                                page_cache_release(page);
index 29501f04056887297be694c315c7caf3adf666f5..5177c6d4a2ddbf6d28ece095287c1a4a36ef9a2a 100644 (file)
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 
        rcu_read_lock();
        do {
-               memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-               if (unlikely(!memcg))
+               /*
+                * Page cache insertions can happen without an
+                * actual mm context, e.g. during disk probing
+                * on boot, loopback IO, acct() writes etc.
+                */
+               if (unlikely(!mm))
                        memcg = root_mem_cgroup;
+               else {
+                       memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+                       if (unlikely(!memcg))
+                               memcg = root_mem_cgroup;
+               }
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
                return 0;
        }
 
-       /*
-        * Page cache insertions can happen without an actual mm
-        * context, e.g. during disk probing on boot.
-        */
-       if (unlikely(!mm))
-               memcg = root_mem_cgroup;
-       else {
-               memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
-               if (!memcg)
-                       return -ENOMEM;
-       }
+       memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+       if (!memcg)
+               return -ENOMEM;
        __mem_cgroup_commit_charge(memcg, page, 1, type, false);
        return 0;
 }
@@ -6686,16 +6687,20 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
                pgoff = pte_to_pgoff(ptent);
 
        /* page is moved even if it's not RSS of this task(page-faulted). */
-       page = find_get_page(mapping, pgoff);
-
 #ifdef CONFIG_SWAP
        /* shmem/tmpfs may report page out on swap: account for that too. */
-       if (radix_tree_exceptional_entry(page)) {
-               swp_entry_t swap = radix_to_swp_entry(page);
-               if (do_swap_account)
-                       *entry = swap;
-               page = find_get_page(swap_address_space(swap), swap.val);
-       }
+       if (shmem_mapping(mapping)) {
+               page = find_get_entry(mapping, pgoff);
+               if (radix_tree_exceptional_entry(page)) {
+                       swp_entry_t swp = radix_to_swp_entry(page);
+                       if (do_swap_account)
+                               *entry = swp;
+                       page = find_get_page(swap_address_space(swp), swp.val);
+               }
+       } else
+               page = find_get_page(mapping, pgoff);
+#else
+       page = find_get_page(mapping, pgoff);
 #endif
        return page;
 }
index 35ef28acf137c0ab76393ede3dbc1c3d820f5c37..9ccef39a9de261c96f4e5775d7dca48b63d4d133 100644 (file)
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                        return 0;
                } else if (PageHuge(hpage)) {
                        /*
-                        * Check "just unpoisoned", "filter hit", and
-                        * "race with other subpage."
+                        * Check "filter hit" and "race with other subpage."
                         */
                        lock_page(hpage);
-                       if (!PageHWPoison(hpage)
-                           || (hwpoison_filter(p) && TestClearPageHWPoison(p))
-                           || (p != hpage && TestSetPageHWPoison(hpage))) {
-                               atomic_long_sub(nr_pages, &num_poisoned_pages);
-                               return 0;
+                       if (PageHWPoison(hpage)) {
+                               if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+                                   || (p != hpage && TestSetPageHWPoison(hpage))) {
+                                       atomic_long_sub(nr_pages, &num_poisoned_pages);
+                                       unlock_page(hpage);
+                                       return 0;
+                               }
                        }
                        set_page_hwpoison_huge_page(hpage);
                        res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+               atomic_long_sub(nr_pages, &num_poisoned_pages);
+               put_page(hpage);
                res = 0;
                goto out;
        }
index d0f0bef3be488af9eb9406cc5d28272093abb5a6..037b812a953141f3dc77b1f7402b29bb54cd9e44 100644 (file)
@@ -232,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #endif
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-       struct mmu_gather_batch *batch;
-
-       if (!tlb->need_flush)
-               return;
        tlb->need_flush = 0;
        tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
 #endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+       struct mmu_gather_batch *batch;
 
        for (batch = &tlb->local; batch; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -251,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb->active = &tlb->local;
 }
 
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       if (!tlb->need_flush)
+               return;
+       tlb_flush_mmu_tlbonly(tlb);
+       tlb_flush_mmu_free(tlb);
+}
+
 /* tlb_finish_mmu
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
@@ -1127,8 +1136,10 @@ again:
                        if (PageAnon(page))
                                rss[MM_ANONPAGES]--;
                        else {
-                               if (pte_dirty(ptent))
+                               if (pte_dirty(ptent)) {
+                                       force_flush = 1;
                                        set_page_dirty(page);
+                               }
                                if (pte_young(ptent) &&
                                    likely(!(vma->vm_flags & VM_SEQ_READ)))
                                        mark_page_accessed(page);
@@ -1137,9 +1148,10 @@ again:
                        page_remove_rmap(page);
                        if (unlikely(page_mapcount(page) < 0))
                                print_bad_pte(vma, addr, ptent, page);
-                       force_flush = !__tlb_remove_page(tlb, page);
-                       if (force_flush)
+                       if (unlikely(!__tlb_remove_page(tlb, page))) {
+                               force_flush = 1;
                                break;
+                       }
                        continue;
                }
                /*
@@ -1174,18 +1186,11 @@ again:
 
        add_mm_rss_vec(mm, rss);
        arch_leave_lazy_mmu_mode();
-       pte_unmap_unlock(start_pte, ptl);
 
-       /*
-        * mmu_gather ran out of room to batch pages, we break out of
-        * the PTE lock to avoid doing the potential expensive TLB invalidate
-        * and page-free while holding it.
-        */
+       /* Do the actual TLB flush before dropping ptl */
        if (force_flush) {
                unsigned long old_end;
 
-               force_flush = 0;
-
                /*
                 * Flush the TLB just for the previous segment,
                 * then update the range to be the remaining
@@ -1193,11 +1198,21 @@ again:
                 */
                old_end = tlb->end;
                tlb->end = addr;
-
-               tlb_flush_mmu(tlb);
-
+               tlb_flush_mmu_tlbonly(tlb);
                tlb->start = addr;
                tlb->end = old_end;
+       }
+       pte_unmap_unlock(start_pte, ptl);
+
+       /*
+        * If we forced a TLB flush (either due to running out of
+        * batch buffers or because we needed to flush dirty TLB
+        * entries before releasing the ptl), free the batched
+        * memory too. Restart if we didn't do everything.
+        */
+       if (force_flush) {
+               force_flush = 0;
+               tlb_flush_mmu_free(tlb);
 
                if (addr != end)
                        goto again;
@@ -1955,12 +1970,17 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags)
 {
        struct vm_area_struct *vma;
+       vm_flags_t vm_flags;
        int ret;
 
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                return -EFAULT;
 
+       vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
+       if (!(vm_flags & vma->vm_flags))
+               return -EFAULT;
+
        ret = handle_mm_fault(mm, vma, address, fault_flags);
        if (ret & VM_FAULT_ERROR) {
                if (ret & VM_FAULT_OOM)
index 0843feb66f3d0236abd4386b5bfd0170c24ae0ef..05f1180e9f21822e99a5f11a2a7a03af663a422c 100644 (file)
@@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                        break;
                if (pmd_trans_huge(*old_pmd)) {
                        int err = 0;
-                       if (extent == HPAGE_PMD_SIZE)
+                       if (extent == HPAGE_PMD_SIZE) {
+                               VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+                               /* See comment in move_ptes() */
+                               if (need_rmap_locks)
+                                       anon_vma_lock_write(vma->anon_vma);
                                err = move_huge_pmd(vma, new_vma, old_addr,
                                                    new_addr, old_end,
                                                    old_pmd, new_pmd);
+                               if (need_rmap_locks)
+                                       anon_vma_unlock_write(vma->anon_vma);
+                       }
                        if (err > 0) {
                                need_flush = true;
                                continue;
index ef413492a14945c5e7ab0f5a8d33bd66eaf34873..a4317da60532be3eccb1c1e604362b3923271f65 100644 (file)
@@ -593,14 +593,14 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
  * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
  *     => fast response on large errors; small oscillation near setpoint
  */
-static inline long long pos_ratio_polynom(unsigned long setpoint,
+static long long pos_ratio_polynom(unsigned long setpoint,
                                          unsigned long dirty,
                                          unsigned long limit)
 {
        long long pos_ratio;
        long x;
 
-       x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+       x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
                    limit - setpoint + 1);
        pos_ratio = x;
        pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -842,7 +842,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
        x_intercept = bdi_setpoint + span;
 
        if (bdi_dirty < x_intercept - span / 4) {
-               pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
+               pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
                                    x_intercept - bdi_setpoint + 1);
        } else
                pos_ratio /= 4;
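
For context on this hunk and the one above: the two families of helpers differ
only in divisor width. Prototypes paraphrased from include/linux/math64.h (a
reference sketch, not part of the patch):

	s64 div_s64(s64 dividend, s32 divisor);		/* 32-bit divisor */
	s64 div64_s64(s64 dividend, s64 divisor);	/* 64-bit divisor */
	u64 div_u64(u64 dividend, u32 divisor);
	u64 div64_u64(u64 dividend, u64 divisor);

Switching to the 64-bit-divisor variants avoids truncating the
`limit - setpoint + 1` and `x_intercept - bdi_setpoint + 1` divisors to 32 bits.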
index 63e24fb4387b6d305960f9e7ba8c0554e6818ca5..2ddf9a990dbd057228782a3af5ac6901a0af632b 100644 (file)
@@ -610,7 +610,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
        chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
                                                sizeof(chunk->map[0]));
        if (!chunk->map) {
-               kfree(chunk);
+               pcpu_mem_free(chunk, pcpu_chunk_struct_size);
                return NULL;
        }
 
index 388cb1ae6fbc4907e6f0c6776b652adb5d055fe7..19d92181ce24917f799f5f6bbd46dada145d32d8 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -166,7 +166,7 @@ typedef unsigned char freelist_idx_t;
 typedef unsigned short freelist_idx_t;
 #endif
 
-#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
+#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
 
 /*
  * true if a page was allocated from pfmemalloc reserves for network-based
@@ -2572,13 +2572,13 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
        return freelist;
 }
 
-static inline freelist_idx_t get_free_obj(struct page *page, unsigned char idx)
+static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
 {
        return ((freelist_idx_t *)page->freelist)[idx];
 }
 
 static inline void set_free_obj(struct page *page,
-                                       unsigned char idx, freelist_idx_t val)
+                                       unsigned int idx, freelist_idx_t val)
 {
        ((freelist_idx_t *)(page->freelist))[idx] = val;
 }
index 3045316b7c9df285e04b3954c1921f7542f2311a..6bd4c353704ffd842f29ce9d6adfb3437f829676 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -91,6 +91,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
+void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
 struct file;
index f3cfccf76dda693250106d3c8d0387292e741c14..102cc6fca3d393f374984a7628d644b99b629a54 100644 (file)
@@ -323,6 +323,12 @@ static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+void slab_kmem_cache_release(struct kmem_cache *s)
+{
+       kfree(s->name);
+       kmem_cache_free(kmem_cache, s);
+}
+
 void kmem_cache_destroy(struct kmem_cache *s)
 {
        get_online_cpus();
@@ -352,8 +358,11 @@ void kmem_cache_destroy(struct kmem_cache *s)
                rcu_barrier();
 
        memcg_free_cache_params(s);
-       kfree(s->name);
-       kmem_cache_free(kmem_cache, s);
+#ifdef SLAB_SUPPORTS_SYSFS
+       sysfs_slab_remove(s);
+#else
+       slab_kmem_cache_release(s);
+#endif
        goto out_put_cpus;
 
 out_unlock:
index 5e234f1f8853e952dceefe8c6b92201fcc3853d7..2b1ce697fc4b4fc44d30ea3b1770ae1bea38d25e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -210,14 +210,11 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
 #ifdef CONFIG_SYSFS
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
-static void sysfs_slab_remove(struct kmem_cache *);
 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
 #else
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
                                                        { return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s) { }
-
 static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
 #endif
 
@@ -3238,24 +3235,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 
 int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-       int rc = kmem_cache_close(s);
-
-       if (!rc) {
-               /*
-                * Since slab_attr_store may take the slab_mutex, we should
-                * release the lock while removing the sysfs entry in order to
-                * avoid a deadlock. Because this is pretty much the last
-                * operation we do and the lock will be released shortly after
-                * that in slab_common.c, we could just move sysfs_slab_remove
-                * to a later point in common code. We should do that when we
-                * have a common sysfs framework for all allocators.
-                */
-               mutex_unlock(&slab_mutex);
-               sysfs_slab_remove(s);
-               mutex_lock(&slab_mutex);
-       }
-
-       return rc;
+       return kmem_cache_close(s);
 }
 
 /********************************************************************
@@ -5071,15 +5051,18 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 #ifdef CONFIG_MEMCG_KMEM
        int i;
        char *buffer = NULL;
+       struct kmem_cache *root_cache;
 
-       if (!is_root_cache(s))
+       if (is_root_cache(s))
                return;
 
+       root_cache = s->memcg_params->root_cache;
+
        /*
         * This mean this cache had no attribute written. Therefore, no point
         * in copying default values around
         */
-       if (!s->max_attr_size)
+       if (!root_cache->max_attr_size)
                return;
 
        for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
@@ -5101,7 +5084,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                 */
                if (buffer)
                        buf = buffer;
-               else if (s->max_attr_size < ARRAY_SIZE(mbuf))
+               else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
                        buf = mbuf;
                else {
                        buffer = (char *) get_zeroed_page(GFP_KERNEL);
@@ -5110,7 +5093,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                        buf = buffer;
                }
 
-               attr->show(s->memcg_params->root_cache, buf);
+               attr->show(root_cache, buf);
                attr->store(s, buf, strlen(buf));
        }
 
@@ -5119,6 +5102,11 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 #endif
 }
 
+static void kmem_cache_release(struct kobject *k)
+{
+       slab_kmem_cache_release(to_slab(k));
+}
+
 static const struct sysfs_ops slab_sysfs_ops = {
        .show = slab_attr_show,
        .store = slab_attr_store,
@@ -5126,6 +5114,7 @@ static const struct sysfs_ops slab_sysfs_ops = {
 
 static struct kobj_type slab_ktype = {
        .sysfs_ops = &slab_sysfs_ops,
+       .release = kmem_cache_release,
 };
 
 static int uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -5252,7 +5241,7 @@ out_put_kobj:
        goto out;
 }
 
-static void sysfs_slab_remove(struct kmem_cache *s)
+void sysfs_slab_remove(struct kmem_cache *s)
 {
        if (slab_state < FULL)
                /*
index e5cc39ab0751f08b30691c3e7e8c98e32ded409c..6a78c814bebfb151b1e490424c731edc172ad430 100644 (file)
@@ -484,14 +484,6 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
        unsigned long count = 0;
        int i;
 
-       /*
-        * Note: this function may get called on a shmem/tmpfs mapping:
-        * pagevec_lookup() might then return 0 prematurely (because it
-        * got a gangful of swap entries); but it's hardly worth worrying
-        * about - it can rarely have anything to free from such a mapping
-        * (most pages are dirty), and already skips over any difficulties.
-        */
-
        pagevec_init(&pvec, 0);
        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
index f380af7ea7797e287b222cd57b665fad02b6132e..d5ea733c508265aaba619248d973ec640a73d04a 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -10,6 +10,7 @@
 #include <linux/swapops.h>
 #include <linux/mman.h>
 #include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 
@@ -387,6 +388,15 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_mmap);
 
+void kvfree(const void *addr)
+{
+       if (is_vmalloc_addr(addr))
+               vfree(addr);
+       else
+               kfree(addr);
+}
+EXPORT_SYMBOL(kvfree);
+
 struct address_space *page_mapping(struct page *page)
 {
        struct address_space *mapping = page->mapping;
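
The kvfree() helper added above is the natural counterpart of the common
"kmalloc with vmalloc fallback" allocation pattern; a hedged usage sketch
(hypothetical caller and size, not part of the patch):

	void *buf;

	buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		buf = vmalloc(size);
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kvfree(buf);	/* frees either kind of allocation */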
index d4224b397c0e4e4492fa135c3c5ee9b224872c07..1037a3bab50529f84c9d81c383df07dbfbbda081 100644 (file)
@@ -81,10 +81,12 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
        for (i = 0; i < VMACACHE_SIZE; i++) {
                struct vm_area_struct *vma = current->vmacache[i];
 
-               if (vma && vma->vm_start <= addr && vma->vm_end > addr) {
-                       BUG_ON(vma->vm_mm != mm);
+               if (!vma)
+                       continue;
+               if (WARN_ON_ONCE(vma->vm_mm != mm))
+                       break;
+               if (vma->vm_start <= addr && vma->vm_end > addr)
                        return vma;
-               }
        }
 
        return NULL;
index 3f56c8deb3c05f0904917b87113e188dbeff9ef0..32c661d66a45498e270ba5e9019cda60a114cc27 100644 (file)
@@ -1915,6 +1915,24 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
        file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
                get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
+       /*
+        * Prevent the reclaimer from falling into the cache trap: as
+        * cache pages start out inactive, every cache fault will tip
+        * the scan balance towards the file LRU.  And as the file LRU
+        * shrinks, so does the window for rotation from references.
+        * This means we have a runaway feedback loop where a tiny
+        * thrashing file LRU becomes infinitely more attractive than
+        * anon pages.  Try to detect this based on file LRU size.
+        */
+       if (global_reclaim(sc)) {
+               unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
+
+               if (unlikely(file + free <= high_wmark_pages(zone))) {
+                       scan_balance = SCAN_ANON;
+                       goto out;
+               }
+       }
+
        /*
         * There is enough inactive page cache, do not reclaim
         * anything from the anonymous working set right now.
index 175273f38cb1bd59f5aeb88cb8c815033475dfe8..44ebd5c2cd4aef0f86bd6475132cc40c501c6fef 100644 (file)
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_mvrp;
 
+       vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;
index 3c32bd257b73975a33ba104c1c3b3797d9f29843..9012b1c922b61acd28fffb7f50b4968da9293b2f 100644 (file)
@@ -63,7 +63,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
 }
 
 /* Must be invoked with rcu_read_lock. */
-struct net_device *__vlan_find_dev_deep(struct net_device *dev,
+struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
                                        __be16 vlan_proto, u16 vlan_id)
 {
        struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
@@ -81,13 +81,13 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev,
 
                upper_dev = netdev_master_upper_dev_get_rcu(dev);
                if (upper_dev)
-                       return __vlan_find_dev_deep(upper_dev,
+                       return __vlan_find_dev_deep_rcu(upper_dev,
                                                    vlan_proto, vlan_id);
        }
 
        return NULL;
 }
-EXPORT_SYMBOL(__vlan_find_dev_deep);
+EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
index 8f025afa29fdac1e65288891dc5977499cac9345..ad2ac3c003988741c066c2bb467c2abc4a523c5a 100644 (file)
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
        }
 }
 
-static int vlan_calculate_locking_subclass(struct net_device *real_dev)
-{
-       int subclass = 0;
-
-       while (is_vlan_dev(real_dev)) {
-               subclass++;
-               real_dev = vlan_dev_priv(real_dev)->real_dev;
-       }
-
-       return subclass;
-}
-
-static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
-static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
-{
-       int err = 0, subclass;
-
-       subclass = vlan_calculate_locking_subclass(to);
-
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
-}
-
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-       vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-       vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
+static int vlan_dev_get_lock_subclass(struct net_device *dev)
+{
+       return vlan_dev_priv(dev)->nest_level;
+}
+
 static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
        .rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-       int subclass = 0;
 
        netif_carrier_off(dev);
 
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       subclass = vlan_calculate_locking_subclass(dev);
-       vlan_dev_set_lockdep_class(dev, subclass);
+       vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
 
        vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -678,9 +643,9 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        netdev_features_t old_features = features;
 
-       features &= real_dev->vlan_features;
+       features = netdev_intersect_features(features, real_dev->vlan_features);
        features |= NETIF_F_RXCSUM;
-       features &= real_dev->features;
+       features = netdev_intersect_features(features, real_dev->features);
 
        features |= old_features & NETIF_F_SOFT_FEATURES;
        features |= NETIF_F_LLTX;
@@ -817,6 +782,7 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
+       .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
 void vlan_setup(struct net_device *dev)
index 786ee2f83d5fea1dbfd6bb2660544d7f88e1eff2..01a1082e02b3157b3abc84e6b23844ed3a26f2f2 100644 (file)
@@ -1669,7 +1669,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
                goto out;
        }
 
-       if (sk->sk_no_check == 1)
+       if (sk->sk_no_check_tx)
                ddp->deh_sum = 0;
        else
                ddp->deh_sum = atalk_checksum(skb, len + sizeof(*ddp));
index 1281049c135f269f9ab0553eb51a681cd97b2079..d8e5d0c2ebbc2acb9a084581f5fa9f99fbd904da 100644 (file)
@@ -263,17 +263,11 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
                        goto out;
                }
        }
-/*
- * Not supported yet
- *
- * #ifndef CONFIG_SINGLE_SIGITF
- */
+
        vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
        vcc->qos.txtp.pcr = 0;
        vcc->qos.txtp.min_pcr = 0;
-/*
- * #endif
- */
+
        error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
        if (!error)
                sock->state = SS_CONNECTED;
index b3bd4ec3fd9452f0d1f9a99dd4782260ab65c818..f04224c32005aa9a732622805915fe9aead9ee3e 100644 (file)
@@ -1545,6 +1545,8 @@ out_neigh:
        if ((orig_neigh_node) && (!is_single_hop_neigh))
                batadv_orig_node_free_ref(orig_neigh_node);
 out:
+       if (router_ifinfo)
+               batadv_neigh_ifinfo_free_ref(router_ifinfo);
        if (router)
                batadv_neigh_node_free_ref(router);
        if (router_router)
index b758881be108c84bfa059cf61f15d44a9008c1c8..a12e25efaf6ff055094f843c7c5536ce861f593a 100644 (file)
@@ -245,6 +245,7 @@ static int batadv_algorithms_open(struct inode *inode, struct file *file)
 static int batadv_originators_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_orig_seq_print_text, net_dev);
 }
 
@@ -258,18 +259,21 @@ static int batadv_originators_hardif_open(struct inode *inode,
                                          struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_orig_hardif_seq_print_text, net_dev);
 }
 
 static int batadv_gateways_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_gw_client_seq_print_text, net_dev);
 }
 
 static int batadv_transtable_global_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_tt_global_seq_print_text, net_dev);
 }
 
@@ -277,6 +281,7 @@ static int batadv_transtable_global_open(struct inode *inode, struct file *file)
 static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_bla_claim_table_seq_print_text,
                           net_dev);
 }
@@ -285,6 +290,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
                                          struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_bla_backbone_table_seq_print_text,
                           net_dev);
 }
@@ -300,6 +306,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
 static int batadv_dat_cache_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
 }
 #endif
@@ -307,6 +314,7 @@ static int batadv_dat_cache_open(struct inode *inode, struct file *file)
 static int batadv_transtable_local_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_tt_local_seq_print_text, net_dev);
 }
 
@@ -319,6 +327,7 @@ struct batadv_debuginfo {
 static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_nc_nodes_seq_print_text, net_dev);
 }
 #endif
@@ -333,7 +342,7 @@ struct batadv_debuginfo batadv_debuginfo_##_name = {        \
                  .llseek = seq_lseek,                  \
                  .release = single_release,            \
                }                                       \
-};
+}
 
 /* the following attributes are general and therefore they will be directly
  * placed in the BATADV_DEBUGFS_SUBDIR subdirectory of debugfs
@@ -395,7 +404,7 @@ struct batadv_debuginfo batadv_hardif_debuginfo_##_name = { \
                .llseek = seq_lseek,                            \
                .release = single_release,                      \
        },                                                      \
-};
+}
 static BATADV_HARDIF_DEBUGINFO(originators, S_IRUGO,
                               batadv_originators_hardif_open);
 
index b25fd64d727b0d6e8227671f860b133095df5100..dcd99b2bea3c47db32785411a1955420983e1f45 100644 (file)
@@ -662,6 +662,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
 void batadv_dat_status_update(struct net_device *net_dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
        batadv_dat_tvlv_container_update(bat_priv);
 }
 
@@ -940,8 +941,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                 * additional DAT answer may trigger kernel warnings about
                 * a packet coming from the wrong port.
                 */
-               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
-                                       BATADV_NO_FLAGS)) {
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
                        ret = true;
                        goto out;
                }
index bcc4bea632fa69ead6567e016f2f265840f7968b..f14e54a0569178e8b423b4921c4b610e4c63039c 100644 (file)
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                             struct batadv_neigh_node *neigh_node)
 {
        struct batadv_priv *bat_priv;
-       struct batadv_hard_iface *primary_if;
+       struct batadv_hard_iface *primary_if = NULL;
        struct batadv_frag_packet frag_header;
        struct sk_buff *skb_fragment;
        unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
        unsigned header_size = sizeof(frag_header);
        unsigned max_fragment_size, max_packet_size;
+       bool ret = false;
 
        /* To avoid merge and refragmentation at next-hops we never send
         * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
                           skb->len + ETH_HLEN);
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 
-       return true;
+       ret = true;
+
 out_err:
-       return false;
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+
+       return ret;
 }
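
The hunk above gives batadv_frag_send_packet() a single exit path: primary_if
now starts out NULL, ret carries the result, and the reference is dropped at
out_err on success and failure alike. A minimal sketch of that cleanup pattern,
with placeholder names (my_res, get_ref(), put_ref(), do_work()) rather than
the real batman-adv helpers:

#include <stdbool.h>
#include <stddef.h>

struct my_res;                          /* placeholder resource type */
struct my_res *get_ref(void);           /* placeholder helpers, not kernel APIs */
void put_ref(struct my_res *res);
int do_work(struct my_res *res);

bool send_with_cleanup(void)
{
        struct my_res *res = NULL;      /* NULL keeps the exit path safe */
        bool ret = false;

        res = get_ref();
        if (!res)
                goto out;

        if (do_work(res) < 0)
                goto out;

        ret = true;                     /* fall through to the shared cleanup */
out:
        if (res)
                put_ref(res);
        return ret;
}
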
index c835e137423bb9ec70b98b5130d5a33c12f9b139..90cff585b37d5a3779cdb8f7b3b90a26eb2df88e 100644 (file)
 
 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
 {
-       if (atomic_dec_and_test(&gw_node->refcount))
+       if (atomic_dec_and_test(&gw_node->refcount)) {
+               batadv_orig_node_free_ref(gw_node->orig_node);
                kfree_rcu(gw_node, rcu);
+       }
 }
 
 static struct batadv_gw_node *
@@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        if (gateway->bandwidth_down == 0)
                return;
 
+       if (!atomic_inc_not_zero(&orig_node->refcount))
+               return;
+
        gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
-       if (!gw_node)
+       if (!gw_node) {
+               batadv_orig_node_free_ref(orig_node);
                return;
+       }
 
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
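
The two gateway hunks above pin down gw_node ownership: batadv_gw_node_add()
only proceeds if it can take a reference on orig_node via atomic_inc_not_zero(),
returns that reference if the allocation fails, and batadv_gw_node_free_ref()
now drops it when the gw_node itself goes away. A reduced sketch of the same
discipline with illustrative types (struct target, struct holder) and a
placeholder target_put():

#include <linux/atomic.h>
#include <linux/slab.h>

struct target {                         /* illustrative, not a batman-adv type */
        atomic_t refcount;
};

struct holder {
        struct target *target;
};

void target_put(struct target *t);      /* stands in for the *_free_ref() helper */

static void holder_add(struct target *t)
{
        struct holder *h;

        /* refuse to take a reference on an object that is already dying */
        if (!atomic_inc_not_zero(&t->refcount))
                return;

        h = kzalloc(sizeof(*h), GFP_ATOMIC);
        if (!h) {
                target_put(t);          /* give the reference back on error */
                return;
        }

        h->target = t;                  /* the reference is now owned by h */
        /* ... link h into the appropriate list ... */
}
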
index b851cc58085330acbab02848fedf3cb01751a060..fbda6b54baffccf798375cb8add49bb179738386 100644 (file)
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
                return true;
 
        /* no more parents..stop recursion */
-       if (net_dev->iflink == net_dev->ifindex)
+       if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
                return false;
 
        /* recurse over the parent device */
index 770dc890ceefdb712f254b378c825cbeab255742..118b990bae25d7ecc7221330546b59a5391cd21e 100644 (file)
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2014.2.0"
+#define BATADV_SOURCE_VERSION "2014.3.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
index a9546fe541ebb0ff8905fd3fe82d48e48302ce9e..40a2fc4bcf4c4e1887a8cfc571a13631ef1e655b 100644 (file)
@@ -86,6 +86,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
 void batadv_nc_status_update(struct net_device *net_dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
        batadv_nc_tvlv_container_update(bat_priv);
 }
 
index ffd9dfbd9b0e856e35e2ac6ea594739e8feb614d..6a484514cd3e98b9e0b27a924b4dcb92f2682055 100644 (file)
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
 {
        struct batadv_orig_ifinfo *orig_ifinfo;
+       struct batadv_neigh_node *router;
 
        orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
 
        if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
                batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
 
+       /* this is the last reference to this object */
+       router = rcu_dereference_protected(orig_ifinfo->router, true);
+       if (router)
+               batadv_neigh_node_free_ref_now(router);
        kfree(orig_ifinfo);
 }
 
@@ -701,6 +706,47 @@ free_orig_node:
        return NULL;
 }
 
+/**
+ * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh: the neighbor node which is to be checked
+ */
+static void
+batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
+                         struct batadv_neigh_node *neigh)
+{
+       struct batadv_neigh_ifinfo *neigh_ifinfo;
+       struct batadv_hard_iface *if_outgoing;
+       struct hlist_node *node_tmp;
+
+       spin_lock_bh(&neigh->ifinfo_lock);
+
+       /* for all ifinfo objects for this neighbor */
+       hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+                                 &neigh->ifinfo_list, list) {
+               if_outgoing = neigh_ifinfo->if_outgoing;
+
+               /* always keep the default interface */
+               if (if_outgoing == BATADV_IF_DEFAULT)
+                       continue;
+
+               /* don't purge if the interface is not (going) down */
+               if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
+                   (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
+                   (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
+                       continue;
+
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
+                          neigh->addr, if_outgoing->net_dev->name);
+
+               hlist_del_rcu(&neigh_ifinfo->list);
+               batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+       }
+
+       spin_unlock_bh(&neigh->ifinfo_lock);
+}
+
 /**
  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 
                        hlist_del_rcu(&neigh_node->list);
                        batadv_neigh_node_free_ref(neigh_node);
+               } else {
+                       /* only necessary if the whole neighbor is not being
+                        * deleted, but some interface has been removed.
+                        */
+                       batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
                }
        }
 
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 {
        struct batadv_neigh_node *best_neigh_node;
        struct batadv_hard_iface *hard_iface;
-       bool changed;
+       bool changed_ifinfo, changed_neigh;
 
        if (batadv_has_timed_out(orig_node->last_seen,
                                 2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                           jiffies_to_msecs(orig_node->last_seen));
                return true;
        }
-       changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
-       changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
+       changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
+       changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
 
-       if (!changed)
+       if (!changed_ifinfo && !changed_neigh)
                return false;
 
        /* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
        bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
 
 out:
-       batadv_hardif_free_ref(hard_iface);
+       if (hard_iface)
+               batadv_hardif_free_ref(hard_iface);
        return 0;
 }
 
index 744a59b85e15ded75f61da8a9fa5a8a87cdb7b8d..e7ee65dc20bf4f25a1a8d0134c66b0bfaef25bd3 100644 (file)
@@ -884,7 +884,7 @@ static void batadv_softif_init_early(struct net_device *dev)
        /* generate random address */
        eth_hw_addr_random(dev);
 
-       SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
+       dev->ethtool_ops = &batadv_ethtool_ops;
 
        memset(priv, 0, sizeof(*priv));
 }
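
The hunk above assigns the ethtool_ops pointer directly instead of going
through the SET_ETHTOOL_OPS() wrapper macro. A short sketch of the
direct-assignment form; my_ethtool_ops and my_setup() are placeholder names for
a driver's own ops table and setup routine:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_ops my_ethtool_ops = {
        .get_link = ethtool_op_get_link,
};

static void my_setup(struct net_device *dev)
{
        dev->ethtool_ops = &my_ethtool_ops;     /* plain assignment, no macro */
}
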
index 1ebb0d9e2ea547d1c263a6b09d30d81214e4ba33..fc47baa888c54896c6ccde6352202736d3c9ba6b 100644 (file)
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
 {
        struct device *dev = container_of(obj->parent, struct device, kobj);
+
        return to_net_dev(dev);
 }
 
 static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
 {
        struct net_device *net_dev = batadv_kobj_to_netdev(obj);
+
        return netdev_priv(net_dev);
 }
 
@@ -106,7 +108,7 @@ struct batadv_attribute batadv_attr_vlan_##_name = {        \
                 .mode = _mode },                       \
        .show   = _show,                                \
        .store  = _store,                               \
-};
+}
 
 /* Use this, if you have customized show and store functions */
 #define BATADV_ATTR(_name, _mode, _show, _store)       \
@@ -115,7 +117,7 @@ struct batadv_attribute batadv_attr_##_name = {             \
                 .mode = _mode },                       \
        .show   = _show,                                \
        .store  = _store,                               \
-};
+}
 
 #define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func)                  \
 ssize_t batadv_store_##_name(struct kobject *kobj,                     \
@@ -124,6 +126,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                  \
 {                                                                      \
        struct net_device *net_dev = batadv_kobj_to_netdev(kobj);       \
        struct batadv_priv *bat_priv = netdev_priv(net_dev);            \
+                                                                       \
        return __batadv_store_bool_attr(buff, count, _post_func, attr,  \
                                        &bat_priv->_name, net_dev);     \
 }
@@ -133,6 +136,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                   \
                            struct attribute *attr, char *buff)         \
 {                                                                      \
        struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);    \
+                                                                       \
        return sprintf(buff, "%s\n",                                    \
                       atomic_read(&bat_priv->_name) == 0 ?             \
                       "disabled" : "enabled");                         \
@@ -155,6 +159,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                  \
 {                                                                      \
        struct net_device *net_dev = batadv_kobj_to_netdev(kobj);       \
        struct batadv_priv *bat_priv = netdev_priv(net_dev);            \
+                                                                       \
        return __batadv_store_uint_attr(buff, count, _min, _max,        \
                                        _post_func, attr,               \
                                        &bat_priv->_name, net_dev);     \
@@ -165,6 +170,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                   \
                            struct attribute *attr, char *buff)         \
 {                                                                      \
        struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);    \
+                                                                       \
        return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name));    \
 }                                                                      \
 
@@ -188,6 +194,7 @@ ssize_t batadv_store_vlan_##_name(struct kobject *kobj,                     \
        size_t res = __batadv_store_bool_attr(buff, count, _post_func,  \
                                              attr, &vlan->_name,       \
                                              bat_priv->soft_iface);    \
+                                                                       \
        batadv_softif_vlan_free_ref(vlan);                              \
        return res;                                                     \
 }
@@ -202,6 +209,7 @@ ssize_t batadv_show_vlan_##_name(struct kobject *kobj,                      \
        size_t res = sprintf(buff, "%s\n",                              \
                             atomic_read(&vlan->_name) == 0 ?           \
                             "disabled" : "enabled");                   \
+                                                                       \
        batadv_softif_vlan_free_ref(vlan);                              \
        return res;                                                     \
 }
@@ -324,12 +332,14 @@ static ssize_t batadv_show_bat_algo(struct kobject *kobj,
                                    struct attribute *attr, char *buff)
 {
        struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+
        return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
 }
 
 static void batadv_post_gw_reselect(struct net_device *net_dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
        batadv_gw_reselect(bat_priv);
 }
 
index d958e2dca52fa5bb4d166e0073fa90d18729a73f..095943c02d6e4ae9643cd1099e7e648ef8de6488 100644 (file)
@@ -367,9 +367,23 @@ static void le_conn_timeout(struct work_struct *work)
 {
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             le_conn_timeout.work);
+       struct hci_dev *hdev = conn->hdev;
 
        BT_DBG("");
 
+       /* We could end up here due to having done directed advertising,
+        * so clean up the state if necessary. This should however only
+        * happen with broken hardware or if low duty cycle was used
+        * (which doesn't have a timeout of its own).
+        */
+       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+               u8 enable = 0x00;
+               hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
+                            &enable);
+               hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+               return;
+       }
+
        hci_le_create_connection_cancel(conn);
 }
 
@@ -401,6 +415,10 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
+       case LE_LINK:
+               /* conn->src should reflect the local identity address */
+               hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+               break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
@@ -545,6 +563,11 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
         * favor of connection establishment, we should restart it.
         */
        hci_update_background_scan(hdev);
+
+       /* Re-enable advertising in case this was a failed connection
+        * attempt as a peripheral.
+        */
+       mgmt_reenable_advertising(hdev);
 }
 
 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
@@ -605,6 +628,45 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
        conn->state = BT_CONNECT;
 }
 
+static void hci_req_directed_advertising(struct hci_request *req,
+                                        struct hci_conn *conn)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_adv_param cp;
+       u8 own_addr_type;
+       u8 enable;
+
+       enable = 0x00;
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+
+       /* Clear the HCI_ADVERTISING bit temporarily so that the
+        * hci_update_random_address knows that it's safe to go ahead
+        * and write a new random address. The flag will be set back on
+        * as soon as the SET_ADV_ENABLE HCI command completes.
+        */
+       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+       /* Set require_privacy to false so that the remote device has a
+        * chance of identifying us.
+        */
+       if (hci_update_random_address(req, false, &own_addr_type) < 0)
+               return;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.type = LE_ADV_DIRECT_IND;
+       cp.own_address_type = own_addr_type;
+       cp.direct_addr_type = conn->dst_type;
+       bacpy(&cp.direct_addr, &conn->dst);
+       cp.channel_map = hdev->le_adv_channel_map;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+       enable = 0x01;
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+
+       conn->state = BT_CONNECT;
+}
+
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                u8 dst_type, u8 sec_level, u8 auth_type)
 {
@@ -614,9 +676,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
        struct hci_request req;
        int err;
 
-       if (test_bit(HCI_ADVERTISING, &hdev->flags))
-               return ERR_PTR(-ENOTSUPP);
-
        /* Some devices send ATT messages as soon as the physical link is
         * established. To be able to handle these ATT messages, the user-
         * space first establishes the connection and then starts the pairing
@@ -664,13 +723,20 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                return ERR_PTR(-ENOMEM);
 
        conn->dst_type = dst_type;
-
-       conn->out = true;
-       conn->link_mode |= HCI_LM_MASTER;
        conn->sec_level = BT_SECURITY_LOW;
        conn->pending_sec_level = sec_level;
        conn->auth_type = auth_type;
 
+       hci_req_init(&req, hdev);
+
+       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+               hci_req_directed_advertising(&req, conn);
+               goto create_conn;
+       }
+
+       conn->out = true;
+       conn->link_mode |= HCI_LM_MASTER;
+
        params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
        if (params) {
                conn->le_conn_min_interval = params->conn_min_interval;
@@ -680,8 +746,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                conn->le_conn_max_interval = hdev->le_conn_max_interval;
        }
 
-       hci_req_init(&req, hdev);
-
        /* If controller is scanning, we stop it since some controllers are
         * not able to scan and connect at the same time. Also set the
         * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
@@ -695,6 +759,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 
        hci_req_add_le_create_conn(&req, conn);
 
+create_conn:
        err = hci_req_run(&req, create_le_conn_complete);
        if (err) {
                hci_conn_del(conn);
@@ -819,14 +884,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_auth_requested cp;
 
-               /* encrypt must be pending if auth is also pending */
-               set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
-
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                             sizeof(cp), &cp);
+
+               /* If we're already encrypted set the REAUTH_PEND flag,
+                * otherwise set the ENCRYPT_PEND.
+                */
                if (conn->key_type != 0xff)
                        set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+               else
+                       set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
        }
 
        return 0;
index 1c6ffaa8902f5e9fe32a86f2a19cf9073d6f0dae..d31f144860d127cdf223f2145185feb963e011c4 100644 (file)
@@ -955,14 +955,9 @@ static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
        if (count < 3)
                return -EINVAL;
 
-       buf = kzalloc(count, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       if (copy_from_user(buf, data, count)) {
-               err = -EFAULT;
-               goto done;
-       }
+       buf = memdup_user(data, count);
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
 
        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
@@ -1828,6 +1823,9 @@ static int __hci_init(struct hci_dev *hdev)
                                    &lowpan_debugfs_fops);
                debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
                                    &le_auto_conn_fops);
+               debugfs_create_u16("discov_interleaved_timeout", 0644,
+                                  hdev->debugfs,
+                                  &hdev->discov_interleaved_timeout);
        }
 
        return 0;
@@ -2033,12 +2031,11 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
 
        hci_remove_remote_oob_data(hdev, &data->bdaddr);
 
-       if (ssp)
-               *ssp = data->ssp_mode;
+       *ssp = data->ssp_mode;
 
        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
-               if (ie->data.ssp_mode && ssp)
+               if (ie->data.ssp_mode)
                        *ssp = true;
 
                if (ie->name_state == NAME_NEEDED &&
@@ -3791,6 +3788,7 @@ struct hci_dev *hci_alloc_dev(void)
        hdev->le_conn_max_interval = 0x0038;
 
        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
+       hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
 
        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);
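
The le_auto_conn_write() hunk above replaces the kzalloc() + copy_from_user()
pair with memdup_user(), which allocates and copies in one call and reports
failure through ERR_PTR(). A minimal sketch of the same pattern in an
illustrative debugfs-style write handler (my_write() is not part of the patch):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t my_write(struct file *file, const char __user *data,
                        size_t count, loff_t *ppos)
{
        char *buf;

        buf = memdup_user(data, count); /* allocate and copy in one step */
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        /* ... parse buf ... */

        kfree(buf);
        return count;
}
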
index 49774912cb01f23ef6f85cb26f8613f538edec94..ca19fd4bbb8f198ed5d3a3b959c35aceb3a1e54e 100644 (file)
@@ -991,10 +991,25 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
        if (!sent)
                return;
 
+       if (status)
+               return;
+
        hci_dev_lock(hdev);
 
-       if (!status)
-               mgmt_advertising(hdev, *sent);
+       /* If we're doing connection initiation as peripheral, set a
+        * timeout in case something goes wrong.
+        */
+       if (*sent) {
+               struct hci_conn *conn;
+
+               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               if (conn)
+                       queue_delayed_work(hdev->workqueue,
+                                          &conn->le_conn_timeout,
+                                          HCI_LE_CONN_TIMEOUT);
+       }
+
+       mgmt_advertising(hdev, *sent);
 
        hci_dev_unlock(hdev);
 }
@@ -1018,6 +1033,33 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_unlock(hdev);
 }
 
+static bool has_pending_adv_report(struct hci_dev *hdev)
+{
+       struct discovery_state *d = &hdev->discovery;
+
+       return bacmp(&d->last_adv_addr, BDADDR_ANY);
+}
+
+static void clear_pending_adv_report(struct hci_dev *hdev)
+{
+       struct discovery_state *d = &hdev->discovery;
+
+       bacpy(&d->last_adv_addr, BDADDR_ANY);
+       d->last_adv_data_len = 0;
+}
+
+static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                    u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
+{
+       struct discovery_state *d = &hdev->discovery;
+
+       bacpy(&d->last_adv_addr, bdaddr);
+       d->last_adv_addr_type = bdaddr_type;
+       d->last_adv_rssi = rssi;
+       memcpy(d->last_adv_data, data, len);
+       d->last_adv_data_len = len;
+}
+
 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                                      struct sk_buff *skb)
 {
@@ -1036,9 +1078,25 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
        switch (cp->enable) {
        case LE_SCAN_ENABLE:
                set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+               if (hdev->le_scan_type == LE_SCAN_ACTIVE)
+                       clear_pending_adv_report(hdev);
                break;
 
        case LE_SCAN_DISABLE:
+               /* We do this here instead of when setting DISCOVERY_STOPPED
+                * since the latter would potentially require waiting for
+                * inquiry to stop too.
+                */
+               if (has_pending_adv_report(hdev)) {
+                       struct discovery_state *d = &hdev->discovery;
+
+                       mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+                                         d->last_adv_addr_type, NULL,
+                                         d->last_adv_rssi, 0, 1,
+                                         d->last_adv_data,
+                                         d->last_adv_data_len, NULL, 0);
+               }
+
                /* Cancel this timer so that we don't try to disable scanning
                 * when it's already disabled.
                 */
@@ -1827,7 +1885,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
                name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
                mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                  info->dev_class, 0, !name_known, ssp, NULL,
-                                 0);
+                                 0, NULL, 0);
        }
 
        hci_dev_unlock(hdev);
@@ -3102,7 +3160,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
                                                              false, &ssp);
                        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                          info->dev_class, info->rssi,
-                                         !name_known, ssp, NULL, 0);
+                                         !name_known, ssp, NULL, 0, NULL, 0);
                }
        } else {
                struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -3120,7 +3178,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
                                                              false, &ssp);
                        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                          info->dev_class, info->rssi,
-                                         !name_known, ssp, NULL, 0);
+                                         !name_known, ssp, NULL, 0, NULL, 0);
                }
        }
 
@@ -3309,7 +3367,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
                eir_len = eir_get_length(info->data, sizeof(info->data));
                mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                  info->dev_class, info->rssi, !name_known,
-                                 ssp, info->data, eir_len);
+                                 ssp, info->data, eir_len, NULL, 0);
        }
 
        hci_dev_unlock(hdev);
@@ -3330,6 +3388,12 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
        if (!conn)
                goto unlock;
 
+       /* For BR/EDR the necessary steps are taken through the
+        * auth_complete event.
+        */
+       if (conn->type != LE_LINK)
+               goto unlock;
+
        if (!ev->status)
                conn->sec_level = conn->pending_sec_level;
 
@@ -3361,24 +3425,20 @@ unlock:
 
 static u8 hci_get_auth_req(struct hci_conn *conn)
 {
-       /* If remote requests dedicated bonding follow that lead */
-       if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
-           conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
-               /* If both remote and local IO capabilities allow MITM
-                * protection then require it, otherwise don't */
-               if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
-                   conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
-                       return HCI_AT_DEDICATED_BONDING;
-               else
-                       return HCI_AT_DEDICATED_BONDING_MITM;
-       }
-
        /* If remote requests no-bonding follow that lead */
        if (conn->remote_auth == HCI_AT_NO_BONDING ||
            conn->remote_auth == HCI_AT_NO_BONDING_MITM)
                return conn->remote_auth | (conn->auth_type & 0x01);
 
-       return conn->auth_type;
+       /* If both remote and local have enough IO capabilities, require
+        * MITM protection
+        */
+       if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
+           conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
+               return conn->remote_auth | 0x01;
+
+       /* No MITM protection possible so ignore remote requirement */
+       return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
 }
 
 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3408,8 +3468,21 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                 * to DisplayYesNo as it is not supported by BT spec. */
                cp.capability = (conn->io_capability == 0x04) ?
                                HCI_IO_DISPLAY_YESNO : conn->io_capability;
-               conn->auth_type = hci_get_auth_req(conn);
-               cp.authentication = conn->auth_type;
+
+               /* If we are initiators, there is no remote information yet */
+               if (conn->remote_auth == 0xff) {
+                       cp.authentication = conn->auth_type;
+
+                       /* Request MITM protection if our IO caps allow it
+                        * except for the no-bonding case
+                        */
+                       if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
+                           cp.authentication != HCI_AT_NO_BONDING)
+                               cp.authentication |= 0x01;
+               } else {
+                       conn->auth_type = hci_get_auth_req(conn);
+                       cp.authentication = conn->auth_type;
+               }
 
                if (hci_find_remote_oob_data(hdev, &conn->dst) &&
                    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
@@ -3477,12 +3550,9 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
        rem_mitm = (conn->remote_auth & 0x01);
 
        /* If we require MITM but the remote device can't provide that
-        * (it has NoInputNoOutput) then reject the confirmation
-        * request. The only exception is when we're dedicated bonding
-        * initiators (connect_cfm_cb set) since then we always have the MITM
-        * bit set. */
-       if (!conn->connect_cfm_cb && loc_mitm &&
-           conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
+        * (it has NoInputNoOutput) then reject the confirmation request
+        */
+       if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
                BT_DBG("Rejecting request: remote device can't provide MITM");
                hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
                             sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3840,17 +3910,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
                conn->dst_type = ev->bdaddr_type;
 
-               /* The advertising parameters for own address type
-                * define which source address and source address
-                * type this connections has.
-                */
-               if (bacmp(&conn->src, BDADDR_ANY)) {
-                       conn->src_type = ADDR_LE_DEV_PUBLIC;
-               } else {
-                       bacpy(&conn->src, &hdev->static_addr);
-                       conn->src_type = ADDR_LE_DEV_RANDOM;
-               }
-
                if (ev->role == LE_CONN_ROLE_MASTER) {
                        conn->out = true;
                        conn->link_mode |= HCI_LM_MASTER;
@@ -3875,27 +3934,24 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                                                          &conn->init_addr,
                                                          &conn->init_addr_type);
                        }
-               } else {
-                       /* Set the responder (our side) address type based on
-                        * the advertising address type.
-                        */
-                       conn->resp_addr_type = hdev->adv_addr_type;
-                       if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
-                               bacpy(&conn->resp_addr, &hdev->random_addr);
-                       else
-                               bacpy(&conn->resp_addr, &hdev->bdaddr);
-
-                       conn->init_addr_type = ev->bdaddr_type;
-                       bacpy(&conn->init_addr, &ev->bdaddr);
                }
        } else {
                cancel_delayed_work(&conn->le_conn_timeout);
        }
 
-       /* Ensure that the hci_conn contains the identity address type
-        * regardless of which address the connection was made with.
-        */
-       hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+       if (!conn->out) {
+               /* Set the responder (our side) address type based on
+                * the advertising address type.
+                */
+               conn->resp_addr_type = hdev->adv_addr_type;
+               if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
+                       bacpy(&conn->resp_addr, &hdev->random_addr);
+               else
+                       bacpy(&conn->resp_addr, &hdev->bdaddr);
+
+               conn->init_addr_type = ev->bdaddr_type;
+               bacpy(&conn->init_addr, &ev->bdaddr);
+       }
 
        /* Lookup the identity address from the stored connection
         * address and address type.
@@ -3975,25 +4031,97 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
        }
 }
 
+static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+                              u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
+{
+       struct discovery_state *d = &hdev->discovery;
+       bool match;
+
+       /* Passive scanning shouldn't trigger any device found events */
+       if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
+               if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
+                       check_pending_le_conn(hdev, bdaddr, bdaddr_type);
+               return;
+       }
+
+       /* If there's nothing pending either store the data from this
+        * event or send an immediate device found event if the data
+        * should not be stored for later.
+        */
+       if (!has_pending_adv_report(hdev)) {
+               /* If the report will trigger a SCAN_REQ store it for
+                * later merging.
+                */
+               if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
+                       store_pending_adv_report(hdev, bdaddr, bdaddr_type,
+                                                rssi, data, len);
+                       return;
+               }
+
+               mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+                                 rssi, 0, 1, data, len, NULL, 0);
+               return;
+       }
+
+       /* Check if the pending report is for the same device as the new one */
+       match = (!bacmp(bdaddr, &d->last_adv_addr) &&
+                bdaddr_type == d->last_adv_addr_type);
+
+       /* If the pending data doesn't match this report or this isn't a
+        * scan response (e.g. we got a duplicate ADV_IND) then force
+        * sending of the pending data.
+        */
+       if (type != LE_ADV_SCAN_RSP || !match) {
+               /* Send out whatever is in the cache, but skip duplicates */
+               if (!match)
+                       mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+                                         d->last_adv_addr_type, NULL,
+                                         d->last_adv_rssi, 0, 1,
+                                         d->last_adv_data,
+                                         d->last_adv_data_len, NULL, 0);
+
+               /* If the new report will trigger a SCAN_REQ store it for
+                * later merging.
+                */
+               if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
+                       store_pending_adv_report(hdev, bdaddr, bdaddr_type,
+                                                rssi, data, len);
+                       return;
+               }
+
+               /* The advertising reports cannot be merged, so clear
+                * the pending report and send out a device found event.
+                */
+               clear_pending_adv_report(hdev);
+               mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+                                 rssi, 0, 1, data, len, NULL, 0);
+               return;
+       }
+
+       /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
+        * the new event is a SCAN_RSP. We can therefore proceed with
+        * sending a merged device found event.
+        */
+       mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+                         d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
+                         d->last_adv_data, d->last_adv_data_len);
+       clear_pending_adv_report(hdev);
+}
+
 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        u8 num_reports = skb->data[0];
        void *ptr = &skb->data[1];
-       s8 rssi;
 
        hci_dev_lock(hdev);
 
        while (num_reports--) {
                struct hci_ev_le_advertising_info *ev = ptr;
-
-               if (ev->evt_type == LE_ADV_IND ||
-                   ev->evt_type == LE_ADV_DIRECT_IND)
-                       check_pending_le_conn(hdev, &ev->bdaddr,
-                                             ev->bdaddr_type);
+               s8 rssi;
 
                rssi = ev->data[ev->length];
-               mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
-                                 NULL, rssi, 0, 1, ev->data, ev->length);
+               process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
+                                  ev->bdaddr_type, rssi, ev->data, ev->length);
 
                ptr += sizeof(*ev) + ev->length + 1;
        }
index b9a418e578e0000ec4ad68734b3e34575cd759b5..f608bffdb8b940915ed160b18603c0733aee63a0 100644 (file)
@@ -524,16 +524,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
-
-               if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
-                       return -EPERM;
-
-               if (arg)
-                       set_bit(HCI_RAW, &hdev->flags);
-               else
-                       clear_bit(HCI_RAW, &hdev->flags);
-
-               return 0;
+               return -EOPNOTSUPP;
 
        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);
index a1e5bb7d06e880b07cb6ab4e7cac153c0011fa79..dc4d301d3a728ccd0af8efe4e9648b843d598406 100644 (file)
@@ -7519,9 +7519,9 @@ int __init l2cap_init(void)
        l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
                                            NULL, &l2cap_debugfs_fops);
 
-       debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
+       debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
                           &le_max_credits);
-       debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
+       debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
                           &le_default_mps);
 
        bt_6lowpan_init();
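
The mode change above is a permissions fix: 0466 would leave the two debugfs
entries writable by group and others but not by their owner, which is almost
certainly unintended, while 0644 is the conventional owner-writable,
world-readable mode. A tiny sketch with a placeholder knob (example_val and
add_example_knob() are illustrative names):

#include <linux/debugfs.h>
#include <linux/types.h>

static u16 example_val = 10;

static void add_example_knob(struct dentry *parent)
{
        /* 0644: owner read/write, group and others read-only */
        debugfs_create_u16("example_knob", 0644, parent, &example_val);
}
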
index b3fbc73516c415ee1654eb6f2aa38d5e390fb00b..941ad7530eda48f21dcf7faef9167cbce6574a8e 100644 (file)
@@ -58,6 +58,7 @@ int bt_to_errno(__u16 code)
                return EIO;
 
        case 0x04:
+       case 0x3c:
                return EHOSTDOWN;
 
        case 0x05:
index d2d4e0d5aed017366668bf263538baf332255d20..54abbce3a39e8bc0cdbd9018ca8e616e5c864546 100644 (file)
@@ -2850,10 +2850,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        }
 
        sec_level = BT_SECURITY_MEDIUM;
-       if (cp->io_cap == 0x03)
-               auth_type = HCI_AT_DEDICATED_BONDING;
-       else
-               auth_type = HCI_AT_DEDICATED_BONDING_MITM;
+       auth_type = HCI_AT_DEDICATED_BONDING;
 
        if (cp->addr.type == BDADDR_BREDR) {
                conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
@@ -3351,6 +3348,8 @@ static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
 
 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
 {
+       unsigned long timeout = 0;
+
        BT_DBG("status %d", status);
 
        if (status) {
@@ -3366,13 +3365,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
 
        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
-               queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
-                                  DISCOV_LE_TIMEOUT);
+               timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                break;
 
        case DISCOV_TYPE_INTERLEAVED:
-               queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
-                                  DISCOV_INTERLEAVED_TIMEOUT);
+               timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                break;
 
        case DISCOV_TYPE_BREDR:
@@ -3381,6 +3378,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
        default:
                BT_ERR("Invalid discovery type %d", hdev->discovery.type);
        }
+
+       if (!timeout)
+               return;
+
+       queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
 }
 
 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -5668,8 +5670,9 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
 }
 
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
-                      ssp, u8 *eir, u16 eir_len)
+                      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+                      u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
+                      u8 scan_rsp_len)
 {
        char buf[512];
        struct mgmt_ev_device_found *ev = (void *) buf;
@@ -5679,8 +5682,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
        if (!hci_discovery_active(hdev))
                return;
 
-       /* Leave 5 bytes for a potential CoD field */
-       if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
+       /* Make sure that the buffer is big enough. The 5 extra bytes
+        * are for the potential CoD field.
+        */
+       if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
                return;
 
        memset(buf, 0, sizeof(buf));
@@ -5707,8 +5712,11 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
                                          dev_class, 3);
 
-       ev->eir_len = cpu_to_le16(eir_len);
-       ev_size = sizeof(*ev) + eir_len;
+       if (scan_rsp_len > 0)
+               memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
+
+       ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
+       ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
 
        mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
 }
index e85498b2f1669f7dae3b7f5f5e1c0970b1be65c6..8590b942bffa62a11d23c4e7a0666c9564d310c1 100644 (file)
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BRIDGE) += bridge.o
 
 bridge-y       := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
-                       br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \
+                       br_ioctl.o br_stp.o br_stp_bpdu.o \
                        br_stp_if.o br_stp_timer.o br_netlink.o
 
 bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
@@ -16,4 +16,4 @@ bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
 
 bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
 
-obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
+obj-$(CONFIG_NETFILTER) += netfilter/
index 19311aafcf5a06e6ae7abaaf82527711573dc6d6..1a755a1e54101d924e88ea240a82c154dcb7bbe5 100644 (file)
 
 #include "br_private.h"
 
+/*
+ * Handle changes in state of network devices enslaved to a bridge.
+ *
+ * Note: don't care about up/down if bridge itself is down, because
+ *     port state is checked when bridge is brought up.
+ */
+static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct net_bridge_port *p;
+       struct net_bridge *br;
+       bool changed_addr;
+       int err;
+
+       /* register of bridge completed, add sysfs entries */
+       if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
+               br_sysfs_addbr(dev);
+               return NOTIFY_DONE;
+       }
+
+       /* not a port of a bridge */
+       p = br_port_get_rtnl(dev);
+       if (!p)
+               return NOTIFY_DONE;
+
+       br = p->br;
+
+       switch (event) {
+       case NETDEV_CHANGEMTU:
+               dev_set_mtu(br->dev, br_min_mtu(br));
+               break;
+
+       case NETDEV_CHANGEADDR:
+               spin_lock_bh(&br->lock);
+               br_fdb_changeaddr(p, dev->dev_addr);
+               changed_addr = br_stp_recalculate_bridge_id(br);
+               spin_unlock_bh(&br->lock);
+
+               if (changed_addr)
+                       call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
+
+               break;
+
+       case NETDEV_CHANGE:
+               br_port_carrier_check(p);
+               break;
+
+       case NETDEV_FEAT_CHANGE:
+               netdev_update_features(br->dev);
+               break;
+
+       case NETDEV_DOWN:
+               spin_lock_bh(&br->lock);
+               if (br->dev->flags & IFF_UP)
+                       br_stp_disable_port(p);
+               spin_unlock_bh(&br->lock);
+               break;
+
+       case NETDEV_UP:
+               if (netif_running(br->dev) && netif_oper_up(dev)) {
+                       spin_lock_bh(&br->lock);
+                       br_stp_enable_port(p);
+                       spin_unlock_bh(&br->lock);
+               }
+               break;
+
+       case NETDEV_UNREGISTER:
+               br_del_if(br, dev);
+               break;
+
+       case NETDEV_CHANGENAME:
+               err = br_sysfs_renameif(p);
+               if (err)
+                       return notifier_from_errno(err);
+               break;
+
+       case NETDEV_PRE_TYPE_CHANGE:
+               /* Forbid underlying device to change its type. */
+               return NOTIFY_BAD;
+
+       case NETDEV_RESEND_IGMP:
+               /* Propagate to master device */
+               call_netdevice_notifiers(event, br->dev);
+               break;
+       }
+
+       /* Events that may cause spanning tree to refresh */
+       if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
+           event == NETDEV_CHANGE || event == NETDEV_DOWN)
+               br_ifinfo_notify(RTM_NEWLINK, p);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block br_device_notifier = {
+       .notifier_call = br_device_event
+};
+
 static void __net_exit br_net_exit(struct net *net)
 {
        struct net_device *dev;
index 3e2da2cb72db1725f064ec21d3ce6ab8765532c1..d77e2f0ff0e9d5d9644a3f4df02446be6fea15cb 100644 (file)
@@ -112,6 +112,12 @@ static void br_dev_set_multicast_list(struct net_device *dev)
 {
 }
 
+static void br_dev_change_rx_flags(struct net_device *dev, int change)
+{
+       if (change & IFF_PROMISC)
+               br_manage_promisc(netdev_priv(dev));
+}
+
 static int br_dev_stop(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -309,6 +315,7 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_get_stats64         = br_get_stats64,
        .ndo_set_mac_address     = br_set_mac_address,
        .ndo_set_rx_mode         = br_dev_set_multicast_list,
+       .ndo_change_rx_flags     = br_dev_change_rx_flags,
        .ndo_change_mtu          = br_change_mtu,
        .ndo_do_ioctl            = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -348,7 +355,7 @@ void br_dev_setup(struct net_device *dev)
 
        dev->netdev_ops = &br_netdev_ops;
        dev->destructor = br_dev_free;
-       SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
+       dev->ethtool_ops = &br_ethtool_ops;
        SET_NETDEV_DEVTYPE(dev, &br_type);
        dev->tx_queue_len = 0;
        dev->priv_flags = IFF_EBRIDGE;
index 9203d5a1943fbd4ba272ae38e742d8692093f7f1..648d0e84959567c09f0692675a4742a4c939c3d8 100644 (file)
@@ -85,8 +85,58 @@ static void fdb_rcu_free(struct rcu_head *head)
        kmem_cache_free(br_fdb_cache, ent);
 }
 
+/* When a static FDB entry is added, the mac address from the entry is
+ * added to the bridge private HW address list and all required ports
+ * are then updated with the new information.
+ * Called under RTNL.
+ */
+static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
+{
+       int err;
+       struct net_bridge_port *p, *tmp;
+
+       ASSERT_RTNL();
+
+       list_for_each_entry(p, &br->port_list, list) {
+               if (!br_promisc_port(p)) {
+                       err = dev_uc_add(p->dev, addr);
+                       if (err)
+                               goto undo;
+               }
+       }
+
+       return;
+undo:
+       list_for_each_entry(tmp, &br->port_list, list) {
+               if (tmp == p)
+                       break;
+               if (!br_promisc_port(tmp))
+                       dev_uc_del(tmp->dev, addr);
+       }
+}
+
+/* When a static FDB entry is deleted, the HW address from that entry is
+ * also removed from the bridge private HW address list and all the
+ * ports are updated with the needed information.
+ * Called under RTNL.
+ */
+static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr)
+{
+       struct net_bridge_port *p;
+
+       ASSERT_RTNL();
+
+       list_for_each_entry(p, &br->port_list, list) {
+               if (!br_promisc_port(p))
+                       dev_uc_del(p->dev, addr);
+       }
+}
+
 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
+       if (f->is_static)
+               fdb_del_hw(br, f->addr.addr);
+
        hlist_del_rcu(&f->hlist);
        fdb_notify(br, f, RTM_DELNEIGH);
        call_rcu(&f->rcu, fdb_rcu_free);
@@ -466,6 +516,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                return -ENOMEM;
 
        fdb->is_local = fdb->is_static = 1;
+       fdb_add_hw(br, addr);
        fdb_notify(br, fdb, RTM_NEWNEIGH);
        return 0;
 }
@@ -678,13 +729,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
        }
 
        if (fdb_to_nud(fdb) != state) {
-               if (state & NUD_PERMANENT)
-                       fdb->is_local = fdb->is_static = 1;
-               else if (state & NUD_NOARP) {
+               if (state & NUD_PERMANENT) {
+                       fdb->is_local = 1;
+                       if (!fdb->is_static) {
+                               fdb->is_static = 1;
+                               fdb_add_hw(br, addr);
+                       }
+               } else if (state & NUD_NOARP) {
+                       fdb->is_local = 0;
+                       if (!fdb->is_static) {
+                               fdb->is_static = 1;
+                               fdb_add_hw(br, addr);
+                       }
+               } else {
                        fdb->is_local = 0;
-                       fdb->is_static = 1;
-               } else
-                       fdb->is_local = fdb->is_static = 0;
+                       if (fdb->is_static) {
+                               fdb->is_static = 0;
+                               fdb_del_hw(br, addr);
+                       }
+               }
 
                modified = true;
        }
@@ -874,3 +937,59 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 out:
        return err;
 }
+
+int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+       struct net_bridge_fdb_entry *fdb, *tmp;
+       int i;
+       int err;
+
+       ASSERT_RTNL();
+
+       for (i = 0; i < BR_HASH_SIZE; i++) {
+               hlist_for_each_entry(fdb, &br->hash[i], hlist) {
+                       /* We only care for static entries */
+                       if (!fdb->is_static)
+                               continue;
+
+                       err = dev_uc_add(p->dev, fdb->addr.addr);
+                       if (err)
+                               goto rollback;
+               }
+       }
+       return 0;
+
+rollback:
+       for (i = 0; i < BR_HASH_SIZE; i++) {
+               hlist_for_each_entry(tmp, &br->hash[i], hlist) {
+                       /* If we reached the fdb that failed, we can stop */
+                       if (tmp == fdb)
+                               break;
+
+                       /* We only care for static entries */
+                       if (!tmp->is_static)
+                               continue;
+
+                       dev_uc_del(p->dev, tmp->addr.addr);
+               }
+       }
+       return err;
+}
+
+void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+       struct net_bridge_fdb_entry *fdb;
+       int i;
+
+       ASSERT_RTNL();
+
+       for (i = 0; i < BR_HASH_SIZE; i++) {
+               hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) {
+                       /* We only care for static entries */
+                       if (!fdb->is_static)
+                               continue;
+
+                       dev_uc_del(p->dev, fdb->addr.addr);
+               }
+       }
+}
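
br_fdb_sync_static() above uses the same rollback idiom as fdb_add_hw(): if
programming an address into the port fails part-way through, the table is
walked again and only the entries handled before the failure are removed. A
generic sketch of that idiom with placeholder helpers (add_one(), del_one())
and an illustrative struct item:

#include <linux/list.h>

struct item {
        struct list_head list;
};

int add_one(struct item *it);           /* placeholder helpers */
void del_one(struct item *it);

static int apply_all(struct list_head *items)
{
        struct item *it, *tmp;
        int err;

        list_for_each_entry(it, items, list) {
                err = add_one(it);
                if (err)
                        goto undo;
        }
        return 0;

undo:
        /* walk again and undo only what was applied before the failure */
        list_for_each_entry(tmp, items, list) {
                if (tmp == it)
                        break;
                del_one(tmp);
        }
        return err;
}
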
index 5262b8617eb9cc21b1070e48d1c1efda584aef6f..104a811dde571173973d7082c2b1886fa4a44647 100644 (file)
@@ -85,6 +85,110 @@ void br_port_carrier_check(struct net_bridge_port *p)
        spin_unlock_bh(&br->lock);
 }
 
+static void br_port_set_promisc(struct net_bridge_port *p)
+{
+       int err = 0;
+
+       if (br_promisc_port(p))
+               return;
+
+       err = dev_set_promiscuity(p->dev, 1);
+       if (err)
+               return;
+
+       br_fdb_unsync_static(p->br, p);
+       p->flags |= BR_PROMISC;
+}
+
+static void br_port_clear_promisc(struct net_bridge_port *p)
+{
+       int err;
+
+       /* Check if the port is already non-promisc or if it doesn't
+        * support UNICAST filtering.  Without unicast filtering support
+        * we'll end up re-enabling promisc mode anyway, so just check for
+        * it here.
+        */
+       if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
+               return;
+
+       /* Since we'll be clearing the promisc mode, program the port
+        * first so that we don't have interruption in traffic.
+        */
+       err = br_fdb_sync_static(p->br, p);
+       if (err)
+               return;
+
+       dev_set_promiscuity(p->dev, -1);
+       p->flags &= ~BR_PROMISC;
+}
+
+/* When a port is added or removed or when certain port flags
+ * change, this function is called to automatically manage
+ * promiscuity setting of all the bridge ports.  We are always called
+ * under RTNL so can skip using rcu primitives.
+ */
+void br_manage_promisc(struct net_bridge *br)
+{
+       struct net_bridge_port *p;
+       bool set_all = false;
+
+       /* If vlan filtering is disabled or bridge interface is placed
+        * into promiscuous mode, place all ports in promiscuous mode.
+        */
+       if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
+               set_all = true;
+
+       list_for_each_entry(p, &br->port_list, list) {
+               if (set_all) {
+                       br_port_set_promisc(p);
+               } else {
+                       /* If the number of auto-ports is <= 1, then all other
+                        * ports will have their output configuration
+                        * statically specified through fdbs.  Since ingress
+                        * on the auto-port becomes forwarding/egress to other
+                        * ports and egress configuration is statically known,
+                        * we can say that ingress configuration of the
+                        * auto-port is also statically known.
+                        * This lets us disable promiscuous mode and write
+                        * this config to hw.
+                        */
+                       if (br->auto_cnt == 0 ||
+                           (br->auto_cnt == 1 && br_auto_port(p)))
+                               br_port_clear_promisc(p);
+                       else
+                               br_port_set_promisc(p);
+               }
+       }
+}
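
A quick restatement of the policy above (a standalone sketch, not kernel code; the flag values are the ones the br_private.h hunk below defines as BR_AUTO_MASK, everything else is illustrative):

#include <stdbool.h>

#define BR_LEARNING	0x00000020UL
#define BR_FLOOD	0x00000040UL
#define BR_AUTO_MASK	(BR_FLOOD | BR_LEARNING)

/* Per-port decision mirrored from br_manage_promisc(). */
static bool port_should_be_promisc(unsigned long port_flags,
				   unsigned int auto_cnt,
				   bool vlan_filtering,
				   bool bridge_is_promisc)
{
	/* Every port is promiscuous when per-VLAN filtering is off or
	 * the bridge device itself was made promiscuous. */
	if (bridge_is_promisc || !vlan_filtering)
		return true;

	/* Promiscuity can be dropped only when at most one auto
	 * (learning/flooding) port exists and, if one does, it is
	 * this very port. */
	if (auto_cnt == 0 ||
	    (auto_cnt == 1 && (port_flags & BR_AUTO_MASK)))
		return false;

	return true;
}

In practice a port becomes non-auto, and therefore a candidate for dropping promiscuity, once both learning and flooding are cleared on it.
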
+
+static void nbp_update_port_count(struct net_bridge *br)
+{
+       struct net_bridge_port *p;
+       u32 cnt = 0;
+
+       list_for_each_entry(p, &br->port_list, list) {
+               if (br_auto_port(p))
+                       cnt++;
+       }
+       if (br->auto_cnt != cnt) {
+               br->auto_cnt = cnt;
+               br_manage_promisc(br);
+       }
+}
+
+static void nbp_delete_promisc(struct net_bridge_port *p)
+{
+       /* If port is currently promiscuous, unset promiscuity.
+        * Otherwise, it is a static port so remove all addresses
+        * from it.
+        */
+       dev_set_allmulti(p->dev, -1);
+       if (br_promisc_port(p))
+               dev_set_promiscuity(p->dev, -1);
+       else
+               br_fdb_unsync_static(p->br, p);
+}
+
 static void release_nbp(struct kobject *kobj)
 {
        struct net_bridge_port *p
@@ -133,7 +237,7 @@ static void del_nbp(struct net_bridge_port *p)
 
        sysfs_remove_link(br->ifobj, p->dev->name);
 
-       dev_set_promiscuity(dev, -1);
+       nbp_delete_promisc(p);
 
        spin_lock_bh(&br->lock);
        br_stp_disable_port(p);
@@ -141,10 +245,11 @@ static void del_nbp(struct net_bridge_port *p)
 
        br_ifinfo_notify(RTM_DELLINK, p);
 
+       list_del_rcu(&p->list);
+
        nbp_vlan_flush(p);
        br_fdb_delete_by_port(br, p, 1);
-
-       list_del_rcu(&p->list);
+       nbp_update_port_count(br);
 
        dev->priv_flags &= ~IFF_BRIDGE_PORT;
 
@@ -353,7 +458,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
        call_netdevice_notifiers(NETDEV_JOIN, dev);
 
-       err = dev_set_promiscuity(dev, 1);
+       err = dev_set_allmulti(dev, 1);
        if (err)
                goto put_back;
 
@@ -384,6 +489,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
        list_add_rcu(&p->list, &br->port_list);
 
+       nbp_update_port_count(br);
+
        netdev_update_features(br->dev);
 
        if (br->dev->needed_headroom < dev->needed_headroom)
@@ -455,3 +562,11 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 
        return 0;
 }
+
+void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
+{
+       struct net_bridge *br = p->br;
+
+       if (mask & BR_AUTO_MASK)
+               nbp_update_port_count(br);
+}
index 80e1b0f60a30214002684a42b1bab1a02e9d9962..a615264cf01a950aafd894109d43f55cfd8dff91 100644 (file)
@@ -535,7 +535,7 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
        if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
                return br;
 
-       vlan = __vlan_find_dev_deep(br, skb->vlan_proto,
+       vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
                                    vlan_tx_tag_get(skb) & VLAN_VID_MASK);
 
        return vlan ? vlan : br;
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
        return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        int ret;
 
-       if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+       if (skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
            !skb_is_gso(skb)) {
                if (br_parse_ip_options(skb))
index e74b6d530cb6a3ab3dc650ee1900df06d3b3441a..26edb518b839b38240ab1de7cb619cd5ba924b6f 100644 (file)
@@ -328,6 +328,7 @@ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
 {
        int err;
+       unsigned long old_flags = p->flags;
 
        br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
        br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
@@ -353,6 +354,8 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
                if (err)
                        return err;
        }
+
+       br_port_flags_change(p, old_flags ^ p->flags);
        return 0;
 }
 
@@ -445,6 +448,20 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
+static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       if (tb[IFLA_ADDRESS]) {
+               spin_lock_bh(&br->lock);
+               br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+               spin_unlock_bh(&br->lock);
+       }
+
+       return register_netdevice(dev);
+}
+
 static size_t br_get_link_af_size(const struct net_device *dev)
 {
        struct net_port_vlans *pv;
@@ -473,6 +490,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
        .priv_size      = sizeof(struct net_bridge),
        .setup          = br_dev_setup,
        .validate       = br_validate,
+       .newlink        = br_dev_newlink,
        .dellink        = br_dev_delete,
 };
 
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
deleted file mode 100644 (file)
index 2998dd1..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- *     Device event handling
- *     Linux ethernet bridge
- *
- *     Authors:
- *     Lennert Buytenhek               <buytenh@gnu.org>
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/rtnetlink.h>
-#include <net/net_namespace.h>
-
-#include "br_private.h"
-
-static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
-
-struct notifier_block br_device_notifier = {
-       .notifier_call = br_device_event
-};
-
-/*
- * Handle changes in state of network devices enslaved to a bridge.
- *
- * Note: don't care about up/down if bridge itself is down, because
- *     port state is checked when bridge is brought up.
- */
-static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
-{
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct net_bridge_port *p;
-       struct net_bridge *br;
-       bool changed_addr;
-       int err;
-
-       /* register of bridge completed, add sysfs entries */
-       if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
-               br_sysfs_addbr(dev);
-               return NOTIFY_DONE;
-       }
-
-       /* not a port of a bridge */
-       p = br_port_get_rtnl(dev);
-       if (!p)
-               return NOTIFY_DONE;
-
-       br = p->br;
-
-       switch (event) {
-       case NETDEV_CHANGEMTU:
-               dev_set_mtu(br->dev, br_min_mtu(br));
-               break;
-
-       case NETDEV_CHANGEADDR:
-               spin_lock_bh(&br->lock);
-               br_fdb_changeaddr(p, dev->dev_addr);
-               changed_addr = br_stp_recalculate_bridge_id(br);
-               spin_unlock_bh(&br->lock);
-
-               if (changed_addr)
-                       call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
-
-               break;
-
-       case NETDEV_CHANGE:
-               br_port_carrier_check(p);
-               break;
-
-       case NETDEV_FEAT_CHANGE:
-               netdev_update_features(br->dev);
-               break;
-
-       case NETDEV_DOWN:
-               spin_lock_bh(&br->lock);
-               if (br->dev->flags & IFF_UP)
-                       br_stp_disable_port(p);
-               spin_unlock_bh(&br->lock);
-               break;
-
-       case NETDEV_UP:
-               if (netif_running(br->dev) && netif_oper_up(dev)) {
-                       spin_lock_bh(&br->lock);
-                       br_stp_enable_port(p);
-                       spin_unlock_bh(&br->lock);
-               }
-               break;
-
-       case NETDEV_UNREGISTER:
-               br_del_if(br, dev);
-               break;
-
-       case NETDEV_CHANGENAME:
-               err = br_sysfs_renameif(p);
-               if (err)
-                       return notifier_from_errno(err);
-               break;
-
-       case NETDEV_PRE_TYPE_CHANGE:
-               /* Forbid underlaying device to change its type. */
-               return NOTIFY_BAD;
-
-       case NETDEV_RESEND_IGMP:
-               /* Propagate to master device */
-               call_netdevice_notifiers(event, br->dev);
-               break;
-       }
-
-       /* Events that may cause spanning tree to refresh */
-       if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
-           event == NETDEV_CHANGE || event == NETDEV_DOWN)
-               br_ifinfo_notify(RTM_NEWLINK, p);
-
-       return NOTIFY_DONE;
-}
index 06811d79f89f9e7712344d99fdc97194c62f0aef..53d6e32965fcd027dfc2b2b7f87baf88650c9f6c 100644 (file)
@@ -174,6 +174,8 @@ struct net_bridge_port
 #define BR_ADMIN_COST          0x00000010
 #define BR_LEARNING            0x00000020
 #define BR_FLOOD               0x00000040
+#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
+#define BR_PROMISC             0x00000080
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        struct bridge_mcast_query       ip4_query;
@@ -198,6 +200,9 @@ struct net_bridge_port
 #endif
 };
 
+#define br_auto_port(p) ((p)->flags & BR_AUTO_MASK)
+#define br_promisc_port(p) ((p)->flags & BR_PROMISC)
+
 #define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
 
 static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
@@ -290,6 +295,7 @@ struct net_bridge
        struct timer_list               topology_change_timer;
        struct timer_list               gc_timer;
        struct kobject                  *ifobj;
+       u32                             auto_cnt;
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
        u8                              vlan_enabled;
        struct net_port_vlans __rcu     *vlan_info;
@@ -327,8 +333,6 @@ struct br_input_skb_cb {
 #define br_debug(br, format, args...)                  \
        pr_debug("%s: " format,  (br)->dev->name, ##args)
 
-extern struct notifier_block br_device_notifier;
-
 /* called under bridge lock */
 static inline int br_is_root_bridge(const struct net_bridge *br)
 {
@@ -395,6 +399,8 @@ int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
               const unsigned char *addr, u16 nlh_flags);
 int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                struct net_device *dev, int idx);
+int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
+void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
 
 /* br_forward.c */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
@@ -415,6 +421,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev);
 int br_min_mtu(const struct net_bridge *br);
 netdev_features_t br_features_recompute(struct net_bridge *br,
                                        netdev_features_t features);
+void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
+void br_manage_promisc(struct net_bridge *br);
 
 /* br_input.c */
 int br_handle_frame_finish(struct sk_buff *skb);
@@ -632,6 +640,10 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
        return v->pvid ?: VLAN_N_VID;
 }
 
+static inline int br_vlan_enabled(struct net_bridge *br)
+{
+       return br->vlan_enabled;
+}
 #else
 static inline bool br_allowed_ingress(struct net_bridge *br,
                                      struct net_port_vlans *v,
@@ -712,6 +724,11 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
 {
        return VLAN_N_VID;      /* Returns invalid vid */
 }
+
+static inline int br_vlan_enabled(struct net_bridge *br)
+{
+       return 0;
+}
 #endif
 
 /* br_netfilter.c */
index dd595bd7fa820444f6e8c424eb7dd4f307998af2..e561cd59b8a6ef0e764b3028d350b13954deac05 100644 (file)
@@ -41,20 +41,30 @@ static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
 }                                                              \
 static int store_##_name(struct net_bridge_port *p, unsigned long v) \
 {                                                              \
-       unsigned long flags = p->flags;                         \
-       if (v)                                                  \
-               flags |= _mask;                                 \
-       else                                                    \
-               flags &= ~_mask;                                \
-       if (flags != p->flags) {                                \
-               p->flags = flags;                               \
-               br_ifinfo_notify(RTM_NEWLINK, p);               \
-       }                                                       \
-       return 0;                                               \
+       return store_flag(p, v, _mask);                         \
 }                                                              \
 static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR,                   \
                   show_##_name, store_##_name)
 
+static int store_flag(struct net_bridge_port *p, unsigned long v,
+                     unsigned long mask)
+{
+       unsigned long flags;
+
+       flags = p->flags;
+
+       if (v)
+               flags |= mask;
+       else
+               flags &= ~mask;
+
+       if (flags != p->flags) {
+               p->flags = flags;
+               br_port_flags_change(p, mask);
+               br_ifinfo_notify(RTM_NEWLINK, p);
+       }
+       return 0;
+}
 
 static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
 {
index 4a37161027899ab12d0f2dd5f4c5d69a1ed98716..24c5cc55589f128a9a8b630b5cc75ada3a27b6c3 100644 (file)
@@ -332,6 +332,7 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
                goto unlock;
 
        br->vlan_enabled = val;
+       br_manage_promisc(br);
 
 unlock:
        rtnl_unlock();
index 5ca74a0e595fe5ce782985f6b2e17f185b5a52ec..629dc77874a9975feed56bb7b6abefad76cfad94 100644 (file)
@@ -2,14 +2,23 @@
 # Bridge netfilter configuration
 #
 #
-config NF_TABLES_BRIDGE
-       depends on NF_TABLES
+menuconfig NF_TABLES_BRIDGE
+       depends on BRIDGE && NETFILTER && NF_TABLES
        tristate "Ethernet Bridge nf_tables support"
 
+if NF_TABLES_BRIDGE
+
+config NFT_BRIDGE_META
+       tristate "Netfilter nf_table bridge meta support"
+       depends on NFT_META
+       help
+         Add support for the bridge dedicated meta keys.
+
+endif # NF_TABLES_BRIDGE
+
 menuconfig BRIDGE_NF_EBTABLES
        tristate "Ethernet Bridge tables (ebtables) support"
-       depends on BRIDGE && NETFILTER
-       select NETFILTER_XTABLES
+       depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
        help
          ebtables is a general, extensible frame/packet identification
          framework. Say 'Y' or 'M' here if you want to do Ethernet
index ea7629f58b3d1c44e28524df8a0937de3a18546b..6f2f3943d66f34b43c72be21b603bbf51ba0d289 100644 (file)
@@ -3,6 +3,7 @@
 #
 
 obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
+obj-$(CONFIG_NFT_BRIDGE_META)  += nft_meta_bridge.o
 
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
 
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
new file mode 100644 (file)
index 0000000..4f02109
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_meta.h>
+
+#include "../br_private.h"
+
+static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
+                                    struct nft_data data[NFT_REG_MAX + 1],
+                                    const struct nft_pktinfo *pkt)
+{
+       const struct nft_meta *priv = nft_expr_priv(expr);
+       const struct net_device *in = pkt->in, *out = pkt->out;
+       struct nft_data *dest = &data[priv->dreg];
+       const struct net_bridge_port *p;
+
+       switch (priv->key) {
+       case NFT_META_BRI_IIFNAME:
+               if (in == NULL || (p = br_port_get_rcu(in)) == NULL)
+                       goto err;
+               break;
+       case NFT_META_BRI_OIFNAME:
+               if (out == NULL || (p = br_port_get_rcu(out)) == NULL)
+                       goto err;
+               break;
+       default:
+               goto out;
+       }
+
+       strncpy((char *)dest->data, p->br->dev->name, sizeof(dest->data));
+       return;
+out:
+       return nft_meta_get_eval(expr, data, pkt);
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
+                                   const struct nft_expr *expr,
+                                   const struct nlattr * const tb[])
+{
+       struct nft_meta *priv = nft_expr_priv(expr);
+       int err;
+
+       priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+       switch (priv->key) {
+       case NFT_META_BRI_IIFNAME:
+       case NFT_META_BRI_OIFNAME:
+               break;
+       default:
+               return nft_meta_get_init(ctx, expr, tb);
+       }
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static struct nft_expr_type nft_meta_bridge_type;
+static const struct nft_expr_ops nft_meta_bridge_get_ops = {
+       .type           = &nft_meta_bridge_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+       .eval           = nft_meta_bridge_get_eval,
+       .init           = nft_meta_bridge_get_init,
+       .dump           = nft_meta_get_dump,
+};
+
+static const struct nft_expr_ops nft_meta_bridge_set_ops = {
+       .type           = &nft_meta_bridge_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+       .eval           = nft_meta_set_eval,
+       .init           = nft_meta_set_init,
+       .dump           = nft_meta_set_dump,
+};
+
+static const struct nft_expr_ops *
+nft_meta_bridge_select_ops(const struct nft_ctx *ctx,
+                          const struct nlattr * const tb[])
+{
+       if (tb[NFTA_META_KEY] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
+               return ERR_PTR(-EINVAL);
+
+       if (tb[NFTA_META_DREG])
+               return &nft_meta_bridge_get_ops;
+
+       if (tb[NFTA_META_SREG])
+               return &nft_meta_bridge_set_ops;
+
+       return ERR_PTR(-EINVAL);
+}
+
+static struct nft_expr_type nft_meta_bridge_type __read_mostly = {
+       .family         = NFPROTO_BRIDGE,
+       .name           = "meta",
+       .select_ops     = &nft_meta_bridge_select_ops,
+       .policy         = nft_meta_policy,
+       .maxattr        = NFTA_META_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_meta_bridge_module_init(void)
+{
+       return nft_register_expr(&nft_meta_bridge_type);
+}
+
+static void __exit nft_meta_bridge_module_exit(void)
+{
+       nft_unregister_expr(&nft_meta_bridge_type);
+}
+
+module_init(nft_meta_bridge_module_init);
+module_exit(nft_meta_bridge_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
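
With this expression registered for NFPROTO_BRIDGE, rules in the bridge family can match on the name of the bridge that the input or output port belongs to (the eval above copies p->br->dev->name into the destination register). Assuming the corresponding nft userspace keys for NFT_META_BRI_IIFNAME/OIFNAME (ibrname/obrname, which are not part of this kernel patch) and an existing bridge-family filter/forward chain, a rule would look roughly like: nft add rule bridge filter forward meta ibrname "br0" accept.
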
index a27f8aad9e991f95cc5366bce3e975bff4f16bdd..ce82337521f665c5847819402d8a9c167452fb90 100644 (file)
@@ -337,6 +337,29 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
                return (struct dev_rcv_lists *)dev->ml_priv;
 }
 
+/**
+ * effhash - hash function for 29 bit CAN identifier reduction
+ * @can_id: 29 bit CAN identifier
+ *
+ * Description:
+ *  To reduce the linear traversal in one linked list of _single_ EFF CAN
+ *  frame subscriptions, the 29 bit identifier is mapped to 10 bits
+ *  (see CAN_EFF_RCV_HASH_BITS definition).
+ *
+ * Return:
+ *  Hash value from 0x000 - 0x3FF (enforced by the CAN_EFF_RCV_HASH_BITS mask)
+ */
+static unsigned int effhash(canid_t can_id)
+{
+       unsigned int hash;
+
+       hash = can_id;
+       hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
+       hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
+
+       return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
+}
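
To make the mapping concrete (a userspace sketch, not part of the patch; the shift amounts and mask mirror CAN_EFF_RCV_HASH_BITS == 10 from the af_can.h hunk below, and the sample identifier is arbitrary):

#include <assert.h>

/* Same fold as effhash(): XOR the 29 bit identifier with itself
 * shifted right by 10 and by 20 bits, then keep the low 10 bits. */
static unsigned int eff_fold(unsigned int can_id)
{
	unsigned int hash = can_id;

	hash ^= can_id >> 10;
	hash ^= can_id >> 20;

	return hash & ((1U << 10) - 1);
}

int main(void)
{
	/* 0x12345678 ^ 0x00048d15 ^ 0x00000123 = 0x1230da4e -> bucket 0x24e */
	assert(eff_fold(0x12345678U) == 0x24eU);
	return 0;
}
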
+
 /**
  * find_rcv_list - determine optimal filterlist inside device filter struct
  * @can_id: pointer to CAN identifier of a given can_filter
@@ -400,10 +423,8 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
            !(*can_id & CAN_RTR_FLAG)) {
 
                if (*can_id & CAN_EFF_FLAG) {
-                       if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
-                               /* RFC: a future use-case for hash-tables? */
-                               return &d->rx[RX_EFF];
-                       }
+                       if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
+                               return &d->rx_eff[effhash(*can_id)];
                } else {
                        if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
                                return &d->rx_sff[*can_id];
@@ -632,7 +653,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
                return matches;
 
        if (can_id & CAN_EFF_FLAG) {
-               hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) {
+               hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
                        if (r->can_id == can_id) {
                                deliver(skb, r);
                                matches++;
index 6de58b40535cc309e59f1a61ed624c930b7c7ffe..fca0fe9fc45a497cdf3da82d5414e846e7cc61b7 100644 (file)
@@ -59,12 +59,17 @@ struct receiver {
        char *ident;
 };
 
-enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
+#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
+#define CAN_EFF_RCV_HASH_BITS 10
+#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
+
+enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
 
 /* per device receive filters linked at dev->ml_priv */
 struct dev_rcv_lists {
        struct hlist_head rx[RX_MAX];
-       struct hlist_head rx_sff[0x800];
+       struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
+       struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
        int remove_on_zero_entries;
        int entries;
 };
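
For scale (an illustration, not part of the patch): a struct hlist_head holds a single pointer, so the new rx_eff table adds 1024 list heads per device, roughly 8 KiB with 8-byte pointers, alongside the existing 2048-entry rx_sff table. A minimal sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Assumes 64-bit pointers; an hlist_head is one pointer wide. */
	unsigned long buckets = 1UL << 10;	/* CAN_EFF_RCV_HASH_BITS */
	unsigned long bytes = buckets * sizeof(void *);

	printf("rx_eff: %lu buckets, %lu KiB per device\n",
	       buckets, bytes / 1024);		/* 1024 buckets, 8 KiB */
	return 0;
}
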
index ac31891967da1ed811247d131dffbabfd7d28d11..050a2110d43f6b78f331b599569eaaf2d8803c24 100644 (file)
@@ -804,7 +804,7 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh)
        u8 limhops = 0;
        int err = 0;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (nlmsg_len(nlh) < sizeof(*r))
@@ -893,7 +893,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
        u8 limhops = 0;
        int err = 0;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (nlmsg_len(nlh) < sizeof(*r))
index b543470c8f8b5ef7e5196b7f895cf30a9b0db90e..1a19b985a8685b0aff4450acda4a8d4f429568bc 100644 (file)
@@ -80,7 +80,6 @@ static const char rx_list_name[][8] = {
        [RX_ALL] = "rx_all",
        [RX_FIL] = "rx_fil",
        [RX_INV] = "rx_inv",
-       [RX_EFF] = "rx_eff",
 };
 
 /*
@@ -389,25 +388,26 @@ static const struct file_operations can_rcvlist_proc_fops = {
        .release        = single_release,
 };
 
-static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
-                                                struct net_device *dev,
-                                                struct dev_rcv_lists *d)
+static inline void can_rcvlist_proc_show_array(struct seq_file *m,
+                                              struct net_device *dev,
+                                              struct hlist_head *rcv_array,
+                                              unsigned int rcv_array_sz)
 {
-       int i;
+       unsigned int i;
        int all_empty = 1;
 
        /* check whether at least one list is non-empty */
-       for (i = 0; i < 0x800; i++)
-               if (!hlist_empty(&d->rx_sff[i])) {
+       for (i = 0; i < rcv_array_sz; i++)
+               if (!hlist_empty(&rcv_array[i])) {
                        all_empty = 0;
                        break;
                }
 
        if (!all_empty) {
                can_print_recv_banner(m);
-               for (i = 0; i < 0x800; i++) {
-                       if (!hlist_empty(&d->rx_sff[i]))
-                               can_print_rcvlist(m, &d->rx_sff[i], dev);
+               for (i = 0; i < rcv_array_sz; i++) {
+                       if (!hlist_empty(&rcv_array[i]))
+                               can_print_rcvlist(m, &rcv_array[i], dev);
                }
        } else
                seq_printf(m, "  (%s: no entry)\n", DNAME(dev));
@@ -425,12 +425,15 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
 
        /* sff receive list for 'all' CAN devices (dev == NULL) */
        d = &can_rx_alldev_list;
-       can_rcvlist_sff_proc_show_one(m, NULL, d);
+       can_rcvlist_proc_show_array(m, NULL, d->rx_sff, ARRAY_SIZE(d->rx_sff));
 
        /* sff receive list for registered CAN devices */
        for_each_netdev_rcu(&init_net, dev) {
-               if (dev->type == ARPHRD_CAN && dev->ml_priv)
-                       can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
+               if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+                       d = dev->ml_priv;
+                       can_rcvlist_proc_show_array(m, dev, d->rx_sff,
+                                                   ARRAY_SIZE(d->rx_sff));
+               }
        }
 
        rcu_read_unlock();
@@ -452,6 +455,49 @@ static const struct file_operations can_rcvlist_sff_proc_fops = {
        .release        = single_release,
 };
 
+
+static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
+{
+       struct net_device *dev;
+       struct dev_rcv_lists *d;
+
+       /* RX_EFF */
+       seq_puts(m, "\nreceive list 'rx_eff':\n");
+
+       rcu_read_lock();
+
+       /* eff receive list for 'all' CAN devices (dev == NULL) */
+       d = &can_rx_alldev_list;
+       can_rcvlist_proc_show_array(m, NULL, d->rx_eff, ARRAY_SIZE(d->rx_eff));
+
+       /* eff receive list for registered CAN devices */
+       for_each_netdev_rcu(&init_net, dev) {
+               if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+                       d = dev->ml_priv;
+                       can_rcvlist_proc_show_array(m, dev, d->rx_eff,
+                                                   ARRAY_SIZE(d->rx_eff));
+               }
+       }
+
+       rcu_read_unlock();
+
+       seq_putc(m, '\n');
+       return 0;
+}
+
+static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, can_rcvlist_eff_proc_show, NULL);
+}
+
+static const struct file_operations can_rcvlist_eff_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = can_rcvlist_eff_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 /*
  * proc utility functions
  */
@@ -491,8 +537,8 @@ void can_init_proc(void)
                                           &can_rcvlist_proc_fops, (void *)RX_FIL);
        pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir,
                                           &can_rcvlist_proc_fops, (void *)RX_INV);
-       pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
-                                          &can_rcvlist_proc_fops, (void *)RX_EFF);
+       pde_rcvlist_eff = proc_create(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
+                                     &can_rcvlist_eff_proc_fops);
        pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir,
                                      &can_rcvlist_sff_proc_fops);
 }
index dac7f9b986877efa88f8e308e783c216563ad243..1948d592aa54c7a1831df546702904898cd68da4 100644 (file)
@@ -557,7 +557,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
        return r;
 }
 
-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
                     int offset, size_t size, bool more)
 {
        int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
@@ -570,6 +570,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
        return ret;
 }
 
+static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+                    int offset, size_t size, bool more)
+{
+       int ret;
+       struct kvec iov;
+
+       /* sendpage cannot properly handle pages with page_count == 0,
+        * we need to fall back to sendmsg if that's the case */
+       if (page_count(page) >= 1)
+               return __ceph_tcp_sendpage(sock, page, offset, size, more);
+
+       iov.iov_base = kmap(page) + offset;
+       iov.iov_len = size;
+       ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
+       kunmap(page);
+
+       return ret;
+}
 
 /*
  * Shutdown/close the socket for the given connection.
index e632b5a52f5b89cb2e275b64494905cc7ebfc8e7..c547e46084d360c14abd34f97b6d1d3592d1c641 100644 (file)
@@ -329,6 +329,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
        dout("crush decode tunable chooseleaf_descend_once = %d",
             c->chooseleaf_descend_once);
 
+       ceph_decode_need(p, end, sizeof(u8), done);
+       c->chooseleaf_vary_r = ceph_decode_8(p);
+       dout("crush decode tunable chooseleaf_vary_r = %d",
+            c->chooseleaf_vary_r);
+
 done:
        dout("crush_decode success\n");
        return c;
@@ -1548,8 +1553,10 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
                return;
 
        for (i = 0; i < len; i++) {
-               if (osds[i] != CRUSH_ITEM_NONE &&
-                   osdmap->osd_primary_affinity[i] !=
+               int osd = osds[i];
+
+               if (osd != CRUSH_ITEM_NONE &&
+                   osdmap->osd_primary_affinity[osd] !=
                                        CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
                        break;
                }
@@ -1563,10 +1570,9 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
         * osd's pgs get rejected as primary.
         */
        for (i = 0; i < len; i++) {
-               int osd;
+               int osd = osds[i];
                u32 aff;
 
-               osd = osds[i];
                if (osd == CRUSH_ITEM_NONE)
                        continue;
 
index 826b925aa4530a0de280b7b01442beb191cf8b5c..71093d94ad2bb22e01e09676c482abd3d8d37ab5 100644 (file)
@@ -9,7 +9,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
 obj-y               += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
                        neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
-                       sock_diag.o dev_ioctl.o
+                       sock_diag.o dev_ioctl.o tso.o
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-y += net-sysfs.o
index 11d70e3afefa467ceb64fccd3fad436b6abb9189..0355ca5d2924337372822841a74d87c48dceede6 100644 (file)
@@ -2424,7 +2424,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
        int i;
@@ -2499,38 +2499,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-                                           const struct net_device *dev,
-                                           netdev_features_t features)
+       netdev_features_t features)
 {
        int tmp;
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
                features &= ~NETIF_F_ALL_CSUM;
-       } else if (illegal_highdma(dev, skb)) {
+       } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
 
        return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       netdev_features_t features = dev->features;
+       netdev_features_t features = skb->dev->features;
 
-       if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+       if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
                features &= ~NETIF_F_GSO_MASK;
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        } else if (!vlan_tx_tag_present(skb)) {
-               return harmonize_features(skb, dev, features);
+               return harmonize_features(skb, features);
        }
 
-       features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+       features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2538,9 +2536,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
                                NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_STAG_TX;
 
-       return harmonize_features(skb, dev, features);
+       return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq)
@@ -3959,6 +3957,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        }
        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
+       NAPI_GRO_CB(skb)->last = skb;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        skb->next = napi->gro_list;
        napi->gro_list = skb;
@@ -4548,6 +4547,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 }
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
+/**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                struct list_head **iter)
+{
+       struct netdev_adjacent *upper;
+
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+       upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+
+       *iter = &upper->list;
+
+       return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
 /**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
@@ -4629,6 +4654,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
+/**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ *                         list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       *iter = &lower->list;
+
+       return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
 /**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *                                    lower neighbour list, RCU
@@ -5079,6 +5130,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev))
+{
+       struct net_device *lower = NULL;
+       struct list_head *iter;
+       int max_nest = -1;
+       int nest;
+
+       ASSERT_RTNL();
+
+       netdev_for_each_lower_dev(dev, lower, iter) {
+               nest = dev_get_nest_level(lower, type_check);
+               if (max_nest < nest)
+                       max_nest = nest;
+       }
+
+       if (type_check(dev))
+               max_nest++;
+
+       return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -5244,7 +5319,6 @@ void __dev_set_rx_mode(struct net_device *dev)
        if (ops->ndo_set_rx_mode)
                ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5549,7 +5623,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
@@ -5606,10 +5680,6 @@ static void rollback_registered_many(struct list_head *head)
                */
                call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 
-               if (!dev->rtnl_link_ops ||
-                   dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
-                       rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
-
                /*
                 *      Flush the unicast and multicast chains
                 */
@@ -5619,6 +5689,10 @@ static void rollback_registered_many(struct list_head *head)
                if (dev->netdev_ops->ndo_uninit)
                        dev->netdev_ops->ndo_uninit(dev);
 
+               if (!dev->rtnl_link_ops ||
+                   dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+                       rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
+
                /* Notifier chain MUST detach us all upper devices. */
                WARN_ON(netdev_has_any_upper_dev(dev));
 
index 1d72786ef866cc6ae728a60cf90d1a74793b7a74..aa8978ac47d28b8588ff78e02e9607b1d616d6d1 100644 (file)
@@ -568,8 +568,10 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
 
        /* Validate ring indices */
        for (i = 0; i < size; i++) {
-               if (indir[i] >= rx_rings->data)
+               if (indir[i] >= rx_rings->data) {
                        ret = -EINVAL;
+                       break;
+               }
        }
        return ret;
 }
index 78a636e60a0b182061358a068c2c2ed4d36051cb..2c2d35d9d10119c99276ce4ee22900c5d9cb3b2f 100644 (file)
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
 
+/* Registers */
+#define BPF_R0 regs[BPF_REG_0]
+#define BPF_R1 regs[BPF_REG_1]
+#define BPF_R2 regs[BPF_REG_2]
+#define BPF_R3 regs[BPF_REG_3]
+#define BPF_R4 regs[BPF_REG_4]
+#define BPF_R5 regs[BPF_REG_5]
+#define BPF_R6 regs[BPF_REG_6]
+#define BPF_R7 regs[BPF_REG_7]
+#define BPF_R8 regs[BPF_REG_8]
+#define BPF_R9 regs[BPF_REG_9]
+#define BPF_R10        regs[BPF_REG_10]
+
+/* Named registers */
+#define A      regs[insn->a_reg]
+#define X      regs[insn->x_reg]
+#define FP     regs[BPF_REG_FP]
+#define ARG1   regs[BPF_REG_ARG1]
+#define CTX    regs[BPF_REG_CTX]
+#define K      insn->imm
+
 /* No hurry in this branch
  *
  * Exported for the bpf jit load helper.
@@ -57,9 +78,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
-
        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
+
        return NULL;
 }
 
@@ -68,6 +89,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
 {
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);
+
        return bpf_internal_load_pointer_neg_helper(skb, k, size);
 }
 
@@ -131,210 +153,213 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
  * keep, 0 for none. @ctx is the data we are operating on, @insn is the
  * array of filter instructions.
  */
-unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
+static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 {
        u64 stack[MAX_BPF_STACK / sizeof(u64)];
        u64 regs[MAX_BPF_REG], tmp;
-       void *ptr;
-       int off;
-
-#define K  insn->imm
-#define A  regs[insn->a_reg]
-#define X  regs[insn->x_reg]
-#define R0 regs[0]
-
-#define CONT    ({insn++; goto select_insn; })
-#define CONT_JMP ({insn++; goto select_insn; })
-
        static const void *jumptable[256] = {
                [0 ... 255] = &&default_label,
                /* Now overwrite non-defaults ... */
-#define DL(A, B, C)    [A|B|C] = &&A##_##B##_##C
-               DL(BPF_ALU, BPF_ADD, BPF_X),
-               DL(BPF_ALU, BPF_ADD, BPF_K),
-               DL(BPF_ALU, BPF_SUB, BPF_X),
-               DL(BPF_ALU, BPF_SUB, BPF_K),
-               DL(BPF_ALU, BPF_AND, BPF_X),
-               DL(BPF_ALU, BPF_AND, BPF_K),
-               DL(BPF_ALU, BPF_OR, BPF_X),
-               DL(BPF_ALU, BPF_OR, BPF_K),
-               DL(BPF_ALU, BPF_LSH, BPF_X),
-               DL(BPF_ALU, BPF_LSH, BPF_K),
-               DL(BPF_ALU, BPF_RSH, BPF_X),
-               DL(BPF_ALU, BPF_RSH, BPF_K),
-               DL(BPF_ALU, BPF_XOR, BPF_X),
-               DL(BPF_ALU, BPF_XOR, BPF_K),
-               DL(BPF_ALU, BPF_MUL, BPF_X),
-               DL(BPF_ALU, BPF_MUL, BPF_K),
-               DL(BPF_ALU, BPF_MOV, BPF_X),
-               DL(BPF_ALU, BPF_MOV, BPF_K),
-               DL(BPF_ALU, BPF_DIV, BPF_X),
-               DL(BPF_ALU, BPF_DIV, BPF_K),
-               DL(BPF_ALU, BPF_MOD, BPF_X),
-               DL(BPF_ALU, BPF_MOD, BPF_K),
-               DL(BPF_ALU, BPF_NEG, 0),
-               DL(BPF_ALU, BPF_END, BPF_TO_BE),
-               DL(BPF_ALU, BPF_END, BPF_TO_LE),
-               DL(BPF_ALU64, BPF_ADD, BPF_X),
-               DL(BPF_ALU64, BPF_ADD, BPF_K),
-               DL(BPF_ALU64, BPF_SUB, BPF_X),
-               DL(BPF_ALU64, BPF_SUB, BPF_K),
-               DL(BPF_ALU64, BPF_AND, BPF_X),
-               DL(BPF_ALU64, BPF_AND, BPF_K),
-               DL(BPF_ALU64, BPF_OR, BPF_X),
-               DL(BPF_ALU64, BPF_OR, BPF_K),
-               DL(BPF_ALU64, BPF_LSH, BPF_X),
-               DL(BPF_ALU64, BPF_LSH, BPF_K),
-               DL(BPF_ALU64, BPF_RSH, BPF_X),
-               DL(BPF_ALU64, BPF_RSH, BPF_K),
-               DL(BPF_ALU64, BPF_XOR, BPF_X),
-               DL(BPF_ALU64, BPF_XOR, BPF_K),
-               DL(BPF_ALU64, BPF_MUL, BPF_X),
-               DL(BPF_ALU64, BPF_MUL, BPF_K),
-               DL(BPF_ALU64, BPF_MOV, BPF_X),
-               DL(BPF_ALU64, BPF_MOV, BPF_K),
-               DL(BPF_ALU64, BPF_ARSH, BPF_X),
-               DL(BPF_ALU64, BPF_ARSH, BPF_K),
-               DL(BPF_ALU64, BPF_DIV, BPF_X),
-               DL(BPF_ALU64, BPF_DIV, BPF_K),
-               DL(BPF_ALU64, BPF_MOD, BPF_X),
-               DL(BPF_ALU64, BPF_MOD, BPF_K),
-               DL(BPF_ALU64, BPF_NEG, 0),
-               DL(BPF_JMP, BPF_CALL, 0),
-               DL(BPF_JMP, BPF_JA, 0),
-               DL(BPF_JMP, BPF_JEQ, BPF_X),
-               DL(BPF_JMP, BPF_JEQ, BPF_K),
-               DL(BPF_JMP, BPF_JNE, BPF_X),
-               DL(BPF_JMP, BPF_JNE, BPF_K),
-               DL(BPF_JMP, BPF_JGT, BPF_X),
-               DL(BPF_JMP, BPF_JGT, BPF_K),
-               DL(BPF_JMP, BPF_JGE, BPF_X),
-               DL(BPF_JMP, BPF_JGE, BPF_K),
-               DL(BPF_JMP, BPF_JSGT, BPF_X),
-               DL(BPF_JMP, BPF_JSGT, BPF_K),
-               DL(BPF_JMP, BPF_JSGE, BPF_X),
-               DL(BPF_JMP, BPF_JSGE, BPF_K),
-               DL(BPF_JMP, BPF_JSET, BPF_X),
-               DL(BPF_JMP, BPF_JSET, BPF_K),
-               DL(BPF_JMP, BPF_EXIT, 0),
-               DL(BPF_STX, BPF_MEM, BPF_B),
-               DL(BPF_STX, BPF_MEM, BPF_H),
-               DL(BPF_STX, BPF_MEM, BPF_W),
-               DL(BPF_STX, BPF_MEM, BPF_DW),
-               DL(BPF_STX, BPF_XADD, BPF_W),
-               DL(BPF_STX, BPF_XADD, BPF_DW),
-               DL(BPF_ST, BPF_MEM, BPF_B),
-               DL(BPF_ST, BPF_MEM, BPF_H),
-               DL(BPF_ST, BPF_MEM, BPF_W),
-               DL(BPF_ST, BPF_MEM, BPF_DW),
-               DL(BPF_LDX, BPF_MEM, BPF_B),
-               DL(BPF_LDX, BPF_MEM, BPF_H),
-               DL(BPF_LDX, BPF_MEM, BPF_W),
-               DL(BPF_LDX, BPF_MEM, BPF_DW),
-               DL(BPF_LD, BPF_ABS, BPF_W),
-               DL(BPF_LD, BPF_ABS, BPF_H),
-               DL(BPF_LD, BPF_ABS, BPF_B),
-               DL(BPF_LD, BPF_IND, BPF_W),
-               DL(BPF_LD, BPF_IND, BPF_H),
-               DL(BPF_LD, BPF_IND, BPF_B),
-#undef DL
+               /* 32 bit ALU operations */
+               [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
+               [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
+               [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
+               [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
+               [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
+               [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
+               [BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
+               [BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
+               [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
+               [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
+               [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
+               [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
+               [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
+               [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
+               [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
+               [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
+               [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
+               [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
+               [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
+               [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
+               [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
+               [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
+               [BPF_ALU | BPF_NEG] = &&ALU_NEG,
+               [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
+               [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
+               /* 64 bit ALU operations */
+               [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
+               [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
+               [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
+               [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
+               [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
+               [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
+               [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
+               [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
+               [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
+               [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
+               [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
+               [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
+               [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
+               [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
+               [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
+               [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
+               [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
+               [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
+               [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
+               [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
+               [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
+               [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
+               [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
+               [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
+               [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
+               /* Call instruction */
+               [BPF_JMP | BPF_CALL] = &&JMP_CALL,
+               /* Jumps */
+               [BPF_JMP | BPF_JA] = &&JMP_JA,
+               [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
+               [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
+               [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
+               [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
+               [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
+               [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
+               [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
+               [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
+               [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
+               [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
+               [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
+               [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
+               [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
+               [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
+               /* Program return */
+               [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
+               /* Store instructions */
+               [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
+               [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
+               [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
+               [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
+               [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
+               [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
+               [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
+               [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
+               [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
+               [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
+               /* Load instructions */
+               [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
+               [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
+               [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
+               [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
+               [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
+               [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
+               [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
+               [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
+               [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
+               [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
        };
+       void *ptr;
+       int off;
+
+#define CONT    ({ insn++; goto select_insn; })
+#define CONT_JMP ({ insn++; goto select_insn; })
+
+       FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
+       ARG1 = (u64) (unsigned long) ctx;
 
-       regs[FP_REG]  = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
-       regs[ARG1_REG] = (u64) (unsigned long) ctx;
+       /* Registers for user BPF programs need to be reset first. */
+       regs[BPF_REG_A] = 0;
+       regs[BPF_REG_X] = 0;
 
 select_insn:
        goto *jumptable[insn->code];
 
        /* ALU */
 #define ALU(OPCODE, OP)                        \
-       BPF_ALU64_##OPCODE##_BPF_X:     \
+       ALU64_##OPCODE##_X:             \
                A = A OP X;             \
                CONT;                   \
-       BPF_ALU_##OPCODE##_BPF_X:       \
+       ALU_##OPCODE##_X:               \
                A = (u32) A OP (u32) X; \
                CONT;                   \
-       BPF_ALU64_##OPCODE##_BPF_K:     \
+       ALU64_##OPCODE##_K:             \
                A = A OP K;             \
                CONT;                   \
-       BPF_ALU_##OPCODE##_BPF_K:       \
+       ALU_##OPCODE##_K:               \
                A = (u32) A OP (u32) K; \
                CONT;
 
-       ALU(BPF_ADD,  +)
-       ALU(BPF_SUB,  -)
-       ALU(BPF_AND,  &)
-       ALU(BPF_OR,   |)
-       ALU(BPF_LSH, <<)
-       ALU(BPF_RSH, >>)
-       ALU(BPF_XOR,  ^)
-       ALU(BPF_MUL,  *)
+       ALU(ADD,  +)
+       ALU(SUB,  -)
+       ALU(AND,  &)
+       ALU(OR,   |)
+       ALU(LSH, <<)
+       ALU(RSH, >>)
+       ALU(XOR,  ^)
+       ALU(MUL,  *)
 #undef ALU
-       BPF_ALU_BPF_NEG_0:
+       ALU_NEG:
                A = (u32) -A;
                CONT;
-       BPF_ALU64_BPF_NEG_0:
+       ALU64_NEG:
                A = -A;
                CONT;
-       BPF_ALU_BPF_MOV_BPF_X:
+       ALU_MOV_X:
                A = (u32) X;
                CONT;
-       BPF_ALU_BPF_MOV_BPF_K:
+       ALU_MOV_K:
                A = (u32) K;
                CONT;
-       BPF_ALU64_BPF_MOV_BPF_X:
+       ALU64_MOV_X:
                A = X;
                CONT;
-       BPF_ALU64_BPF_MOV_BPF_K:
+       ALU64_MOV_K:
                A = K;
                CONT;
-       BPF_ALU64_BPF_ARSH_BPF_X:
+       ALU64_ARSH_X:
                (*(s64 *) &A) >>= X;
                CONT;
-       BPF_ALU64_BPF_ARSH_BPF_K:
+       ALU64_ARSH_K:
                (*(s64 *) &A) >>= K;
                CONT;
-       BPF_ALU64_BPF_MOD_BPF_X:
+       ALU64_MOD_X:
                if (unlikely(X == 0))
                        return 0;
                tmp = A;
                A = do_div(tmp, X);
                CONT;
-       BPF_ALU_BPF_MOD_BPF_X:
+       ALU_MOD_X:
                if (unlikely(X == 0))
                        return 0;
                tmp = (u32) A;
                A = do_div(tmp, (u32) X);
                CONT;
-       BPF_ALU64_BPF_MOD_BPF_K:
+       ALU64_MOD_K:
                tmp = A;
                A = do_div(tmp, K);
                CONT;
-       BPF_ALU_BPF_MOD_BPF_K:
+       ALU_MOD_K:
                tmp = (u32) A;
                A = do_div(tmp, (u32) K);
                CONT;
-       BPF_ALU64_BPF_DIV_BPF_X:
+       ALU64_DIV_X:
                if (unlikely(X == 0))
                        return 0;
                do_div(A, X);
                CONT;
-       BPF_ALU_BPF_DIV_BPF_X:
+       ALU_DIV_X:
                if (unlikely(X == 0))
                        return 0;
                tmp = (u32) A;
                do_div(tmp, (u32) X);
                A = (u32) tmp;
                CONT;
-       BPF_ALU64_BPF_DIV_BPF_K:
+       ALU64_DIV_K:
                do_div(A, K);
                CONT;
-       BPF_ALU_BPF_DIV_BPF_K:
+       ALU_DIV_K:
                tmp = (u32) A;
                do_div(tmp, (u32) K);
                A = (u32) tmp;
                CONT;
-       BPF_ALU_BPF_END_BPF_TO_BE:
+       ALU_END_TO_BE:
                switch (K) {
                case 16:
                        A = (__force u16) cpu_to_be16(A);
@@ -347,7 +372,7 @@ select_insn:
                        break;
                }
                CONT;
-       BPF_ALU_BPF_END_BPF_TO_LE:
+       ALU_END_TO_LE:
                switch (K) {
                case 16:
                        A = (__force u16) cpu_to_le16(A);
@@ -362,142 +387,144 @@ select_insn:
                CONT;
 
        /* CALL */
-       BPF_JMP_BPF_CALL_0:
-               /* Function call scratches R1-R5 registers, preserves R6-R9,
-                * and stores return value into R0.
+       JMP_CALL:
+               /* Function call scratches BPF_R1-BPF_R5 registers,
+                * preserves BPF_R6-BPF_R9, and stores return value
+                * into BPF_R0.
                 */
-               R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3],
-                                                  regs[4], regs[5]);
+               BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
+                                                      BPF_R4, BPF_R5);
                CONT;
 
        /* JMP */
-       BPF_JMP_BPF_JA_0:
+       JMP_JA:
                insn += insn->off;
                CONT;
-       BPF_JMP_BPF_JEQ_BPF_X:
+       JMP_JEQ_X:
                if (A == X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JEQ_BPF_K:
+       JMP_JEQ_K:
                if (A == K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JNE_BPF_X:
+       JMP_JNE_X:
                if (A != X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JNE_BPF_K:
+       JMP_JNE_K:
                if (A != K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGT_BPF_X:
+       JMP_JGT_X:
                if (A > X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGT_BPF_K:
+       JMP_JGT_K:
                if (A > K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGE_BPF_X:
+       JMP_JGE_X:
                if (A >= X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGE_BPF_K:
+       JMP_JGE_K:
                if (A >= K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGT_BPF_X:
-               if (((s64)A) > ((s64)X)) {
+       JMP_JSGT_X:
+               if (((s64) A) > ((s64) X)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGT_BPF_K:
-               if (((s64)A) > ((s64)K)) {
+       JMP_JSGT_K:
+               if (((s64) A) > ((s64) K)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGE_BPF_X:
-               if (((s64)A) >= ((s64)X)) {
+       JMP_JSGE_X:
+               if (((s64) A) >= ((s64) X)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGE_BPF_K:
-               if (((s64)A) >= ((s64)K)) {
+       JMP_JSGE_K:
+               if (((s64) A) >= ((s64) K)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSET_BPF_X:
+       JMP_JSET_X:
                if (A & X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSET_BPF_K:
+       JMP_JSET_K:
                if (A & K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_EXIT_0:
-               return R0;
+       JMP_EXIT:
+               return BPF_R0;
 
        /* STX and ST and LDX */
 #define LDST(SIZEOP, SIZE)                                     \
-       BPF_STX_BPF_MEM_##SIZEOP:                               \
+       STX_MEM_##SIZEOP:                                       \
                *(SIZE *)(unsigned long) (A + insn->off) = X;   \
                CONT;                                           \
-       BPF_ST_BPF_MEM_##SIZEOP:                                \
+       ST_MEM_##SIZEOP:                                        \
                *(SIZE *)(unsigned long) (A + insn->off) = K;   \
                CONT;                                           \
-       BPF_LDX_BPF_MEM_##SIZEOP:                               \
+       LDX_MEM_##SIZEOP:                                       \
                A = *(SIZE *)(unsigned long) (X + insn->off);   \
                CONT;
 
-       LDST(BPF_B,   u8)
-       LDST(BPF_H,  u16)
-       LDST(BPF_W,  u32)
-       LDST(BPF_DW, u64)
+       LDST(B,   u8)
+       LDST(H,  u16)
+       LDST(W,  u32)
+       LDST(DW, u64)
 #undef LDST
-       BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */
+       STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */
                atomic_add((u32) X, (atomic_t *)(unsigned long)
                           (A + insn->off));
                CONT;
-       BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
+       STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
                atomic64_add((u64) X, (atomic64_t *)(unsigned long)
                             (A + insn->off));
                CONT;
-       BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
+       LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */
                off = K;
 load_word:
-               /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
-                * appearing in the programs where ctx == skb. All programs
-                * keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter()
-                * saves it in R6, internal BPF verifier will check that
-                * R6 == ctx.
+                /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
+                * appear in programs where ctx == skb. All programs
+                * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6;
+                * sk_convert_filter() saves it in BPF_R6, and the
+                * internal BPF verifier will check that BPF_R6 == ctx.
                 *
-                * BPF_ABS and BPF_IND are wrappers of function calls, so
-                * they scratch R1-R5 registers, preserve R6-R9, and store
-                * return value into R0.
+                * BPF_ABS and BPF_IND are wrappers of function calls,
+                * so they scratch BPF_R1-BPF_R5 registers, preserve
+                * BPF_R6-BPF_R9, and store return value into BPF_R0.
                 *
                 * Implicit input:
                 *   ctx
@@ -507,39 +534,39 @@ load_word:
                 *   K == 32-bit immediate
                 *
                 * Output:
-                *   R0 - 8/16/32-bit skb data converted to cpu endianness
+                *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
                 */
                ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
                if (likely(ptr != NULL)) {
-                       R0 = get_unaligned_be32(ptr);
+                       BPF_R0 = get_unaligned_be32(ptr);
                        CONT;
                }
                return 0;
-       BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
+       LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
                off = K;
 load_half:
                ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
                if (likely(ptr != NULL)) {
-                       R0 = get_unaligned_be16(ptr);
+                       BPF_R0 = get_unaligned_be16(ptr);
                        CONT;
                }
                return 0;
-       BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */
+       LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
                off = K;
 load_byte:
                ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
                if (likely(ptr != NULL)) {
-                       R0 = *(u8 *)ptr;
+                       BPF_R0 = *(u8 *)ptr;
                        CONT;
                }
                return 0;
-       BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
+       LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
                off = K + X;
                goto load_word;
-       BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
+       LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */
                off = K + X;
                goto load_half;
-       BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */
+       LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */
                off = K + X;
                goto load_byte;
 
@@ -547,24 +574,8 @@ load_byte:
                /* If we ever reach this, we have a bug somewhere. */
                WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
                return 0;
-#undef CONT_JMP
-#undef CONT
-
-#undef R0
-#undef X
-#undef A
-#undef K
 }
 
-u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
-                             const struct sock_filter_int *insni)
-    __attribute__ ((alias ("__sk_run_filter")));
-
-u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
-                         const struct sock_filter_int *insni)
-    __attribute__ ((alias ("__sk_run_filter")));
-EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
-
 /* Helper to find the offset of pkt_type in sk_buff structure. We want
  * to make sure it's still a 3-bit field starting at a byte boundary;
  * taken from arch/x86/net/bpf_jit_comp.c.
@@ -585,16 +596,14 @@ static unsigned int pkt_type_offset(void)
        return -1;
 }
 
-static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
-
-       return __skb_get_poff(skb);
+       return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
 }
 
-static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+       struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
        struct nlattr *nla;
 
        if (skb_is_nonlinear(skb))
@@ -603,19 +612,19 @@ static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
        if (skb->len < sizeof(struct nlattr))
                return 0;
 
-       if (A > skb->len - sizeof(struct nlattr))
+       if (a > skb->len - sizeof(struct nlattr))
                return 0;
 
-       nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
+       nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
        if (nla)
                return (void *) nla - (void *) skb->data;
 
        return 0;
 }
 
-static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+       struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
        struct nlattr *nla;
 
        if (skb_is_nonlinear(skb))
@@ -624,38 +633,31 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
        if (skb->len < sizeof(struct nlattr))
                return 0;
 
-       if (A > skb->len - sizeof(struct nlattr))
+       if (a > skb->len - sizeof(struct nlattr))
                return 0;
 
-       nla = (struct nlattr *) &skb->data[A];
-       if (nla->nla_len > skb->len - A)
+       nla = (struct nlattr *) &skb->data[a];
+       if (nla->nla_len > skb->len - a)
                return 0;
 
-       nla = nla_find_nested(nla, X);
+       nla = nla_find_nested(nla, x);
        if (nla)
                return (void *) nla - (void *) skb->data;
 
        return 0;
 }
 
-static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
        return raw_smp_processor_id();
 }
 
 /* note that this only generates 32-bit random numbers */
-static u64 __get_random_u32(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       return (u64)prandom_u32();
+       return prandom_u32();
 }
 
-/* Register mappings for user programs. */
-#define A_REG          0
-#define X_REG          7
-#define TMP_REG                8
-#define ARG2_REG       2
-#define ARG3_REG       3
-
 static bool convert_bpf_extensions(struct sock_filter *fp,
                                   struct sock_filter_int **insnp)
 {
@@ -665,57 +667,46 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_H;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, protocol);
+               /* A = *(u16 *) (ctx + offsetof(protocol)) */
+               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, protocol));
                insn++;
 
                /* A = ntohs(A) [emitting a nop or swap16] */
                insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
-               insn->a_reg = A_REG;
+               insn->a_reg = BPF_REG_A;
                insn->imm = 16;
                break;
 
        case SKF_AD_OFF + SKF_AD_PKTTYPE:
-               insn->code = BPF_LDX | BPF_MEM | BPF_B;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = pkt_type_offset();
+               *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
+                                   pkt_type_offset());
                if (insn->off < 0)
                        return false;
                insn++;
 
-               insn->code = BPF_ALU | BPF_AND | BPF_K;
-               insn->a_reg = A_REG;
-               insn->imm = PKT_TYPE_MAX;
+               *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
                break;
 
        case SKF_AD_OFF + SKF_AD_IFINDEX:
        case SKF_AD_OFF + SKF_AD_HATYPE:
-               if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
-                       insn->code = BPF_LDX | BPF_MEM | BPF_DW;
-               else
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-               insn->a_reg = TMP_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, dev);
+               *insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)),
+                                   BPF_REG_TMP, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, dev));
                insn++;
 
-               insn->code = BPF_JMP | BPF_JNE | BPF_K;
-               insn->a_reg = TMP_REG;
-               insn->imm = 0;
-               insn->off = 1;
+               /* if (tmp != 0) goto pc+1 */
+               *insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
                insn++;
 
-               insn->code = BPF_JMP | BPF_EXIT;
+               *insn = BPF_EXIT_INSN();
                insn++;
 
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
 
-               insn->a_reg = A_REG;
-               insn->x_reg = TMP_REG;
+               insn->a_reg = BPF_REG_A;
+               insn->x_reg = BPF_REG_TMP;
 
                if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
                        insn->code = BPF_LDX | BPF_MEM | BPF_W;
@@ -729,55 +720,45 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_MARK:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_W;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, mark);
+               *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, mark));
                break;
 
        case SKF_AD_OFF + SKF_AD_RXHASH:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_W;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, hash);
+               *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, hash));
                break;
 
        case SKF_AD_OFF + SKF_AD_QUEUE:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_H;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, queue_mapping);
+               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, queue_mapping));
                break;
 
        case SKF_AD_OFF + SKF_AD_VLAN_TAG:
        case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_H;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, vlan_tci);
+               /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, vlan_tci));
                insn++;
 
                BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
                if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
-                       insn->code = BPF_ALU | BPF_AND | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = ~VLAN_TAG_PRESENT;
+                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
+                                             ~VLAN_TAG_PRESENT);
                } else {
-                       insn->code = BPF_ALU | BPF_RSH | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 12;
+                       /* A >>= 12 */
+                       *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
                        insn++;
 
-                       insn->code = BPF_ALU | BPF_AND | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 1;
+                       /* A &= 1 */
+                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
                }
                break;
 
@@ -787,21 +768,15 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_CPU:
        case SKF_AD_OFF + SKF_AD_RANDOM:
                /* arg1 = ctx */
-               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               insn->a_reg = ARG1_REG;
-               insn->x_reg = CTX_REG;
+               *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX);
                insn++;
 
                /* arg2 = A */
-               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               insn->a_reg = ARG2_REG;
-               insn->x_reg = A_REG;
+               *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A);
                insn++;
 
                /* arg3 = X */
-               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               insn->a_reg = ARG3_REG;
-               insn->x_reg = X_REG;
+               *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X);
                insn++;
 
                /* Emit call(ctx, arg2=A, arg3=X) */
@@ -826,9 +801,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
                break;
 
        case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
-               insn->code = BPF_ALU | BPF_XOR | BPF_X;
-               insn->a_reg = A_REG;
-               insn->x_reg = X_REG;
+               /* A ^= X */
+               *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
                break;
 
        default:
@@ -878,7 +852,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
        u8 bpf_src;
 
        BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
-       BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG);
+       BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
        if (len <= 0 || len >= BPF_MAXINSNS)
                return -EINVAL;
@@ -894,9 +868,7 @@ do_pass:
        fp = prog;
 
        if (new_insn) {
-               new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               new_insn->a_reg = CTX_REG;
-               new_insn->x_reg = ARG1_REG;
+               *new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1);
        }
        new_insn++;
 
@@ -946,8 +918,8 @@ do_pass:
                                break;
 
                        insn->code = fp->code;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = X_REG;
+                       insn->a_reg = BPF_REG_A;
+                       insn->x_reg = BPF_REG_X;
                        insn->imm = fp->k;
                        break;
 
@@ -981,16 +953,16 @@ do_pass:
                                 * in compare insn.
                                 */
                                insn->code = BPF_ALU | BPF_MOV | BPF_K;
-                               insn->a_reg = TMP_REG;
+                               insn->a_reg = BPF_REG_TMP;
                                insn->imm = fp->k;
                                insn++;
 
-                               insn->a_reg = A_REG;
-                               insn->x_reg = TMP_REG;
+                               insn->a_reg = BPF_REG_A;
+                               insn->x_reg = BPF_REG_TMP;
                                bpf_src = BPF_X;
                        } else {
-                               insn->a_reg = A_REG;
-                               insn->x_reg = X_REG;
+                               insn->a_reg = BPF_REG_A;
+                               insn->x_reg = BPF_REG_X;
                                insn->imm = fp->k;
                                bpf_src = BPF_SRC(fp->code);
                        }
@@ -1024,34 +996,28 @@ do_pass:
 
                /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
                case BPF_LDX | BPF_MSH | BPF_B:
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = TMP_REG;
-                       insn->x_reg = A_REG;
+                       /* tmp = A */
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
                        insn++;
 
-                       insn->code = BPF_LD | BPF_ABS | BPF_B;
-                       insn->a_reg = A_REG;
-                       insn->imm = fp->k;
+                       /* A = BPF_R0 = *(u8 *) (skb->data + K) */
+                       *insn = BPF_LD_ABS(BPF_B, fp->k);
                        insn++;
 
-                       insn->code = BPF_ALU | BPF_AND | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 0xf;
+                       /* A &= 0xf */
+                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
                        insn++;
 
-                       insn->code = BPF_ALU | BPF_LSH | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 2;
+                       /* A <<= 2 */
+                       *insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
                        insn++;
 
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = X_REG;
-                       insn->x_reg = A_REG;
+                       /* X = A */
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
                        insn++;
 
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = TMP_REG;
+                       /* A = tmp */
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP);
                        break;
 
                /* RET_K, RET_A are remapped into 2 insns. */
@@ -1061,19 +1027,20 @@ do_pass:
                                     (BPF_RVAL(fp->code) == BPF_K ?
                                      BPF_K : BPF_X);
                        insn->a_reg = 0;
-                       insn->x_reg = A_REG;
+                       insn->x_reg = BPF_REG_A;
                        insn->imm = fp->k;
                        insn++;
 
-                       insn->code = BPF_JMP | BPF_EXIT;
+                       *insn = BPF_EXIT_INSN();
                        break;
 
                /* Store to stack. */
                case BPF_ST:
                case BPF_STX:
                        insn->code = BPF_STX | BPF_MEM | BPF_W;
-                       insn->a_reg = FP_REG;
-                       insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG;
+                       insn->a_reg = BPF_REG_FP;
+                       insn->x_reg = fp->code == BPF_ST ?
+                                     BPF_REG_A : BPF_REG_X;
                        insn->off = -(BPF_MEMWORDS - fp->k) * 4;
                        break;
 
@@ -1082,8 +1049,8 @@ do_pass:
                case BPF_LDX | BPF_MEM:
                        insn->code = BPF_LDX | BPF_MEM | BPF_W;
                        insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     A_REG : X_REG;
-                       insn->x_reg = FP_REG;
+                                     BPF_REG_A : BPF_REG_X;
+                       insn->x_reg = BPF_REG_FP;
                        insn->off = -(BPF_MEMWORDS - fp->k) * 4;
                        break;
 
@@ -1092,22 +1059,18 @@ do_pass:
                case BPF_LDX | BPF_IMM:
                        insn->code = BPF_ALU | BPF_MOV | BPF_K;
                        insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     A_REG : X_REG;
+                                     BPF_REG_A : BPF_REG_X;
                        insn->imm = fp->k;
                        break;
 
                /* X = A */
                case BPF_MISC | BPF_TAX:
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = X_REG;
-                       insn->x_reg = A_REG;
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
                        break;
 
                /* A = X */
                case BPF_MISC | BPF_TXA:
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = X_REG;
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X);
                        break;
 
                /* A = skb->len or X = skb->len */
@@ -1115,17 +1078,15 @@ do_pass:
                case BPF_LDX | BPF_W | BPF_LEN:
                        insn->code = BPF_LDX | BPF_MEM | BPF_W;
                        insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     A_REG : X_REG;
-                       insn->x_reg = CTX_REG;
+                                     BPF_REG_A : BPF_REG_X;
+                       insn->x_reg = BPF_REG_CTX;
                        insn->off = offsetof(struct sk_buff, len);
                        break;
 
                /* access seccomp_data fields */
                case BPF_LDX | BPF_ABS | BPF_W:
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = CTX_REG;
-                       insn->off = fp->k;
+                       /* A = *(u32 *) (ctx + K) */
+                       *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
                        break;
 
                default:
@@ -1432,7 +1393,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu)
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
        sk_release_orig_filter(fp);
-       bpf_jit_free(fp);
+       sk_filter_free(fp);
 }
 
 /**
@@ -1470,7 +1431,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
 
        fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
        if (fp_new) {
-               memcpy(fp_new, fp, sizeof(struct sk_filter));
+               *fp_new = *fp;
                /* As we're keeping orig_prog along in fp_new,
                 * we need to make sure we're not evicting it
                 * from the old fp.
@@ -1532,7 +1493,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
                goto out_err_free;
        }
 
-       fp->bpf_func = sk_run_filter_int_skb;
        fp->len = new_len;
 
        /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
@@ -1545,6 +1505,8 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
                 */
                goto out_err_free;
 
+       sk_filter_select_runtime(fp);
+
        kfree(old_prog);
        return fp;
 
@@ -1559,6 +1521,33 @@ out_err:
        return ERR_PTR(err);
 }
 
+void __weak bpf_int_jit_compile(struct sk_filter *prog)
+{
+}
+
+/**
+ *     sk_filter_select_runtime - select execution runtime for BPF program
+ *     @fp: sk_filter populated with internal BPF program
+ *
+ * Try to JIT the internal BPF program; if no JIT is available, select the
+ * interpreter. The BPF program will be executed via the SK_RUN_FILTER() macro.
+ */
+void sk_filter_select_runtime(struct sk_filter *fp)
+{
+       fp->bpf_func = (void *) __sk_run_filter;
+
+       /* Probe if internal BPF can be JITed */
+       bpf_int_jit_compile(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
+
+/* free internal BPF program */
+void sk_filter_free(struct sk_filter *fp)
+{
+       bpf_jit_free(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_free);
+
 static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
                                             struct sock *sk)
 {
@@ -1596,7 +1585,7 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
  * a negative errno code is returned. On success the return is zero.
  */
 int sk_unattached_filter_create(struct sk_filter **pfp,
-                               struct sock_fprog *fprog)
+                               struct sock_fprog_kern *fprog)
 {
        unsigned int fsize = sk_filter_proglen(fprog);
        struct sk_filter *fp;
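Taken together with the sk_filter_select_runtime()/sk_filter_free() pair added above, the reworked entry points can be exercised roughly as follows. This is a minimal sketch: sock_fprog_kern, sk_unattached_filter_create() and SK_RUN_FILTER() appear in this patch or its context, while sk_unattached_filter_destroy() and the trivial accept-all program are assumptions made for the example.

/* Classic BPF "accept all": BPF_RET | BPF_K, 0xffffffff */
static struct sock_filter demo_insns[] = {
	{ 0x06, 0, 0, 0xffffffff },
};

static struct sock_fprog_kern demo_prog = {
	.len	= ARRAY_SIZE(demo_insns),
	.filter	= demo_insns,
};

static int demo_run(struct sk_buff *skb)
{
	struct sk_filter *fp;
	unsigned int res;
	int err;

	/* Prepares the program; on the internal-BPF path this ends in
	 * sk_filter_select_runtime(), where bpf_int_jit_compile() may
	 * install a JIT image and fp->bpf_func otherwise stays pointing
	 * at __sk_run_filter.
	 */
	err = sk_unattached_filter_create(&fp, &demo_prog);
	if (err)
		return err;

	res = SK_RUN_FILTER(fp, skb);		/* 0 means drop */

	sk_unattached_filter_destroy(fp);	/* assumed counterpart */
	return res ? 0 : -EPERM;
}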
index 8f8a96ef9f3f64ba519fe4c872d46c7b7c680ec9..32d872eec7f5c535221898cdb45ab8f235d0b4bb 100644 (file)
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
        neigh->updated = jiffies;
        if (!(neigh->nud_state & NUD_FAILED))
                return;
-       neigh->nud_state = NUD_PROBE;
-       atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+       neigh->nud_state = NUD_INCOMPLETE;
+       atomic_set(&neigh->probes, neigh_max_probes(neigh));
        neigh_add_timer(neigh,
                        jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 }
index 81d3a9a084536541867afe9350602c0c73253006..85b62691f4f2d18b9b39bc9c610d1916f8f09273 100644 (file)
@@ -24,7 +24,7 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
@@ -273,7 +273,7 @@ static void cleanup_net(struct work_struct *work)
 {
        const struct pernet_operations *ops;
        struct net *net, *tmp;
-       LIST_HEAD(net_kill_list);
+       struct list_head net_kill_list;
        LIST_HEAD(net_exit_list);
 
        /* Atomically snapshot the list of namespaces to cleanup */
index 0304f981f7ffa5f005b28f08f17ff8264c41bb65..fc17a9d309ac028fc61ac7db6e35a05c77513a24 100644 (file)
@@ -573,7 +573,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
                   is_zero_ether_addr(pkt_dev->src_mac) ?
                             pkt_dev->odev->dev_addr : pkt_dev->src_mac);
 
-       seq_printf(seq, "dst_mac: ");
+       seq_puts(seq, "dst_mac: ");
        seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
 
        seq_printf(seq,
@@ -588,7 +588,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 
        if (pkt_dev->nr_labels) {
                unsigned int i;
-               seq_printf(seq, "     mpls: ");
+               seq_puts(seq, "     mpls: ");
                for (i = 0; i < pkt_dev->nr_labels; i++)
                        seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
                                   i == pkt_dev->nr_labels-1 ? "\n" : ", ");
@@ -613,67 +613,67 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
        if (pkt_dev->node >= 0)
                seq_printf(seq, "     node: %d\n", pkt_dev->node);
 
-       seq_printf(seq, "     Flags: ");
+       seq_puts(seq, "     Flags: ");
 
        if (pkt_dev->flags & F_IPV6)
-               seq_printf(seq, "IPV6  ");
+               seq_puts(seq, "IPV6  ");
 
        if (pkt_dev->flags & F_IPSRC_RND)
-               seq_printf(seq, "IPSRC_RND  ");
+               seq_puts(seq, "IPSRC_RND  ");
 
        if (pkt_dev->flags & F_IPDST_RND)
-               seq_printf(seq, "IPDST_RND  ");
+               seq_puts(seq, "IPDST_RND  ");
 
        if (pkt_dev->flags & F_TXSIZE_RND)
-               seq_printf(seq, "TXSIZE_RND  ");
+               seq_puts(seq, "TXSIZE_RND  ");
 
        if (pkt_dev->flags & F_UDPSRC_RND)
-               seq_printf(seq, "UDPSRC_RND  ");
+               seq_puts(seq, "UDPSRC_RND  ");
 
        if (pkt_dev->flags & F_UDPDST_RND)
-               seq_printf(seq, "UDPDST_RND  ");
+               seq_puts(seq, "UDPDST_RND  ");
 
        if (pkt_dev->flags & F_UDPCSUM)
-               seq_printf(seq, "UDPCSUM  ");
+               seq_puts(seq, "UDPCSUM  ");
 
        if (pkt_dev->flags & F_MPLS_RND)
-               seq_printf(seq,  "MPLS_RND  ");
+               seq_puts(seq,  "MPLS_RND  ");
 
        if (pkt_dev->flags & F_QUEUE_MAP_RND)
-               seq_printf(seq,  "QUEUE_MAP_RND  ");
+               seq_puts(seq,  "QUEUE_MAP_RND  ");
 
        if (pkt_dev->flags & F_QUEUE_MAP_CPU)
-               seq_printf(seq,  "QUEUE_MAP_CPU  ");
+               seq_puts(seq,  "QUEUE_MAP_CPU  ");
 
        if (pkt_dev->cflows) {
                if (pkt_dev->flags & F_FLOW_SEQ)
-                       seq_printf(seq,  "FLOW_SEQ  "); /*in sequence flows*/
+                       seq_puts(seq,  "FLOW_SEQ  "); /*in sequence flows*/
                else
-                       seq_printf(seq,  "FLOW_RND  ");
+                       seq_puts(seq,  "FLOW_RND  ");
        }
 
 #ifdef CONFIG_XFRM
        if (pkt_dev->flags & F_IPSEC_ON) {
-               seq_printf(seq,  "IPSEC  ");
+               seq_puts(seq,  "IPSEC  ");
                if (pkt_dev->spi)
                        seq_printf(seq, "spi:%u", pkt_dev->spi);
        }
 #endif
 
        if (pkt_dev->flags & F_MACSRC_RND)
-               seq_printf(seq, "MACSRC_RND  ");
+               seq_puts(seq, "MACSRC_RND  ");
 
        if (pkt_dev->flags & F_MACDST_RND)
-               seq_printf(seq, "MACDST_RND  ");
+               seq_puts(seq, "MACDST_RND  ");
 
        if (pkt_dev->flags & F_VID_RND)
-               seq_printf(seq, "VID_RND  ");
+               seq_puts(seq, "VID_RND  ");
 
        if (pkt_dev->flags & F_SVID_RND)
-               seq_printf(seq, "SVID_RND  ");
+               seq_puts(seq, "SVID_RND  ");
 
        if (pkt_dev->flags & F_NODE)
-               seq_printf(seq, "NODE_ALLOC  ");
+               seq_puts(seq, "NODE_ALLOC  ");
 
        seq_puts(seq, "\n");
 
@@ -716,7 +716,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
        if (pkt_dev->result[0])
                seq_printf(seq, "Result: %s\n", pkt_dev->result);
        else
-               seq_printf(seq, "Result: Idle\n");
+               seq_puts(seq, "Result: Idle\n");
 
        return 0;
 }
@@ -1735,14 +1735,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
 
        BUG_ON(!t);
 
-       seq_printf(seq, "Running: ");
+       seq_puts(seq, "Running: ");
 
        if_lock(t);
        list_for_each_entry(pkt_dev, &t->if_list, list)
                if (pkt_dev->running)
                        seq_printf(seq, "%s ", pkt_dev->odevname);
 
-       seq_printf(seq, "\nStopped: ");
+       seq_puts(seq, "\nStopped: ");
 
        list_for_each_entry(pkt_dev, &t->if_list, list)
                if (!pkt_dev->running)
@@ -1751,7 +1751,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
        if (t->result[0])
                seq_printf(seq, "\nResult: %s\n", t->result);
        else
-               seq_printf(seq, "\nResult: NA\n");
+               seq_puts(seq, "\nResult: NA\n");
 
        if_unlock(t);
 
index eaba0f68f8608618870a7009f27352523bee108f..d3027a73fd4bbc152f13011cb09335af365f19dc 100644 (file)
@@ -88,7 +88,7 @@ EXPORT_SYMBOL_GPL(ptp_classify_raw);
 
 void __init ptp_classifier_init(void)
 {
-       static struct sock_filter ptp_filter[] = {
+       static struct sock_filter ptp_filter[] __initdata = {
                { 0x28,  0,  0, 0x0000000c },
                { 0x15,  0, 12, 0x00000800 },
                { 0x30,  0,  0, 0x00000017 },
@@ -133,7 +133,7 @@ void __init ptp_classifier_init(void)
                { 0x16,  0,  0, 0x00000000 },
                { 0x06,  0,  0, 0x00000000 },
        };
-       struct sock_fprog ptp_prog = {
+       struct sock_fprog_kern ptp_prog = {
                .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
        };
 
index d4ff41739b0f23fcb572905dd34288cb1d8ebd49..f31268dbc0d1b983e10277666b2788f6fb03ac13 100644 (file)
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+       struct net *net;
+       bool unregistering;
+       DEFINE_WAIT(wait);
+
+       for (;;) {
+               prepare_to_wait(&netdev_unregistering_wq, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               unregistering = false;
+               rtnl_lock();
+               for_each_net(net) {
+                       if (net->dev_unreg_count > 0) {
+                               unregistering = true;
+                               break;
+                       }
+               }
+               if (!unregistering)
+                       break;
+               __rtnl_unlock();
+               schedule();
+       }
+       finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 /**
  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
  * @ops: struct rtnl_link_ops * to unregister
  */
 void rtnl_link_unregister(struct rtnl_link_ops *ops)
 {
-       rtnl_lock();
+       /* Close the race with cleanup_net() */
+       mutex_lock(&net_mutex);
+       rtnl_lock_unregistering_all();
        __rtnl_link_unregister(ops);
        rtnl_unlock();
+       mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
@@ -767,14 +798,15 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
                size += num_vfs *
                        (nla_total_size(sizeof(struct ifla_vf_mac)) +
                         nla_total_size(sizeof(struct ifla_vf_vlan)) +
-                        nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
-                        nla_total_size(sizeof(struct ifla_vf_spoofchk)));
+                        nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
+                        nla_total_size(sizeof(struct ifla_vf_rate)));
                return size;
        } else
                return 0;
 }
 
-static size_t rtnl_port_size(const struct net_device *dev)
+static size_t rtnl_port_size(const struct net_device *dev,
+                            u32 ext_filter_mask)
 {
        size_t port_size = nla_total_size(4)            /* PORT_VF */
                + nla_total_size(PORT_PROFILE_MAX)      /* PORT_PROFILE */
@@ -790,7 +822,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
        size_t port_self_size = nla_total_size(sizeof(struct nlattr))
                + port_size;
 
-       if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+       if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+           !(ext_filter_mask & RTEXT_FILTER_VF))
                return 0;
        if (dev_num_vf(dev->dev.parent))
                return port_self_size + vf_ports_size +
@@ -826,7 +859,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + nla_total_size(ext_filter_mask
                                & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
-              + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+              + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
               + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
               + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
               + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
@@ -888,11 +921,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
        return 0;
 }
 
-static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
+static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
+                         u32 ext_filter_mask)
 {
        int err;
 
-       if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+       if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+           !(ext_filter_mask & RTEXT_FILTER_VF))
                return 0;
 
        err = rtnl_port_self_fill(skb, dev);
@@ -1030,6 +1065,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        struct ifla_vf_info ivi;
                        struct ifla_vf_mac vf_mac;
                        struct ifla_vf_vlan vf_vlan;
+                       struct ifla_vf_rate vf_rate;
                        struct ifla_vf_tx_rate vf_tx_rate;
                        struct ifla_vf_spoofchk vf_spoofchk;
                        struct ifla_vf_link_state vf_linkstate;
@@ -1050,6 +1086,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                                break;
                        vf_mac.vf =
                                vf_vlan.vf =
+                               vf_rate.vf =
                                vf_tx_rate.vf =
                                vf_spoofchk.vf =
                                vf_linkstate.vf = ivi.vf;
@@ -1057,7 +1094,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
                        vf_vlan.vlan = ivi.vlan;
                        vf_vlan.qos = ivi.qos;
-                       vf_tx_rate.rate = ivi.tx_rate;
+                       vf_tx_rate.rate = ivi.max_tx_rate;
+                       vf_rate.min_tx_rate = ivi.min_tx_rate;
+                       vf_rate.max_tx_rate = ivi.max_tx_rate;
                        vf_spoofchk.setting = ivi.spoofchk;
                        vf_linkstate.link_state = ivi.linkstate;
                        vf = nla_nest_start(skb, IFLA_VF_INFO);
@@ -1067,6 +1106,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        }
                        if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
                            nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
+                           nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
+                                   &vf_rate) ||
                            nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
                                    &vf_tx_rate) ||
                            nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
@@ -1079,7 +1120,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                nla_nest_end(skb, vfinfo);
        }
 
-       if (rtnl_port_fill(skb, dev))
+       if (rtnl_port_fill(skb, dev, ext_filter_mask))
                goto nla_put_failure;
 
        if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
@@ -1173,6 +1214,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
                                    .len = sizeof(struct ifla_vf_tx_rate) },
        [IFLA_VF_SPOOFCHK]      = { .type = NLA_BINARY,
                                    .len = sizeof(struct ifla_vf_spoofchk) },
+       [IFLA_VF_RATE]          = { .type = NLA_BINARY,
+                                   .len = sizeof(struct ifla_vf_rate) },
 };
 
 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@ -1198,6 +1241,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        struct hlist_head *head;
        struct nlattr *tb[IFLA_MAX+1];
        u32 ext_filter_mask = 0;
+       int err;
 
        s_h = cb->args[0];
        s_idx = cb->args[1];
@@ -1218,11 +1262,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
-                       if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
-                                            NETLINK_CB(cb->skb).portid,
-                                            cb->nlh->nlmsg_seq, 0,
-                                            NLM_F_MULTI,
-                                            ext_filter_mask) <= 0)
+                       err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+                                              NETLINK_CB(cb->skb).portid,
+                                              cb->nlh->nlmsg_seq, 0,
+                                              NLM_F_MULTI,
+                                              ext_filter_mask);
+                       /* If we ran out of room on the first message,
+                        * we're in trouble
+                        */
+                       WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+
+                       if (err <= 0)
                                goto out;
 
                        nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1325,11 +1375,29 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
                }
                case IFLA_VF_TX_RATE: {
                        struct ifla_vf_tx_rate *ivt;
+                       struct ifla_vf_info ivf;
+                       ivt = nla_data(vf);
+                       err = -EOPNOTSUPP;
+                       if (ops->ndo_get_vf_config)
+                               err = ops->ndo_get_vf_config(dev, ivt->vf,
+                                                            &ivf);
+                       if (err)
+                               break;
+                       err = -EOPNOTSUPP;
+                       if (ops->ndo_set_vf_rate)
+                               err = ops->ndo_set_vf_rate(dev, ivt->vf,
+                                                          ivf.min_tx_rate,
+                                                          ivt->rate);
+                       break;
+               }
+               case IFLA_VF_RATE: {
+                       struct ifla_vf_rate *ivt;
                        ivt = nla_data(vf);
                        err = -EOPNOTSUPP;
-                       if (ops->ndo_set_vf_tx_rate)
-                               err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
-                                                             ivt->rate);
+                       if (ops->ndo_set_vf_rate)
+                               err = ops->ndo_set_vf_rate(dev, ivt->vf,
+                                                          ivt->min_tx_rate,
+                                                          ivt->max_tx_rate);
                        break;
                }
                case IFLA_VF_SPOOFCHK: {
@@ -1395,7 +1463,8 @@ static int do_set_master(struct net_device *dev, int ifindex)
        return 0;
 }
 
-static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+static int do_setlink(const struct sk_buff *skb,
+                     struct net_device *dev, struct ifinfomsg *ifm,
                      struct nlattr **tb, char *ifname, int modified)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -1407,7 +1476,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                        err = PTR_ERR(net);
                        goto errout;
                }
-               if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+               if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto errout;
                }
@@ -1661,7 +1730,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (err < 0)
                goto errout;
 
-       err = do_setlink(dev, ifm, tb, ifname, 0);
+       err = do_setlink(skb, dev, ifm, tb, ifname, 0);
 errout:
        return err;
 }
@@ -1778,7 +1847,8 @@ err:
 }
 EXPORT_SYMBOL(rtnl_create_link);
 
-static int rtnl_group_changelink(struct net *net, int group,
+static int rtnl_group_changelink(const struct sk_buff *skb,
+               struct net *net, int group,
                struct ifinfomsg *ifm,
                struct nlattr **tb)
 {
@@ -1787,7 +1857,7 @@ static int rtnl_group_changelink(struct net *net, int group,
 
        for_each_netdev(net, dev) {
                if (dev->group == group) {
-                       err = do_setlink(dev, ifm, tb, NULL, 0);
+                       err = do_setlink(skb, dev, ifm, tb, NULL, 0);
                        if (err < 0)
                                return err;
                }
@@ -1929,12 +1999,12 @@ replay:
                                modified = 1;
                        }
 
-                       return do_setlink(dev, ifm, tb, ifname, modified);
+                       return do_setlink(skb, dev, ifm, tb, ifname, modified);
                }
 
                if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
                        if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
-                               return rtnl_group_changelink(net,
+                               return rtnl_group_changelink(skb, net,
                                                nla_get_u32(tb[IFLA_GROUP]),
                                                ifm, tb);
                        return -ENODEV;
@@ -2321,7 +2391,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
        int err = -EINVAL;
        __u8 *addr;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
@@ -2773,7 +2843,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        sz_idx = type>>2;
        kind = type&3;
 
-       if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
+       if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
index 1b62343f58378b3d8fc0e3ea048dbb45ce1e3a76..3f6c7e8be8a4ae912afafd744c8bffb4e70c76dc 100644 (file)
@@ -694,7 +694,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->csum               = old->csum;
-       new->local_df           = old->local_df;
+       new->ignore_df          = old->ignore_df;
        new->pkt_type           = old->pkt_type;
        new->ip_summed          = old->ip_summed;
        skb_copy_queue_mapping(new, old);
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        if (unlikely(p->len + len >= 65536))
                return -E2BIG;
 
-       lp = NAPI_GRO_CB(p)->last ?: p;
+       lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);
 
        if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
 
        __skb_pull(skb, offset);
 
-       if (!NAPI_GRO_CB(p)->last)
+       if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
@@ -3913,7 +3913,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->tstamp.tv64 = 0;
        skb->pkt_type = PACKET_HOST;
        skb->skb_iif = 0;
-       skb->local_df = 0;
+       skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb->mark = 0;
        secpath_reset(skb);
index b4fff008136fafcca363e3a41ef441c2a1be878b..026e01f70274f8a5550af6a7a06fd6846735e4ae 100644 (file)
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+/**
+ * sk_ns_capable - General socket capability test
+ * @sk: Socket to use a capability on or through
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap when
+ * the socket was created and whether the current process has it in the
+ * user namespace @user_ns.
+ */
+bool sk_ns_capable(const struct sock *sk,
+                  struct user_namespace *user_ns, int cap)
+{
+       return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
+               ns_capable(user_ns, cap);
+}
+EXPORT_SYMBOL(sk_ns_capable);
+
+/**
+ * sk_capable - Socket global capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The global capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap when
+ * the socket was created and whether the current process has it in all
+ * user namespaces.
+ */
+bool sk_capable(const struct sock *sk, int cap)
+{
+       return sk_ns_capable(sk, &init_user_ns, cap);
+}
+EXPORT_SYMBOL(sk_capable);
+
+/**
+ * sk_net_capable - Network namespace socket capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap when the
+ * socket was created and whether the current process has it over the network
+ * namespace the socket is a member of.
+ */
+bool sk_net_capable(const struct sock *sk, int cap)
+{
+       return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
+}
+EXPORT_SYMBOL(sk_net_capable);
+
+
 #ifdef CONFIG_MEMCG_KMEM
 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
@@ -735,7 +784,7 @@ set_rcvbuf:
                break;
 
        case SO_NO_CHECK:
-               sk->sk_no_check = valbool;
+               sk->sk_no_check_tx = valbool;
                break;
 
        case SO_PRIORITY:
@@ -1015,7 +1064,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        case SO_NO_CHECK:
-               v.val = sk->sk_no_check;
+               v.val = sk->sk_no_check_tx;
                break;
 
        case SO_PRIORITY:
index d7af1885932269eb9f4196fe1211a6d09e298b97..a4216a4c95720f105b0cd7841cd59e84d3dda7a6 100644 (file)
@@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
 }
 EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
 
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
                             struct sk_buff *skb, int attrtype)
 {
        struct sock_fprog_kern *fprog;
@@ -58,7 +58,7 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
        unsigned int flen;
        int err = 0;
 
-       if (!ns_capable(user_ns, CAP_NET_ADMIN)) {
+       if (!may_report_filterinfo) {
                nla_reserve(skb, attrtype, 0);
                return 0;
        }
diff --git a/net/core/tso.c b/net/core/tso.c
new file mode 100644 (file)
index 0000000..8c3203c
--- /dev/null
@@ -0,0 +1,77 @@
+#include <linux/export.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+/* Calculate expected number of TX descriptors */
+int tso_count_descs(struct sk_buff *skb)
+{
+       /* The Marvell Way */
+       return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
+}
+EXPORT_SYMBOL(tso_count_descs);
+
+void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+                  int size, bool is_last)
+{
+       struct iphdr *iph;
+       struct tcphdr *tcph;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int mac_hdr_len = skb_network_offset(skb);
+
+       memcpy(hdr, skb->data, hdr_len);
+       iph = (struct iphdr *)(hdr + mac_hdr_len);
+       iph->id = htons(tso->ip_id);
+       iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+       tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
+       tcph->seq = htonl(tso->tcp_seq);
+       tso->ip_id++;
+
+       if (!is_last) {
+               /* Clear flags that should only be set on the last segment */
+               tcph->psh = 0;
+               tcph->fin = 0;
+               tcph->rst = 0;
+       }
+}
+EXPORT_SYMBOL(tso_build_hdr);
+
+void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
+{
+       tso->tcp_seq += size;
+       tso->size -= size;
+       tso->data += size;
+
+       if ((tso->size == 0) &&
+           (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
+
+               /* Move to next segment */
+               tso->size = frag->size;
+               tso->data = page_address(frag->page.p) + frag->page_offset;
+               tso->next_frag_idx++;
+       }
+}
+EXPORT_SYMBOL(tso_build_data);
+
+void tso_start(struct sk_buff *skb, struct tso_t *tso)
+{
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+       tso->ip_id = ntohs(ip_hdr(skb)->id);
+       tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
+       tso->next_frag_idx = 0;
+
+       /* Build first data */
+       tso->size = skb_headlen(skb) - hdr_len;
+       tso->data = skb->data + hdr_len;
+       if ((tso->size == 0) &&
+           (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
+
+               /* Move to next segment */
+               tso->size = frag->size;
+               tso->data = page_address(frag->page.p) + frag->page_offset;
+               tso->next_frag_idx++;
+       }
+}
+EXPORT_SYMBOL(tso_start);
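
A minimal sketch of how a driver is expected to consume this new TSO helper API (illustrative only; struct my_tx_queue and the my_*() descriptor helpers are hypothetical stand-ins for driver-specific code):

    #include <net/tso.h>

    /* Sketch: split a GSO skb into gso_size-sized packets on the TX path. */
    static int my_xmit_tso(struct my_tx_queue *txq, struct sk_buff *skb)
    {
            int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
            int total_len = skb->len - hdr_len;
            struct tso_t tso;

            /* Reserve ring space for the worst case up front. */
            if (my_ring_space(txq) < tso_count_descs(skb))
                    return -EBUSY;

            tso_start(skb, &tso);

            while (total_len > 0) {
                    int data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
                    char *hdr = my_get_hdr_buffer(txq);  /* per-packet header copy */

                    total_len -= data_left;

                    /* Refresh IP id, TCP seq and flags for this segment. */
                    tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
                    my_queue_hdr_desc(txq, hdr, hdr_len);

                    /* Attach payload, letting the helper walk the frags. */
                    while (data_left > 0) {
                            int size = min_t(int, tso.size, data_left);

                            my_queue_data_desc(txq, tso.data, size);
                            data_left -= size;
                            tso_build_data(skb, &tso, size);
                    }
            }

            return 0;
    }
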
index 2f737bf90b3fe4235c75ccca6640c735ecaa076b..eed34338736c275aa02bfa40448801d46dda736b 100644 (file)
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
 {
        struct __net_random_once_work *work =
                container_of(w, struct __net_random_once_work, work);
-       if (!static_key_enabled(work->key))
-               static_key_slow_inc(work->key);
+       BUG_ON(!static_key_enabled(work->key));
+       static_key_slow_dec(work->key);
        kfree(work);
 }
 
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
 }
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
-                          struct static_key *done_key)
+                          struct static_key *once_key)
 {
        static DEFINE_SPINLOCK(lock);
        unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
        *done = true;
        spin_unlock_irqrestore(&lock, flags);
 
-       __net_random_once_disable_jump(done_key);
+       __net_random_once_disable_jump(once_key);
 
        return true;
 }
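
This flips the deferred work from enabling to disabling the static key: the once-branch now starts enabled, and the first caller that seeds the buffer schedules the disable so later calls skip it entirely. A small usage sketch of the net_get_random_once() wrapper around __net_get_random_once() (illustrative only; hash_secret and my_hash() are hypothetical):

    #include <linux/net.h>
    #include <linux/jhash.h>

    static u32 hash_secret __read_mostly;

    /* Sketch: lazily seed a hash secret exactly once, then hash with it. */
    static u32 my_hash(const void *data, u32 len)
    {
            net_get_random_once(&hash_secret, sizeof(hash_secret));
            return jhash(data, len, hash_secret);
    }
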
index 553644402670b3461bde7fb26705ddde8729be75..f8b98d89c28527f049b4c3132aa7f0b412cfa0ef 100644 (file)
@@ -1669,7 +1669,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlmsghdr *reply_nlh = NULL;
        const struct reply_func *fn;
 
-       if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN))
+       if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
index 22b5d818b2001b177b765cbb67eb2551e87502ce..6ca645c4b48e8b56a88f7d549b073fb6dc82eeb7 100644 (file)
@@ -1024,7 +1024,6 @@ static struct inet_protosw dccp_v4_protosw = {
        .protocol       = IPPROTO_DCCP,
        .prot           = &dccp_v4_prot,
        .ops            = &inet_dccp_ops,
-       .no_check       = 0,
        .flags          = INET_PROTOSW_ICSK,
 };
 
index eb892b4f48144966e47f386108942f51b8b85e50..de2c1e7193057dee2e994386f5bb685e05b47163 100644 (file)
@@ -1084,14 +1084,15 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
 
 static inline int dccp_mib_init(void)
 {
-       return snmp_mib_init((void __percpu **)dccp_statistics,
-                            sizeof(struct dccp_mib),
-                            __alignof__(struct dccp_mib));
+       dccp_statistics = alloc_percpu(struct dccp_mib);
+       if (!dccp_statistics)
+               return -ENOMEM;
+       return 0;
 }
 
 static inline void dccp_mib_exit(void)
 {
-       snmp_mib_free((void __percpu **)dccp_statistics);
+       free_percpu(dccp_statistics);
 }
 
 static int thash_entries;
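
The DCCP MIBs move from the old snmp_mib_init() scheme to a plain alloc_percpu() allocation. A generic sketch of the resulting per-CPU counter pattern (illustrative only; struct my_mib and the my_*() names are hypothetical):

    #include <linux/percpu.h>

    struct my_mib {
            unsigned long mibs[8];
    };

    static struct my_mib __percpu *my_statistics;

    static int my_mib_init(void)
    {
            my_statistics = alloc_percpu(struct my_mib);
            if (!my_statistics)
                    return -ENOMEM;
            return 0;
    }

    static void my_mib_count(int field)
    {
            /* Per-CPU increment, no locking needed on the fast path. */
            this_cpu_inc(my_statistics->mibs[field]);
    }

    static void my_mib_exit(void)
    {
            free_percpu(my_statistics);
    }
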
index 607ab71b5a0cb3af65067e7d69badb862d5bfb5a..53731e45403c83ba2edf48da01e71aeda48359b0 100644 (file)
@@ -20,6 +20,7 @@
 
 /* Boundary values */
 static int             zero     = 0,
+                       one      = 1,
                        u8_max   = 0xFF;
 static unsigned long   seqw_min = DCCPF_SEQ_WMIN,
                        seqw_max = 0xFFFFFFFF;          /* maximum on 32 bit */
@@ -58,7 +59,7 @@ static struct ctl_table dccp_default_table[] = {
                .maxlen         = sizeof(sysctl_dccp_request_retries),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &zero,
+               .extra1         = &one,
                .extra2         = &u8_max,
        },
        {
index 16f0b223102e6619b3b3430a108be740e2e7b373..1cd46a345cb04387a50843a251637b6e3cbd7501 100644 (file)
@@ -280,7 +280,7 @@ static ktime_t dccp_timestamp_seed;
  */
 u32 dccp_timestamp(void)
 {
-       s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);
+       u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);
 
        do_div(delta, 10);
        return delta;
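
do_div() requires an unsigned 64-bit dividend (it divides in place and returns the remainder), which is why the delta becomes a u64 here. A small sketch of that contract (illustrative only):

    #include <asm/div64.h>

    /* Sketch: do_div() modifies its first argument in place. */
    static u32 usecs_to_dccp_ticks(u64 delta_us)
    {
            u32 rem;

            rem = do_div(delta_us, 10);     /* delta_us now holds the quotient */
            (void)rem;                      /* remainder discarded, as in dccp_timestamp() */
            return (u32)delta_us;           /* DCCP timestamps tick every 10 microseconds */
    }
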
index 4c04848953bdb4caddeae9debd195ea3d004ee0d..ae011b46c0710fc96204deda6bddb1d912d78f22 100644 (file)
@@ -481,7 +481,7 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf
 
        sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
        sk->sk_destruct    = dn_destruct;
-       sk->sk_no_check    = 1;
+       sk->sk_no_check_tx = 1;
        sk->sk_family      = PF_DECnet;
        sk->sk_protocol    = 0;
        sk->sk_allocation  = gfp;
index a603823a3e279c850d1641e5f4d59be983400488..3b726f31c64c0b88efcfacd4d64df7177260ad5c 100644 (file)
@@ -574,7 +574,7 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct dn_ifaddr __rcu **ifap;
        int err = -EINVAL;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
@@ -618,7 +618,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct dn_ifaddr *ifa;
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
index 57dc159245ecfff38e318626cf0ea1ffa9db1cae..d332aefb0846f86a11d924e3e1e7ad23e279dda2 100644 (file)
@@ -505,7 +505,7 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlattr *attrs[RTA_MAX+1];
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
@@ -530,7 +530,7 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlattr *attrs[RTA_MAX+1];
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
index e83015cecfa7507d551bd19e4b4121ad0f25eeaf..e4d9560a910b0eb96ed3a4ad59d63771f865de3c 100644 (file)
@@ -107,7 +107,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
        if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
                return;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                RCV_SKB_FAIL(-EPERM);
 
        /* Eventually we might send routing messages too */
index 0eb5d5e76dfbe1f99537e8a561f1671389c09e16..5db37cef50a9ccd80c642118f54dd4ecf04219b8 100644 (file)
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
                goto out_free;
        }
 
-       chip_index = 0;
+       chip_index = -1;
        for_each_available_child_of_node(np, child) {
+               chip_index++;
                cd = &pd->chip[chip_index];
 
                cd->mii_bus = &mdio_bus->dev;
index 02c0e1716f641c947a5b906ee82c755e568a4a7c..64c5af0a10dd82169ccada3d82baa866a5b82cc8 100644 (file)
@@ -346,7 +346,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
                return slave_dev;
 
        slave_dev->features = master->vlan_features;
-       SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
+       slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        eth_hw_addr_inherit(slave_dev, master);
        slave_dev->tx_queue_len = 0;
 
index 0f5a69ed746d8384891ff9e3fd1aa570f04f91d5..1ae8a5628fb5b5e188926d8ae7e3d5e747fac092 100644 (file)
@@ -92,6 +92,7 @@ static int lowpan_header_create(struct sk_buff *skb,
        const u8 *saddr = _saddr;
        const u8 *daddr = _daddr;
        struct ieee802154_addr sa, da;
+       struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 
        /* TODO:
         * if this package isn't ipv6 one, where should it be routed?
@@ -115,8 +116,7 @@ static int lowpan_header_create(struct sk_buff *skb,
         * from MAC subif of the 'dev' and 'real_dev' network devices, but
         * this isn't implemented in mainline yet, so currently we assign 0xff
         */
-       mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
-       mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+       cb->type = IEEE802154_FC_TYPE_DATA;
 
        /* prepare wpan address data */
        sa.mode = IEEE802154_ADDR_LONG;
@@ -135,11 +135,10 @@ static int lowpan_header_create(struct sk_buff *skb,
        } else {
                da.mode = IEEE802154_ADDR_LONG;
                da.extended_addr = ieee802154_devaddr_from_raw(daddr);
-
-               /* request acknowledgment */
-               mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
        }
 
+       cb->ackreq = !lowpan_is_addr_broadcast(daddr);
+
        return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
                        type, (void *)&da, (void *)&sa, 0);
 }
@@ -221,139 +220,149 @@ static int lowpan_set_address(struct net_device *dev, void *p)
        return 0;
 }
 
-static int
-lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
-                    int mlen, int plen, int offset, int type)
+static struct sk_buff*
+lowpan_alloc_frag(struct sk_buff *skb, int size,
+                 const struct ieee802154_hdr *master_hdr)
 {
+       struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
        struct sk_buff *frag;
-       int hlen;
-
-       hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
-                       LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
-
-       raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
+       int rc;
+
+       frag = alloc_skb(real_dev->hard_header_len +
+                        real_dev->needed_tailroom + size,
+                        GFP_ATOMIC);
+
+       if (likely(frag)) {
+               frag->dev = real_dev;
+               frag->priority = skb->priority;
+               skb_reserve(frag, real_dev->hard_header_len);
+               skb_reset_network_header(frag);
+               *mac_cb(frag) = *mac_cb(skb);
+
+               rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
+                                    &master_hdr->source, size);
+               if (rc < 0) {
+                       kfree_skb(frag);
+                       return ERR_PTR(rc);
+               }
+       } else {
+               frag = ERR_PTR(-ENOMEM);
+       }
 
-       frag = netdev_alloc_skb(skb->dev,
-                               hlen + mlen + plen + IEEE802154_MFR_SIZE);
-       if (!frag)
-               return -ENOMEM;
+       return frag;
+}
 
-       frag->priority = skb->priority;
+static int
+lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
+                    u8 *frag_hdr, int frag_hdrlen,
+                    int offset, int len)
+{
+       struct sk_buff *frag;
 
-       /* copy header, MFR and payload */
-       skb_put(frag, mlen);
-       skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
+       raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
 
-       skb_put(frag, hlen);
-       skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
+       frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
+       if (IS_ERR(frag))
+               return PTR_ERR(frag);
 
-       skb_put(frag, plen);
-       skb_copy_to_linear_data_offset(frag, mlen + hlen,
-                                      skb_network_header(skb) + offset, plen);
+       memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
+       memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
 
-       raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
+       raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
 
        return dev_queue_xmit(frag);
 }
 
 static int
-lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
+lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
+                      const struct ieee802154_hdr *wpan_hdr)
 {
-       int err;
-       u16 dgram_offset, dgram_size, payload_length, header_length,
-           lowpan_size, frag_plen, offset;
-       __be16 tag;
-       u8 head[5];
-
-       header_length = skb->mac_len;
-       payload_length = skb->len - header_length;
-       tag = lowpan_dev_info(dev)->fragment_tag++;
-       lowpan_size = skb_network_header_len(skb);
+       u16 dgram_size, dgram_offset;
+       __be16 frag_tag;
+       u8 frag_hdr[5];
+       int frag_cap, frag_len, payload_cap, rc;
+       int skb_unprocessed, skb_offset;
+
        dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
-                    header_length;
+                    skb->mac_len;
+       frag_tag = lowpan_dev_info(dev)->fragment_tag++;
 
-       /* first fragment header */
-       head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7);
-       head[1] = dgram_size & 0xff;
-       memcpy(head + 2, &tag, sizeof(tag));
+       frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
+       frag_hdr[1] = dgram_size & 0xff;
+       memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
 
-       /* calc the nearest payload length(divided to 8) for first fragment
-        * which fits into a IEEE802154_MTU
-        */
-       frag_plen = round_down(IEEE802154_MTU - header_length -
-                              LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
-                              IEEE802154_MFR_SIZE, 8);
-
-       err = lowpan_fragment_xmit(skb, head, header_length,
-                                  frag_plen + lowpan_size, 0,
-                                  LOWPAN_DISPATCH_FRAG1);
-       if (err) {
-               pr_debug("%s unable to send FRAG1 packet (tag: %d)",
-                        __func__, tag);
-               goto exit;
-       }
+       payload_cap = ieee802154_max_payload(wpan_hdr);
 
-       offset = lowpan_size + frag_plen;
-       dgram_offset += frag_plen;
+       frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
+                             skb_network_header_len(skb), 8);
 
-       /* next fragment header */
-       head[0] &= ~LOWPAN_DISPATCH_FRAG1;
-       head[0] |= LOWPAN_DISPATCH_FRAGN;
+       skb_offset = skb_network_header_len(skb);
+       skb_unprocessed = skb->len - skb->mac_len - skb_offset;
 
-       frag_plen = round_down(IEEE802154_MTU - header_length -
-                              LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8);
+       rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
+                                 LOWPAN_FRAG1_HEAD_SIZE, 0,
+                                 frag_len + skb_network_header_len(skb));
+       if (rc) {
+               pr_debug("%s unable to send FRAG1 packet (tag: %d)",
+                        __func__, frag_tag);
+               goto err;
+       }
 
-       while (payload_length - offset > 0) {
-               int len = frag_plen;
+       frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
+       frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
+       frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
 
-               head[4] = dgram_offset >> 3;
+       while (skb_unprocessed >= frag_cap) {
+               dgram_offset += frag_len;
+               skb_offset += frag_len;
+               skb_unprocessed -= frag_len;
+               frag_len = min(frag_cap, skb_unprocessed);
 
-               if (payload_length - offset < len)
-                       len = payload_length - offset;
+               frag_hdr[4] = dgram_offset >> 3;
 
-               err = lowpan_fragment_xmit(skb, head, header_length, len,
-                                          offset, LOWPAN_DISPATCH_FRAGN);
-               if (err) {
+               rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
+                                         LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
+                                         frag_len);
+               if (rc) {
                        pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
-                                __func__, tag, offset);
-                       goto exit;
+                                __func__, frag_tag, skb_offset);
+                       goto err;
                }
-
-               offset += len;
-               dgram_offset += len;
        }
 
-exit:
-       return err;
+       consume_skb(skb);
+       return NET_XMIT_SUCCESS;
+
+err:
+       kfree_skb(skb);
+       return rc;
 }
 
 static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       int err = -1;
+       struct ieee802154_hdr wpan_hdr;
+       int max_single;
 
        pr_debug("package xmit\n");
 
-       skb->dev = lowpan_dev_info(dev)->real_dev;
-       if (skb->dev == NULL) {
-               pr_debug("ERROR: no real wpan device found\n");
-               goto error;
+       if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
+               kfree_skb(skb);
+               return NET_XMIT_DROP;
        }
 
-       /* Send directly if less than the MTU minus the 2 checksum bytes. */
-       if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
-               err = dev_queue_xmit(skb);
-               goto out;
-       }
+       max_single = ieee802154_max_payload(&wpan_hdr);
 
-       pr_debug("frame is too big, fragmentation is needed\n");
-       err = lowpan_skb_fragmentation(skb, dev);
-error:
-       dev_kfree_skb(skb);
-out:
-       if (err)
-               pr_debug("ERROR: xmit failed\n");
+       if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
+               skb->dev = lowpan_dev_info(dev)->real_dev;
+               return dev_queue_xmit(skb);
+       } else {
+               netdev_tx_t rc;
+
+               pr_debug("frame is too big, fragmentation is needed\n");
+               rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
 
-       return (err < 0) ? NET_XMIT_DROP : err;
+               return rc < 0 ? NET_XMIT_DROP : rc;
+       }
 }
 
 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
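
A worked example of the fragment headers the rewritten path emits (illustrative numbers assuming the usual RFC 4944 dispatch values; nothing here comes from a real capture):

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    /* Sketch: FRAG1/FRAGN header bytes for dgram_size = 1280 (0x500),
     * tag = 0x1234 and a FRAGN datagram offset of 96 octets, with
     * LOWPAN_DISPATCH_FRAG1 = 0xc0 and LOWPAN_DISPATCH_FRAGN = 0xe0. */
    static void example_frag_headers(void)
    {
            u16 dgram_size = 1280;
            __be16 tag = cpu_to_be16(0x1234);
            u8 frag1[4], fragn[5];

            frag1[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07); /* 0xc5 */
            frag1[1] = dgram_size & 0xff;                                  /* 0x00 */
            memcpy(frag1 + 2, &tag, sizeof(tag));                          /* 0x12 0x34 */

            fragn[0] = LOWPAN_DISPATCH_FRAGN | ((dgram_size >> 8) & 0x07); /* 0xe5 */
            fragn[1] = dgram_size & 0xff;
            memcpy(fragn + 2, &tag, sizeof(tag));
            fragn[4] = 96 >> 3;     /* dgram_offset in units of 8 octets -> 12 */

            (void)frag1;
            (void)fragn;
    }
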
index 786437bc0c08531785d3f5fa1deb5bff8efe9e62..4f0ed8780194502465f0d5b60383bf6794bfadf0 100644 (file)
@@ -21,6 +21,7 @@
  * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
  */
 
+#include <linux/capability.h>
 #include <linux/net.h>
 #include <linux/module.h>
 #include <linux/if_arp.h>
@@ -45,7 +46,12 @@ struct dgram_sock {
        struct ieee802154_addr dst_addr;
 
        unsigned int bound:1;
+       unsigned int connected:1;
        unsigned int want_ack:1;
+       unsigned int secen:1;
+       unsigned int secen_override:1;
+       unsigned int seclevel:3;
+       unsigned int seclevel_override:1;
 };
 
 static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -73,10 +79,7 @@ static int dgram_init(struct sock *sk)
 {
        struct dgram_sock *ro = dgram_sk(sk);
 
-       ro->dst_addr.mode = IEEE802154_ADDR_LONG;
-       ro->dst_addr.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
        ro->want_ack = 1;
-       memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
        return 0;
 }
 
@@ -183,6 +186,7 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
        }
 
        ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
+       ro->connected = 1;
 
 out:
        release_sock(sk);
@@ -194,10 +198,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
        struct dgram_sock *ro = dgram_sk(sk);
 
        lock_sock(sk);
-
-       ro->dst_addr.mode = IEEE802154_ADDR_LONG;
-       memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
-
+       ro->connected = 0;
        release_sock(sk);
 
        return 0;
@@ -209,7 +210,9 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        struct net_device *dev;
        unsigned int mtu;
        struct sk_buff *skb;
+       struct ieee802154_mac_cb *cb;
        struct dgram_sock *ro = dgram_sk(sk);
+       struct ieee802154_addr dst_addr;
        int hlen, tlen;
        int err;
 
@@ -218,6 +221,11 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
                return -EOPNOTSUPP;
        }
 
+       if (!ro->connected && !msg->msg_name)
+               return -EDESTADDRREQ;
+       else if (ro->connected && msg->msg_name)
+               return -EISCONN;
+
        if (!ro->bound)
                dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
        else
@@ -249,18 +257,28 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 
        skb_reset_network_header(skb);
 
-       mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
-       if (ro->want_ack)
-               mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
+       cb = mac_cb_init(skb);
+       cb->type = IEEE802154_FC_TYPE_DATA;
+       cb->ackreq = ro->want_ack;
+
+       if (msg->msg_name) {
+               DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
 
-       mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
-       err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr,
-                       ro->bound ? &ro->src_addr : NULL, size);
+               ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
+       } else {
+               dst_addr = ro->dst_addr;
+       }
+
+       cb->secen = ro->secen;
+       cb->secen_override = ro->secen_override;
+       cb->seclevel = ro->seclevel;
+       cb->seclevel_override = ro->seclevel_override;
+
+       err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
+                             ro->bound ? &ro->src_addr : NULL, size);
        if (err < 0)
                goto out_skb;
 
-       skb_reset_mac_header(skb);
-
        err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
        if (err < 0)
                goto out_skb;
@@ -419,6 +437,20 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
        case WPAN_WANTACK:
                val = ro->want_ack;
                break;
+       case WPAN_SECURITY:
+               if (!ro->secen_override)
+                       val = WPAN_SECURITY_DEFAULT;
+               else if (ro->secen)
+                       val = WPAN_SECURITY_ON;
+               else
+                       val = WPAN_SECURITY_OFF;
+               break;
+       case WPAN_SECURITY_LEVEL:
+               if (!ro->seclevel_override)
+                       val = WPAN_SECURITY_LEVEL_DEFAULT;
+               else
+                       val = ro->seclevel;
+               break;
        default:
                return -ENOPROTOOPT;
        }
@@ -434,6 +466,7 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
+       struct net *net = sock_net(sk);
        int val;
        int err = 0;
 
@@ -449,6 +482,47 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
        case WPAN_WANTACK:
                ro->want_ack = !!val;
                break;
+       case WPAN_SECURITY:
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
+                   !ns_capable(net->user_ns, CAP_NET_RAW)) {
+                       err = -EPERM;
+                       break;
+               }
+
+               switch (val) {
+               case WPAN_SECURITY_DEFAULT:
+                       ro->secen_override = 0;
+                       break;
+               case WPAN_SECURITY_ON:
+                       ro->secen_override = 1;
+                       ro->secen = 1;
+                       break;
+               case WPAN_SECURITY_OFF:
+                       ro->secen_override = 1;
+                       ro->secen = 0;
+                       break;
+               default:
+                       err = -EINVAL;
+                       break;
+               }
+               break;
+       case WPAN_SECURITY_LEVEL:
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
+                   !ns_capable(net->user_ns, CAP_NET_RAW)) {
+                       err = -EPERM;
+                       break;
+               }
+
+               if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
+                   val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
+                       err = -EINVAL;
+               } else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
+                       ro->seclevel_override = 0;
+               } else {
+                       ro->seclevel_override = 1;
+                       ro->seclevel = val;
+               }
+               break;
        default:
                err = -ENOPROTOOPT;
                break;
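
From userspace the new options are plain setsockopt() calls on an 802.15.4 datagram socket. A hedged sketch (illustrative only; it assumes the AF_IEEE802154/SOL_IEEE802154 and WPAN_SECURITY*/IEEE802154_SCF_SECLEVEL_* constants from the 802.15.4 socket headers are visible to the application, which this diff does not show; error handling is omitted):

    #include <sys/socket.h>

    /* Sketch: force link-layer security on for the socket and pick an
     * encrypting security level.  Needs CAP_NET_ADMIN or CAP_NET_RAW,
     * as enforced in dgram_setsockopt() above. */
    static int enable_llsec(void)
    {
            int fd = socket(AF_IEEE802154, SOCK_DGRAM, 0);
            int sec = WPAN_SECURITY_ON;
            int lvl = IEEE802154_SCF_SECLEVEL_ENC_MIC64;

            setsockopt(fd, SOL_IEEE802154, WPAN_SECURITY, &sec, sizeof(sec));
            setsockopt(fd, SOL_IEEE802154, WPAN_SECURITY_LEVEL, &lvl, sizeof(lvl));

            return fd;
    }
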
index bed42a48408c6cc71cf4a47ba5bd897603cfaf7b..c09294e39ca60326d5b40c8431bf202ce5559225 100644 (file)
@@ -195,15 +195,16 @@ ieee802154_hdr_get_sechdr(const u8 *buf, struct ieee802154_sechdr *hdr)
        return pos;
 }
 
+static int ieee802154_sechdr_lengths[4] = {
+       [IEEE802154_SCF_KEY_IMPLICIT] = 5,
+       [IEEE802154_SCF_KEY_INDEX] = 6,
+       [IEEE802154_SCF_KEY_SHORT_INDEX] = 10,
+       [IEEE802154_SCF_KEY_HW_INDEX] = 14,
+};
+
 static int ieee802154_hdr_sechdr_len(u8 sc)
 {
-       switch (IEEE802154_SCF_KEY_ID_MODE(sc)) {
-       case IEEE802154_SCF_KEY_IMPLICIT: return 5;
-       case IEEE802154_SCF_KEY_INDEX: return 6;
-       case IEEE802154_SCF_KEY_SHORT_INDEX: return 10;
-       case IEEE802154_SCF_KEY_HW_INDEX: return 14;
-       default: return -EINVAL;
-       }
+       return ieee802154_sechdr_lengths[IEEE802154_SCF_KEY_ID_MODE(sc)];
 }
 
 static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr)
@@ -285,3 +286,40 @@ ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
        return pos;
 }
 EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs);
+
+int
+ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
+{
+       const u8 *buf = skb_mac_header(skb);
+       int pos;
+
+       pos = ieee802154_hdr_peek_addrs(skb, hdr);
+       if (pos < 0)
+               return -EINVAL;
+
+       if (hdr->fc.security_enabled) {
+               u8 key_id_mode = IEEE802154_SCF_KEY_ID_MODE(*(buf + pos));
+               int want = pos + ieee802154_sechdr_lengths[key_id_mode];
+
+               if (buf + want > skb_tail_pointer(skb))
+                       return -EINVAL;
+
+               pos += ieee802154_hdr_get_sechdr(buf + pos, &hdr->sec);
+       }
+
+       return pos;
+}
+EXPORT_SYMBOL_GPL(ieee802154_hdr_peek);
+
+int ieee802154_max_payload(const struct ieee802154_hdr *hdr)
+{
+       int hlen = ieee802154_hdr_minlen(hdr);
+
+       if (hdr->fc.security_enabled) {
+               hlen += ieee802154_sechdr_lengths[hdr->sec.key_id_mode] - 1;
+               hlen += ieee802154_sechdr_authtag_len(&hdr->sec);
+       }
+
+       return IEEE802154_MTU - hlen - IEEE802154_MFR_SIZE;
+}
+EXPORT_SYMBOL_GPL(ieee802154_max_payload);
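
As a quick worked check of ieee802154_max_payload() (illustrative figures): for an unsecured data frame carrying 64-bit source and destination addresses with both PAN IDs present, the MAC header comes to 2 (frame control) + 1 (sequence number) + 2 + 8 (destination PAN and address) + 2 + 8 (source PAN and address) = 23 octets, so the helper returns IEEE802154_MTU (127) - 23 - IEEE802154_MFR_SIZE (2) = 102 octets, which is the per-frame payload budget the 6LoWPAN fragmentation rewrite above carves its FRAG1/FRAGN pieces from.
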
index 6693a5cf01ce5e5fc39fcd33a64547dc4bdb5748..8b83a231299e46a0668b3fe329803fa1a6154791 100644 (file)
@@ -68,4 +68,23 @@ int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info);
 int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb);
 int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info);
 
+int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_dump_keys(struct sk_buff *skb,
+                              struct netlink_callback *cb);
+int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_dump_devs(struct sk_buff *skb,
+                              struct netlink_callback *cb);
+int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
+                                 struct netlink_callback *cb);
+int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
+                                   struct netlink_callback *cb);
+
 #endif
index 04b20589d97ab91eeb203ba0527122184936dc06..26efcf4fd2ff72079a678ef3a4dbd0ae887848e1 100644 (file)
@@ -124,6 +124,26 @@ static const struct genl_ops ieee8021154_ops[] = {
        IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
                        ieee802154_dump_iface),
        IEEE802154_OP(IEEE802154_SET_MACPARAMS, ieee802154_set_macparams),
+       IEEE802154_OP(IEEE802154_LLSEC_GETPARAMS, ieee802154_llsec_getparams),
+       IEEE802154_OP(IEEE802154_LLSEC_SETPARAMS, ieee802154_llsec_setparams),
+       IEEE802154_DUMP(IEEE802154_LLSEC_LIST_KEY, NULL,
+                       ieee802154_llsec_dump_keys),
+       IEEE802154_OP(IEEE802154_LLSEC_ADD_KEY, ieee802154_llsec_add_key),
+       IEEE802154_OP(IEEE802154_LLSEC_DEL_KEY, ieee802154_llsec_del_key),
+       IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEV, NULL,
+                       ieee802154_llsec_dump_devs),
+       IEEE802154_OP(IEEE802154_LLSEC_ADD_DEV, ieee802154_llsec_add_dev),
+       IEEE802154_OP(IEEE802154_LLSEC_DEL_DEV, ieee802154_llsec_del_dev),
+       IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEVKEY, NULL,
+                       ieee802154_llsec_dump_devkeys),
+       IEEE802154_OP(IEEE802154_LLSEC_ADD_DEVKEY, ieee802154_llsec_add_devkey),
+       IEEE802154_OP(IEEE802154_LLSEC_DEL_DEVKEY, ieee802154_llsec_del_devkey),
+       IEEE802154_DUMP(IEEE802154_LLSEC_LIST_SECLEVEL, NULL,
+                       ieee802154_llsec_dump_seclevels),
+       IEEE802154_OP(IEEE802154_LLSEC_ADD_SECLEVEL,
+                     ieee802154_llsec_add_seclevel),
+       IEEE802154_OP(IEEE802154_LLSEC_DEL_SECLEVEL,
+                     ieee802154_llsec_del_seclevel),
 };
 
 static const struct genl_multicast_group ieee802154_mcgrps[] = {
index 5d285498c0f691a906de5d9c82c64f867a857418..a3281b8bfd5bf1fa24bd05a351b476031776797f 100644 (file)
@@ -715,3 +715,812 @@ out:
        dev_put(dev);
        return rc;
 }
+
+
+
+static int
+ieee802154_llsec_parse_key_id(struct genl_info *info,
+                             struct ieee802154_llsec_key_id *desc)
+{
+       memset(desc, 0, sizeof(*desc));
+
+       if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE])
+               return -EINVAL;
+
+       desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
+
+       if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
+               if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
+                   !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
+                     info->attrs[IEEE802154_ATTR_HW_ADDR]))
+                       return -EINVAL;
+
+               desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
+
+               if (info->attrs[IEEE802154_ATTR_SHORT_ADDR]) {
+                       desc->device_addr.mode = IEEE802154_ADDR_SHORT;
+                       desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
+               } else {
+                       desc->device_addr.mode = IEEE802154_ADDR_LONG;
+                       desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+               }
+       }
+
+       if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID])
+               return -EINVAL;
+
+       if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT])
+               return -EINVAL;
+
+       if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED])
+               return -EINVAL;
+
+       if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT)
+               desc->id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID]);
+
+       switch (desc->mode) {
+       case IEEE802154_SCF_KEY_SHORT_INDEX:
+       {
+               u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]);
+               desc->short_source = cpu_to_le32(source);
+               break;
+       }
+       case IEEE802154_SCF_KEY_HW_INDEX:
+               desc->extended_source = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED]);
+               break;
+       }
+
+       return 0;
+}
+
+static int
+ieee802154_llsec_fill_key_id(struct sk_buff *msg,
+                            const struct ieee802154_llsec_key_id *desc)
+{
+       if (nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_MODE, desc->mode))
+               return -EMSGSIZE;
+
+       if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
+               if (nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID,
+                                     desc->device_addr.pan_id))
+                       return -EMSGSIZE;
+
+               if (desc->device_addr.mode == IEEE802154_ADDR_SHORT &&
+                   nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
+                                     desc->device_addr.short_addr))
+                       return -EMSGSIZE;
+
+               if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
+                   nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
+                                  desc->device_addr.extended_addr))
+                       return -EMSGSIZE;
+       }
+
+       if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_ID, desc->id))
+               return -EMSGSIZE;
+
+       if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
+           nla_put_u32(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
+                       le32_to_cpu(desc->short_source)))
+               return -EMSGSIZE;
+
+       if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
+           nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
+                          desc->extended_source))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *msg;
+       struct net_device *dev = NULL;
+       int rc = -ENOBUFS;
+       struct ieee802154_mlme_ops *ops;
+       void *hdr;
+       struct ieee802154_llsec_params params;
+
+       pr_debug("%s\n", __func__);
+
+       dev = ieee802154_nl_get_dev(info);
+       if (!dev)
+               return -ENODEV;
+
+       ops = ieee802154_mlme_ops(dev);
+       if (!ops->llsec) {
+               rc = -EOPNOTSUPP;
+               goto out_dev;
+       }
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               goto out_dev;
+
+       hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0,
+               IEEE802154_LLSEC_GETPARAMS);
+       if (!hdr)
+               goto out_free;
+
+       rc = ops->llsec->get_params(dev, &params);
+       if (rc < 0)
+               goto out_free;
+
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_ENABLED, params.enabled) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
+           nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+                       be32_to_cpu(params.frame_counter)) ||
+           ieee802154_llsec_fill_key_id(msg, &params.out_key))
+               goto out_free;
+
+       dev_put(dev);
+
+       return ieee802154_nl_reply(msg, info);
+out_free:
+       nlmsg_free(msg);
+out_dev:
+       dev_put(dev);
+       return rc;
+}
+
+int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info)
+{
+       struct net_device *dev = NULL;
+       int rc = -EINVAL;
+       struct ieee802154_mlme_ops *ops;
+       struct ieee802154_llsec_params params;
+       int changed = 0;
+
+       pr_debug("%s\n", __func__);
+
+       dev = ieee802154_nl_get_dev(info);
+       if (!dev)
+               return -ENODEV;
+
+       if (!info->attrs[IEEE802154_ATTR_LLSEC_ENABLED] &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE] &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL])
+               goto out;
+
+       ops = ieee802154_mlme_ops(dev);
+       if (!ops->llsec) {
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL] &&
+           nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) > 7)
+               goto out;
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]) {
+               params.enabled = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]);
+               changed |= IEEE802154_LLSEC_PARAM_ENABLED;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]) {
+               if (ieee802154_llsec_parse_key_id(info, &params.out_key))
+                       goto out;
+
+               changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) {
+               params.out_level = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]);
+               changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]) {
+               u32 fc = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
+
+               params.frame_counter = cpu_to_be32(fc);
+               changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
+       }
+
+       rc = ops->llsec->set_params(dev, &params, changed);
+
+       dev_put(dev);
+
+       return rc;
+out:
+       dev_put(dev);
+       return rc;
+}
+
+
+
+struct llsec_dump_data {
+       struct sk_buff *skb;
+       int s_idx, s_idx2;
+       int portid;
+       int nlmsg_seq;
+       struct net_device *dev;
+       struct ieee802154_mlme_ops *ops;
+       struct ieee802154_llsec_table *table;
+};
+
+static int
+ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
+                           int (*step)(struct llsec_dump_data*))
+{
+       struct net *net = sock_net(skb->sk);
+       struct net_device *dev;
+       struct llsec_dump_data data;
+       int idx = 0;
+       int first_dev = cb->args[0];
+       int rc;
+
+       for_each_netdev(net, dev) {
+               if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
+                       goto skip;
+
+               data.ops = ieee802154_mlme_ops(dev);
+               if (!data.ops->llsec)
+                       goto skip;
+
+               data.skb = skb;
+               data.s_idx = cb->args[1];
+               data.s_idx2 = cb->args[2];
+               data.dev = dev;
+               data.portid = NETLINK_CB(cb->skb).portid;
+               data.nlmsg_seq = cb->nlh->nlmsg_seq;
+
+               data.ops->llsec->lock_table(dev);
+               data.ops->llsec->get_table(data.dev, &data.table);
+               rc = step(&data);
+               data.ops->llsec->unlock_table(dev);
+
+               if (rc < 0)
+                       break;
+
+skip:
+               idx++;
+       }
+       cb->args[0] = idx;
+
+       return skb->len;
+}
+
+static int
+ieee802154_nl_llsec_change(struct sk_buff *skb, struct genl_info *info,
+                          int (*fn)(struct net_device*, struct genl_info*))
+{
+       struct net_device *dev = NULL;
+       int rc = -EINVAL;
+
+       dev = ieee802154_nl_get_dev(info);
+       if (!dev)
+               return -ENODEV;
+
+       if (!ieee802154_mlme_ops(dev)->llsec)
+               rc = -EOPNOTSUPP;
+       else
+               rc = fn(dev, info);
+
+       dev_put(dev);
+       return rc;
+}
+
+
+
+static int
+ieee802154_llsec_parse_key(struct genl_info *info,
+                          struct ieee802154_llsec_key *key)
+{
+       u8 frames;
+       u32 commands[256 / 32];
+
+       memset(key, 0, sizeof(*key));
+
+       if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] ||
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES])
+               return -EINVAL;
+
+       frames = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES]);
+       if ((frames & BIT(IEEE802154_FC_TYPE_MAC_CMD)) &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS])
+               return -EINVAL;
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS]) {
+               nla_memcpy(commands,
+                          info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS],
+                          256 / 8);
+
+               if (commands[0] || commands[1] || commands[2] || commands[3] ||
+                   commands[4] || commands[5] || commands[6] ||
+                   commands[7] >= BIT(IEEE802154_CMD_GTS_REQ + 1))
+                       return -EINVAL;
+
+               key->cmd_frame_ids = commands[7];
+       }
+
+       key->frame_types = frames;
+
+       nla_memcpy(key->key, info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES],
+                  IEEE802154_LLSEC_KEY_SIZE);
+
+       return 0;
+}
+
+static int llsec_add_key(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_key key;
+       struct ieee802154_llsec_key_id id;
+
+       if (ieee802154_llsec_parse_key(info, &key) ||
+           ieee802154_llsec_parse_key_id(info, &id))
+               return -EINVAL;
+
+       return ops->llsec->add_key(dev, &id, &key);
+}
+
+int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info)
+{
+       if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+           (NLM_F_CREATE | NLM_F_EXCL))
+               return -EINVAL;
+
+       return ieee802154_nl_llsec_change(skb, info, llsec_add_key);
+}
+
+static int llsec_remove_key(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_key_id id;
+
+       if (ieee802154_llsec_parse_key_id(info, &id))
+               return -EINVAL;
+
+       return ops->llsec->del_key(dev, &id);
+}
+
+int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info)
+{
+       return ieee802154_nl_llsec_change(skb, info, llsec_remove_key);
+}
+
+static int
+ieee802154_nl_fill_key(struct sk_buff *msg, u32 portid, u32 seq,
+                      const struct ieee802154_llsec_key_entry *key,
+                      const struct net_device *dev)
+{
+       void *hdr;
+       u32 commands[256 / 32];
+
+       hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
+                         IEEE802154_LLSEC_LIST_KEY);
+       if (!hdr)
+               goto out;
+
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           ieee802154_llsec_fill_key_id(msg, &key->id) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
+                      key->key->frame_types))
+               goto nla_put_failure;
+
+       if (key->key->frame_types & BIT(IEEE802154_FC_TYPE_MAC_CMD)) {
+               memset(commands, 0, sizeof(commands));
+               commands[7] = key->key->cmd_frame_ids;
+               if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
+                           sizeof(commands), commands))
+                       goto nla_put_failure;
+       }
+
+       if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_BYTES,
+                   IEEE802154_LLSEC_KEY_SIZE, key->key->key))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+out:
+       return -EMSGSIZE;
+}
+
+static int llsec_iter_keys(struct llsec_dump_data *data)
+{
+       struct ieee802154_llsec_key_entry *pos;
+       int rc = 0, idx = 0;
+
+       list_for_each_entry(pos, &data->table->keys, list) {
+               if (idx++ < data->s_idx)
+                       continue;
+
+               if (ieee802154_nl_fill_key(data->skb, data->portid,
+                                          data->nlmsg_seq, pos, data->dev)) {
+                       rc = -EMSGSIZE;
+                       break;
+               }
+
+               data->s_idx++;
+       }
+
+       return rc;
+}
+
+int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys);
+}
+
+
+
+static int
+llsec_parse_dev(struct genl_info *info,
+               struct ieee802154_llsec_device *dev)
+{
+       memset(dev, 0, sizeof(*dev));
+
+       if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
+           !info->attrs[IEEE802154_ATTR_HW_ADDR] ||
+           !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] ||
+           !info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] ||
+           (!!info->attrs[IEEE802154_ATTR_PAN_ID] !=
+            !!info->attrs[IEEE802154_ATTR_SHORT_ADDR]))
+               return -EINVAL;
+
+       if (info->attrs[IEEE802154_ATTR_PAN_ID]) {
+               dev->pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
+               dev->short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
+       } else {
+               dev->short_addr = cpu_to_le16(IEEE802154_ADDR_UNDEF);
+       }
+
+       dev->hwaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+       dev->frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
+       dev->seclevel_exempt = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
+       dev->key_mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE]);
+
+       if (dev->key_mode >= __IEEE802154_LLSEC_DEVKEY_MAX)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int llsec_add_dev(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_device desc;
+
+       if (llsec_parse_dev(info, &desc))
+               return -EINVAL;
+
+       return ops->llsec->add_dev(dev, &desc);
+}
+
+int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info)
+{
+       if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+           (NLM_F_CREATE | NLM_F_EXCL))
+               return -EINVAL;
+
+       return ieee802154_nl_llsec_change(skb, info, llsec_add_dev);
+}
+
+static int llsec_del_dev(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       __le64 devaddr;
+
+       if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
+               return -EINVAL;
+
+       devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+
+       return ops->llsec->del_dev(dev, devaddr);
+}
+
+int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info)
+{
+       return ieee802154_nl_llsec_change(skb, info, llsec_del_dev);
+}
+
+static int
+ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq,
+                      const struct ieee802154_llsec_device *desc,
+                      const struct net_device *dev)
+{
+       void *hdr;
+
+       hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
+                         IEEE802154_LLSEC_LIST_DEV);
+       if (!hdr)
+               goto out;
+
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
+           nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
+                             desc->short_addr) ||
+           nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) ||
+           nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+                       desc->frame_counter) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
+                      desc->seclevel_exempt) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, desc->key_mode))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+out:
+       return -EMSGSIZE;
+}
+
+static int llsec_iter_devs(struct llsec_dump_data *data)
+{
+       struct ieee802154_llsec_device *pos;
+       int rc = 0, idx = 0;
+
+       list_for_each_entry(pos, &data->table->devices, list) {
+               if (idx++ < data->s_idx)
+                       continue;
+
+               if (ieee802154_nl_fill_dev(data->skb, data->portid,
+                                          data->nlmsg_seq, pos, data->dev)) {
+                       rc = -EMSGSIZE;
+                       break;
+               }
+
+               data->s_idx++;
+       }
+
+       return rc;
+}
+
+int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs);
+}
+
+
+
+static int llsec_add_devkey(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_device_key key;
+       __le64 devaddr;
+
+       if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
+           !info->attrs[IEEE802154_ATTR_HW_ADDR] ||
+           ieee802154_llsec_parse_key_id(info, &key.key_id))
+               return -EINVAL;
+
+       devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+       key.frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
+
+       return ops->llsec->add_devkey(dev, devaddr, &key);
+}
+
+int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+       if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+           (NLM_F_CREATE | NLM_F_EXCL))
+               return -EINVAL;
+
+       return ieee802154_nl_llsec_change(skb, info, llsec_add_devkey);
+}
+
+static int llsec_del_devkey(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_device_key key;
+       __le64 devaddr;
+
+       if (!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
+           ieee802154_llsec_parse_key_id(info, &key.key_id))
+               return -EINVAL;
+
+       devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+
+       return ops->llsec->del_devkey(dev, devaddr, &key);
+}
+
+int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+       return ieee802154_nl_llsec_change(skb, info, llsec_del_devkey);
+}
+
+static int
+ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq,
+                         __le64 devaddr,
+                         const struct ieee802154_llsec_device_key *devkey,
+                         const struct net_device *dev)
+{
+       void *hdr;
+
+       hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
+                         IEEE802154_LLSEC_LIST_DEVKEY);
+       if (!hdr)
+               goto out;
+
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) ||
+           nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+                       devkey->frame_counter) ||
+           ieee802154_llsec_fill_key_id(msg, &devkey->key_id))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+out:
+       return -EMSGSIZE;
+}
+
+static int llsec_iter_devkeys(struct llsec_dump_data *data)
+{
+       struct ieee802154_llsec_device *dpos;
+       struct ieee802154_llsec_device_key *kpos;
+       int rc = 0, idx = 0, idx2;
+
+       list_for_each_entry(dpos, &data->table->devices, list) {
+               if (idx++ < data->s_idx)
+                       continue;
+
+               idx2 = 0;
+
+               list_for_each_entry(kpos, &dpos->keys, list) {
+                       if (idx2++ < data->s_idx2)
+                               continue;
+
+                       if (ieee802154_nl_fill_devkey(data->skb, data->portid,
+                                                     data->nlmsg_seq,
+                                                     dpos->hwaddr, kpos,
+                                                     data->dev)) {
+                               return -EMSGSIZE;
+                       }
+
+                       data->s_idx2++;
+               }
+
+               data->s_idx++;
+       }
+
+       return rc;
+}
+
+int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
+                                 struct netlink_callback *cb)
+{
+       return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys);
+}
+
+
+
+static int
+llsec_parse_seclevel(struct genl_info *info,
+                    struct ieee802154_llsec_seclevel *sl)
+{
+       memset(sl, 0, sizeof(*sl));
+
+       if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE] ||
+           !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS] ||
+           !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE])
+               return -EINVAL;
+
+       sl->frame_type = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE]);
+       if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD) {
+               if (!info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID])
+                       return -EINVAL;
+
+               sl->cmd_frame_id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID]);
+       }
+
+       sl->sec_levels = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS]);
+       sl->device_override = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
+
+       return 0;
+}
+
+static int llsec_add_seclevel(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_seclevel sl;
+
+       if (llsec_parse_seclevel(info, &sl))
+               return -EINVAL;
+
+       return ops->llsec->add_seclevel(dev, &sl);
+}
+
+int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info)
+{
+       if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+           (NLM_F_CREATE | NLM_F_EXCL))
+               return -EINVAL;
+
+       return ieee802154_nl_llsec_change(skb, info, llsec_add_seclevel);
+}
+
+static int llsec_del_seclevel(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_seclevel sl;
+
+       if (llsec_parse_seclevel(info, &sl))
+               return -EINVAL;
+
+       return ops->llsec->del_seclevel(dev, &sl);
+}
+
+int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info)
+{
+       return ieee802154_nl_llsec_change(skb, info, llsec_del_seclevel);
+}
+
+static int
+ieee802154_nl_fill_seclevel(struct sk_buff *msg, u32 portid, u32 seq,
+                           const struct ieee802154_llsec_seclevel *sl,
+                           const struct net_device *dev)
+{
+       void *hdr;
+
+       hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
+                         IEEE802154_LLSEC_LIST_SECLEVEL);
+       if (!hdr)
+               goto out;
+
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_FRAME_TYPE, sl->frame_type) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVELS, sl->sec_levels) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
+                      sl->device_override))
+               goto nla_put_failure;
+
+       if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
+                      sl->cmd_frame_id))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+out:
+       return -EMSGSIZE;
+}
+
+static int llsec_iter_seclevels(struct llsec_dump_data *data)
+{
+       struct ieee802154_llsec_seclevel *pos;
+       int rc = 0, idx = 0;
+
+       list_for_each_entry(pos, &data->table->security_levels, list) {
+               if (idx++ < data->s_idx)
+                       continue;
+
+               if (ieee802154_nl_fill_seclevel(data->skb, data->portid,
+                                               data->nlmsg_seq, pos,
+                                               data->dev)) {
+                       rc = -EMSGSIZE;
+                       break;
+               }
+
+               data->s_idx++;
+       }
+
+       return rc;
+}
+
+int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
+                                   struct netlink_callback *cb)
+{
+       return ieee802154_llsec_dump_table(skb, cb, llsec_iter_seclevels);
+}
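
The two dump paths above (device keys and security levels) follow the standard resumable generic-netlink dump pattern: each table entry becomes its own NLM_F_MULTI message built with genlmsg_put()/nla_put_*()/genlmsg_end(), a failed nla_put is rolled back with genlmsg_cancel(), and the s_idx/s_idx2 cursors let the iteration resume once the skb fills up and -EMSGSIZE is returned. A minimal sketch of that fill step, using a hypothetical family and attribute (example_family, EXAMPLE_CMD_LIST and EXAMPLE_ATTR_VALUE are illustrative, not part of this patch):

static int example_fill_entry(struct sk_buff *msg, u32 seq, u32 value)
{
        void *hdr;

        /* One self-contained multipart message per table entry. */
        hdr = genlmsg_put(msg, 0, seq, &example_family, NLM_F_MULTI,
                          EXAMPLE_CMD_LIST);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u32(msg, EXAMPLE_ATTR_VALUE, value)) {
                genlmsg_cancel(msg, hdr);       /* drop the partial message */
                return -EMSGSIZE;
        }

        genlmsg_end(msg, hdr);                  /* patch in the final length */
        return 0;
}
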
index fd7be5e45cefb99d37df9d173c9bf7c0ab1d7dc3..3a703ab88348fc38481ec583ff5d0885ae8c9655 100644 (file)
@@ -62,5 +62,21 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
        [IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, },
 
        [IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, },
+
+       [IEEE802154_ATTR_LLSEC_ENABLED] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_SECLEVEL] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_KEY_MODE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT] = { .type = NLA_U32, },
+       [IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED] = { .type = NLA_HW_ADDR, },
+       [IEEE802154_ATTR_LLSEC_KEY_ID] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_FRAME_COUNTER] = { .type = NLA_U32 },
+       [IEEE802154_ATTR_LLSEC_KEY_BYTES] = { .len = 16, },
+       [IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS] = { .len = 258 / 8 },
+       [IEEE802154_ATTR_LLSEC_FRAME_TYPE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_CMD_FRAME_ID] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_SECLEVELS] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] = { .type = NLA_U8, },
 };
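
The new IEEE802154_ATTR_LLSEC_* policy entries mean the netlink core type- and length-checks these attributes before the llsec handlers above ever run; note in particular the 16-byte minimum for the raw key material. As a rough sketch of how such a policy is applied (hedged: example_policy and the EXAMPLE_ATTR_* names are placeholders, and a real genetlink family normally attaches its policy through the ops table rather than calling nla_parse() by hand):

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
        [EXAMPLE_ATTR_ENABLED]   = { .type = NLA_U8 },
        [EXAMPLE_ATTR_KEY_BYTES] = { .len = 16 },       /* at least 16 bytes */
};

static int example_parse(struct nlattr *head, int len)
{
        struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];

        /* Rejects attributes of the wrong type or shorter than .len. */
        return nla_parse(tb, EXAMPLE_ATTR_MAX, head, len, example_policy);
}
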
 
index 8c54870db792cab059bff464d29776510ec3e5ec..0e9bb08a91e424ca1f5c0351825aaa32d0dc3734 100644 (file)
@@ -254,7 +254,6 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
        struct inet_sock *inet;
        struct proto *answer_prot;
        unsigned char answer_flags;
-       char answer_no_check;
        int try_loading_module = 0;
        int err;
 
@@ -312,7 +311,6 @@ lookup_protocol:
 
        sock->ops = answer->ops;
        answer_prot = answer->prot;
-       answer_no_check = answer->no_check;
        answer_flags = answer->flags;
        rcu_read_unlock();
 
@@ -324,7 +322,6 @@ lookup_protocol:
                goto out;
 
        err = 0;
-       sk->sk_no_check = answer_no_check;
        if (INET_PROTOSW_REUSE & answer_flags)
                sk->sk_reuse = SK_CAN_REUSE;
 
@@ -1002,7 +999,6 @@ static struct inet_protosw inetsw_array[] =
                .protocol =   IPPROTO_TCP,
                .prot =       &tcp_prot,
                .ops =        &inet_stream_ops,
-               .no_check =   0,
                .flags =      INET_PROTOSW_PERMANENT |
                              INET_PROTOSW_ICSK,
        },
@@ -1012,7 +1008,6 @@ static struct inet_protosw inetsw_array[] =
                .protocol =   IPPROTO_UDP,
                .prot =       &udp_prot,
                .ops =        &inet_dgram_ops,
-               .no_check =   UDP_CSUM_DEFAULT,
                .flags =      INET_PROTOSW_PERMANENT,
        },
 
@@ -1021,7 +1016,6 @@ static struct inet_protosw inetsw_array[] =
                .protocol =   IPPROTO_ICMP,
                .prot =       &ping_prot,
                .ops =        &inet_dgram_ops,
-               .no_check =   UDP_CSUM_DEFAULT,
                .flags =      INET_PROTOSW_REUSE,
        },
 
@@ -1030,7 +1024,6 @@ static struct inet_protosw inetsw_array[] =
               .protocol =   IPPROTO_IP,        /* wild card */
               .prot =       &raw_prot,
               .ops =        &inet_sockraw_ops,
-              .no_check =   UDP_CSUM_DEFAULT,
               .flags =      INET_PROTOSW_REUSE,
        }
 };
@@ -1476,22 +1469,20 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
-unsigned long snmp_fold_field(void __percpu *mib[], int offt)
+unsigned long snmp_fold_field(void __percpu *mib, int offt)
 {
        unsigned long res = 0;
-       int i, j;
+       int i;
 
-       for_each_possible_cpu(i) {
-               for (j = 0; j < SNMP_ARRAY_SZ; j++)
-                       res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
-       }
+       for_each_possible_cpu(i)
+               res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
        return res;
 }
 EXPORT_SYMBOL_GPL(snmp_fold_field);
 
 #if BITS_PER_LONG==32
 
-u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
+u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 {
        u64 res = 0;
        int cpu;
@@ -1502,7 +1493,7 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
                u64 v;
                unsigned int start;
 
-               bhptr = per_cpu_ptr(mib[0], cpu);
+               bhptr = per_cpu_ptr(mib, cpu);
                syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
                do {
                        start = u64_stats_fetch_begin_irq(syncp);
@@ -1516,25 +1507,6 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
 EXPORT_SYMBOL_GPL(snmp_fold_field64);
 #endif
 
-int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
-{
-       BUG_ON(ptr == NULL);
-       ptr[0] = __alloc_percpu(mibsize, align);
-       if (!ptr[0])
-               return -ENOMEM;
-
-#if SNMP_ARRAY_SZ == 2
-       ptr[1] = __alloc_percpu(mibsize, align);
-       if (!ptr[1]) {
-               free_percpu(ptr[0]);
-               ptr[0] = NULL;
-               return -ENOMEM;
-       }
-#endif
-       return 0;
-}
-EXPORT_SYMBOL_GPL(snmp_mib_init);
-
 #ifdef CONFIG_IP_MULTICAST
 static const struct net_protocol igmp_protocol = {
        .handler =      igmp_rcv,
@@ -1570,40 +1542,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
 {
        int i;
 
-       if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
-                         sizeof(struct tcp_mib),
-                         __alignof__(struct tcp_mib)) < 0)
+       net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
+       if (!net->mib.tcp_statistics)
                goto err_tcp_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
-                         sizeof(struct ipstats_mib),
-                         __alignof__(struct ipstats_mib)) < 0)
+       net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
+       if (!net->mib.ip_statistics)
                goto err_ip_mib;
 
        for_each_possible_cpu(i) {
                struct ipstats_mib *af_inet_stats;
-               af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
-               u64_stats_init(&af_inet_stats->syncp);
-#if SNMP_ARRAY_SZ == 2
-               af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
+               af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
                u64_stats_init(&af_inet_stats->syncp);
-#endif
        }
 
-       if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
-                         sizeof(struct linux_mib),
-                         __alignof__(struct linux_mib)) < 0)
+       net->mib.net_statistics = alloc_percpu(struct linux_mib);
+       if (!net->mib.net_statistics)
                goto err_net_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
-                         sizeof(struct udp_mib),
-                         __alignof__(struct udp_mib)) < 0)
+       net->mib.udp_statistics = alloc_percpu(struct udp_mib);
+       if (!net->mib.udp_statistics)
                goto err_udp_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
-                         sizeof(struct udp_mib),
-                         __alignof__(struct udp_mib)) < 0)
+       net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
+       if (!net->mib.udplite_statistics)
                goto err_udplite_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
-                         sizeof(struct icmp_mib),
-                         __alignof__(struct icmp_mib)) < 0)
+       net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
+       if (!net->mib.icmp_statistics)
                goto err_icmp_mib;
        net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
                                              GFP_KERNEL);
@@ -1614,17 +1576,17 @@ static __net_init int ipv4_mib_init_net(struct net *net)
        return 0;
 
 err_icmpmsg_mib:
-       snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
+       free_percpu(net->mib.icmp_statistics);
 err_icmp_mib:
-       snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
+       free_percpu(net->mib.udplite_statistics);
 err_udplite_mib:
-       snmp_mib_free((void __percpu **)net->mib.udp_statistics);
+       free_percpu(net->mib.udp_statistics);
 err_udp_mib:
-       snmp_mib_free((void __percpu **)net->mib.net_statistics);
+       free_percpu(net->mib.net_statistics);
 err_net_mib:
-       snmp_mib_free((void __percpu **)net->mib.ip_statistics);
+       free_percpu(net->mib.ip_statistics);
 err_ip_mib:
-       snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
+       free_percpu(net->mib.tcp_statistics);
 err_tcp_mib:
        return -ENOMEM;
 }
@@ -1632,12 +1594,12 @@ err_tcp_mib:
 static __net_exit void ipv4_mib_exit_net(struct net *net)
 {
        kfree(net->mib.icmpmsg_statistics);
-       snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
-       snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
-       snmp_mib_free((void __percpu **)net->mib.udp_statistics);
-       snmp_mib_free((void __percpu **)net->mib.net_statistics);
-       snmp_mib_free((void __percpu **)net->mib.ip_statistics);
-       snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
+       free_percpu(net->mib.icmp_statistics);
+       free_percpu(net->mib.udplite_statistics);
+       free_percpu(net->mib.udp_statistics);
+       free_percpu(net->mib.net_statistics);
+       free_percpu(net->mib.ip_statistics);
+       free_percpu(net->mib.tcp_statistics);
 }
 
 static __net_initdata struct pernet_operations ipv4_mib_ops = {
@@ -1650,6 +1612,39 @@ static int __init init_ipv4_mibs(void)
        return register_pernet_subsys(&ipv4_mib_ops);
 }
 
+static __net_init int inet_init_net(struct net *net)
+{
+       /*
+        * Set defaults for local port range
+        */
+       seqlock_init(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] =  32768;
+       net->ipv4.ip_local_ports.range[1] =  61000;
+
+       seqlock_init(&net->ipv4.ping_group_range.lock);
+       /*
+        * Sane defaults - nobody may create ping sockets.
+        * Boot scripts should set this to distro-specific group.
+        */
+       net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
+       net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+       return 0;
+}
+
+static __net_exit void inet_exit_net(struct net *net)
+{
+}
+
+static __net_initdata struct pernet_operations af_inet_ops = {
+       .init = inet_init_net,
+       .exit = inet_exit_net,
+};
+
+static int __init init_inet_pernet_ops(void)
+{
+       return register_pernet_subsys(&af_inet_ops);
+}
+
 static int ipv4_proc_init(void);
 
 /*
@@ -1703,13 +1698,9 @@ static int __init inet_init(void)
 
        BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
 
-       sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
-       if (!sysctl_local_reserved_ports)
-               goto out;
-
        rc = proto_register(&tcp_prot, 1);
        if (rc)
-               goto out_free_reserved_ports;
+               goto out;
 
        rc = proto_register(&udp_prot, 1);
        if (rc)
@@ -1794,6 +1785,9 @@ static int __init inet_init(void)
        if (ip_mr_init())
                pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
 #endif
+
+       if (init_inet_pernet_ops())
+               pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
        /*
         *      Initialise per-cpu ipv4 mibs
         */
@@ -1816,8 +1810,6 @@ out_unregister_udp_proto:
        proto_unregister(&udp_prot);
 out_unregister_tcp_proto:
        proto_unregister(&tcp_prot);
-out_free_reserved_ports:
-       kfree(sysctl_local_reserved_ports);
        goto out;
 }
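
The MIB rework above is the core of this af_inet change: the old two-slot (SNMP_ARRAY_SZ) pointer arrays and their snmp_mib_init()/snmp_mib_free() helpers are gone, and each MIB is now a single percpu allocation folded with a plain per-CPU loop. A self-contained sketch of the resulting allocate/fold/free life cycle (the struct and field names are illustrative only):

struct example_mib {
        unsigned long   mibs[4];
};

static struct example_mib __percpu *example_stats;

static int example_mib_init(void)
{
        example_stats = alloc_percpu(struct example_mib);
        return example_stats ? 0 : -ENOMEM;
}

static unsigned long example_fold(int field)
{
        unsigned long sum = 0;
        int cpu;

        /* Same shape as the reworked snmp_fold_field() above. */
        for_each_possible_cpu(cpu)
                sum += per_cpu_ptr(example_stats, cpu)->mibs[field];
        return sum;
}

static void example_mib_exit(void)
{
        free_percpu(example_stats);
}
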
 
index bdbf68bb2e2d194fcdf94553bd41a4ac9f184d7c..e9449376b58e4293b7735362912e1ea1191a8815 100644 (file)
@@ -106,7 +106,6 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
 #define IN4_ADDR_HSIZE         (1U << IN4_ADDR_HSIZE_SHIFT)
 
 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
-static DEFINE_SPINLOCK(inet_addr_hash_lock);
 
 static u32 inet_addr_hash(struct net *net, __be32 addr)
 {
@@ -119,16 +118,14 @@ static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 {
        u32 hash = inet_addr_hash(net, ifa->ifa_local);
 
-       spin_lock(&inet_addr_hash_lock);
+       ASSERT_RTNL();
        hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
-       spin_unlock(&inet_addr_hash_lock);
 }
 
 static void inet_hash_remove(struct in_ifaddr *ifa)
 {
-       spin_lock(&inet_addr_hash_lock);
+       ASSERT_RTNL();
        hlist_del_init_rcu(&ifa->hash);
-       spin_unlock(&inet_addr_hash_lock);
 }
 
 /**
@@ -830,7 +827,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
        ifa_existing = find_matching_ifa(ifa);
        if (!ifa_existing) {
                /* It would be best to check for !NLM_F_CREATE here but
-                * userspace alreay relies on not having to provide this.
+                * userspace already relies on not having to provide this.
                 */
                set_ifa_lifetime(ifa, valid_lft, prefered_lft);
                return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
index 8a043f03c88ecbb418b5466953abefd54c50b1d0..b10cd43a4722730205272d7822d699bc49ea71d3 100644 (file)
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (fi == NULL)
                goto failure;
+       fib_info_cnt++;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
                if (!fi->fib_metrics)
                        goto failure;
        } else
                fi->fib_metrics = (u32 *) dst_default_metrics;
-       fib_info_cnt++;
 
        fi->fib_net = hold_net(net);
        fi->fib_protocol = cfg->fc_protocol;
index 250be7421ab36c50ce00a25dcd3c659ca1c97f18..fbfd829f4049a36d1d3d590ec5d913785740fb24 100644 (file)
@@ -93,28 +93,6 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 }
 EXPORT_SYMBOL_GPL(gre_build_header);
 
-static __sum16 check_checksum(struct sk_buff *skb)
-{
-       __sum16 csum = 0;
-
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               csum = csum_fold(skb->csum);
-
-               if (!csum)
-                       break;
-               /* Fall through. */
-
-       case CHECKSUM_NONE:
-               skb->csum = 0;
-               csum = __skb_checksum_complete(skb);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-               break;
-       }
-
-       return csum;
-}
-
 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                            bool *csum_err)
 {
@@ -141,7 +119,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 
        options = (__be32 *)(greh + 1);
        if (greh->flags & GRE_CSUM) {
-               if (check_checksum(skb)) {
+               if (skb_checksum_simple_validate(skb)) {
                        *csum_err = true;
                        return -EINVAL;
                }
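
The removed check_checksum() above (and the equivalent open-coded switches deleted from icmp_rcv() and igmp_rcv() below) are folded into the skb_checksum_simple_validate() helper. The callers relied on the logic they used to carry themselves, along these lines (a sketch of the replaced open-coded logic, not of the helper's actual implementation):

static __sum16 example_simple_validate(struct sk_buff *skb)
{
        __sum16 csum = 0;

        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                csum = csum_fold(skb->csum);    /* hardware summed the payload */
                if (!csum)
                        break;
                /* Fall through and verify in software. */
        case CHECKSUM_NONE:
                skb->csum = 0;
                csum = __skb_checksum_complete(skb);
                skb->ip_summed = CHECKSUM_COMPLETE;
                break;
        }

        return csum;                            /* zero means the checksum is good */
}
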
index 0134663fdbce86f6da39d8f6c9d27ce6c404687c..79c3d947a48128a8a58b58776e99f3a6602d868c 100644 (file)
@@ -337,6 +337,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        struct sock *sk;
        struct inet_sock *inet;
        __be32 daddr, saddr;
+       u32 mark = IP4_REPLY_MARK(net, skb->mark);
 
        if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
                return;
@@ -349,6 +350,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        icmp_param->data.icmph.checksum = 0;
 
        inet->tos = ip_hdr(skb)->tos;
+       sk->sk_mark = mark;
        daddr = ipc.addr = ip_hdr(skb)->saddr;
        saddr = fib_compute_spec_dst(skb);
        ipc.opt = NULL;
@@ -364,6 +366,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = daddr;
        fl4.saddr = saddr;
+       fl4.flowi4_mark = mark;
        fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
        fl4.flowi4_proto = IPPROTO_ICMP;
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -382,7 +385,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
                                        struct flowi4 *fl4,
                                        struct sk_buff *skb_in,
                                        const struct iphdr *iph,
-                                       __be32 saddr, u8 tos,
+                                       __be32 saddr, u8 tos, u32 mark,
                                        int type, int code,
                                        struct icmp_bxm *param)
 {
@@ -394,6 +397,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
        fl4->daddr = (param->replyopts.opt.opt.srr ?
                      param->replyopts.opt.opt.faddr : iph->saddr);
        fl4->saddr = saddr;
+       fl4->flowi4_mark = mark;
        fl4->flowi4_tos = RT_TOS(tos);
        fl4->flowi4_proto = IPPROTO_ICMP;
        fl4->fl4_icmp_type = type;
@@ -491,6 +495,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        struct flowi4 fl4;
        __be32 saddr;
        u8  tos;
+       u32 mark;
        struct net *net;
        struct sock *sk;
 
@@ -592,6 +597,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
                                           IPTOS_PREC_INTERNETCONTROL) :
                                          iph->tos;
+       mark = IP4_REPLY_MARK(net, skb_in->mark);
 
        if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in))
                goto out_unlock;
@@ -608,13 +614,14 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        icmp_param->skb   = skb_in;
        icmp_param->offset = skb_network_offset(skb_in);
        inet_sk(sk)->tos = tos;
+       sk->sk_mark = mark;
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param->replyopts.opt;
        ipc.tx_flags = 0;
        ipc.ttl = 0;
        ipc.tos = -1;
 
-       rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
+       rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
                               type, code, icmp_param);
        if (IS_ERR(rt))
                goto out_unlock;
@@ -908,16 +915,8 @@ int icmp_rcv(struct sk_buff *skb)
 
        ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
 
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               if (!csum_fold(skb->csum))
-                       break;
-               /* fall through */
-       case CHECKSUM_NONE:
-               skb->csum = 0;
-               if (__skb_checksum_complete(skb))
-                       goto csum_error;
-       }
+       if (skb_checksum_simple_validate(skb))
+               goto csum_error;
 
        if (!pskb_pull(skb, sizeof(*icmph)))
                goto error;
index 97e4d1655d26bb65121c1a8954af2d7565c288a6..17d34e3c2ac34f326cf68d74e4c7193f6d86d8e6 100644 (file)
@@ -988,16 +988,8 @@ int igmp_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
                goto drop;
 
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               if (!csum_fold(skb->csum))
-                       break;
-               /* fall through */
-       case CHECKSUM_NONE:
-               skb->csum = 0;
-               if (__skb_checksum_complete(skb))
-                       goto drop;
-       }
+       if (skb_checksum_simple_validate(skb))
+               goto drop;
 
        ih = igmp_hdr(skb);
        switch (ih->type) {
index 0d1e2cb877ec43692c5a7b4fe57e16cf921a8c97..14d02ea905b6bea37240f88054f0cd42db73c4c2 100644 (file)
@@ -29,19 +29,16 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
-unsigned long *sysctl_local_reserved_ports;
-EXPORT_SYMBOL(sysctl_local_reserved_ports);
-
 void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
-               *low = net->ipv4.sysctl_local_ports.range[0];
-               *high = net->ipv4.sysctl_local_ports.range[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+               *low = net->ipv4.ip_local_ports.range[0];
+               *high = net->ipv4.ip_local_ports.range[1];
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
@@ -113,7 +110,7 @@ again:
 
                smallest_size = -1;
                do {
-                       if (inet_is_reserved_local_port(rover))
+                       if (inet_is_local_reserved_port(net, rover))
                                goto next_nolock;
                        head = &hashinfo->bhash[inet_bhashfn(net, rover,
                                        hashinfo->bhash_size)];
@@ -408,7 +405,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
        struct net *net = sock_net(sk);
        int flags = inet_sk_flowi_flags(sk);
 
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol,
                           flags,
@@ -445,7 +442,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 
        rcu_read_lock();
        opt = rcu_dereference(newinet->inet_opt);
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
@@ -680,6 +677,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
                inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
                newsk->sk_write_space = sk_stream_write_space;
 
+               newsk->sk_mark = inet_rsk(req)->ir_mark;
+
                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;
index 8b9cf279450d6cf0c24e64a20fb0d05b9fb89a82..43116e8c8e1323cdd8d902c5b88e42187c8df991 100644 (file)
@@ -274,7 +274,7 @@ struct sock *__inet_lookup_established(struct net *net,
                                  const __be32 daddr, const u16 hnum,
                                  const int dif)
 {
-       INET_ADDR_COOKIE(acookie, saddr, daddr)
+       INET_ADDR_COOKIE(acookie, saddr, daddr);
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        struct sock *sk;
        const struct hlist_nulls_node *node;
@@ -327,7 +327,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        __be32 daddr = inet->inet_rcv_saddr;
        __be32 saddr = inet->inet_daddr;
        int dif = sk->sk_bound_dev_if;
-       INET_ADDR_COOKIE(acookie, saddr, daddr)
+       INET_ADDR_COOKIE(acookie, saddr, daddr);
        const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
        struct net *net = sock_net(sk);
        unsigned int hash = inet_ehashfn(net, daddr, lport,
@@ -500,7 +500,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                local_bh_disable();
                for (i = 1; i <= remaining; i++) {
                        port = low + (i + offset) % remaining;
-                       if (inet_is_reserved_local_port(port))
+                       if (inet_is_local_reserved_port(net, port))
                                continue;
                        head = &hinfo->bhash[inet_bhashfn(net, port,
                                        hinfo->bhash_size)];
index 48f4244651125fb4ef7bcfda155137f287b07c77..c98cf141f4ed66aa57595423baf83b6fa92e19da 100644 (file)
@@ -120,7 +120,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;  /* usual time to live: 10 min
 static void inetpeer_gc_worker(struct work_struct *work)
 {
        struct inet_peer *p, *n, *c;
-       LIST_HEAD(list);
+       struct list_head list;
 
        spin_lock_bh(&gc_lock);
        list_replace_init(&gc_list, &list);
index be8abe73bb9f464a2e68679255acde3b708ce84b..3a83ce5efa80e3fc2c062ec08465840018159b14 100644 (file)
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
        return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-              !skb->local_df;
+               skb->ignore_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        return true;
 }
 
-static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
-{
-       unsigned int mtu;
-
-       if (skb->local_df || !skb_is_gso(skb))
-               return false;
-
-       mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
-
-       /* if seglen > mtu, do software segmentation for IP fragmentation on
-        * output.  DF bit cannot be set since ip_forward would have sent
-        * icmp error.
-        */
-       return skb_gso_network_seglen(skb) > mtu;
-}
-
-/* called if GSO skb needs to be fragmented on forward */
-static int ip_forward_finish_gso(struct sk_buff *skb)
-{
-       struct dst_entry *dst = skb_dst(skb);
-       netdev_features_t features;
-       struct sk_buff *segs;
-       int ret = 0;
-
-       features = netif_skb_dev_features(skb, dst->dev);
-       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-       if (IS_ERR(segs)) {
-               kfree_skb(skb);
-               return -ENOMEM;
-       }
-
-       consume_skb(skb);
-
-       do {
-               struct sk_buff *nskb = segs->next;
-               int err;
-
-               segs->next = NULL;
-               err = dst_output(segs);
-
-               if (err && ret == 0)
-                       ret = err;
-               segs = nskb;
-       } while (segs);
-
-       return ret;
-}
 
 static int ip_forward_finish(struct sk_buff *skb)
 {
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
-       if (ip_gso_exceeds_dst_mtu(skb))
-               return ip_forward_finish_gso(skb);
-
        return dst_output(skb);
 }
 
index c10a3ce5cbff0fc0bd0f23ac72188fd9e39fa83f..ed32313e307c43202a4710c6f5b74e14c19a4c20 100644 (file)
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (qp->user == IP_DEFRAG_AF_PACKET ||
-                   (qp->user == IP_DEFRAG_CONNTRACK_IN &&
-                    skb_rtable(head)->rt_type != RTN_LOCAL))
+                   ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
+                    (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
+                    (skb_rtable(head)->rt_type != RTN_LOCAL)))
                        goto out_rcu_unlock;
 
 
index 94213c89156511d86682b2c3e034f9e5b6ef5ccb..c5a557a06a31ae589a0938d2d1a12073ae3706df 100644 (file)
@@ -410,7 +410,7 @@ static int ipgre_open(struct net_device *dev)
                struct flowi4 fl4;
                struct rtable *rt;
 
-               rt = ip_route_output_gre(dev_net(dev), &fl4,
+               rt = ip_route_output_gre(t->net, &fl4,
                                         t->parms.iph.daddr,
                                         t->parms.iph.saddr,
                                         t->parms.o_key,
@@ -434,7 +434,7 @@ static int ipgre_close(struct net_device *dev)
 
        if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
                struct in_device *in_dev;
-               in_dev = inetdev_by_index(dev_net(dev), t->mlink);
+               in_dev = inetdev_by_index(t->net, t->mlink);
                if (in_dev)
                        ip_mc_dec_group(in_dev, t->parms.iph.daddr);
        }
@@ -478,7 +478,7 @@ static void __gre_tunnel_init(struct net_device *dev)
        dev->needed_headroom    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
 
-       dev->features           |= NETIF_F_NETNS_LOCAL | GRE_FEATURES;
+       dev->features           |= GRE_FEATURES;
        dev->hw_features        |= GRE_FEATURES;
 
        if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
index f4ab72e19af923536656e734996f4b9201684b58..5e7aecea05cd2afbd3e3e13f417e26687517b468 100644 (file)
@@ -364,7 +364,7 @@ int ip_options_compile(struct net *net,
                        }
                        if (optptr[2] <= optlen) {
                                unsigned char *timeptr = NULL;
-                               if (optptr[2]+3 > optptr[1]) {
+                               if (optptr[2]+3 > optlen) {
                                        pp_ptr = optptr + 2;
                                        goto error;
                                }
@@ -376,7 +376,7 @@ int ip_options_compile(struct net *net,
                                        optptr[2] += 4;
                                        break;
                                case IPOPT_TS_TSANDADDR:
-                                       if (optptr[2]+7 > optptr[1]) {
+                                       if (optptr[2]+7 > optlen) {
                                                pp_ptr = optptr + 2;
                                                goto error;
                                        }
@@ -390,7 +390,7 @@ int ip_options_compile(struct net *net,
                                        optptr[2] += 8;
                                        break;
                                case IPOPT_TS_PRESPEC:
-                                       if (optptr[2]+7 > optptr[1]) {
+                                       if (optptr[2]+7 > optlen) {
                                                pp_ptr = optptr + 2;
                                                goto error;
                                        }
index 1cbeba5edff90fa1ac891d4dd23cfb65464878a4..6e231ab58d65d1c93ddb8ee315b070c833a8573c 100644 (file)
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
+static int ip_finish_output_gso(struct sk_buff *skb)
+{
+       netdev_features_t features;
+       struct sk_buff *segs;
+       int ret = 0;
+
+       /* common case: locally created skb or seglen is <= mtu */
+       if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
+             skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+               return ip_finish_output2(skb);
+
+       /* Slowpath -  GSO segment length is exceeding the dst MTU.
+        *
+        * This can happen in two cases:
+        * 1) TCP GRO packet, DF bit not set
+        * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
+        * from host network stack.
+        */
+       features = netif_skb_features(skb);
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+       if (IS_ERR(segs)) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       consume_skb(skb);
+
+       do {
+               struct sk_buff *nskb = segs->next;
+               int err;
+
+               segs->next = NULL;
+               err = ip_fragment(segs, ip_finish_output2);
+
+               if (err && ret == 0)
+                       ret = err;
+               segs = nskb;
+       } while (segs);
+
+       return ret;
+}
+
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
                return dst_output(skb);
        }
 #endif
-       if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
+       if (skb_is_gso(skb))
+               return ip_finish_output_gso(skb);
+
+       if (skb->len > ip_skb_dst_mtu(skb))
                return ip_fragment(skb, ip_finish_output2);
-       else
-               return ip_finish_output2(skb);
+
+       return ip_finish_output2(skb);
 }
 
 int ip_mc_output(struct sock *sk, struct sk_buff *skb)
@@ -370,7 +415,7 @@ packet_routed:
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
-       if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
+       if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
@@ -456,7 +501,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        iph = ip_hdr(skb);
 
        mtu = ip_skb_dst_mtu(skb);
-       if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
+       if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
                     (IPCB(skb)->frag_max_size &&
                      IPCB(skb)->frag_max_size > mtu))) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
@@ -821,7 +866,7 @@ static int __ip_append_data(struct sock *sk,
 
        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-       maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
+       maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
 
        if (cork->length + length > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1144,7 +1189,7 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 
        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-       maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
+       maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
 
        if (cork->length + size > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1305,10 +1350,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
         * to fragment the frame generated here. No matter, what transforms
         * how transforms change size of the packet, it will come out.
         */
-       skb->local_df = ip_sk_local_df(sk);
+       skb->ignore_df = ip_sk_ignore_df(sk);
 
        /* DF bit is set when we want to see DF on outgoing frames.
-        * If local_df is set too, we still allow to fragment this frame
+        * If ignore_df is set too, we still allow to fragment this frame
         * locally. */
        if (inet->pmtudisc == IP_PMTUDISC_DO ||
            inet->pmtudisc == IP_PMTUDISC_PROBE ||
@@ -1501,7 +1546,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
                        daddr = replyopts.opt.opt.faddr;
        }
 
-       flowi4_init_output(&fl4, arg->bound_dev_if, 0,
+       flowi4_init_output(&fl4, arg->bound_dev_if,
+                          IP4_REPLY_MARK(net, skb->mark),
                           RT_TOS(arg->tos),
                           RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
                           ip_reply_arg_flowi_flags(arg),
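
The zero mark previously passed to flowi4_init_output() here is replaced by IP4_REPLY_MARK(), which ties stack-generated replies (TCP resets among them) to the fwmark_reflect sysctl added later in this series. The macro itself is not shown in these hunks; presumably it reduces to "reflect the incoming skb->mark when the sysctl is enabled, otherwise 0", roughly as sketched below (an assumption, with an EXAMPLE_ prefix to make that clear):

/* Assumed shape of the reply-mark helper relied on above. */
#define EXAMPLE_REPLY_MARK(net, mark)                                   \
        ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

/* Typical call site: route the reply with the reflected mark. */
static void example_reply_flow(struct net *net, const struct sk_buff *skb,
                               struct flowi4 *fl4)
{
        fl4->flowi4_mark = EXAMPLE_REPLY_MARK(net, skb->mark);
}
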
index fa5b7519765f10c61b8855c6646b79915384062f..86a00bd6684c94e4ec99a185262f8562dab7d5ad 100644 (file)
@@ -395,11 +395,10 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
                                          struct ip_tunnel_net *itn,
                                          struct ip_tunnel_parm *parms)
 {
-       struct ip_tunnel *nt, *fbt;
+       struct ip_tunnel *nt;
        struct net_device *dev;
 
        BUG_ON(!itn->fb_tunnel_dev);
-       fbt = netdev_priv(itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
                return ERR_CAST(dev);
@@ -442,6 +441,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }
 
+       skb_reset_network_header(skb);
+
        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
@@ -538,9 +539,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int err;
-       bool connected = true;
+       bool connected;
 
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+       connected = (tunnel->parms.iph.daddr != 0);
 
        dst = tnl_params->daddr;
        if (dst == 0) {
@@ -753,10 +755,8 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 
                if (!t && (cmd == SIOCADDTUNNEL)) {
                        t = ip_tunnel_create(net, itn, p);
-                       if (IS_ERR(t)) {
-                               err = PTR_ERR(t);
-                               break;
-                       }
+                       err = PTR_ERR_OR_ZERO(t);
+                       break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
@@ -880,6 +880,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+               itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();
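
The SIOCADDTUNNEL branch above now reports its result through PTR_ERR_OR_ZERO() and breaks out unconditionally, instead of open-coding the IS_ERR()/PTR_ERR() check. Conceptually the helper behaves like the sketch below (a simplified stand-in, not the kernel's definition):

/* An error pointer becomes its errno value, a valid pointer becomes 0. */
static inline int example_ptr_err_or_zero(const void *ptr)
{
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        return 0;
}
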
index afcee51b90ede30a846bbd7a5c16b6996d955889..13ef00f1e17b88943ddee9ae9ba52b7d0efe7832 100644 (file)
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 static int vti4_err(struct sk_buff *skb, u32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel;
        struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
        if (!tunnel)
                return -1;
 
+       mark = be32_to_cpu(tunnel->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
                return 0;
        }
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET);
        if (!x)
                return 0;
index 812b1835146255fe065264dd236e901fe6937578..4bc508f0db90287f09d6dcbaea1705ee8a9a1168 100644 (file)
@@ -486,4 +486,5 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ipip");
 MODULE_ALIAS_NETDEV("tunl0");
index d84dc8d4c916e7f50260a2b29df5d79f4adb4c1e..2bc9cc47f246ed4953c3ad7b683ea56a0be2c8e2 100644 (file)
@@ -484,7 +484,7 @@ static void reg_vif_setup(struct net_device *dev)
        dev->type               = ARPHRD_PIMREG;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
        dev->flags              = IFF_NOARP;
-       dev->netdev_ops         = &reg_vif_netdev_ops,
+       dev->netdev_ops         = &reg_vif_netdev_ops;
        dev->destructor         = free_netdev;
        dev->features           |= NETIF_F_NETNS_LOCAL;
 }
index 12e13bd82b5bba4fdd183d5ba2cda098a1c0c683..b8f6381c7d0b15f49973a3748937ea23325bee03 100644 (file)
@@ -22,7 +22,6 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-/* Returns new sk_buff, or NULL */
 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
        int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
        err = ip_defrag(skb, user);
        local_bh_enable();
 
-       if (!err)
+       if (!err) {
                ip_send_check(ip_hdr(skb));
+               skb->ignore_df = 1;
+       }
 
        return err;
 }
index 8210964a9f19bedf17d6f3266c1fd0775f3de144..044a0ddf6a791ace04fbb1802e64563bf3fc5518 100644 (file)
@@ -236,15 +236,15 @@ exit:
 static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
                                          kgid_t *high)
 {
-       kgid_t *data = net->ipv4.sysctl_ping_group_range;
+       kgid_t *data = net->ipv4.ping_group_range.range;
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 
index ad737fad6d8b82dec74fab1260015e539647271d..ae0af9386f7ccf4cabafbcc8aff6c37309e820ea 100644 (file)
@@ -345,15 +345,15 @@ static void icmp_put(struct seq_file *seq)
        for (i = 0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " Out%s", icmpmibmap[i].name);
        seq_printf(seq, "\nIcmp: %lu %lu %lu",
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS),
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS),
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS),
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
        for (i = 0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
                           atomic_long_read(ptr + icmpmibmap[i].index));
        seq_printf(seq, " %lu %lu",
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
        for (i = 0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
                           atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
@@ -379,7 +379,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
        BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
        for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
                seq_printf(seq, " %llu",
-                          snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+                          snmp_fold_field64(net->mib.ip_statistics,
                                             snmp4_ipstats_list[i].entry,
                                             offsetof(struct ipstats_mib, syncp)));
 
@@ -395,11 +395,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
                /* MaxConn field is signed, RFC 2012 */
                if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
                        seq_printf(seq, " %ld",
-                                  snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
+                                  snmp_fold_field(net->mib.tcp_statistics,
                                                   snmp4_tcp_list[i].entry));
                else
                        seq_printf(seq, " %lu",
-                                  snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
+                                  snmp_fold_field(net->mib.tcp_statistics,
                                                   snmp4_tcp_list[i].entry));
        }
 
@@ -410,7 +410,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nUdp:");
        for (i = 0; snmp4_udp_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          snmp_fold_field((void __percpu **)net->mib.udp_statistics,
+                          snmp_fold_field(net->mib.udp_statistics,
                                           snmp4_udp_list[i].entry));
 
        /* the UDP and UDP-Lite MIBs are the same */
@@ -421,7 +421,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nUdpLite:");
        for (i = 0; snmp4_udp_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
+                          snmp_fold_field(net->mib.udplite_statistics,
                                           snmp4_udp_list[i].entry));
 
        seq_putc(seq, '\n');
@@ -458,7 +458,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nTcpExt:");
        for (i = 0; snmp4_net_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          snmp_fold_field((void __percpu **)net->mib.net_statistics,
+                          snmp_fold_field(net->mib.net_statistics,
                                           snmp4_net_list[i].entry));
 
        seq_puts(seq, "\nIpExt:");
@@ -468,7 +468,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nIpExt:");
        for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
                seq_printf(seq, " %llu",
-                          snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+                          snmp_fold_field64(net->mib.ip_statistics,
                                             snmp4_ipextstats_list[i].entry,
                                             offsetof(struct ipstats_mib, syncp)));
 
index db1e0da871f40a2284d67bd48c0f21d772b923f3..4154eb76b0adc4736a4c1c266b084ab9bd458976 100644 (file)
@@ -993,6 +993,9 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
        struct flowi4 fl4;
        struct rtable *rt;
 
+       if (!mark)
+               mark = IP4_REPLY_MARK(net, skb->mark);
+
        __build_flow_key(&fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, flow_flags);
        rt = __ip_route_output_key(net, &fl4);
@@ -1010,6 +1013,10 @@ static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        struct rtable *rt;
 
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+       if (!fl4.flowi4_mark)
+               fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
+
        rt = __ip_route_output_key(sock_net(sk), &fl4);
        if (!IS_ERR(rt)) {
                __ip_rt_update_pmtu(rt, &fl4, mtu);
@@ -1519,7 +1526,7 @@ static int __mkroute_input(struct sk_buff *skb,
        struct in_device *out_dev;
        unsigned int flags = 0;
        bool do_cache;
-       u32 itag;
+       u32 itag = 0;
 
        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
index f2ed13c2125f7d34820c9e92a3080678f30f46fd..c86624b36a62ece1dd34bf39561d52e34f467bd3 100644 (file)
@@ -303,6 +303,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        ireq->ir_rmt_port       = th->source;
        ireq->ir_loc_addr       = ip_hdr(skb)->daddr;
        ireq->ir_rmt_addr       = ip_hdr(skb)->saddr;
+       ireq->ir_mark           = inet_request_mark(sk, skb);
        ireq->ecn_ok            = ecn_ok;
        ireq->snd_wscale        = tcp_opt.snd_wscale;
        ireq->sack_ok           = tcp_opt.sack_ok;
@@ -339,7 +340,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
         * hasn't changed since we received the original syn, but I see
         * no easy way to do this.
         */
-       flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
                           (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
index 44eba052b43d3ab49ba7630bcd82e73e5b094472..79a007c5255883f9d96011682c67ea8aac15a835 100644 (file)
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] = range[0];
-       net->ipv4.sysctl_local_ports.range[1] = range[1];
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] = range[0];
+       net->ipv4.ip_local_ports.range[1] = range[1];
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 size_t *lenp, loff_t *ppos)
 {
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
+               container_of(table->data, struct net, ipv4.ip_local_ports.range);
        int ret;
        int range[2];
        struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -436,13 +436,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "ip_local_reserved_ports",
-               .data           = NULL, /* initialized in sysctl_ipv4_init */
-               .maxlen         = 65536,
-               .mode           = 0644,
-               .proc_handler   = proc_do_large_bitmap,
-       },
        {
                .procname       = "igmp_max_memberships",
                .data           = &sysctl_igmp_max_memberships,
@@ -805,7 +798,7 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ping_group_range",
-               .data           = &init_net.ipv4.sysctl_ping_group_range,
+               .data           = &init_net.ipv4.ping_group_range.range,
                .maxlen         = sizeof(gid_t)*2,
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
@@ -819,11 +812,18 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ip_local_port_range",
-               .maxlen         = sizeof(init_net.ipv4.sysctl_local_ports.range),
-               .data           = &init_net.ipv4.sysctl_local_ports.range,
+               .maxlen         = sizeof(init_net.ipv4.ip_local_ports.range),
+               .data           = &init_net.ipv4.ip_local_ports.range,
                .mode           = 0644,
                .proc_handler   = ipv4_local_port_range,
        },
+       {
+               .procname       = "ip_local_reserved_ports",
+               .data           = &init_net.ipv4.sysctl_local_reserved_ports,
+               .maxlen         = 65536,
+               .mode           = 0644,
+               .proc_handler   = proc_do_large_bitmap,
+       },
        {
                .procname       = "ip_no_pmtu_disc",
                .data           = &init_net.ipv4.sysctl_ip_no_pmtu_disc,
@@ -838,6 +838,20 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "fwmark_reflect",
+               .data           = &init_net.ipv4.sysctl_fwmark_reflect,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "tcp_fwmark_accept",
+               .data           = &init_net.ipv4.sysctl_tcp_fwmark_accept,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        { }
 };
 
@@ -858,26 +872,18 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                        table[i].data += (void *)net - (void *)&init_net;
        }
 
-       /*
-        * Sane defaults - nobody may create ping sockets.
-        * Boot scripts should set this to distro-specific group.
-        */
-       net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
-       net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
-
-       /*
-        * Set defaults for local port range
-        */
-       seqlock_init(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] =  32768;
-       net->ipv4.sysctl_local_ports.range[1] =  61000;
-
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
        if (net->ipv4.ipv4_hdr == NULL)
                goto err_reg;
 
+       net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
+       if (!net->ipv4.sysctl_local_reserved_ports)
+               goto err_ports;
+
        return 0;
 
+err_ports:
+       unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
 err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
@@ -889,6 +895,7 @@ static __net_exit void ipv4_sysctl_exit_net(struct net *net)
 {
        struct ctl_table *table;
 
+       kfree(net->ipv4.sysctl_local_reserved_ports);
        table = net->ipv4.ipv4_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
        kfree(table);
@@ -902,16 +909,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
 static __init int sysctl_ipv4_init(void)
 {
        struct ctl_table_header *hdr;
-       struct ctl_table *i;
-
-       for (i = ipv4_table; i->procname; i++) {
-               if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
-                       i->data = sysctl_local_reserved_ports;
-                       break;
-               }
-       }
-       if (!i->procname)
-               return -EINVAL;
 
        hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
        if (hdr == NULL)
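
A note on the hunks above: the ip_local_reserved_ports bitmap moves from a single global table entry into per-namespace storage allocated in ipv4_sysctl_init_net(). A minimal sketch of what the per-netns lookup helper could look like, assuming it mirrors the inet_is_local_reserved_port(net, snum) caller visible in the udp.c hunk further down (the exact header change is not shown in this diff):

static inline bool inet_is_local_reserved_port(struct net *net, int port)
{
        /* No bitmap allocated for this namespace: nothing is reserved. */
        if (!net->ipv4.sysctl_local_reserved_ports)
                return false;
        return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}
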
index 821846fb0a7e211fc870b1afce5991bc3de28494..d5de69bc04f581ac589ae0f6fe7fcc3646b2cc3e 100644 (file)
@@ -140,13 +140,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
                ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                             u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index 2b9464c93b8859fcbef0f900f70a4bed2dc6e617..7b09d8b49fa51271cc0fa99a3fcda9f017c0f469 100644 (file)
@@ -276,26 +276,6 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
        return err;
 }
 
-/* RFC2861 Check whether we are limited by application or congestion window
- * This is the inverse of cwnd check in tcp_tso_should_defer
- */
-bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
-{
-       const struct tcp_sock *tp = tcp_sk(sk);
-       u32 left;
-
-       if (in_flight >= tp->snd_cwnd)
-               return true;
-
-       left = tp->snd_cwnd - in_flight;
-       if (sk_can_gso(sk) &&
-           left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-           left < tp->xmit_size_goal_segs)
-               return true;
-       return left <= tcp_max_tso_deferred_mss(tp);
-}
-EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
-
 /* Slow start is used when congestion window is no greater than the slow start
  * threshold. We base on RFC2581 and also handle stretch ACKs properly.
  * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
@@ -337,11 +317,11 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        /* In "safe" area, increase. */
index 8bf224516ba2a26a661d16f89aaee32301d09397..a9bd8a4828a9e5c2da275626c11c6c953a1b3dd6 100644 (file)
@@ -304,13 +304,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
                ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                             u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh) {
@@ -409,7 +408,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
                ratio += cnt;
 
-               ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
+               ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
        }
 
         /* Some calls are for duplicates without timestamps */
index f195d9316e55d3d7ef6dc5285606ea9de0f52a51..62e48cf84e602a005ab2ce61c058ca709702098a 100644 (file)
@@ -72,25 +72,224 @@ error:             kfree(ctx);
        return err;
 }
 
-/* Computes the fastopen cookie for the IP path.
- * The path is a 128 bits long (pad with zeros for IPv4).
- *
- * The caller must check foc->len to determine if a valid cookie
- * has been generated successfully.
-*/
-void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
-                            struct tcp_fastopen_cookie *foc)
+static bool __tcp_fastopen_cookie_gen(const void *path,
+                                     struct tcp_fastopen_cookie *foc)
 {
-       __be32 path[4] = { src, dst, 0, 0 };
        struct tcp_fastopen_context *ctx;
+       bool ok = false;
 
        tcp_fastopen_init_key_once(true);
 
        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
-               crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
+               crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
+               ok = true;
        }
        rcu_read_unlock();
+       return ok;
+}
+
+/* Generate the fastopen cookie by doing aes128 encryption on both
+ * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
+ * addresses. For the longer IPv6 addresses use CBC-MAC.
+ *
+ * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
+ */
+static bool tcp_fastopen_cookie_gen(struct request_sock *req,
+                                   struct sk_buff *syn,
+                                   struct tcp_fastopen_cookie *foc)
+{
+       if (req->rsk_ops->family == AF_INET) {
+               const struct iphdr *iph = ip_hdr(syn);
+
+               __be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
+               return __tcp_fastopen_cookie_gen(path, foc);
+       }
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (req->rsk_ops->family == AF_INET6) {
+               const struct ipv6hdr *ip6h = ipv6_hdr(syn);
+               struct tcp_fastopen_cookie tmp;
+
+               if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
+                       struct in6_addr *buf = (struct in6_addr *) tmp.val;
+                       int i = 4;
+
+                       for (i = 0; i < 4; i++)
+                               buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
+                       return __tcp_fastopen_cookie_gen(buf, foc);
+               }
+       }
+#endif
+       return false;
+}
+
+static bool tcp_fastopen_create_child(struct sock *sk,
+                                     struct sk_buff *skb,
+                                     struct dst_entry *dst,
+                                     struct request_sock *req)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+       struct sock *child;
+
+       req->num_retrans = 0;
+       req->num_timeout = 0;
+       req->sk = NULL;
+
+       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+       if (child == NULL)
+               return false;
+
+       spin_lock(&queue->fastopenq->lock);
+       queue->fastopenq->qlen++;
+       spin_unlock(&queue->fastopenq->lock);
+
+       /* Initialize the child socket. Have to fix some values to take
+        * into account the child is a Fast Open socket and is created
+        * only out of the bits carried in the SYN packet.
+        */
+       tp = tcp_sk(child);
+
+       tp->fastopen_rsk = req;
+       /* Do a hold on the listener sk so that if the listener is being
+        * closed, the child that has been accepted can live on and still
+        * access listen_lock.
+        */
+       sock_hold(sk);
+       tcp_rsk(req)->listener = sk;
+
+       /* RFC1323: The window in SYN & SYN/ACK segments is never
+        * scaled. So correct it appropriately.
+        */
+       tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+
+       /* Activate the retrans timer so that SYNACK can be retransmitted.
+        * The request socket is not added to the SYN table of the parent
+        * because it's been added to the accept queue directly.
+        */
+       inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
+                                 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+
+       /* Add the child socket directly into the accept queue */
+       inet_csk_reqsk_queue_add(sk, req, child);
+
+       /* Now finish processing the fastopen child socket. */
+       inet_csk(child)->icsk_af_ops->rebuild_header(child);
+       tcp_init_congestion_control(child);
+       tcp_mtup_init(child);
+       tcp_init_metrics(child);
+       tcp_init_buffer_space(child);
+
+       /* Queue the data carried in the SYN packet. We need to first
+        * bump skb's refcnt because the caller will attempt to free it.
+        *
+        * XXX (TFO) - we honor a zero-payload TFO request for now,
+        * (any reason not to?) but no need to queue the skb since
+        * there is no data. How about SYN+FIN?
+        */
+       if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
+               skb = skb_get(skb);
+               skb_dst_drop(skb);
+               __skb_pull(skb, tcp_hdr(skb)->doff * 4);
+               skb_set_owner_r(skb, child);
+               __skb_queue_tail(&child->sk_receive_queue, skb);
+               tp->syn_data_acked = 1;
+       }
+       tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       sk->sk_data_ready(sk);
+       bh_unlock_sock(child);
+       sock_put(child);
+       WARN_ON(req->sk == NULL);
+       return true;
+}
+EXPORT_SYMBOL(tcp_fastopen_create_child);
+
+static bool tcp_fastopen_queue_check(struct sock *sk)
+{
+       struct fastopen_queue *fastopenq;
+
+       /* Make sure the listener has enabled fastopen, and we don't
+        * exceed the max # of pending TFO requests allowed before trying
+        * to validate the cookie in order to avoid burning CPU cycles
+        * unnecessarily.
+        *
+        * XXX (TFO) - The implication of checking the max_qlen before
+        * processing a cookie request is that clients can't differentiate
+        * between qlen overflow causing Fast Open to be disabled
+        * temporarily vs a server not supporting Fast Open at all.
+        */
+       fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
+       if (fastopenq == NULL || fastopenq->max_qlen == 0)
+               return false;
+
+       if (fastopenq->qlen >= fastopenq->max_qlen) {
+               struct request_sock *req1;
+               spin_lock(&fastopenq->lock);
+               req1 = fastopenq->rskq_rst_head;
+               if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+                       spin_unlock(&fastopenq->lock);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+                       return false;
+               }
+               fastopenq->rskq_rst_head = req1->dl_next;
+               fastopenq->qlen--;
+               spin_unlock(&fastopenq->lock);
+               reqsk_free(req1);
+       }
+       return true;
+}
+
+/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
+ * may be updated and returned to the client in the SYN-ACK later. E.g., Fast Open
+ * cookie request (foc->len == 0).
+ */
+bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+                     struct request_sock *req,
+                     struct tcp_fastopen_cookie *foc,
+                     struct dst_entry *dst)
+{
+       struct tcp_fastopen_cookie valid_foc = { .len = -1 };
+       bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
+
+       if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
+             (syn_data || foc->len >= 0) &&
+             tcp_fastopen_queue_check(sk))) {
+               foc->len = -1;
+               return false;
+       }
+
+       if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
+               goto fastopen;
+
+       if (tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
+           foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
+           foc->len == valid_foc.len &&
+           !memcmp(foc->val, valid_foc.val, foc->len)) {
+               /* Cookie is valid. Create a (full) child socket to accept
+                * the data in SYN before returning a SYN-ACK to ack the
+                * data. If we fail to create the socket, fall back and
+                * ack the ISN only but include the same cookie.
+                *
+                * Note: Data-less SYN with valid cookie is allowed to send
+                * data in SYN_RECV state.
+                */
+fastopen:
+               if (tcp_fastopen_create_child(sk, skb, dst, req)) {
+                       foc->len = -1;
+                       NET_INC_STATS_BH(sock_net(sk),
+                                        LINUX_MIB_TCPFASTOPENPASSIVE);
+                       return true;
+               }
+       }
+
+       NET_INC_STATS_BH(sock_net(sk), foc->len ?
+                        LINUX_MIB_TCPFASTOPENPASSIVEFAIL :
+                        LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+       *foc = valid_foc;
+       return false;
 }
+EXPORT_SYMBOL(tcp_try_fastopen);
index 8b9e7bad77c09a0c07706b955c29b81d4e71a542..1c4908280d921fbe9a9e21e4fd55d1a87f2db706 100644 (file)
@@ -109,12 +109,12 @@ static void hstcp_init(struct sock *sk)
        tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct hstcp *ca = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index 4a194acfd9237f1aaadfe9f9831358f5c9037cf7..031361311a8b92b1f7ab1172fb00e3bbb0503129 100644 (file)
@@ -227,12 +227,12 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
        return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct htcp *ca = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index a15a799bf76888f3633b27cf57e8e05f30339601..d8f8f05a49516ec2d9213f10af77b82fbf32bff7 100644 (file)
@@ -87,8 +87,7 @@ static inline u32 hybla_fraction(u32 odds)
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                            u32 in_flight)
+static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct hybla *ca = inet_csk_ca(sk);
@@ -101,11 +100,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
                ca->minrtt_us = tp->srtt_us;
        }
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (!ca->hybla_en) {
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
                return;
        }
 
index 863d105e30150391e9ac3ce8545c0411e4d083ab..5999b3972e6449d616facb628d27116ab8876ac2 100644 (file)
@@ -255,8 +255,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                                   u32 in_flight)
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct illinois *ca = inet_csk_ca(sk);
@@ -265,7 +264,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
                update_params(sk);
 
        /* RFC2861 only increases cwnd if fully utilized */
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        /* In slow start */
index 6efed134ab63dc68ccd069acb2c4e4345cdc6a99..350b2072f0ab82776f196dfbe9c5b87b80a79046 100644 (file)
@@ -2938,10 +2938,11 @@ static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
                tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight);
+
+       icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
        tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -3364,7 +3365,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
        bool is_dupack = false;
-       u32 prior_in_flight;
        u32 prior_fackets;
        int prior_packets = tp->packets_out;
        const int prior_unsacked = tp->packets_out - tp->sacked_out;
@@ -3397,7 +3397,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                flag |= FLAG_SND_UNA_ADVANCED;
 
        prior_fackets = tp->fackets_out;
-       prior_in_flight = tcp_packets_in_flight(tp);
 
        /* ts_recent update must be made after we are sure that the packet
         * is in window.
@@ -3452,7 +3451,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        /* Advance cwnd if state allows */
        if (tcp_may_raise_cwnd(sk, flag))
-               tcp_cong_avoid(sk, ack, acked, prior_in_flight);
+               tcp_cong_avoid(sk, ack, acked);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
index 438f3b95143df0bffa01322c62b44c8fcd6d6b8b..77cccda1ad0c6dc62c8cb70d932eca2322304c81 100644 (file)
@@ -336,8 +336,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
-       struct request_sock *req;
-       __u32 seq;
+       struct request_sock *fastopen;
+       __u32 seq, snd_una;
        __u32 remaining;
        int err;
        struct net *net = dev_net(icmp_skb->dev);
@@ -378,12 +378,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
-       req = tp->fastopen_rsk;
        seq = ntohl(th->seq);
+       /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
+       fastopen = tp->fastopen_rsk;
+       snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
-           !between(seq, tp->snd_una, tp->snd_nxt) &&
-           (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
-               /* For a Fast Open socket, allow seq to be snt_isn. */
+           !between(seq, snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
@@ -426,11 +426,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
                        break;
                if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
-                   !icsk->icsk_backoff)
+                   !icsk->icsk_backoff || fastopen)
                        break;
 
-               /* XXX (TFO) - revisit the following logic for TFO */
-
                if (sock_owned_by_user(sk))
                        break;
 
@@ -462,14 +460,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                goto out;
        }
 
-       /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
-        * than following the TCP_SYN_RECV case and closing the socket,
-        * we ignore the ICMP error and keep trying like a fully established
-        * socket. Is this the right thing to do?
-        */
-       if (req && req->sk == NULL)
-               goto out;
-
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
@@ -502,10 +492,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                goto out;
 
        case TCP_SYN_SENT:
-       case TCP_SYN_RECV:  /* Cannot happen.
-                              It can f.e. if SYNs crossed,
-                              or Fast Open.
-                            */
+       case TCP_SYN_RECV:
+               /* Only in fast or simultaneous open. If a fast open socket is
+                * already accepted it is treated as a connected one below.
+                */
+               if (fastopen && fastopen->sk == NULL)
+                       break;
+
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
 
@@ -822,7 +815,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
-                             u16 queue_mapping)
+                             u16 queue_mapping,
+                             struct tcp_fastopen_cookie *foc)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
@@ -833,7 +827,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;
 
-       skb = tcp_make_synack(sk, dst, req, NULL);
+       skb = tcp_make_synack(sk, dst, req, foc);
 
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -852,7 +846,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 
 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
 {
-       int res = tcp_v4_send_synack(sk, NULL, req, 0);
+       int res = tcp_v4_send_synack(sk, NULL, req, 0, NULL);
 
        if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -1260,187 +1254,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 };
 #endif
 
-static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
-                              struct request_sock *req,
-                              struct tcp_fastopen_cookie *foc,
-                              struct tcp_fastopen_cookie *valid_foc)
-{
-       bool skip_cookie = false;
-       struct fastopen_queue *fastopenq;
-
-       if (likely(!fastopen_cookie_present(foc))) {
-               /* See include/net/tcp.h for the meaning of these knobs */
-               if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
-                   ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
-                   (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
-                       skip_cookie = true; /* no cookie to validate */
-               else
-                       return false;
-       }
-       fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-       /* A FO option is present; bump the counter. */
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
-
-       /* Make sure the listener has enabled fastopen, and we don't
-        * exceed the max # of pending TFO requests allowed before trying
-        * to validating the cookie in order to avoid burning CPU cycles
-        * unnecessarily.
-        *
-        * XXX (TFO) - The implication of checking the max_qlen before
-        * processing a cookie request is that clients can't differentiate
-        * between qlen overflow causing Fast Open to be disabled
-        * temporarily vs a server not supporting Fast Open at all.
-        */
-       if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
-           fastopenq == NULL || fastopenq->max_qlen == 0)
-               return false;
-
-       if (fastopenq->qlen >= fastopenq->max_qlen) {
-               struct request_sock *req1;
-               spin_lock(&fastopenq->lock);
-               req1 = fastopenq->rskq_rst_head;
-               if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
-                       spin_unlock(&fastopenq->lock);
-                       NET_INC_STATS_BH(sock_net(sk),
-                           LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
-                       /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
-                       foc->len = -1;
-                       return false;
-               }
-               fastopenq->rskq_rst_head = req1->dl_next;
-               fastopenq->qlen--;
-               spin_unlock(&fastopenq->lock);
-               reqsk_free(req1);
-       }
-       if (skip_cookie) {
-               tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               return true;
-       }
-
-       if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
-               if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
-                       tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
-                                               ip_hdr(skb)->daddr, valid_foc);
-                       if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
-                           memcmp(&foc->val[0], &valid_foc->val[0],
-                           TCP_FASTOPEN_COOKIE_SIZE) != 0)
-                               return false;
-                       valid_foc->len = -1;
-               }
-               /* Acknowledge the data received from the peer. */
-               tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               return true;
-       } else if (foc->len == 0) { /* Client requesting a cookie */
-               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
-                                       ip_hdr(skb)->daddr, valid_foc);
-               NET_INC_STATS_BH(sock_net(sk),
-                   LINUX_MIB_TCPFASTOPENCOOKIEREQD);
-       } else {
-               /* Client sent a cookie with wrong size. Treat it
-                * the same as invalid and return a valid one.
-                */
-               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
-                                       ip_hdr(skb)->daddr, valid_foc);
-       }
-       return false;
-}
-
-static int tcp_v4_conn_req_fastopen(struct sock *sk,
-                                   struct sk_buff *skb,
-                                   struct sk_buff *skb_synack,
-                                   struct request_sock *req)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
-       const struct inet_request_sock *ireq = inet_rsk(req);
-       struct sock *child;
-       int err;
-
-       req->num_retrans = 0;
-       req->num_timeout = 0;
-       req->sk = NULL;
-
-       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-       if (child == NULL) {
-               NET_INC_STATS_BH(sock_net(sk),
-                                LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
-               kfree_skb(skb_synack);
-               return -1;
-       }
-       err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
-                                   ireq->ir_rmt_addr, ireq->opt);
-       err = net_xmit_eval(err);
-       if (!err)
-               tcp_rsk(req)->snt_synack = tcp_time_stamp;
-       /* XXX (TFO) - is it ok to ignore error and continue? */
-
-       spin_lock(&queue->fastopenq->lock);
-       queue->fastopenq->qlen++;
-       spin_unlock(&queue->fastopenq->lock);
-
-       /* Initialize the child socket. Have to fix some values to take
-        * into account the child is a Fast Open socket and is created
-        * only out of the bits carried in the SYN packet.
-        */
-       tp = tcp_sk(child);
-
-       tp->fastopen_rsk = req;
-       /* Do a hold on the listner sk so that if the listener is being
-        * closed, the child that has been accepted can live on and still
-        * access listen_lock.
-        */
-       sock_hold(sk);
-       tcp_rsk(req)->listener = sk;
-
-       /* RFC1323: The window in SYN & SYN/ACK segments is never
-        * scaled. So correct it appropriately.
-        */
-       tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
-
-       /* Activate the retrans timer so that SYNACK can be retransmitted.
-        * The request socket is not added to the SYN table of the parent
-        * because it's been added to the accept queue directly.
-        */
-       inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
-           TCP_TIMEOUT_INIT, TCP_RTO_MAX);
-
-       /* Add the child socket directly into the accept queue */
-       inet_csk_reqsk_queue_add(sk, req, child);
-
-       /* Now finish processing the fastopen child socket. */
-       inet_csk(child)->icsk_af_ops->rebuild_header(child);
-       tcp_init_congestion_control(child);
-       tcp_mtup_init(child);
-       tcp_init_metrics(child);
-       tcp_init_buffer_space(child);
-
-       /* Queue the data carried in the SYN packet. We need to first
-        * bump skb's refcnt because the caller will attempt to free it.
-        *
-        * XXX (TFO) - we honor a zero-payload TFO request for now.
-        * (Any reason not to?)
-        */
-       if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
-               /* Don't queue the skb if there is no payload in SYN.
-                * XXX (TFO) - How about SYN+FIN?
-                */
-               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-       } else {
-               skb = skb_get(skb);
-               skb_dst_drop(skb);
-               __skb_pull(skb, tcp_hdr(skb)->doff * 4);
-               skb_set_owner_r(skb, child);
-               __skb_queue_tail(&child->sk_receive_queue, skb);
-               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               tp->syn_data_acked = 1;
-       }
-       sk->sk_data_ready(sk);
-       bh_unlock_sock(child);
-       sock_put(child);
-       WARN_ON(req->sk == NULL);
-       return 0;
-}
-
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_options_received tmp_opt;
@@ -1451,12 +1264,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        __be32 saddr = ip_hdr(skb)->saddr;
        __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
-       bool want_cookie = false;
+       bool want_cookie = false, fastopen;
        struct flowi4 fl4;
        struct tcp_fastopen_cookie foc = { .len = -1 };
-       struct tcp_fastopen_cookie valid_foc = { .len = -1 };
-       struct sk_buff *skb_synack;
-       int do_fastopen;
+       int err;
 
        /* Never answer to SYNs sent to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1507,6 +1318,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq->ir_rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
+       ireq->ir_mark = inet_request_mark(sk, skb);
 
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
@@ -1555,52 +1367,24 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 
                isn = tcp_v4_init_sequence(skb);
        }
-       tcp_rsk(req)->snt_isn = isn;
-
-       if (dst == NULL) {
-               dst = inet_csk_route_req(sk, &fl4, req);
-               if (dst == NULL)
-                       goto drop_and_free;
-       }
-       do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
-
-       /* We don't call tcp_v4_send_synack() directly because we need
-        * to make sure a child socket can be created successfully before
-        * sending back synack!
-        *
-        * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
-        * (or better yet, call tcp_send_synack() in the child context
-        * directly, but will have to fix bunch of other code first)
-        * after syn_recv_sock() except one will need to first fix the
-        * latter to remove its dependency on the current implementation
-        * of tcp_v4_send_synack()->tcp_select_initial_window().
-        */
-       skb_synack = tcp_make_synack(sk, dst, req,
-           fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
-
-       if (skb_synack) {
-               __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
-               skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
-       } else
+       if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                goto drop_and_free;
 
-       if (likely(!do_fastopen)) {
-               int err;
-               err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
-                    ireq->ir_rmt_addr, ireq->opt);
-               err = net_xmit_eval(err);
+       tcp_rsk(req)->snt_isn = isn;
+       tcp_rsk(req)->snt_synack = tcp_time_stamp;
+       tcp_openreq_init_rwin(req, sk, dst);
+       fastopen = !want_cookie &&
+                  tcp_try_fastopen(sk, skb, req, &foc, dst);
+       err = tcp_v4_send_synack(sk, dst, req,
+                                skb_get_queue_mapping(skb), &foc);
+       if (!fastopen) {
                if (err || want_cookie)
                        goto drop_and_free;
 
                tcp_rsk(req)->snt_synack = tcp_time_stamp;
                tcp_rsk(req)->listener = NULL;
-               /* Add the request_sock to the SYN table */
                inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-               if (fastopen_cookie_present(&foc) && foc.len != 0)
-                       NET_INC_STATS_BH(sock_net(sk),
-                           LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
-       } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
-               goto drop_and_free;
+       }
 
        return 0;
 
@@ -1744,28 +1528,6 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        return sk;
 }
 
-static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
-{
-       const struct iphdr *iph = ip_hdr(skb);
-
-       if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!tcp_v4_check(skb->len, iph->saddr,
-                                 iph->daddr, skb->csum)) {
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       return 0;
-               }
-       }
-
-       skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-                                      skb->len, IPPROTO_TCP, 0);
-
-       if (skb->len <= 76) {
-               return __skb_checksum_complete(skb);
-       }
-       return 0;
-}
-
-
 /* The socket must have its spinlock held when we get
  * here.
  *
@@ -1960,7 +1722,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
         * Packet length and doff are validated by header prediction,
         * provided case of th->doff==0 is eliminated.
         * So, we defer the checks. */
-       if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
+
+       if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
                goto csum_error;
 
        th = tcp_hdr(skb);
index c9aecae313276d134ef56d1385ac44266c200a51..1e70fa8fa793fdbca3ca782231f8b775ed96f205 100644 (file)
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
  * Will only call newReno CA when away from inference.
  * From TCP-LP's paper, this will be handled in additive increase.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                             u32 in_flight)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct lp *lp = inet_csk_ca(sk);
 
        if (!(lp->flag & LP_WITHIN_INF))
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
 }
 
 /**
index 05c1b155251d39d2e559d050f1e191f51f32b3e0..e68e0d4af6c97bcd0f8c983890ba555adbfb3a00 100644 (file)
@@ -362,6 +362,37 @@ void tcp_twsk_destructor(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
 
+void tcp_openreq_init_rwin(struct request_sock *req,
+                          struct sock *sk, struct dst_entry *dst)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+       struct tcp_sock *tp = tcp_sk(sk);
+       __u8 rcv_wscale;
+       int mss = dst_metric_advmss(dst);
+
+       if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
+               mss = tp->rx_opt.user_mss;
+
+       /* Set this up on the first call only */
+       req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+
+       /* limit the window selection if the user enforces a smaller rx buffer */
+       if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+           (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
+               req->window_clamp = tcp_full_space(sk);
+
+       /* tcp_full_space because it is guaranteed to be the first packet */
+       tcp_select_initial_window(tcp_full_space(sk),
+               mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+               &req->rcv_wnd,
+               &req->window_clamp,
+               ireq->wscale_ok,
+               &rcv_wscale,
+               dst_metric(dst, RTAX_INITRWND));
+       ireq->rcv_wscale = rcv_wscale;
+}
+EXPORT_SYMBOL(tcp_openreq_init_rwin);
+
 static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
                                         struct request_sock *req)
 {
index 20847de991eaaabf24dd9510f16d449edcd0148a..d463c35db33d8a8873bef9ebf51a9a0ab112fd51 100644 (file)
@@ -627,7 +627,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
                if (unlikely(!ireq->tstamp_ok))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }
-       if (foc != NULL) {
+       if (foc != NULL && foc->len >= 0) {
                u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
                need = (need + 3) & ~3U;  /* Align to 32 bits */
                if (remaining >= need) {
@@ -1402,12 +1402,21 @@ static void tcp_cwnd_application_limited(struct sock *sk)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-/* Congestion window validation. (RFC2861) */
-static void tcp_cwnd_validate(struct sock *sk)
+static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tp->packets_out >= tp->snd_cwnd) {
+       /* Track the maximum number of outstanding packets in each
+        * window, and remember whether we were cwnd-limited then.
+        */
+       if (!before(tp->snd_una, tp->max_packets_seq) ||
+           tp->packets_out > tp->max_packets_out) {
+               tp->max_packets_out = tp->packets_out;
+               tp->max_packets_seq = tp->snd_nxt;
+               tp->is_cwnd_limited = is_cwnd_limited;
+       }
+
+       if (tcp_is_cwnd_limited(sk)) {
                /* Network is fed fully. */
                tp->snd_cwnd_used = 0;
                tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -1659,7 +1668,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
  *
  * This algorithm is from John Heffner.
  */
-static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
+                                bool *is_cwnd_limited)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1723,6 +1733,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
        if (!tp->tso_deferred)
                tp->tso_deferred = 1 | (jiffies << 1);
 
+       if (cong_win < send_win && cong_win < skb->len)
+               *is_cwnd_limited = true;
+
        return true;
 
 send_now:
@@ -1883,6 +1896,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
        unsigned int tso_segs, sent_pkts;
        int cwnd_quota;
        int result;
+       bool is_cwnd_limited = false;
 
        sent_pkts = 0;
 
@@ -1907,6 +1921,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
                cwnd_quota = tcp_cwnd_test(tp, skb);
                if (!cwnd_quota) {
+                       is_cwnd_limited = true;
                        if (push_one == 2)
                                /* Force out a loss probe pkt. */
                                cwnd_quota = 1;
@@ -1923,7 +1938,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                                                      nonagle : TCP_NAGLE_PUSH))))
                                break;
                } else {
-                       if (!push_one && tcp_tso_should_defer(sk, skb))
+                       if (!push_one &&
+                           tcp_tso_should_defer(sk, skb, &is_cwnd_limited))
                                break;
                }
 
@@ -1990,7 +2006,7 @@ repair:
                /* Send one loss probe per tail loss episode. */
                if (push_one != 2)
                        tcp_schedule_loss_probe(sk);
-               tcp_cwnd_validate(sk);
+               tcp_cwnd_validate(sk, is_cwnd_limited);
                return false;
        }
        return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
@@ -2481,8 +2497,14 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
        }
 
-       if (likely(!err))
+       if (likely(!err)) {
                TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+               /* Update global TCP statistics. */
+               TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+               tp->total_retrans++;
+       }
        return err;
 }
 
@@ -2492,12 +2514,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        int err = __tcp_retransmit_skb(sk, skb);
 
        if (err == 0) {
-               /* Update global TCP statistics. */
-               TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
-               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-               tp->total_retrans++;
-
 #if FASTRETRANS_DEBUG > 0
                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                        net_dbg_ratelimited("retrans_out leaked\n");
@@ -2796,27 +2812,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
                mss = tp->rx_opt.user_mss;
 
-       if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
-               __u8 rcv_wscale;
-               /* Set this up on the first call only */
-               req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
-
-               /* limit the window selection if the user enforce a smaller rx buffer */
-               if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
-                   (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
-                       req->window_clamp = tcp_full_space(sk);
-
-               /* tcp_full_space because it is guaranteed to be the first packet */
-               tcp_select_initial_window(tcp_full_space(sk),
-                       mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
-                       &req->rcv_wnd,
-                       &req->window_clamp,
-                       ireq->wscale_ok,
-                       &rcv_wscale,
-                       dst_metric(dst, RTAX_INITRWND));
-               ireq->rcv_wscale = rcv_wscale;
-       }
-
        memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
        if (unlikely(req->cookie_ts))
index 0ac50836da4d42832f3aa35c9a4cebbf79f69981..8250949b88538db4e0f2d318050518855e74cf3d 100644 (file)
 #define TCP_SCALABLE_AI_CNT    50U
 #define TCP_SCALABLE_MD_SCALE  3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                                   u32 in_flight)
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index 48539fff6357a4e778c537b99bb9a7fd49eb43b3..9a5e05f27f4f7cce16cb285698cb0fc73ef49a91 100644 (file)
@@ -163,14 +163,13 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
        return  min(tp->snd_ssthresh, tp->snd_cwnd-1);
 }
 
-static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                                u32 in_flight)
+static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct vegas *vegas = inet_csk_ca(sk);
 
        if (!vegas->doing_vegas_now) {
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
                return;
        }
 
@@ -195,7 +194,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
                        /* We don't have enough RTT samples to do the Vegas
                         * calculation, so we'll behave like Reno.
                         */
-                       tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+                       tcp_reno_cong_avoid(sk, ack, acked);
                } else {
                        u32 rtt, diff;
                        u64 target_cwnd;
index 1b8e28fcd7e1cab3edd586db1b716742a4402fd7..27b9825753d15d89717d7b76e1c38fae4758cfbc 100644 (file)
@@ -114,19 +114,18 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
                tcp_veno_init(sk);
 }
 
-static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                               u32 in_flight)
+static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct veno *veno = inet_csk_ca(sk);
 
        if (!veno->doing_veno_now) {
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
                return;
        }
 
        /* limited by applications */
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        /* We do the Veno calculations only if we got enough rtt samples */
@@ -134,7 +133,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
                /* We don't have enough rtt samples to do the Veno
                 * calculation, so we'll behave like Reno.
                 */
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
        } else {
                u64 target_cwnd;
                u32 rtt;
index 5ede0e727945add71904a2d3c57d334e77e94baf..599b79b8eac07298a34cff43ae87e546bdb36847 100644 (file)
@@ -69,13 +69,12 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
        tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
 }
 
-static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                               u32 in_flight)
+static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct yeah *yeah = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index 4468e1adc094a1f6f12eb20cf7c79a9b9187826d..e07d52b8617a867aefedd3e4bb8dfb4bfd50154b 100644 (file)
@@ -246,7 +246,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                        do {
                                if (low <= snum && snum <= high &&
                                    !test_bit(snum >> udptable->log, bitmap) &&
-                                   !inet_is_reserved_local_port(snum))
+                                   !inet_is_local_reserved_port(net, snum))
                                        goto found;
                                snum += rand;
                        } while (snum != first);
@@ -785,7 +785,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
        if (is_udplite)                                  /*     UDP-Lite      */
                csum = udplite_csum(skb);
 
-       else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */
+       else if (sk->sk_no_check_tx) {   /* UDP csum disabled */
 
                skb->ip_summed = CHECKSUM_NONE;
                goto send;
@@ -1495,6 +1495,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
                        int ret;
 
+                       /* Verify checksum before giving to encap */
+                       if (udp_lib_checksum_complete(skb))
+                               goto csum_error;
+
                        ret = encap_rcv(sk, skb);
                        if (ret <= 0) {
                                UDP_INC_STATS_BH(sock_net(sk),
@@ -1672,7 +1676,6 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                                 int proto)
 {
-       const struct iphdr *iph;
        int err;
 
        UDP_SKB_CB(skb)->partial_cov = 0;
@@ -1684,22 +1687,8 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                        return err;
        }
 
-       iph = ip_hdr(skb);
-       if (uh->check == 0) {
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
-                                     proto, skb->csum))
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       }
-       if (!skb_csum_unnecessary(skb))
-               skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-                                              skb->len, proto, 0);
-       /* Probably, we should checksum udp header (it should be in cache
-        * in any case) and data in tiny packets (< rx copybreak).
-        */
-
-       return 0;
+       return skb_checksum_init_zero_check(skb, proto, uh->check,
+                                           inet_compute_pseudo);
 }
 
 /*
@@ -1886,7 +1875,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
        unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
        unsigned int slot2 = hash2 & udp_table.mask;
        struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
-       INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr)
+       INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
        const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
 
        rcu_read_lock();
@@ -1979,7 +1968,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
                       int (*push_pending_frames)(struct sock *))
 {
        struct udp_sock *up = udp_sk(sk);
-       int val;
+       int val, valbool;
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);
 
@@ -1989,6 +1978,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
        if (get_user(val, (int __user *)optval))
                return -EFAULT;
 
+       valbool = val ? 1 : 0;
+
        switch (optname) {
        case UDP_CORK:
                if (val != 0) {
@@ -2018,6 +2009,14 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
                }
                break;
 
+       case UDP_NO_CHECK6_TX:
+               up->no_check6_tx = valbool;
+               break;
+
+       case UDP_NO_CHECK6_RX:
+               up->no_check6_rx = valbool;
+               break;
+
        /*
         *      UDP-Lite's partial checksum coverage (RFC 3828).
         */
@@ -2100,6 +2099,14 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                val = up->encap_type;
                break;
 
+       case UDP_NO_CHECK6_TX:
+               val = up->no_check6_tx;
+               break;
+
+       case UDP_NO_CHECK6_RX:
+               val = up->no_check6_rx;
+               break;
+
        /* The following two cannot be changed on UDP sockets, the return is
         * always 0 (which corresponds to the full checksum coverage of UDP). */
        case UDPLITE_SEND_CSCOV:
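
The UDP_NO_CHECK6_TX / UDP_NO_CHECK6_RX cases added above are plain integer socket options handled at the UDP level. A hedged userspace sketch of how an application might opt an IPv6 UDP socket out of checksums, assuming the option values are exported through <linux/udp.h>:

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/udp.h>

/* Send this socket's IPv6/UDP datagrams with a zero checksum and
 * accept zero-checksum datagrams on receive.
 */
static int udp6_relax_checksums(int fd)
{
        int one = 1;

        if (setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_TX, &one, sizeof(one)) < 0)
                return -1;
        return setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX, &one, sizeof(one));
}
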
index 2c46acd4cc3636e33fb240a963367ec629b482b5..3b3efbda48e13941b35388238508706844c043aa 100644 (file)
@@ -70,7 +70,6 @@ static struct inet_protosw udplite4_protosw = {
        .protocol       =  IPPROTO_UDPLITE,
        .prot           =  &udplite_prot,
        .ops            =  &inet_dgram_ops,
-       .no_check       =  0,           /* must checksum (RFC 3828) */
        .flags          =  INET_PROTOSW_PERMANENT,
 };
 
index 40e701f2e1e0324af6f0af781ac6715866ad88d3..d5f6bd9a210ab93ed27177ecffc9499259dbe999 100644 (file)
@@ -25,7 +25,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
        if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
                goto out;
 
-       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
+       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
                goto out;
 
        mtu = dst_mtu(skb_dst(skb));
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
-
-       skb->protocol = htons(ETH_P_IP);
+       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
 
        return x->outer_mode->output2(x, skb);
 }
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 
 int xfrm4_output_finish(struct sk_buff *skb)
 {
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+       skb->protocol = htons(ETH_P_IP);
+
+#ifdef CONFIG_NETFILTER
+       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+#endif
+
+       return xfrm_output(skb);
+}
+
+static int __xfrm4_output(struct sk_buff *skb)
+{
+       struct xfrm_state *x = skb_dst(skb)->xfrm;
+
 #ifdef CONFIG_NETFILTER
-       if (!skb_dst(skb)->xfrm) {
+       if (!x) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
-
-       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IP);
-       return xfrm_output(skb);
+       return x->outer_mode->afinfo->output_finish(skb);
 }
 
 int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
-
        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
-                           NULL, dst->dev,
-                           x->outer_mode->afinfo->output_finish,
+                           NULL, skb_dst(skb)->dev, __xfrm4_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
index 7f7b243e8139defccf14165971a92a7261c29b71..a2ce0101eaac846b1e36e69a5a89c804ca247c44 100644 (file)
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
 
-       for_each_protocol_rcu(*proto_handlers(protocol), handler)
+       if (!head)
+               return 0;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
                        return ret;
 
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
 {
        int ret;
        struct xfrm4_protocol *handler;
+       struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
 
        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
 
-       for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+       if (!head)
+               goto out;
+
+       for_each_protocol_rcu(*head, handler)
                if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
                        return ret;
 
+out:
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
        kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
        struct xfrm4_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm4_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
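
All four hunks add the same guard: proto_handlers() and netproto() only have tables for the ESP, AH and IPCOMP protocols and return NULL for anything else, so both the receive callbacks and the register/deregister paths must check before dereferencing. A hypothetical stand-alone C sketch of that lookup-then-guard dispatch (the struct, helper names and the flat list are illustrative, not the kernel's RCU-protected originals):

#include <stddef.h>

struct handler {
        int (*cb)(int err);
        struct handler *next;
};

static struct handler *esp_handlers, *ah_handlers, *comp_handlers;

static struct handler **proto_handlers(unsigned char protocol)
{
        switch (protocol) {
        case 50:  return &esp_handlers;         /* IPPROTO_ESP */
        case 51:  return &ah_handlers;          /* IPPROTO_AH */
        case 108: return &comp_handlers;        /* IPPROTO_COMP */
        }
        return NULL;                            /* unsupported protocol */
}

int rcv_cb(unsigned char protocol, int err)
{
        struct handler **head = proto_handlers(protocol);
        struct handler *h;

        if (!head)              /* the added guard: no table, nothing to run */
                return 0;

        for (h = *head; h; h = h->next) {
                int ret = h->cb(err);

                if (ret <= 0)
                        return ret;
        }
        return 0;
}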
index fc203db2211a56735a824e1c41130a9503f4ae7f..5667b3003af9b51779ff322717e999282113c4b7 100644 (file)
@@ -275,19 +275,14 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
 {
        int i;
 
-       if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
-                         sizeof(struct ipstats_mib),
-                         __alignof__(struct ipstats_mib)) < 0)
+       idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
+       if (!idev->stats.ipv6)
                goto err_ip;
 
        for_each_possible_cpu(i) {
                struct ipstats_mib *addrconf_stats;
-               addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i);
+               addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
                u64_stats_init(&addrconf_stats->syncp);
-#if SNMP_ARRAY_SZ == 2
-               addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i);
-               u64_stats_init(&addrconf_stats->syncp);
-#endif
        }
 
 
@@ -305,7 +300,7 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
 err_icmpmsg:
        kfree(idev->stats.icmpv6dev);
 err_icmp:
-       snmp_mib_free((void __percpu **)idev->stats.ipv6);
+       free_percpu(idev->stats.ipv6);
 err_ip:
        return -ENOMEM;
 }
@@ -2818,18 +2813,6 @@ static void addrconf_gre_config(struct net_device *dev)
 }
 #endif
 
-static inline int
-ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
-{
-       struct in6_addr lladdr;
-
-       if (!ipv6_get_lladdr(link_dev, &lladdr, IFA_F_TENTATIVE)) {
-               addrconf_add_linklocal(idev, &lladdr);
-               return 0;
-       }
-       return -1;
-}
-
 static int addrconf_notify(struct notifier_block *this, unsigned long event,
                           void *ptr)
 {
@@ -4375,7 +4358,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
        memset(&stats[items], 0, pad);
 }
 
-static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib,
+static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
                                      int items, int bytes, size_t syncpoff)
 {
        int i;
@@ -4395,7 +4378,7 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
 {
        switch (attrtype) {
        case IFLA_INET6_STATS:
-               __snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6,
+               __snmp6_fill_stats64(stats, idev->stats.ipv6,
                                     IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
                break;
        case IFLA_INET6_ICMP6STATS:
index 4c11cbcf83089152052b60072dbe11d147d10803..e6960457f62582c4ca41c674cc531d64ca76b038 100644 (file)
@@ -123,7 +123,7 @@ static void snmp6_free_dev(struct inet6_dev *idev)
 {
        kfree(idev->stats.icmpv6msgdev);
        kfree(idev->stats.icmpv6dev);
-       snmp_mib_free((void __percpu **)idev->stats.ipv6);
+       free_percpu(idev->stats.ipv6);
 }
 
 /* Nobody refers to this device, we may destroy it. */
index d935889f1008ae93ff1efe52badeedf41352f257..7cb4392690dd614b1672ad51cc57bbe36218e34c 100644 (file)
@@ -106,7 +106,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
        struct inet_protosw *answer;
        struct proto *answer_prot;
        unsigned char answer_flags;
-       char answer_no_check;
        int try_loading_module = 0;
        int err;
 
@@ -162,7 +161,6 @@ lookup_protocol:
 
        sock->ops = answer->ops;
        answer_prot = answer->prot;
-       answer_no_check = answer->no_check;
        answer_flags = answer->flags;
        rcu_read_unlock();
 
@@ -176,7 +174,6 @@ lookup_protocol:
        sock_init_data(sock, sk);
 
        err = 0;
-       sk->sk_no_check = answer_no_check;
        if (INET_PROTOSW_REUSE & answer_flags)
                sk->sk_reuse = SK_CAN_REUSE;
 
@@ -715,33 +712,25 @@ static int __net_init ipv6_init_mibs(struct net *net)
 {
        int i;
 
-       if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
-                         sizeof(struct udp_mib),
-                         __alignof__(struct udp_mib)) < 0)
+       net->mib.udp_stats_in6 = alloc_percpu(struct udp_mib);
+       if (!net->mib.udp_stats_in6)
                return -ENOMEM;
-       if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
-                         sizeof(struct udp_mib),
-                         __alignof__(struct udp_mib)) < 0)
+       net->mib.udplite_stats_in6 = alloc_percpu(struct udp_mib);
+       if (!net->mib.udplite_stats_in6)
                goto err_udplite_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
-                         sizeof(struct ipstats_mib),
-                         __alignof__(struct ipstats_mib)) < 0)
+       net->mib.ipv6_statistics = alloc_percpu(struct ipstats_mib);
+       if (!net->mib.ipv6_statistics)
                goto err_ip_mib;
 
        for_each_possible_cpu(i) {
                struct ipstats_mib *af_inet6_stats;
-               af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i);
+               af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics, i);
                u64_stats_init(&af_inet6_stats->syncp);
-#if SNMP_ARRAY_SZ == 2
-               af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i);
-               u64_stats_init(&af_inet6_stats->syncp);
-#endif
        }
 
 
-       if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
-                         sizeof(struct icmpv6_mib),
-                         __alignof__(struct icmpv6_mib)) < 0)
+       net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib);
+       if (!net->mib.icmpv6_statistics)
                goto err_icmp_mib;
        net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
                                                GFP_KERNEL);
@@ -750,22 +739,22 @@ static int __net_init ipv6_init_mibs(struct net *net)
        return 0;
 
 err_icmpmsg_mib:
-       snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
+       free_percpu(net->mib.icmpv6_statistics);
 err_icmp_mib:
-       snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
+       free_percpu(net->mib.ipv6_statistics);
 err_ip_mib:
-       snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
+       free_percpu(net->mib.udplite_stats_in6);
 err_udplite_mib:
-       snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
+       free_percpu(net->mib.udp_stats_in6);
        return -ENOMEM;
 }
 
 static void ipv6_cleanup_mibs(struct net *net)
 {
-       snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
-       snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
-       snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
-       snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
+       free_percpu(net->mib.udp_stats_in6);
+       free_percpu(net->mib.udplite_stats_in6);
+       free_percpu(net->mib.ipv6_statistics);
+       free_percpu(net->mib.icmpv6_statistics);
        kfree(net->mib.icmpv6msg_statistics);
 }
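
Together with the addrconf.c hunks above, this finishes converting the IPv6 MIBs from the old snmp_mib_init()/snmp_mib_free() two-pointer arrays to a single alloc_percpu() allocation per MIB. A minimal kernel-style sketch of the resulting pattern (demo_mib and the helpers are illustrative; alloc_percpu(), per_cpu_ptr(), for_each_possible_cpu(), u64_stats_init() and free_percpu() are the APIs the hunks actually use):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_mib {
        u64 packets;
        struct u64_stats_sync syncp;
};

static struct demo_mib __percpu *demo_stats;

static int demo_stats_alloc(void)
{
        int cpu;

        demo_stats = alloc_percpu(struct demo_mib);
        if (!demo_stats)
                return -ENOMEM;

        /* each per-CPU copy carries its own seqcount, initialised just as
         * the hunks above do for ipstats_mib */
        for_each_possible_cpu(cpu)
                u64_stats_init(&per_cpu_ptr(demo_stats, cpu)->syncp);
        return 0;
}

static void demo_stats_free(void)
{
        free_percpu(demo_stats);
}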
 
index 7b326529e6a2cba57695697cfff436357c347b2c..f6c84a6eb2389c55f4abd55fefbd073c73743a2a 100644 (file)
@@ -400,6 +400,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        int len;
        int hlimit;
        int err = 0;
+       u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
        if ((u8 *)hdr < skb->head ||
            (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
@@ -466,6 +467,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        fl6.daddr = hdr->saddr;
        if (saddr)
                fl6.saddr = *saddr;
+       fl6.flowi6_mark = mark;
        fl6.flowi6_oif = iif;
        fl6.fl6_icmp_type = type;
        fl6.fl6_icmp_code = code;
@@ -474,6 +476,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        sk = icmpv6_xmit_lock(net);
        if (sk == NULL)
                return;
+       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -493,12 +496,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        if (IS_ERR(dst))
                goto out;
 
-       if (ipv6_addr_is_multicast(&fl6.daddr))
-               hlimit = np->mcast_hops;
-       else
-               hlimit = np->hop_limit;
-       if (hlimit < 0)
-               hlimit = ip6_dst_hoplimit(dst);
+       hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        msg.skb = skb;
        msg.offset = skb_network_offset(skb);
@@ -556,6 +554,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        int err = 0;
        int hlimit;
        u8 tclass;
+       u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
        saddr = &ipv6_hdr(skb)->daddr;
 
@@ -574,11 +573,13 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
                fl6.saddr = *saddr;
        fl6.flowi6_oif = skb->dev->ifindex;
        fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
+       fl6.flowi6_mark = mark;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
        sk = icmpv6_xmit_lock(net);
        if (sk == NULL)
                return;
+       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -593,12 +594,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        if (IS_ERR(dst))
                goto out;
 
-       if (ipv6_addr_is_multicast(&fl6.daddr))
-               hlimit = np->mcast_hops;
-       else
-               hlimit = np->hop_limit;
-       if (hlimit < 0)
-               hlimit = ip6_dst_hoplimit(dst);
+       hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        idev = __in6_dev_get(skb->dev);
 
@@ -702,22 +698,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
        saddr = &ipv6_hdr(skb)->saddr;
        daddr = &ipv6_hdr(skb)->daddr;
 
-       /* Perform checksum. */
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
-                                    skb->csum))
-                       break;
-               /* fall through */
-       case CHECKSUM_NONE:
-               skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
-                                            IPPROTO_ICMPV6, 0));
-               if (__skb_checksum_complete(skb)) {
-                       LIMIT_NETDEBUG(KERN_DEBUG
-                                      "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
-                                      saddr, daddr);
-                       goto csum_error;
-               }
+       if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
+               LIMIT_NETDEBUG(KERN_DEBUG
+                              "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
+                              saddr, daddr);
+               goto csum_error;
        }
 
        if (!pskb_pull(skb, sizeof(*hdr)))
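
The open-coded CHECKSUM_COMPLETE/CHECKSUM_NONE switch becomes a single skb_checksum_validate() call, with ip6_compute_pseudo() supplying the pseudo-header sum. For reference, a stand-alone user-space sketch of the value being validated, following RFC 2460 section 8.1 (the helper names are made up; only the pseudo-header layout is normative):

#include <arpa/inet.h>          /* htonl() */
#include <netinet/in.h>         /* struct in6_addr, IPPROTO_ICMPV6 */
#include <stddef.h>
#include <stdint.h>

static uint32_t sum16(const void *data, size_t len, uint32_t sum)
{
        const uint8_t *p = data;

        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];
                p += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)p[0] << 8;
        return sum;
}

/* One's-complement checksum over the RFC 2460 pseudo-header plus the ICMPv6
 * message (whose own checksum field must be zeroed first). Returns the value
 * in host byte order; store it into the packet with htons(). */
uint16_t icmp6_checksum(const struct in6_addr *saddr,
                        const struct in6_addr *daddr,
                        const uint8_t *icmp6, size_t len)
{
        uint8_t tail[4] = { 0, 0, 0, IPPROTO_ICMPV6 }; /* 3 zero bytes + NH */
        uint32_t plen = htonl((uint32_t)len);          /* upper-layer length */
        uint32_t sum = 0;

        sum = sum16(saddr, sizeof(*saddr), sum);
        sum = sum16(daddr, sizeof(*daddr), sum);
        sum = sum16(&plen, sizeof(plen), sum);
        sum = sum16(tail, sizeof(tail), sum);
        sum = sum16(icmp6, len, sum);

        while (sum >> 16)                              /* fold the carries */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}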
index d4ade34ab37566d8cca9e164f5fde5fb5a762fe6..a245e5ddffbd0450968c44de7d3fcd8a1dd055cf 100644 (file)
@@ -81,7 +81,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
        final_p = fl6_update_dst(fl6, np->opt, &final);
        fl6->saddr = ireq->ir_v6_loc_addr;
        fl6->flowi6_oif = ireq->ir_iif;
-       fl6->flowi6_mark = sk->sk_mark;
+       fl6->flowi6_mark = ireq->ir_mark;
        fl6->fl6_dport = ireq->ir_rmt_port;
        fl6->fl6_sport = htons(ireq->ir_num);
        security_req_classify_flow(req, flowi6_to_flowi(fl6));
index ee7a97f510cbd9f94fa24eafa43ddba201c75f3e..da26224a599323250303d0c07d42254b536b5d82 100644 (file)
@@ -75,25 +75,12 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
                        return err;
        }
 
-       if (uh->check == 0) {
-               /* RFC 2460 section 8.1 says that we SHOULD log
-                  this error. Well, it is reasonable.
-                */
-               LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
-                              &ipv6_hdr(skb)->saddr, ntohs(uh->source),
-                              &ipv6_hdr(skb)->daddr, ntohs(uh->dest));
-               return 1;
-       }
-       if (skb->ip_summed == CHECKSUM_COMPLETE &&
-           !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
-                            skb->len, proto, skb->csum))
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-       if (!skb_csum_unnecessary(skb))
-               skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                        &ipv6_hdr(skb)->daddr,
-                                                        skb->len, proto, 0));
-
-       return 0;
+       /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
+        * we accept a checksum of zero here. When we find the socket
+        * for the UDP packet we'll check if that socket allows zero checksum
+        * for IPv6 (set by socket option).
+        */
+       return skb_checksum_init_zero_check(skb, proto, uh->check,
+                                          ip6_compute_pseudo);
 }
 EXPORT_SYMBOL(udp6_csum_init);
index 34e0ded5c14b028ebbb1bb03c1b30e8f25f98811..cb4459bd1d294d1901cc03b9c203fbcbf761f53f 100644 (file)
@@ -71,8 +71,7 @@ static DEFINE_RWLOCK(fib6_walker_lock);
 #define FWS_INIT FWS_L
 #endif
 
-static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
-                             struct rt6_info *rt);
+static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
 static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
 static int fib6_walk(struct fib6_walker_t *w);
@@ -941,7 +940,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
        if (!err) {
                fib6_start_gc(info->nl_net, rt);
                if (!(rt->rt6i_flags & RTF_CACHE))
-                       fib6_prune_clones(info->nl_net, pn, rt);
+                       fib6_prune_clones(info->nl_net, pn);
        }
 
 out:
@@ -1375,7 +1374,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
                        pn = pn->parent;
                }
 #endif
-               fib6_prune_clones(info->nl_net, pn, rt);
+               fib6_prune_clones(info->nl_net, pn);
        }
 
        /*
@@ -1459,7 +1458,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
 
                                if (w->skip) {
                                        w->skip--;
-                                       continue;
+                                       goto skip;
                                }
 
                                err = w->func(w);
@@ -1469,6 +1468,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
                                w->count++;
                                continue;
                        }
+skip:
                        w->state = FWS_U;
                case FWS_U:
                        if (fn == w->root)
@@ -1600,10 +1600,9 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg)
        return 0;
 }
 
-static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
-                             struct rt6_info *rt)
+static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
 {
-       fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt);
+       fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL);
 }
 
 /*
index 0961b5ef866d04803cf91243aec32bb9e2ea8cf7..4052694c6f2cb196b54ef7d5235d61d74e4ea69c 100644 (file)
@@ -26,7 +26,6 @@
 #include <net/sock.h>
 
 #include <net/ipv6.h>
-#include <net/addrconf.h>
 #include <net/rawv6.h>
 #include <net/transp_v6.h>
 
index 9d921462b57f293f9f49f6ec78936c84a3f756a0..3873181ed85614a28f9857d7d53acf2be9d2b9fb 100644 (file)
@@ -72,6 +72,7 @@ struct ip6gre_net {
 };
 
 static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
+static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
 static int ip6gre_tunnel_init(struct net_device *dev);
 static void ip6gre_tunnel_setup(struct net_device *dev);
 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -353,10 +354,10 @@ failed_free:
 
 static void ip6gre_tunnel_uninit(struct net_device *dev)
 {
-       struct net *net = dev_net(dev);
-       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
 
-       ip6gre_tunnel_unlink(ign, netdev_priv(dev));
+       ip6gre_tunnel_unlink(ign, t);
        dev_put(dev);
 }
 
@@ -467,17 +468,7 @@ static int ip6gre_rcv(struct sk_buff *skb)
                        goto drop;
 
                if (flags&GRE_CSUM) {
-                       switch (skb->ip_summed) {
-                       case CHECKSUM_COMPLETE:
-                               csum = csum_fold(skb->csum);
-                               if (!csum)
-                                       break;
-                               /* fall through */
-                       case CHECKSUM_NONE:
-                               skb->csum = 0;
-                               csum = __skb_checksum_complete(skb);
-                               skb->ip_summed = CHECKSUM_COMPLETE;
-                       }
+                       csum = skb_checksum_simple_validate(skb);
                        offset += 4;
                }
                if (flags&GRE_KEY) {
@@ -611,8 +602,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
                         int encap_limit,
                         __u32 *pmtu)
 {
-       struct net *net = dev_net(dev);
        struct ip6_tnl *tunnel = netdev_priv(dev);
+       struct net *net = tunnel->net;
        struct net_device *tdev;    /* Device to other host */
        struct ipv6hdr  *ipv6h;     /* Our new IP header */
        unsigned int max_headroom = 0; /* The extra header space needed */
@@ -979,7 +970,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                int strict = (ipv6_addr_type(&p->raddr) &
                              (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
 
-               struct rt6_info *rt = rt6_lookup(dev_net(dev),
+               struct rt6_info *rt = rt6_lookup(t->net,
                                                 &p->raddr, &p->laddr,
                                                 p->link, strict);
 
@@ -1063,13 +1054,12 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
        int err = 0;
        struct ip6_tnl_parm2 p;
        struct __ip6_tnl_parm p1;
-       struct ip6_tnl *t;
-       struct net *net = dev_net(dev);
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = t->net;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 
        switch (cmd) {
        case SIOCGETTUNNEL:
-               t = NULL;
                if (dev == ign->fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
                                err = -EFAULT;
@@ -1077,9 +1067,9 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
                        }
                        ip6gre_tnl_parm_from_user(&p1, &p);
                        t = ip6gre_tunnel_locate(net, &p1, 0);
+                       if (t == NULL)
+                               t = netdev_priv(dev);
                }
-               if (t == NULL)
-                       t = netdev_priv(dev);
                memset(&p, 0, sizeof(p));
                ip6gre_tnl_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
@@ -1242,7 +1232,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
        dev->flags |= IFF_NOARP;
        dev->iflink = 0;
        dev->addr_len = sizeof(struct in6_addr);
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
@@ -1297,11 +1286,17 @@ static struct inet6_protocol ip6gre_protocol __read_mostly = {
        .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
-       struct list_head *head)
+static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
 {
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       struct net_device *dev, *aux;
        int prio;
 
+       for_each_netdev_safe(net, dev, aux)
+               if (dev->rtnl_link_ops == &ip6gre_link_ops ||
+                   dev->rtnl_link_ops == &ip6gre_tap_ops)
+                       unregister_netdevice_queue(dev, head);
+
        for (prio = 0; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
@@ -1310,7 +1305,12 @@ static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
                        t = rtnl_dereference(ign->tunnels[prio][h]);
 
                        while (t != NULL) {
-                               unregister_netdevice_queue(t->dev, head);
+                               /* If dev is in the same netns, it has already
+                                * been added to the list by the previous loop.
+                                */
+                               if (!net_eq(dev_net(t->dev), net))
+                                       unregister_netdevice_queue(t->dev,
+                                                                  head);
                                t = rtnl_dereference(t->next);
                        }
                }
@@ -1329,6 +1329,11 @@ static int __net_init ip6gre_init_net(struct net *net)
                goto err_alloc_dev;
        }
        dev_net_set(ign->fb_tunnel_dev, net);
+       /* FB netdevice is special: we have one, and only one per netns.
+        * Allowing to move it to another netns is clearly unsafe.
+        */
+       ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+
 
        ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
        ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
@@ -1349,12 +1354,10 @@ err_alloc_dev:
 
 static void __net_exit ip6gre_exit_net(struct net *net)
 {
-       struct ip6gre_net *ign;
        LIST_HEAD(list);
 
-       ign = net_generic(net, ip6gre_net_id);
        rtnl_lock();
-       ip6gre_destroy_tunnels(ign, &list);
+       ip6gre_destroy_tunnels(net, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
 }
@@ -1531,15 +1534,14 @@ out:
 static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct nlattr *data[])
 {
-       struct ip6_tnl *t, *nt;
-       struct net *net = dev_net(dev);
+       struct ip6_tnl *t, *nt = netdev_priv(dev);
+       struct net *net = nt->net;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
        struct __ip6_tnl_parm p;
 
        if (dev == ign->fb_tunnel_dev)
                return -EINVAL;
 
-       nt = netdev_priv(dev);
        ip6gre_netlink_parms(data, &p);
 
        t = ip6gre_tunnel_locate(net, &p, 0);
index 59f95affceb0773d052184bdf5fec9f276433d63..b2f091566f88453bce5eb3c33b47e1c9c040447c 100644 (file)
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
        unsigned int off;
        u16 flush = 1;
        int proto;
-       __wsum csum;
 
        off = skb_gro_offset(skb);
        hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
        NAPI_GRO_CB(skb)->flush |= flush;
 
-       csum = skb->csum;
-       skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+       skb_gro_postpull_rcsum(skb, iph, nlen);
 
        pp = ops->callbacks.gro_receive(head, skb);
 
-       skb->csum = csum;
-
 out_unlock:
        rcu_read_unlock();
 
index 40e7581374f7006c6f8c436ed686919ac93c2b19..85aaeca1f7f36b5c66bd643910a8439f96a98cb9 100644 (file)
@@ -219,7 +219,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        skb->mark = sk->sk_mark;
 
        mtu = dst_mtu(dst);
-       if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
+       if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_OUT, skb->len);
                return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
+       /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
        if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
                return true;
 
+       if (skb->ignore_df)
+               return false;
+
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
                return false;
 
@@ -555,7 +559,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        /* We must not fragment if the socket is set to force MTU discovery
         * or if the skb it not generated by a local socket.
         */
-       if (unlikely(!skb->local_df && skb->len > mtu) ||
+       if (unlikely(!skb->ignore_df && skb->len > mtu) ||
                     (IP6CB(skb)->frag_max_size &&
                      IP6CB(skb)->frag_max_size > mtu)) {
                if (skb->sk && dst_allfrag(skb_dst(skb)))
@@ -1225,12 +1229,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                unsigned int maxnonfragsize, headersize;
 
                headersize = sizeof(struct ipv6hdr) +
-                            (opt ? opt->tot_len : 0) +
+                            (opt ? opt->opt_flen + opt->opt_nflen : 0) +
                             (dst_allfrag(&rt->dst) ?
                              sizeof(struct frag_hdr) : 0) +
                             rt->rt6i_nfheader_len;
 
-               if (ip6_sk_local_df(sk))
+               if (ip6_sk_ignore_df(sk))
                        maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
                else
                        maxnonfragsize = mtu;
@@ -1540,7 +1544,7 @@ int ip6_push_pending_frames(struct sock *sk)
        }
 
        /* Allow local fragmentation. */
-       skb->local_df = ip6_sk_local_df(sk);
+       skb->ignore_df = ip6_sk_ignore_df(sk);
 
        *final_dst = fl6->daddr;
        __skb_pull(skb, skb_network_header_len(skb));
index b05b609f69d1cd3e58bd525cb0b5e8b11d429b80..afa082458360216ff33012ee43e93354888e44b9 100644 (file)
@@ -61,6 +61,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ip6tnl");
 MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
@@ -1557,7 +1558,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
 {
        u8 proto;
 
-       if (!data)
+       if (!data || !data[IFLA_IPTUN_PROTO])
                return 0;
 
        proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
index b7c0f827140b402685cc29049cb56646471c2cf2..9aaa6bb229e485fd657a5ca4bd30b6ebb9e90c5f 100644 (file)
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
 {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip6_tnl *t;
        struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (!t)
                return -1;
 
+       mark = be32_to_cpu(t->parms.o_key);
+
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
            type != NDISC_REDIRECT)
                return 0;
 
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET6);
        if (!x)
                return 0;
@@ -792,15 +795,12 @@ static const struct net_device_ops vti6_netdev_ops = {
  **/
 static void vti6_dev_setup(struct net_device *dev)
 {
-       struct ip6_tnl *t;
-
        dev->netdev_ops = &vti6_netdev_ops;
        dev->destructor = vti6_dev_free;
 
        dev->type = ARPHRD_TUNNEL6;
        dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
        dev->mtu = ETH_DATA_LEN;
-       t = netdev_priv(dev);
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -1097,7 +1097,6 @@ static int __init vti6_tunnel_init(void)
 
        err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
        if (err < 0) {
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1106,7 +1105,6 @@ static int __init vti6_tunnel_init(void)
        err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
@@ -1116,7 +1114,6 @@ static int __init vti6_tunnel_init(void)
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
 
                goto out;
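
The three hunks drop the inline unregister_pernet_device() calls from vti6_tunnel_init(): the error label reached by the goto already unwinds the pernet registration, so these paths unregistered it twice. A hypothetical sketch of the unwind convention being restored (the register_*/unregister_* helpers are stand-ins, not kernel functions):

static int register_a(void) { return 0; }       /* stand-in setup steps */
static int register_b(void) { return 0; }
static int register_c(void) { return 0; }
static void unregister_a(void) { }
static void unregister_b(void) { }

static int demo_init(void)
{
        int err;

        err = register_a();
        if (err)
                goto out;
        err = register_b();
        if (err)
                goto unreg_a;   /* undo only what already succeeded ... */
        err = register_c();
        if (err)
                goto unreg_b;   /* ... and never the same step twice */
        return 0;

unreg_b:
        unregister_b();
unreg_a:
        unregister_a();
out:
        return err;
}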
index 8659067da28e8a8557af6f4109fc52056ccdee1b..8250474ab7dc0e10b3340bca3e68aaf9377a81f2 100644 (file)
@@ -1633,7 +1633,7 @@ struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
 {
        struct mr6_table *mrt;
        struct flowi6 fl6 = {
-               .flowi6_iif     = skb->skb_iif,
+               .flowi6_iif     = skb->skb_iif ? : LOOPBACK_IFINDEX,
                .flowi6_oif     = skb->dev->ifindex,
                .flowi6_mark    = skb->mark,
        };
index 09a22f4f36c9e069c6dfb3074909691ef2c82399..ca8d4ea48a5d9fa641bf129a6fc5e3b428799fa4 100644 (file)
@@ -851,7 +851,7 @@ out:
 static void ndisc_recv_na(struct sk_buff *skb)
 {
        struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
-       const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+       struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
        const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
        u8 *lladdr = NULL;
        u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
                        /*
                         * Change: router to host
                         */
-                       struct rt6_info *rt;
-                       rt = rt6_get_dflt_router(saddr, dev);
-                       if (rt)
-                               ip6_del_rt(rt);
+                       rt6_clean_tohost(dev_net(dev),  saddr);
                }
 
 out:
index 95f3f1da0d7f2ff20c3afa3eeda315dd9e2e6b5f..d38e6a8d8b9fb82ec7d583a5ab2abc652838d470 100644 (file)
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
                .daddr = iph->daddr,
                .saddr = iph->saddr,
        };
+       int err;
 
        dst = ip6_route_output(net, skb->sk, &fl6);
-       if (dst->error) {
+       err = dst->error;
+       if (err) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
                LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
                dst_release(dst);
-               return dst->error;
+               return err;
        }
 
        /* Drop old route. */
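
The hunk fixes a use-after-free in ip6_route_me_harder(): dst_release() may free the entry, yet the old code then returned dst->error, so the error code is now cached beforehand. A hypothetical user-space sketch of the general rule (the struct is made up):

#include <stdlib.h>

struct result {
        int refcnt;
        int error;
};

static void result_put(struct result *r)
{
        if (--r->refcnt == 0)
                free(r);
}

static int consume(struct result *r)
{
        int err = r->error;     /* copy out while the reference is held */

        result_put(r);
        return err;             /* never r->error: r may already be freed */
}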
index e0983f3648a628410c6f6bfd9549ec339a325353..790e0c6b19e1caa41b31c5f279de719581bd6065 100644 (file)
@@ -33,6 +33,7 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
        struct ipv6hdr *iph = ipv6_hdr(skb);
        bool ret = false;
        struct flowi6 fl6 = {
+               .flowi6_iif = LOOPBACK_IFINDEX,
                .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
                .flowi6_proto = iph->nexthdr,
                .daddr = iph->saddr,
index 767ab8da82189479456c632ecec95eef67da4bae..0d5279fd852a48643b5b0b54834b2ee68a893116 100644 (file)
@@ -451,7 +451,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
        }
        sub_frag_mem_limit(&fq->q, head->truesize);
 
-       head->local_df = 1;
+       head->ignore_df = 1;
        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
index bda74291c3e0d09c94ff5961cf393cb7695ee518..5b7a1ed2aba95a837a0063b96cb9803ff8925f0c 100644 (file)
@@ -51,7 +51,6 @@ static struct inet_protosw pingv6_protosw = {
        .protocol =  IPPROTO_ICMPV6,
        .prot =      &pingv6_prot,
        .ops =       &inet6_dgram_ops,
-       .no_check =  UDP_CSUM_DEFAULT,
        .flags =     INET_PROTOSW_REUSE,
 };
 
@@ -168,12 +167,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        pfh.wcheck = 0;
        pfh.family = AF_INET6;
 
-       if (ipv6_addr_is_multicast(&fl6.daddr))
-               hlimit = np->mcast_hops;
-       else
-               hlimit = np->hop_limit;
-       if (hlimit < 0)
-               hlimit = ip6_dst_hoplimit(dst);
+       hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        lock_sock(sk);
        err = ip6_append_data(sk, ping_getfrag, &pfh, len,
index 091d066a57b3711c5bd0eb5a352573d0aa2f3776..3317440ea34174032a7b0bee73d8f5b36cfb08ee 100644 (file)
@@ -186,7 +186,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
 /* can be called either with percpu mib (pcpumib != NULL),
  * or shared one (smib != NULL)
  */
-static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
+static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
                                atomic_long_t *smib,
                                const struct snmp_mib *itemlist)
 {
@@ -201,7 +201,7 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
        }
 }
 
-static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib,
+static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
                                  const struct snmp_mib *itemlist, size_t syncpoff)
 {
        int i;
@@ -215,14 +215,14 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
 {
        struct net *net = (struct net *)seq->private;
 
-       snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics,
+       snmp6_seq_show_item64(seq, net->mib.ipv6_statistics,
                            snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
-       snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
+       snmp6_seq_show_item(seq, net->mib.icmpv6_statistics,
                            NULL, snmp6_icmp6_list);
        snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
-       snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
+       snmp6_seq_show_item(seq, net->mib.udp_stats_in6,
                            NULL, snmp6_udp6_list);
-       snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
+       snmp6_seq_show_item(seq, net->mib.udplite_stats_in6,
                            NULL, snmp6_udplite6_list);
        return 0;
 }
@@ -245,7 +245,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
        struct inet6_dev *idev = (struct inet6_dev *)seq->private;
 
        seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
-       snmp6_seq_show_item64(seq, (void __percpu **)idev->stats.ipv6,
+       snmp6_seq_show_item64(seq, idev->stats.ipv6,
                            snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
        snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
                            snmp6_icmp6_list);
index 1f29996e368a23e67bfe09d9b2ff10bd9b61805d..b2dc60b0c76403d38a6a96c8a1c299977ecb7bad 100644 (file)
@@ -873,14 +873,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                err = PTR_ERR(dst);
                goto out;
        }
-       if (hlimit < 0) {
-               if (ipv6_addr_is_multicast(&fl6.daddr))
-                       hlimit = np->mcast_hops;
-               else
-                       hlimit = np->hop_limit;
-               if (hlimit < 0)
-                       hlimit = ip6_dst_hoplimit(dst);
-       }
+       if (hlimit < 0)
+               hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        if (tclass < 0)
                tclass = np->tclass;
@@ -1328,7 +1322,6 @@ static struct inet_protosw rawv6_protosw = {
        .protocol       = IPPROTO_IP,   /* wild card */
        .prot           = &rawv6_prot,
        .ops            = &inet6_sockraw_ops,
-       .no_check       = UDP_CSUM_DEFAULT,
        .flags          = INET_PROTOSW_REUSE,
 };
 
index 4011617cca688850c4d530f2e35d9890203dedaf..f23fbd28a501ed5c3438abb7f1cbbec85233688b 100644 (file)
@@ -1176,7 +1176,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
-       fl6.flowi6_mark = mark;
+       fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
@@ -1273,6 +1273,7 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
        struct flowi6 fl6;
 
        memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_iif = LOOPBACK_IFINDEX;
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
        fl6.daddr = iph->daddr;
@@ -1294,6 +1295,7 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
        struct flowi6 fl6;
 
        memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_iif = LOOPBACK_IFINDEX;
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
        fl6.daddr = msg->dest;
@@ -1453,7 +1455,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
                goto out;
 
        net->ipv6.ip6_rt_gc_expire++;
-       fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
+       fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
        entries = dst_entries_get_slow(ops);
        if (entries < ops->gc_thresh)
                net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
@@ -2232,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
        fib6_clean_all(net, fib6_remove_prefsrc, &adni);
 }
 
+#define RTF_RA_ROUTER          (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
+#define RTF_CACHE_GATEWAY      (RTF_GATEWAY | RTF_CACHE)
+
+/* Remove routers and update dst entries when gateway turn into host. */
+static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
+{
+       struct in6_addr *gateway = (struct in6_addr *)arg;
+
+       if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
+            ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
+            ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
+               return -1;
+       }
+       return 0;
+}
+
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
+{
+       fib6_clean_all(net, fib6_clean_tohost, gateway);
+}
+
 struct arg_dev_net {
        struct net_device *dev;
        struct net *net;
@@ -2707,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
        if (tb[RTA_OIF])
                oif = nla_get_u32(tb[RTA_OIF]);
 
+       if (tb[RTA_MARK])
+               fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
        if (iif) {
                struct net_device *dev;
                int flags = 0;
index e5a453ca302e1e55e4d8e6ca7069f97fbb2b6347..f4380041f5e7b04211d7d73a05cb3989e8a9af94 100644 (file)
@@ -1828,4 +1828,5 @@ xfrm_tunnel_failed:
 module_init(sit_init);
 module_exit(sit_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("sit");
 MODULE_ALIAS_NETDEV("sit0");
index bb53a5e73c1ab67c7a11430488b8418c4edbf98b..a822b880689b5fea5adeed30956afd2328a9c8b9 100644 (file)
@@ -216,6 +216,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq->ir_iif = inet6_iif(skb);
 
+       ireq->ir_mark = inet_request_mark(sk, skb);
+
        req->expires = 0UL;
        req->num_retrans = 0;
        ireq->ecn_ok            = ecn_ok;
@@ -242,7 +244,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                final_p = fl6_update_dst(&fl6, np->opt, &final);
                fl6.saddr = ireq->ir_v6_loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
-               fl6.flowi6_mark = sk->sk_mark;
+               fl6.flowi6_mark = ireq->ir_mark;
                fl6.fl6_dport = ireq->ir_rmt_port;
                fl6.fl6_sport = inet_sk(sk)->inet_sport;
                security_req_classify_flow(req, flowi6_to_flowi(&fl6));
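
The cookie path now records inet_request_mark() on the request socket and routes the reply with ireq->ir_mark instead of sk->sk_mark, matching the tcp_ipv6.c changes later in this section. A user-space sketch of putting a mark on the listening socket in the first place, which is what inet_request_mark() falls back to by default (SO_MARK needs CAP_NET_ADMIN; the fallback value 36 is the asm-generic one and is an assumption for headers lacking the define):

#include <sys/socket.h>

#ifndef SO_MARK
#define SO_MARK 36              /* asm-generic value, see note above */
#endif

/* Mark a socket so fwmark routing rules and iptables -m mark can match its
 * traffic; with ir_mark wired through, the replies generated for its request
 * sockets carry the same mark. */
static int set_fwmark(int fd, unsigned int mark)
{
        return setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
}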
index 7f405a168822afab4fa5349317ef43f2ed8e3a0f..058f3eca2e53efd1fe016cfe8450d3ab0a9c13b1 100644 (file)
@@ -38,6 +38,13 @@ static struct ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "fwmark_reflect",
+               .data           = &init_net.ipv6.sysctl.fwmark_reflect,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        { }
 };
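
The new entry exposes fwmark_reflect, which the IP6_REPLY_MARK() users added elsewhere in this series (icmp6_send(), tcp_v6_send_response(), ip6_update_pmtu()) consult so that replies to unconnected traffic reuse the mark of the packet that triggered them. A small user-space sketch of enabling it; the /proc path is inferred from the per-net ipv6 sysctl table above:

#include <stdio.h>

/* Turn on reflected fwmarks for IPv6 resets and ICMPv6 errors. */
static int enable_fwmark_reflect(void)
{
        FILE *f = fopen("/proc/sys/net/ipv6/fwmark_reflect", "w");

        if (!f)
                return -1;
        if (fputs("1\n", f) == EOF) {
                fclose(f);
                return -1;
        }
        return fclose(f);
}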
 
index e289830ed6e35a3be4feda700b5b6789dac20292..229239ad96b1645de84bfc5b0ad76311e295f82c 100644 (file)
@@ -340,7 +340,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct sock *sk;
        int err;
        struct tcp_sock *tp;
-       __u32 seq;
+       struct request_sock *fastopen;
+       __u32 seq, snd_una;
        struct net *net = dev_net(skb->dev);
 
        sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
@@ -371,8 +372,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
+       /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
+       fastopen = tp->fastopen_rsk;
+       snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
-           !between(seq, tp->snd_una, tp->snd_nxt)) {
+           !between(seq, snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
@@ -436,8 +440,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                goto out;
 
        case TCP_SYN_SENT:
-       case TCP_SYN_RECV:  /* Cannot happen.
-                              It can, it SYNs are crossed. --ANK */
+       case TCP_SYN_RECV:
+               /* Only in fast or simultaneous open. If a fast open socket is
+                * already accepted it is treated as a connected one below.
+                */
+               if (fastopen && fastopen->sk == NULL)
+                       break;
+
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
                        sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */
@@ -463,7 +472,8 @@ out:
 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct flowi6 *fl6,
                              struct request_sock *req,
-                             u16 queue_mapping)
+                             u16 queue_mapping,
+                             struct tcp_fastopen_cookie *foc)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
                goto done;
 
-       skb = tcp_make_synack(sk, dst, req, NULL);
+       skb = tcp_make_synack(sk, dst, req, foc);
 
        if (skb) {
                __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -498,7 +508,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
        struct flowi6 fl6;
        int res;
 
-       res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
+       res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
        if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
@@ -802,6 +812,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
                fl6.flowi6_oif = inet6_iif(skb);
        else
                fl6.flowi6_oif = oif;
+       fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -917,7 +928,12 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
 {
-       tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
+       /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+        * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+        */
+       tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+                       tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+                       tcp_rsk(req)->rcv_nxt,
                        req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
                        0, 0);
@@ -969,8 +985,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 isn = TCP_SKB_CB(skb)->when;
        struct dst_entry *dst = NULL;
+       struct tcp_fastopen_cookie foc = { .len = -1 };
+       bool want_cookie = false, fastopen;
        struct flowi6 fl6;
-       bool want_cookie = false;
+       int err;
 
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);
@@ -1001,7 +1019,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
        tmp_opt.user_mss = tp->rx_opt.user_mss;
-       tcp_parse_options(skb, &tmp_opt, 0, NULL);
+       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
 
        if (want_cookie && !tmp_opt.saw_tstamp)
                tcp_clear_options(&tmp_opt);
@@ -1016,6 +1034,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                TCP_ECN_create_request(req, skb, sock_net(sk));
 
        ireq->ir_iif = sk->sk_bound_dev_if;
+       ireq->ir_mark = inet_request_mark(sk, skb);
 
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
@@ -1074,19 +1093,27 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                isn = tcp_v6_init_sequence(skb);
        }
 have_isn:
-       tcp_rsk(req)->snt_isn = isn;
 
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_release;
 
-       if (tcp_v6_send_synack(sk, dst, &fl6, req,
-                              skb_get_queue_mapping(skb)) ||
-           want_cookie)
+       if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
                goto drop_and_free;
 
+       tcp_rsk(req)->snt_isn = isn;
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
-       tcp_rsk(req)->listener = NULL;
-       inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+       tcp_openreq_init_rwin(req, sk, dst);
+       fastopen = !want_cookie &&
+                  tcp_try_fastopen(sk, skb, req, &foc, dst);
+       err = tcp_v6_send_synack(sk, dst, &fl6, req,
+                                skb_get_queue_mapping(skb), &foc);
+       if (!fastopen) {
+               if (err || want_cookie)
+                       goto drop_and_free;
+
+               tcp_rsk(req)->listener = NULL;
+               inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+       }
        return 0;
 
 drop_and_release:
@@ -1294,25 +1321,6 @@ out:
        return NULL;
 }
 
-static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
-{
-       if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
-                                 &ipv6_hdr(skb)->daddr, skb->csum)) {
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       return 0;
-               }
-       }
-
-       skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
-                                             &ipv6_hdr(skb)->saddr,
-                                             &ipv6_hdr(skb)->daddr, 0));
-
-       if (skb->len <= 76)
-               return __skb_checksum_complete(skb);
-       return 0;
-}
-
 /* The socket must have its spinlock held when we get
  * here.
  *
@@ -1486,7 +1494,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, th->doff*4))
                goto discard_it;
 
-       if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
+       if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
                goto csum_error;
 
        th = tcp_hdr(skb);
@@ -1779,6 +1787,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
        const struct inet_sock *inet = inet_sk(sp);
        const struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
+       struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
 
        dest  = &sp->sk_v6_daddr;
        src   = &sp->sk_v6_rcv_saddr;
@@ -1821,7 +1830,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                   jiffies_to_clock_t(icsk->icsk_ack.ato),
                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd,
-                  tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
+                  sp->sk_state == TCP_LISTEN ?
+                       (fastopenq ? fastopenq->max_qlen : 0) :
+                       (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
                   );
 }
 
@@ -1981,7 +1992,6 @@ static struct inet_protosw tcpv6_protosw = {
        .protocol       =       IPPROTO_TCP,
        .prot           =       &tcpv6_prot,
        .ops            =       &inet6_stream_ops,
-       .no_check       =       0,
        .flags          =       INET_PROTOSW_PERMANENT |
                                INET_PROTOSW_ICSK,
 };
index 0d78132ff18aa018fa4e9918dbfb0dd57f95147a..8517d3cd1aed460bbfb1bfb0f515924f008b790d 100644 (file)
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
        if (NAPI_GRO_CB(skb)->flush)
                goto skip_csum;
 
-       wsum = skb->csum;
+       wsum = NAPI_GRO_CB(skb)->csum;
 
        switch (skb->ip_summed) {
        case CHECKSUM_NONE:
index 1e586d92260e1e75957060b896a8748e9beaa61a..60325236446a97fbb753ddb0c29ac31accf73ed1 100644 (file)
@@ -634,6 +634,10 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
                        int ret;
 
+                       /* Verify checksum before giving to encap */
+                       if (udp_lib_checksum_complete(skb))
+                               goto csum_error;
+
                        ret = encap_rcv(sk, skb);
                        if (ret <= 0) {
                                UDP_INC_STATS_BH(sock_net(sk),
@@ -760,6 +764,17 @@ static void flush_stack(struct sock **stack, unsigned int count,
        if (unlikely(skb1))
                kfree_skb(skb1);
 }
+
+static void udp6_csum_zero_error(struct sk_buff *skb)
+{
+       /* RFC 2460 section 8.1 says that we SHOULD log
+        * this error. Well, it is reasonable.
+        */
+       LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
+                      &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
+                      &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
+}
+
 /*
  * Note: called only from the BH handler context,
  * so we don't need to lock the hashes.
@@ -779,7 +794,12 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
        dif = inet6_iif(skb);
        sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
        while (sk) {
-               stack[count++] = sk;
+               /* If zero checksum and no_check is not on for
+                * the socket then skip it.
+                */
+               if (uh->check || udp_sk(sk)->no_check6_rx)
+                       stack[count++] = sk;
+
                sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
                                       uh->source, saddr, dif);
                if (unlikely(count == ARRAY_SIZE(stack))) {
@@ -867,6 +887,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (sk != NULL) {
                int ret;
 
+               if (!uh->check && !udp_sk(sk)->no_check6_rx) {
+                       sock_put(sk);
+                       udp6_csum_zero_error(skb);
+                       goto csum_error;
+               }
+
                ret = udpv6_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
@@ -879,6 +905,11 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                return 0;
        }
 
+       if (!uh->check) {
+               udp6_csum_zero_error(skb);
+               goto csum_error;
+       }
+
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard;
 
@@ -1006,7 +1037,10 @@ static int udp_v6_push_pending_frames(struct sock *sk)
 
        if (is_udplite)
                csum = udplite_csum_outgoing(sk, skb);
-       else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
+       else if (up->no_check6_tx) {   /* UDP csum disabled */
+               skb->ip_summed = CHECKSUM_NONE;
+               goto send;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
                udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
                                     up->len);
                goto send;
@@ -1232,14 +1266,8 @@ do_udp_sendmsg:
                goto out;
        }
 
-       if (hlimit < 0) {
-               if (ipv6_addr_is_multicast(&fl6.daddr))
-                       hlimit = np->mcast_hops;
-               else
-                       hlimit = np->hop_limit;
-               if (hlimit < 0)
-                       hlimit = ip6_dst_hoplimit(dst);
-       }
+       if (hlimit < 0)
+               hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        if (tclass < 0)
                tclass = np->tclass;
@@ -1479,7 +1507,6 @@ static struct inet_protosw udpv6_protosw = {
        .protocol =  IPPROTO_UDP,
        .prot =      &udpv6_prot,
        .ops =       &inet6_dgram_ops,
-       .no_check =  UDP_CSUM_DEFAULT,
        .flags =     INET_PROTOSW_PERMANENT,
 };
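
    (Editor's illustration, not part of the series' diff: with the udp.c hunks
    above, an IPv6 UDP socket only accepts zero-checksum datagrams when
    no_check6_rx is set and only sends them when no_check6_tx is set. A minimal
    userspace sketch of opting in is shown below; it assumes the
    UDP_NO_CHECK6_TX/UDP_NO_CHECK6_RX socket options introduced by this series
    are visible in the installed <linux/udp.h>, and that zero checksums are only
    appropriate for tunnel-style traffic.)

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/udp.h>   /* UDP_NO_CHECK6_TX, UDP_NO_CHECK6_RX (assumed) */

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);
            int one = 1;

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }

            /* Allow transmitting IPv6 UDP datagrams with a zero checksum.
             * IPPROTO_UDP (17) is the same value the kernel checks as SOL_UDP.
             */
            if (setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_TX, &one, sizeof(one)))
                    perror("UDP_NO_CHECK6_TX");

            /* Accept received IPv6 UDP datagrams whose checksum field is zero. */
            if (setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX, &one, sizeof(one)))
                    perror("UDP_NO_CHECK6_RX");

            return 0;
    }

    On such a socket, the __udp6_lib_rcv()/__udp6_lib_mcast_deliver() hunks above
    deliver zero-checksum datagrams instead of counting them as checksum errors.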
 
index dfcc4be46898281f09038bbd7638edfefa04ea3e..9cf097e206e931c6e3c184f22b8adcabedc4c03a 100644 (file)
@@ -64,7 +64,6 @@ static struct inet_protosw udplite6_protosw = {
        .protocol       = IPPROTO_UDPLITE,
        .prot           = &udplitev6_prot,
        .ops            = &inet6_dgram_ops,
-       .no_check       = 0,
        .flags          = INET_PROTOSW_PERMANENT,
 };
 
index 19ef329bdbf8e7418fa1d352bb6c90218935831e..433672d07d0b55e1e436be704780e8c6f5777447 100644 (file)
@@ -78,7 +78,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;
 
-       if (!skb->local_df && skb->len > mtu) {
+       if (!skb->ignore_df && skb->len > mtu) {
                skb->dev = dst->dev;
 
                if (xfrm6_local_dontfrag(skb))
@@ -114,13 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
        if (err)
                return err;
 
-       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
-       IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
-
-       skb->protocol = htons(ETH_P_IPV6);
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        return x->outer_mode->output2(x, skb);
 }
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
 
 int xfrm6_output_finish(struct sk_buff *skb)
 {
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+       skb->protocol = htons(ETH_P_IPV6);
+
 #ifdef CONFIG_NETFILTER
        IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 #endif
 
-       skb->protocol = htons(ETH_P_IPV6);
        return xfrm_output(skb);
 }
 
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
        struct xfrm_state *x = dst->xfrm;
        int mtu;
 
+#ifdef CONFIG_NETFILTER
+       if (!x) {
+               IP6CB(skb)->flags |= IP6SKB_REROUTED;
+               return dst_output(skb);
+       }
+#endif
+
        if (skb->protocol == htons(ETH_P_IPV6))
                mtu = ip6_skb_dst_mtu(skb);
        else
@@ -150,7 +153,7 @@ static int __xfrm6_output(struct sk_buff *skb)
        if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
                return -EMSGSIZE;
-       } else if (!skb->local_df && skb->len > mtu && skb->sk) {
+       } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
                xfrm_local_error(skb, mtu);
                return -EMSGSIZE;
        }
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
 
 int xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
-                      skb_dst(skb)->dev, __xfrm6_output);
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+                           NULL, skb_dst(skb)->dev, __xfrm6_output,
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
index 6ab989c486f7ee66c5cd6235ce4f758fe9f277a6..54d13f8dbbae10670756eee0b16b898423d06060 100644 (file)
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
        int ret;
        struct xfrm6_protocol *handler;
+       struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
+
+       if (!head)
+               return 0;
 
        for_each_protocol_rcu(*proto_handlers(protocol), handler)
                if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
        struct xfrm6_protocol __rcu **pprev;
        struct xfrm6_protocol *t;
        bool add_netproto = false;
-
        int ret = -EEXIST;
        int priority = handler->priority;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
        struct xfrm6_protocol *t;
        int ret = -ENOENT;
 
+       if (!proto_handlers(protocol) || !netproto(protocol))
+               return -EINVAL;
+
        mutex_lock(&xfrm6_protocol_mutex);
 
        for (pprev = proto_handlers(protocol);
index 41e4e93cb3aae37df41ff419ef34be9c2255b5cc..91729b807c7d041ae379e89df335acefe5218635 100644 (file)
@@ -1353,7 +1353,7 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol,
 
        sk_refcnt_debug_inc(sk);
        sock_init_data(sock, sk);
-       sk->sk_no_check = 1;            /* Checksum off by default */
+       sk->sk_no_check_tx = 1;         /* Checksum off by default */
        sock->ops = &ipx_dgram_ops;
        rc = 0;
 out:
index c1f03185c5e115ffe359e39a0bfb17efe8d4c38a..67e7ad3d46b1fb4489a175836351607e7f5ae741 100644 (file)
@@ -236,7 +236,8 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
        }
 
        /* Apply checksum. Not allowed on 802.3 links. */
-       if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023))
+       if (sk->sk_no_check_tx ||
+           intrfc->if_dlink_type == htons(IPX_FRAME_8023))
                ipx->ipx_checksum = htons(0xFFFF);
        else
                ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));
index 01e77b0ae0755d037093e7597a42db2a66378a51..7a95fa4a3de1e558a07485bd8f6dbb3b4dcf32b3 100644 (file)
@@ -682,6 +682,18 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
        return NULL;
 }
 
+static void __iucv_auto_name(struct iucv_sock *iucv)
+{
+       char name[12];
+
+       sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
+       while (__iucv_get_sock_by_name(name)) {
+               sprintf(name, "%08x",
+                       atomic_inc_return(&iucv_sk_list.autobind_name));
+       }
+       memcpy(iucv->src_name, name, 8);
+}
+
 /* Bind an unbound socket */
 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
@@ -724,8 +736,12 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                if (!memcmp(dev->perm_addr, uid, 8)) {
-                       memcpy(iucv->src_name, sa->siucv_name, 8);
                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
+                       /* Check for uninitialized siucv_name */
+                       if (strncmp(sa->siucv_name, "        ", 8) == 0)
+                               __iucv_auto_name(iucv);
+                       else
+                               memcpy(iucv->src_name, sa->siucv_name, 8);
                        sk->sk_bound_dev_if = dev->ifindex;
                        iucv->hs_dev = dev;
                        dev_hold(dev);
@@ -763,7 +779,6 @@ done:
 static int iucv_sock_autobind(struct sock *sk)
 {
        struct iucv_sock *iucv = iucv_sk(sk);
-       char name[12];
        int err = 0;
 
        if (unlikely(!pr_iucv))
@@ -772,17 +787,9 @@ static int iucv_sock_autobind(struct sock *sk)
        memcpy(iucv->src_user_id, iucv_userid, 8);
 
        write_lock_bh(&iucv_sk_list.lock);
-
-       sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
-       while (__iucv_get_sock_by_name(name)) {
-               sprintf(name, "%08x",
-                       atomic_inc_return(&iucv_sk_list.autobind_name));
-       }
-
+       __iucv_auto_name(iucv);
        write_unlock_bh(&iucv_sk_list.lock);
 
-       memcpy(&iucv->src_name, name, 8);
-
        if (!iucv->msglimit)
                iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
 
@@ -1830,7 +1837,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
                spin_lock_irqsave(&list->lock, flags);
 
                while (list_skb != (struct sk_buff *)list) {
-                       if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+                       if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
                                this = list_skb;
                                break;
                        }
@@ -1936,11 +1943,10 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
            sk_acceptq_is_full(sk) ||
            !nsk) {
                /* error on server socket - connection refused */
-               if (nsk)
-                       sk_free(nsk);
                afiucv_swap_src_dest(skb);
                trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
                err = dev_queue_xmit(skb);
+               iucv_sock_kill(nsk);
                bh_unlock_sock(sk);
                goto out;
        }
index f3c83073afc49f7aad98262cc5776b3edddc7f87..ba2a2f95911c99732dc2e3fb1ae37b9f977d1f5a 100644 (file)
@@ -1476,9 +1476,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
        else
                err = xfrm_state_update(x);
 
-       xfrm_audit_state_add(x, err ? 0 : 1,
-                            audit_get_loginuid(current),
-                            audit_get_sessionid(current), 0);
+       xfrm_audit_state_add(x, err ? 0 : 1, true);
 
        if (err < 0) {
                x->km.state = XFRM_STATE_DEAD;
@@ -1532,9 +1530,7 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_
        c.event = XFRM_MSG_DELSA;
        km_state_notify(x, &c);
 out:
-       xfrm_audit_state_delete(x, err ? 0 : 1,
-                               audit_get_loginuid(current),
-                               audit_get_sessionid(current), 0);
+       xfrm_audit_state_delete(x, err ? 0 : 1, true);
        xfrm_state_put(x);
 
        return err;
@@ -1726,17 +1722,13 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
        struct net *net = sock_net(sk);
        unsigned int proto;
        struct km_event c;
-       struct xfrm_audit audit_info;
        int err, err2;
 
        proto = pfkey_satype2proto(hdr->sadb_msg_satype);
        if (proto == 0)
                return -EINVAL;
 
-       audit_info.loginuid = audit_get_loginuid(current);
-       audit_info.sessionid = audit_get_sessionid(current);
-       audit_info.secid = 0;
-       err = xfrm_state_flush(net, proto, &audit_info);
+       err = xfrm_state_flush(net, proto, true);
        err2 = unicast_flush_resp(sk, hdr);
        if (err || err2) {
                if (err == -ESRCH) /* empty table - go quietly */
@@ -2288,9 +2280,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
        err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp,
                                 hdr->sadb_msg_type != SADB_X_SPDUPDATE);
 
-       xfrm_audit_policy_add(xp, err ? 0 : 1,
-                             audit_get_loginuid(current),
-                             audit_get_sessionid(current), 0);
+       xfrm_audit_policy_add(xp, err ? 0 : 1, true);
 
        if (err)
                goto out;
@@ -2372,9 +2362,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
        if (xp == NULL)
                return -ENOENT;
 
-       xfrm_audit_policy_delete(xp, err ? 0 : 1,
-                                audit_get_loginuid(current),
-                                audit_get_sessionid(current), 0);
+       xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
 
        if (err)
                goto out;
@@ -2553,7 +2541,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
                sel.sport_mask = htons(0xffff);
 
        /* set destination address info of selector */
-       sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1],
+       sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1];
        pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
        sel.prefixlen_d = sa->sadb_address_prefixlen;
        sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2622,9 +2610,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
                return -ENOENT;
 
        if (delete) {
-               xfrm_audit_policy_delete(xp, err ? 0 : 1,
-                               audit_get_loginuid(current),
-                               audit_get_sessionid(current), 0);
+               xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
 
                if (err)
                        goto out;
@@ -2733,13 +2719,9 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
 {
        struct net *net = sock_net(sk);
        struct km_event c;
-       struct xfrm_audit audit_info;
        int err, err2;
 
-       audit_info.loginuid = audit_get_loginuid(current);
-       audit_info.sessionid = audit_get_sessionid(current);
-       audit_info.secid = 0;
-       err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
+       err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
        err2 = unicast_flush_resp(sk, hdr);
        if (err || err2) {
                if (err == -ESRCH) /* empty table - old silent behavior */
index a4e37d7158dcca42455a04eaf0460a48e39242fd..379558014b60f40a19964fdbe6c0f8ea9fd3c03e 100644 (file)
@@ -495,52 +495,6 @@ out:
        spin_unlock_bh(&session->reorder_q.lock);
 }
 
-static inline int l2tp_verify_udp_checksum(struct sock *sk,
-                                          struct sk_buff *skb)
-{
-       struct udphdr *uh = udp_hdr(skb);
-       u16 ulen = ntohs(uh->len);
-       __wsum psum;
-
-       if (sk->sk_no_check || skb_csum_unnecessary(skb))
-               return 0;
-
-#if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
-               if (!uh->check) {
-                       LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
-                       return 1;
-               }
-               if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
-                   !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                    &ipv6_hdr(skb)->daddr, ulen,
-                                    IPPROTO_UDP, skb->csum)) {
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       return 0;
-               }
-               skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                        &ipv6_hdr(skb)->daddr,
-                                                        skb->len, IPPROTO_UDP,
-                                                        0));
-       } else
-#endif
-       {
-               struct inet_sock *inet;
-               if (!uh->check)
-                       return 0;
-               inet = inet_sk(sk);
-               psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
-                                         ulen, IPPROTO_UDP, 0);
-
-               if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
-                   !csum_fold(csum_add(psum, skb->csum)))
-                       return 0;
-               skb->csum = psum;
-       }
-
-       return __skb_checksum_complete(skb);
-}
-
 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
 {
        u32 nws;
@@ -895,8 +849,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
        u16 version;
        int length;
 
-       if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
-               goto discard_bad_csum;
+       /* UDP has verified the checksum */
 
        /* UDP always verifies the packet length. */
        __skb_pull(skb, sizeof(struct udphdr));
@@ -979,14 +932,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 
        return 0;
 
-discard_bad_csum:
-       LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
-       UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
-       atomic_long_inc(&tunnel->stats.rx_errors);
-       kfree_skb(skb);
-
-       return 0;
-
 error:
        /* Put UDP header back */
        __skb_push(skb, sizeof(struct udphdr));
@@ -1128,7 +1073,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
        }
 
        /* Queue the packet to IP for output */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 #if IS_ENABLED(CONFIG_IPV6)
        if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
                error = inet6_csk_xmit(tunnel->sock, skb, NULL);
@@ -1157,7 +1102,9 @@ static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct udphdr *uh = udp_hdr(skb);
 
-       if (!skb_dst(skb) || !skb_dst(skb)->dev ||
+       if (udp_get_no_check6_tx(sk))
+               skb->ip_summed = CHECKSUM_NONE;
+       else if (!skb_dst(skb) || !skb_dst(skb)->dev ||
            !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
                __wsum csum = skb_checksum(skb, 0, udp_len, 0);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1243,7 +1190,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
                        l2tp_xmit_ipv6_csum(sk, skb, udp_len);
                else
 #endif
-               if (sk->sk_no_check == UDP_CSUM_NOXMIT)
+               if (sk->sk_no_check_tx)
                        skb->ip_summed = CHECKSUM_NONE;
                else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
                         (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
@@ -1490,6 +1437,11 @@ static int l2tp_tunnel_sock_create(struct net *net,
                                             sizeof(udp6_addr), 0);
                        if (err < 0)
                                goto out;
+
+                       if (cfg->udp6_zero_tx_checksums)
+                               udp_set_no_check6_tx(sock->sk, true);
+                       if (cfg->udp6_zero_rx_checksums)
+                               udp_set_no_check6_rx(sock->sk, true);
                } else
 #endif
                {
@@ -1518,7 +1470,7 @@ static int l2tp_tunnel_sock_create(struct net *net,
                }
 
                if (!cfg->use_udp_checksums)
-                       sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
+                       sock->sk->sk_no_check_tx = 1;
 
                break;
 
index 3f93ccd6ba9768fe171f4e35e79d613226c1dbc7..68aa9ffd4ae4d972cdd57ea742ddf3b78ba497d6 100644 (file)
@@ -162,7 +162,9 @@ struct l2tp_tunnel_cfg {
 #endif
        u16                     local_udp_port;
        u16                     peer_udp_port;
-       unsigned int            use_udp_checksums:1;
+       unsigned int            use_udp_checksums:1,
+                               udp6_zero_tx_checksums:1,
+                               udp6_zero_rx_checksums:1;
 };
 
 struct l2tp_tunnel {
index 3397fe6897c0326d3efb2277a2824f5c2038d3c4..369a9822488c45fc10be28ca00300e9382645a9e 100644 (file)
@@ -606,7 +606,6 @@ static struct inet_protosw l2tp_ip_protosw = {
        .protocol       = IPPROTO_L2TP,
        .prot           = &l2tp_ip_prot,
        .ops            = &l2tp_ip_ops,
-       .no_check       = 0,
 };
 
 static struct net_protocol l2tp_ip_protocol __read_mostly = {
index 7704ea9502fdc9e49a2b8bb7fda4b9b9ec8d1722..f3f98a156ceed8ebc5843368aa0d1988175e5fc7 100644 (file)
@@ -605,14 +605,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                goto out;
        }
 
-       if (hlimit < 0) {
-               if (ipv6_addr_is_multicast(&fl6.daddr))
-                       hlimit = np->mcast_hops;
-               else
-                       hlimit = np->hop_limit;
-               if (hlimit < 0)
-                       hlimit = ip6_dst_hoplimit(dst);
-       }
+       if (hlimit < 0)
+               hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        if (tclass < 0)
                tclass = np->tclass;
@@ -761,7 +755,6 @@ static struct inet_protosw l2tp_ip6_protosw = {
        .protocol       = IPPROTO_L2TP,
        .prot           = &l2tp_ip6_prot,
        .ops            = &l2tp_ip6_ops,
-       .no_check       = 0,
 };
 
 static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
index bd7387adea9eff25ce7ffd0683589dcdcc020ad0..0ac907adb2f472c0d49508ca6843363db27129b1 100644 (file)
@@ -161,6 +161,13 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
                        cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
                if (info->attrs[L2TP_ATTR_UDP_CSUM])
                        cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
+
+#if IS_ENABLED(CONFIG_IPV6)
+               if (info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX])
+                       cfg.udp6_zero_tx_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]);
+               if (info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX])
+                       cfg.udp6_zero_rx_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]);
+#endif
        }
 
        if (info->attrs[L2TP_ATTR_DEBUG])
@@ -297,8 +304,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
        case L2TP_ENCAPTYPE_UDP:
                if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
                    nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
-                   nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
-                              (sk->sk_no_check != UDP_CSUM_NOXMIT)))
+                   nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx))
                        goto nla_put_failure;
                /* NOBREAK */
        case L2TP_ENCAPTYPE_IP:
index 9d7d840aac6d11630ef902f4ce57db7a8491d1d4..1e46ffa69167973921b795f8757f903234a61b94 100644 (file)
@@ -25,7 +25,8 @@ mac80211-y := \
        wme.o \
        event.o \
        chan.o \
-       trace.o mlme.o
+       trace.o mlme.o \
+       tdls.o
 
 mac80211-$(CONFIG_MAC80211_LEDS) += led.o
 mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
index 7c7df475a401693dd44cb1a6b9d68dd255b2acd5..ec24378caaafaf333152e856aa0e2e920ddbb13f 100644 (file)
@@ -23,12 +23,13 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
                               u8 *data, size_t data_len, u8 *mic)
 {
        struct scatterlist assoc, pt, ct[2];
-       struct {
-               struct aead_request     req;
-               u8                      priv[crypto_aead_reqsize(tfm)];
-       } aead_req;
 
-       memset(&aead_req, 0, sizeof(aead_req));
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *) aead_req_data;
+
+       memset(aead_req, 0, sizeof(aead_req_data));
 
        sg_init_one(&pt, data, data_len);
        sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
@@ -36,23 +37,23 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
        sg_set_buf(&ct[0], data, data_len);
        sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
 
-       aead_request_set_tfm(&aead_req.req, tfm);
-       aead_request_set_assoc(&aead_req.req, &assoc, assoc.length);
-       aead_request_set_crypt(&aead_req.req, &pt, ct, data_len, b_0);
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, &assoc, assoc.length);
+       aead_request_set_crypt(aead_req, &pt, ct, data_len, b_0);
 
-       crypto_aead_encrypt(&aead_req.req);
+       crypto_aead_encrypt(aead_req);
 }
 
 int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
                              u8 *data, size_t data_len, u8 *mic)
 {
        struct scatterlist assoc, pt, ct[2];
-       struct {
-               struct aead_request     req;
-               u8                      priv[crypto_aead_reqsize(tfm)];
-       } aead_req;
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *) aead_req_data;
 
-       memset(&aead_req, 0, sizeof(aead_req));
+       memset(aead_req, 0, sizeof(aead_req_data));
 
        sg_init_one(&pt, data, data_len);
        sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
@@ -60,12 +61,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
        sg_set_buf(&ct[0], data, data_len);
        sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
 
-       aead_request_set_tfm(&aead_req.req, tfm);
-       aead_request_set_assoc(&aead_req.req, &assoc, assoc.length);
-       aead_request_set_crypt(&aead_req.req, ct, &pt,
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, &assoc, assoc.length);
+       aead_request_set_crypt(aead_req, ct, &pt,
                               data_len + IEEE80211_CCMP_MIC_LEN, b_0);
 
-       return crypto_aead_decrypt(&aead_req.req);
+       return crypto_aead_decrypt(aead_req);
 }
 
 struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
index aaa59d719592c0b7dc6ef3ddb4df8aaa578bc45c..d7513a503be11b180031342dcf316450fd6c69d3 100644 (file)
@@ -109,6 +109,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
 static int ieee80211_start_p2p_device(struct wiphy *wiphy,
                                      struct wireless_dev *wdev)
 {
+       struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+       int ret;
+
+       mutex_lock(&sdata->local->chanctx_mtx);
+       ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+       mutex_unlock(&sdata->local->chanctx_mtx);
+       if (ret < 0)
+               return ret;
+
        return ieee80211_do_open(wdev, true);
 }
 
@@ -463,8 +472,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
+       struct rate_control_ref *ref = local->rate_ctrl;
        struct timespec uptime;
        u64 packets = 0;
+       u32 thr = 0;
        int i, ac;
 
        sinfo->generation = sdata->local->sta_generation;
@@ -578,6 +589,17 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
        if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
                sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
+
+       /* check if the driver has a SW RC implementation */
+       if (ref && ref->ops->get_expected_throughput)
+               thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
+       else
+               thr = drv_get_expected_throughput(local, &sta->sta);
+
+       if (thr != 0) {
+               sinfo->filled |= STATION_INFO_EXPECTED_THROUGHPUT;
+               sinfo->expected_throughput = thr;
+       }
 }
 
 static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
@@ -768,7 +790,7 @@ static void ieee80211_get_et_strings(struct wiphy *wiphy,
 }
 
 static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
-                                int idx, u8 *mac, struct station_info *sinfo)
+                                 int idx, u8 *mac, struct station_info *sinfo)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
@@ -798,7 +820,7 @@ static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
 }
 
 static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
-                                u8 *mac, struct station_info *sinfo)
+                                const u8 *mac, struct station_info *sinfo)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
@@ -972,13 +994,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
        sdata->needed_rx_chains = sdata->local->rx_chains;
 
        mutex_lock(&local->mtx);
-       sdata->radar_required = params->radar_required;
        err = ieee80211_vif_use_channel(sdata, &params->chandef,
                                        IEEE80211_CHANCTX_SHARED);
+       if (!err)
+               ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
        mutex_unlock(&local->mtx);
        if (err)
                return err;
-       ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 
        /*
         * Apply control port protocol, this allows us to
@@ -1075,6 +1097,31 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
        return 0;
 }
 
+bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local)
+{
+       struct ieee80211_sub_if_data *sdata;
+
+       lockdep_assert_held(&local->mtx);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+               if (!ieee80211_sdata_running(sdata))
+                       continue;
+
+               if (!sdata->vif.csa_active)
+                       continue;
+
+               if (!sdata->csa_block_tx)
+                       continue;
+
+               rcu_read_unlock();
+               return true;
+       }
+       rcu_read_unlock();
+
+       return false;
+}
+
 static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1092,7 +1139,14 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata);
 
        /* abort any running channel switch */
+       mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
+       if (!ieee80211_csa_needs_block_tx(local))
+               ieee80211_wake_queues_by_reason(&local->hw,
+                                       IEEE80211_MAX_QUEUE_MAP,
+                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       mutex_unlock(&local->mtx);
+
        kfree(sdata->u.ap.next_beacon);
        sdata->u.ap.next_beacon = NULL;
 
@@ -1131,8 +1185,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
        skb_queue_purge(&sdata->u.ap.ps.bc_buf);
 
-       ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
        mutex_lock(&local->mtx);
+       ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
        ieee80211_vif_release_channel(sdata);
        mutex_unlock(&local->mtx);
 
@@ -1416,7 +1470,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 }
 
 static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
-                                u8 *mac, struct station_parameters *params)
+                                const u8 *mac,
+                                struct station_parameters *params)
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
        struct sta_info *sta;
@@ -1450,6 +1505,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) {
                sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
                sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
+       } else {
+               sta->sta.tdls = true;
        }
 
        err = sta_apply_parameters(local, sta, params);
@@ -1483,7 +1540,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 }
 
 static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
-                                u8 *mac)
+                                const u8 *mac)
 {
        struct ieee80211_sub_if_data *sdata;
 
@@ -1497,7 +1554,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
 }
 
 static int ieee80211_change_station(struct wiphy *wiphy,
-                                   struct net_device *dev, u8 *mac,
+                                   struct net_device *dev, const u8 *mac,
                                    struct station_parameters *params)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1566,7 +1623,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
 
                if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
                    sta->sdata->u.vlan.sta) {
-                       rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL);
+                       RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
                        prev_4addr = true;
                }
 
@@ -1622,7 +1679,7 @@ out_err:
 
 #ifdef CONFIG_MAC80211_MESH
 static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
-                                u8 *dst, u8 *next_hop)
+                              const u8 *dst, const u8 *next_hop)
 {
        struct ieee80211_sub_if_data *sdata;
        struct mesh_path *mpath;
@@ -1650,7 +1707,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
 }
 
 static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *dst)
+                              const u8 *dst)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
@@ -1661,9 +1718,8 @@ static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
        return 0;
 }
 
-static int ieee80211_change_mpath(struct wiphy *wiphy,
-                                   struct net_device *dev,
-                                   u8 *dst, u8 *next_hop)
+static int ieee80211_change_mpath(struct wiphy *wiphy, struct net_device *dev,
+                                 const u8 *dst, const u8 *next_hop)
 {
        struct ieee80211_sub_if_data *sdata;
        struct mesh_path *mpath;
@@ -1755,8 +1811,8 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
 }
 
 static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
-                                int idx, u8 *dst, u8 *next_hop,
-                                struct mpath_info *pinfo)
+                               int idx, u8 *dst, u8 *next_hop,
+                               struct mpath_info *pinfo)
 {
        struct ieee80211_sub_if_data *sdata;
        struct mesh_path *mpath;
@@ -2930,7 +2986,6 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
        /* whatever, but channel contexts should not complain about that one */
        sdata->smps_mode = IEEE80211_SMPS_OFF;
        sdata->needed_rx_chains = local->rx_chains;
-       sdata->radar_required = true;
 
        err = ieee80211_vif_use_channel(sdata, chandef,
                                        IEEE80211_CHANCTX_SHARED);
@@ -3011,26 +3066,11 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif)
 }
 EXPORT_SYMBOL(ieee80211_csa_finish);
 
-static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
+                                         u32 *changed)
 {
-       struct ieee80211_local *local = sdata->local;
-       int err, changed = 0;
-
-       sdata_assert_lock(sdata);
-
-       mutex_lock(&local->mtx);
-       sdata->radar_required = sdata->csa_radar_required;
-       err = ieee80211_vif_change_channel(sdata, &changed);
-       mutex_unlock(&local->mtx);
-       if (WARN_ON(err < 0))
-               return;
-
-       if (!local->use_chanctx) {
-               local->_oper_chandef = sdata->csa_chandef;
-               ieee80211_hw_config(local, 0);
-       }
+       int err;
 
-       sdata->vif.csa_active = false;
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP:
                err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
@@ -3038,35 +3078,74 @@ static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
                sdata->u.ap.next_beacon = NULL;
 
                if (err < 0)
-                       return;
-               changed |= err;
+                       return err;
+               *changed |= err;
                break;
        case NL80211_IFTYPE_ADHOC:
                err = ieee80211_ibss_finish_csa(sdata);
                if (err < 0)
-                       return;
-               changed |= err;
+                       return err;
+               *changed |= err;
                break;
 #ifdef CONFIG_MAC80211_MESH
        case NL80211_IFTYPE_MESH_POINT:
                err = ieee80211_mesh_finish_csa(sdata);
                if (err < 0)
-                       return;
-               changed |= err;
+                       return err;
+               *changed |= err;
                break;
 #endif
        default:
                WARN_ON(1);
-               return;
+               return -EINVAL;
        }
 
+       return 0;
+}
+
+static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       u32 changed = 0;
+       int err;
+
+       sdata_assert_lock(sdata);
+       lockdep_assert_held(&local->mtx);
+
+       sdata->radar_required = sdata->csa_radar_required;
+       err = ieee80211_vif_change_channel(sdata, &changed);
+       if (err < 0)
+               return err;
+
+       if (!local->use_chanctx) {
+               local->_oper_chandef = sdata->csa_chandef;
+               ieee80211_hw_config(local, 0);
+       }
+
+       sdata->vif.csa_active = false;
+
+       err = ieee80211_set_after_csa_beacon(sdata, &changed);
+       if (err)
+               return err;
+
        ieee80211_bss_info_change_notify(sdata, changed);
+       cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
 
-       ieee80211_wake_queues_by_reason(&sdata->local->hw,
+       if (!ieee80211_csa_needs_block_tx(local))
+               ieee80211_wake_queues_by_reason(&local->hw,
                                        IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
 
-       cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
+       return 0;
+}
+
+static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
+{
+       if (__ieee80211_csa_finalize(sdata)) {
+               sdata_info(sdata, "failed to finalize CSA, disconnecting\n");
+               cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev,
+                                   GFP_KERNEL);
+       }
 }
 
 void ieee80211_csa_finalize_work(struct work_struct *work)
@@ -3074,8 +3153,11 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
        struct ieee80211_sub_if_data *sdata =
                container_of(work, struct ieee80211_sub_if_data,
                             csa_finalize_work);
+       struct ieee80211_local *local = sdata->local;
 
        sdata_lock(sdata);
+       mutex_lock(&local->mtx);
+
        /* AP might have been stopped while waiting for the lock. */
        if (!sdata->vif.csa_active)
                goto unlock;
@@ -3086,6 +3168,7 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
        ieee80211_csa_finalize(sdata);
 
 unlock:
+       mutex_unlock(&local->mtx);
        sdata_unlock(sdata);
 }
 
@@ -3121,9 +3204,25 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
                if (params->count <= 1)
                        break;
 
-               sdata->csa_counter_offset_beacon =
-                       params->counter_offset_beacon;
-               sdata->csa_counter_offset_presp = params->counter_offset_presp;
+               if ((params->n_counter_offsets_beacon >
+                    IEEE80211_MAX_CSA_COUNTERS_NUM) ||
+                   (params->n_counter_offsets_presp >
+                    IEEE80211_MAX_CSA_COUNTERS_NUM))
+                       return -EINVAL;
+
+               /* make sure we don't have garbage in other counters */
+               memset(sdata->csa_counter_offset_beacon, 0,
+                      sizeof(sdata->csa_counter_offset_beacon));
+               memset(sdata->csa_counter_offset_presp, 0,
+                      sizeof(sdata->csa_counter_offset_presp));
+
+               memcpy(sdata->csa_counter_offset_beacon,
+                      params->counter_offsets_beacon,
+                      params->n_counter_offsets_beacon * sizeof(u16));
+               memcpy(sdata->csa_counter_offset_presp,
+                      params->counter_offsets_presp,
+                      params->n_counter_offsets_presp * sizeof(u16));
+
                err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
                if (err < 0) {
                        kfree(sdata->u.ap.next_beacon);
@@ -3212,16 +3311,18 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
        return 0;
 }
 
-int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
-                            struct cfg80211_csa_settings *params)
+static int
+__ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+                          struct cfg80211_csa_settings *params)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *chanctx;
        int err, num_chanctx, changed = 0;
 
        sdata_assert_lock(sdata);
+       lockdep_assert_held(&local->mtx);
 
        if (!list_empty(&local->roc_list) || local->scanning)
                return -EBUSY;
@@ -3233,23 +3334,24 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                                       &sdata->vif.bss_conf.chandef))
                return -EINVAL;
 
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-       if (!chanctx_conf) {
-               rcu_read_unlock();
+       mutex_lock(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf) {
+               mutex_unlock(&local->chanctx_mtx);
                return -EBUSY;
        }
 
        /* don't handle for multi-VIF cases */
-       chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
-       if (chanctx->refcount > 1) {
-               rcu_read_unlock();
+       chanctx = container_of(conf, struct ieee80211_chanctx, conf);
+       if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
+               mutex_unlock(&local->chanctx_mtx);
                return -EBUSY;
        }
        num_chanctx = 0;
        list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
                num_chanctx++;
-       rcu_read_unlock();
+       mutex_unlock(&local->chanctx_mtx);
 
        if (num_chanctx > 1)
                return -EBUSY;
@@ -3263,15 +3365,16 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                return err;
 
        sdata->csa_radar_required = params->radar_required;
-
-       if (params->block_tx)
-               ieee80211_stop_queues_by_reason(&local->hw,
-                               IEEE80211_MAX_QUEUE_MAP,
-                               IEEE80211_QUEUE_STOP_REASON_CSA);
-
        sdata->csa_chandef = params->chandef;
+       sdata->csa_block_tx = params->block_tx;
+       sdata->csa_current_counter = params->count;
        sdata->vif.csa_active = true;
 
+       if (sdata->csa_block_tx)
+               ieee80211_stop_queues_by_reason(&local->hw,
+                                       IEEE80211_MAX_QUEUE_MAP,
+                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+
        if (changed) {
                ieee80211_bss_info_change_notify(sdata, changed);
                drv_channel_switch_beacon(sdata, &params->chandef);
@@ -3283,6 +3386,20 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
        return 0;
 }
 
+int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+                            struct cfg80211_csa_settings *params)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       int err;
+
+       mutex_lock(&local->mtx);
+       err = __ieee80211_channel_switch(wiphy, dev, params);
+       mutex_unlock(&local->mtx);
+
+       return err;
+}
+
 static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                             struct cfg80211_mgmt_tx_params *params,
                             u64 *cookie)
@@ -3295,6 +3412,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        bool need_offchan = false;
        u32 flags;
        int ret;
+       u8 *data;
 
        if (params->dont_wait_for_ack)
                flags = IEEE80211_TX_CTL_NO_ACK;
@@ -3388,7 +3506,20 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        }
        skb_reserve(skb, local->hw.extra_tx_headroom);
 
-       memcpy(skb_put(skb, params->len), params->buf, params->len);
+       data = skb_put(skb, params->len);
+       memcpy(data, params->buf, params->len);
+
+       /* Update CSA counters */
+       if (sdata->vif.csa_active &&
+           (sdata->vif.type == NL80211_IFTYPE_AP ||
+            sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
+           params->n_csa_offsets) {
+               int i;
+               u8 c = sdata->csa_current_counter;
+
+               for (i = 0; i < params->n_csa_offsets; i++)
+                       data[params->csa_offsets[i]] = c;
+       }
 
        IEEE80211_SKB_CB(skb)->flags = flags;
 
@@ -3497,320 +3628,6 @@ static int ieee80211_set_rekey_data(struct wiphy *wiphy,
        return 0;
 }
 
-static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
-{
-       u8 *pos = (void *)skb_put(skb, 7);
-
-       *pos++ = WLAN_EID_EXT_CAPABILITY;
-       *pos++ = 5; /* len */
-       *pos++ = 0x0;
-       *pos++ = 0x0;
-       *pos++ = 0x0;
-       *pos++ = 0x0;
-       *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
-}
-
-static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
-{
-       struct ieee80211_local *local = sdata->local;
-       u16 capab;
-
-       capab = 0;
-       if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
-               return capab;
-
-       if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
-               capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
-       if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
-               capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
-
-       return capab;
-}
-
-static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr,
-                                      u8 *peer, u8 *bssid)
-{
-       struct ieee80211_tdls_lnkie *lnkid;
-
-       lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
-
-       lnkid->ie_type = WLAN_EID_LINK_ID;
-       lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
-
-       memcpy(lnkid->bssid, bssid, ETH_ALEN);
-       memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
-       memcpy(lnkid->resp_sta, peer, ETH_ALEN);
-}
-
-static int
-ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *peer, u8 action_code, u8 dialog_token,
-                              u16 status_code, struct sk_buff *skb)
-{
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
-       struct ieee80211_tdls_data *tf;
-
-       tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
-
-       memcpy(tf->da, peer, ETH_ALEN);
-       memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
-       tf->ether_type = cpu_to_be16(ETH_P_TDLS);
-       tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
-
-       switch (action_code) {
-       case WLAN_TDLS_SETUP_REQUEST:
-               tf->category = WLAN_CATEGORY_TDLS;
-               tf->action_code = WLAN_TDLS_SETUP_REQUEST;
-
-               skb_put(skb, sizeof(tf->u.setup_req));
-               tf->u.setup_req.dialog_token = dialog_token;
-               tf->u.setup_req.capability =
-                       cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
-
-               ieee80211_add_srates_ie(sdata, skb, false, band);
-               ieee80211_add_ext_srates_ie(sdata, skb, false, band);
-               ieee80211_tdls_add_ext_capab(skb);
-               break;
-       case WLAN_TDLS_SETUP_RESPONSE:
-               tf->category = WLAN_CATEGORY_TDLS;
-               tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
-
-               skb_put(skb, sizeof(tf->u.setup_resp));
-               tf->u.setup_resp.status_code = cpu_to_le16(status_code);
-               tf->u.setup_resp.dialog_token = dialog_token;
-               tf->u.setup_resp.capability =
-                       cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
-
-               ieee80211_add_srates_ie(sdata, skb, false, band);
-               ieee80211_add_ext_srates_ie(sdata, skb, false, band);
-               ieee80211_tdls_add_ext_capab(skb);
-               break;
-       case WLAN_TDLS_SETUP_CONFIRM:
-               tf->category = WLAN_CATEGORY_TDLS;
-               tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
-
-               skb_put(skb, sizeof(tf->u.setup_cfm));
-               tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
-               tf->u.setup_cfm.dialog_token = dialog_token;
-               break;
-       case WLAN_TDLS_TEARDOWN:
-               tf->category = WLAN_CATEGORY_TDLS;
-               tf->action_code = WLAN_TDLS_TEARDOWN;
-
-               skb_put(skb, sizeof(tf->u.teardown));
-               tf->u.teardown.reason_code = cpu_to_le16(status_code);
-               break;
-       case WLAN_TDLS_DISCOVERY_REQUEST:
-               tf->category = WLAN_CATEGORY_TDLS;
-               tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
-
-               skb_put(skb, sizeof(tf->u.discover_req));
-               tf->u.discover_req.dialog_token = dialog_token;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int
-ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
-                          u8 *peer, u8 action_code, u8 dialog_token,
-                          u16 status_code, struct sk_buff *skb)
-{
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
-       struct ieee80211_mgmt *mgmt;
-
-       mgmt = (void *)skb_put(skb, 24);
-       memset(mgmt, 0, 24);
-       memcpy(mgmt->da, peer, ETH_ALEN);
-       memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
-       memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
-
-       mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
-                                         IEEE80211_STYPE_ACTION);
-
-       switch (action_code) {
-       case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
-               skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
-               mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
-               mgmt->u.action.u.tdls_discover_resp.action_code =
-                       WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
-               mgmt->u.action.u.tdls_discover_resp.dialog_token =
-                       dialog_token;
-               mgmt->u.action.u.tdls_discover_resp.capability =
-                       cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
-
-               ieee80211_add_srates_ie(sdata, skb, false, band);
-               ieee80211_add_ext_srates_ie(sdata, skb, false, band);
-               ieee80211_tdls_add_ext_capab(skb);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *peer, u8 action_code, u8 dialog_token,
-                              u16 status_code, u32 peer_capability,
-                              const u8 *extra_ies, size_t extra_ies_len)
-{
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-       struct ieee80211_local *local = sdata->local;
-       struct sk_buff *skb = NULL;
-       bool send_direct;
-       int ret;
-
-       if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
-               return -ENOTSUPP;
-
-       /* make sure we are in managed mode, and associated */
-       if (sdata->vif.type != NL80211_IFTYPE_STATION ||
-           !sdata->u.mgd.associated)
-               return -EINVAL;
-
-       tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
-                action_code, peer);
-
-       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
-                           max(sizeof(struct ieee80211_mgmt),
-                               sizeof(struct ieee80211_tdls_data)) +
-                           50 + /* supported rates */
-                           7 + /* ext capab */
-                           extra_ies_len +
-                           sizeof(struct ieee80211_tdls_lnkie));
-       if (!skb)
-               return -ENOMEM;
-
-       skb_reserve(skb, local->hw.extra_tx_headroom);
-
-       switch (action_code) {
-       case WLAN_TDLS_SETUP_REQUEST:
-       case WLAN_TDLS_SETUP_RESPONSE:
-       case WLAN_TDLS_SETUP_CONFIRM:
-       case WLAN_TDLS_TEARDOWN:
-       case WLAN_TDLS_DISCOVERY_REQUEST:
-               ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
-                                                    action_code, dialog_token,
-                                                    status_code, skb);
-               send_direct = false;
-               break;
-       case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
-               ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
-                                                dialog_token, status_code,
-                                                skb);
-               send_direct = true;
-               break;
-       default:
-               ret = -ENOTSUPP;
-               break;
-       }
-
-       if (ret < 0)
-               goto fail;
-
-       if (extra_ies_len)
-               memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
-
-       /* the TDLS link IE is always added last */
-       switch (action_code) {
-       case WLAN_TDLS_SETUP_REQUEST:
-       case WLAN_TDLS_SETUP_CONFIRM:
-       case WLAN_TDLS_TEARDOWN:
-       case WLAN_TDLS_DISCOVERY_REQUEST:
-               /* we are the initiator */
-               ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
-                                          sdata->u.mgd.bssid);
-               break;
-       case WLAN_TDLS_SETUP_RESPONSE:
-       case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
-               /* we are the responder */
-               ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
-                                          sdata->u.mgd.bssid);
-               break;
-       default:
-               ret = -ENOTSUPP;
-               goto fail;
-       }
-
-       if (send_direct) {
-               ieee80211_tx_skb(sdata, skb);
-               return 0;
-       }
-
-       /*
-        * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
-        * we should default to AC_VI.
-        */
-       switch (action_code) {
-       case WLAN_TDLS_SETUP_REQUEST:
-       case WLAN_TDLS_SETUP_RESPONSE:
-               skb_set_queue_mapping(skb, IEEE80211_AC_BK);
-               skb->priority = 2;
-               break;
-       default:
-               skb_set_queue_mapping(skb, IEEE80211_AC_VI);
-               skb->priority = 5;
-               break;
-       }
-
-       /* disable bottom halves when entering the Tx path */
-       local_bh_disable();
-       ret = ieee80211_subif_start_xmit(skb, dev);
-       local_bh_enable();
-
-       return ret;
-
-fail:
-       dev_kfree_skb(skb);
-       return ret;
-}
-
-static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
-                              u8 *peer, enum nl80211_tdls_operation oper)
-{
-       struct sta_info *sta;
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-       if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
-               return -ENOTSUPP;
-
-       if (sdata->vif.type != NL80211_IFTYPE_STATION)
-               return -EINVAL;
-
-       tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
-
-       switch (oper) {
-       case NL80211_TDLS_ENABLE_LINK:
-               rcu_read_lock();
-               sta = sta_info_get(sdata, peer);
-               if (!sta) {
-                       rcu_read_unlock();
-                       return -ENOLINK;
-               }
-
-               set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
-               rcu_read_unlock();
-               break;
-       case NL80211_TDLS_DISABLE_LINK:
-               return sta_info_destroy_addr(sdata, peer);
-       case NL80211_TDLS_TEARDOWN:
-       case NL80211_TDLS_SETUP:
-       case NL80211_TDLS_DISCOVERY_REQ:
-               /* We don't support in-driver setup/teardown/discovery */
-               return -ENOTSUPP;
-       default:
-               return -ENOTSUPP;
-       }
-
-       return 0;
-}
-
 static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
                                  const u8 *peer, u64 *cookie)
 {
@@ -3949,6 +3766,21 @@ static int ieee80211_set_qos_map(struct wiphy *wiphy,
        return 0;
 }
 
+static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
+                                     struct net_device *dev,
+                                     struct cfg80211_chan_def *chandef)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       int ret;
+       u32 changed = 0;
+
+       ret = ieee80211_vif_change_bandwidth(sdata, chandef, &changed);
+       if (ret == 0)
+               ieee80211_bss_info_change_notify(sdata, changed);
+
+       return ret;
+}
+
 const struct cfg80211_ops mac80211_config_ops = {
        .add_virtual_intf = ieee80211_add_iface,
        .del_virtual_intf = ieee80211_del_iface,
@@ -4029,4 +3861,5 @@ const struct cfg80211_ops mac80211_config_ops = {
        .start_radar_detection = ieee80211_start_radar_detection,
        .channel_switch = ieee80211_channel_switch,
        .set_qos_map = ieee80211_set_qos_map,
+       .set_ap_chanwidth = ieee80211_set_ap_chanwidth,
 };
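
The cfg.c changes above do two things: the in-place TDLS setup/teardown handlers are deleted (presumably moved into a dedicated TDLS source file elsewhere in this series), and a new set_ap_chanwidth callback is wired into mac80211_config_ops, which changes the vif bandwidth and only calls ieee80211_bss_info_change_notify() when the change succeeded. One detail worth keeping in mind from the removed block is the access-category rule for TDLS frames; a minimal sketch of how that rule could be factored into a helper (example_tdls_set_queue is a made-up name, the constants and calls are taken from the removed code above):

static void example_tdls_set_queue(struct sk_buff *skb, u8 action_code)
{
        /* 802.11z: setup request/response frames go out on AC_BK,
         * every other TDLS frame on AC_VI (see the removed switch
         * statement above). */
        switch (action_code) {
        case WLAN_TDLS_SETUP_REQUEST:
        case WLAN_TDLS_SETUP_RESPONSE:
                skb_set_queue_mapping(skb, IEEE80211_AC_BK);
                skb->priority = 2;
                break;
        default:
                skb_set_queue_mapping(skb, IEEE80211_AC_VI);
                skb->priority = 5;
                break;
        }
}
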
index 75b5dd2c9267f10e8cb0e5680c3aa11c94a5dbe4..a310e33972de8881bf4dd71bdff36d55fa966226 100644 (file)
@@ -9,6 +9,170 @@
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
+static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local,
+                                         struct ieee80211_chanctx *ctx)
+{
+       struct ieee80211_sub_if_data *sdata;
+       int num = 0;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)
+               num++;
+
+       return num;
+}
+
+static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local,
+                                         struct ieee80211_chanctx *ctx)
+{
+       struct ieee80211_sub_if_data *sdata;
+       int num = 0;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list)
+               num++;
+
+       return num;
+}
+
+int ieee80211_chanctx_refcount(struct ieee80211_local *local,
+                              struct ieee80211_chanctx *ctx)
+{
+       return ieee80211_chanctx_num_assigned(local, ctx) +
+              ieee80211_chanctx_num_reserved(local, ctx);
+}
+
+static int ieee80211_num_chanctx(struct ieee80211_local *local)
+{
+       struct ieee80211_chanctx *ctx;
+       int num = 0;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(ctx, &local->chanctx_list, list)
+               num++;
+
+       return num;
+}
+
+static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local)
+{
+       lockdep_assert_held(&local->chanctx_mtx);
+       return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local);
+}
+
+static const struct cfg80211_chan_def *
+ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
+                                  struct ieee80211_chanctx *ctx,
+                                  const struct cfg80211_chan_def *compat)
+{
+       struct ieee80211_sub_if_data *sdata;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(sdata, &ctx->reserved_vifs,
+                           reserved_chanctx_list) {
+               if (!compat)
+                       compat = &sdata->reserved_chandef;
+
+               compat = cfg80211_chandef_compatible(&sdata->reserved_chandef,
+                                                    compat);
+               if (!compat)
+                       break;
+       }
+
+       return compat;
+}
+
+static const struct cfg80211_chan_def *
+ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local,
+                                      struct ieee80211_chanctx *ctx,
+                                      const struct cfg80211_chan_def *compat)
+{
+       struct ieee80211_sub_if_data *sdata;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(sdata, &ctx->assigned_vifs,
+                           assigned_chanctx_list) {
+               if (sdata->reserved_chanctx != NULL)
+                       continue;
+
+               if (!compat)
+                       compat = &sdata->vif.bss_conf.chandef;
+
+               compat = cfg80211_chandef_compatible(
+                               &sdata->vif.bss_conf.chandef, compat);
+               if (!compat)
+                       break;
+       }
+
+       return compat;
+}
+
+static const struct cfg80211_chan_def *
+ieee80211_chanctx_combined_chandef(struct ieee80211_local *local,
+                                  struct ieee80211_chanctx *ctx,
+                                  const struct cfg80211_chan_def *compat)
+{
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat);
+       if (!compat)
+               return NULL;
+
+       compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat);
+       if (!compat)
+               return NULL;
+
+       return compat;
+}
+
+static bool
+ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
+                                     struct ieee80211_chanctx *ctx,
+                                     const struct cfg80211_chan_def *def)
+{
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       if (ieee80211_chanctx_combined_chandef(local, ctx, def))
+               return true;
+
+       if (!list_empty(&ctx->reserved_vifs) &&
+           ieee80211_chanctx_reserved_chandef(local, ctx, def))
+               return true;
+
+       return false;
+}
+
+static struct ieee80211_chanctx *
+ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
+                                  const struct cfg80211_chan_def *chandef,
+                                  enum ieee80211_chanctx_mode mode)
+{
+       struct ieee80211_chanctx *ctx;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
+               return NULL;
+
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
+                       continue;
+
+               if (!ieee80211_chanctx_can_reserve_chandef(local, ctx,
+                                                          chandef))
+                       continue;
+
+               return ctx;
+       }
+
+       return NULL;
+}
+
 static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
 {
        switch (sta->bandwidth) {
@@ -190,6 +354,11 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
                if (!compat)
                        continue;
 
+               compat = ieee80211_chanctx_reserved_chandef(local, ctx,
+                                                           compat);
+               if (!compat)
+                       continue;
+
                ieee80211_change_chanctx(local, ctx, compat);
 
                return ctx;
@@ -217,62 +386,91 @@ static bool ieee80211_is_radar_required(struct ieee80211_local *local)
 }
 
 static struct ieee80211_chanctx *
-ieee80211_new_chanctx(struct ieee80211_local *local,
-                     const struct cfg80211_chan_def *chandef,
-                     enum ieee80211_chanctx_mode mode)
+ieee80211_alloc_chanctx(struct ieee80211_local *local,
+                       const struct cfg80211_chan_def *chandef,
+                       enum ieee80211_chanctx_mode mode)
 {
        struct ieee80211_chanctx *ctx;
-       u32 changed;
-       int err;
 
        lockdep_assert_held(&local->chanctx_mtx);
 
        ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
        if (!ctx)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
+       INIT_LIST_HEAD(&ctx->assigned_vifs);
+       INIT_LIST_HEAD(&ctx->reserved_vifs);
        ctx->conf.def = *chandef;
        ctx->conf.rx_chains_static = 1;
        ctx->conf.rx_chains_dynamic = 1;
        ctx->mode = mode;
        ctx->conf.radar_enabled = ieee80211_is_radar_required(local);
        ieee80211_recalc_chanctx_min_def(local, ctx);
+
+       return ctx;
+}
+
+static int ieee80211_add_chanctx(struct ieee80211_local *local,
+                                struct ieee80211_chanctx *ctx)
+{
+       u32 changed;
+       int err;
+
+       lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
+
        if (!local->use_chanctx)
                local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
 
-       /* we hold the mutex to prevent idle from changing */
-       lockdep_assert_held(&local->mtx);
        /* turn idle off *before* setting channel -- some drivers need that */
        changed = ieee80211_idle_off(local);
        if (changed)
                ieee80211_hw_config(local, changed);
 
        if (!local->use_chanctx) {
-               local->_oper_chandef = *chandef;
+               local->_oper_chandef = ctx->conf.def;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
        } else {
                err = drv_add_chanctx(local, ctx);
                if (err) {
-                       kfree(ctx);
                        ieee80211_recalc_idle(local);
-                       return ERR_PTR(err);
+                       return err;
                }
        }
 
-       /* and keep the mutex held until the new chanctx is on the list */
-       list_add_rcu(&ctx->list, &local->chanctx_list);
+       return 0;
+}
 
+static struct ieee80211_chanctx *
+ieee80211_new_chanctx(struct ieee80211_local *local,
+                     const struct cfg80211_chan_def *chandef,
+                     enum ieee80211_chanctx_mode mode)
+{
+       struct ieee80211_chanctx *ctx;
+       int err;
+
+       lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+       if (!ctx)
+               return ERR_PTR(-ENOMEM);
+
+       err = ieee80211_add_chanctx(local, ctx);
+       if (err) {
+               kfree(ctx);
+               return ERR_PTR(err);
+       }
+
+       list_add_rcu(&ctx->list, &local->chanctx_list);
        return ctx;
 }
 
-static void ieee80211_free_chanctx(struct ieee80211_local *local,
-                                  struct ieee80211_chanctx *ctx)
+static void ieee80211_del_chanctx(struct ieee80211_local *local,
+                                 struct ieee80211_chanctx *ctx)
 {
-       bool check_single_channel = false;
        lockdep_assert_held(&local->chanctx_mtx);
 
-       WARN_ON_ONCE(ctx->refcount != 0);
-
        if (!local->use_chanctx) {
                struct cfg80211_chan_def *chandef = &local->_oper_chandef;
                chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -282,8 +480,9 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
                /* NOTE: Disabling radar is only valid here for
                 * single channel context. To be sure, check it ...
                 */
-               if (local->hw.conf.radar_enabled)
-                       check_single_channel = true;
+               WARN_ON(local->hw.conf.radar_enabled &&
+                       !list_empty(&local->chanctx_list));
+
                local->hw.conf.radar_enabled = false;
 
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
@@ -291,39 +490,19 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
                drv_remove_chanctx(local, ctx);
        }
 
-       list_del_rcu(&ctx->list);
-       kfree_rcu(ctx, rcu_head);
-
-       /* throw a warning if this wasn't the only channel context. */
-       WARN_ON(check_single_channel && !list_empty(&local->chanctx_list));
-
        ieee80211_recalc_idle(local);
 }
 
-static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
-                                       struct ieee80211_chanctx *ctx)
+static void ieee80211_free_chanctx(struct ieee80211_local *local,
+                                  struct ieee80211_chanctx *ctx)
 {
-       struct ieee80211_local *local = sdata->local;
-       int ret;
-
        lockdep_assert_held(&local->chanctx_mtx);
 
-       ret = drv_assign_vif_chanctx(local, sdata, ctx);
-       if (ret)
-               return ret;
+       WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0);
 
-       rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf);
-       ctx->refcount++;
-
-       ieee80211_recalc_txpower(sdata);
-       ieee80211_recalc_chanctx_min_def(local, ctx);
-       sdata->vif.bss_conf.idle = false;
-
-       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
-           sdata->vif.type != NL80211_IFTYPE_MONITOR)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
-
-       return 0;
+       list_del_rcu(&ctx->list);
+       ieee80211_del_chanctx(local, ctx);
+       kfree_rcu(ctx, rcu_head);
 }
 
 static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
@@ -384,30 +563,58 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
        drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
 }
 
-static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
-                                          struct ieee80211_chanctx *ctx)
+static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
+                                       struct ieee80211_chanctx *new_ctx)
 {
        struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *conf;
+       struct ieee80211_chanctx *curr_ctx = NULL;
+       int ret = 0;
 
-       lockdep_assert_held(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
 
-       ctx->refcount--;
-       rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
+       if (conf) {
+               curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
-       sdata->vif.bss_conf.idle = true;
+               drv_unassign_vif_chanctx(local, sdata, curr_ctx);
+               conf = NULL;
+               list_del(&sdata->assigned_chanctx_list);
+       }
 
-       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
-           sdata->vif.type != NL80211_IFTYPE_MONITOR)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+       if (new_ctx) {
+               ret = drv_assign_vif_chanctx(local, sdata, new_ctx);
+               if (ret)
+                       goto out;
 
-       drv_unassign_vif_chanctx(local, sdata, ctx);
+               conf = &new_ctx->conf;
+               list_add(&sdata->assigned_chanctx_list,
+                        &new_ctx->assigned_vifs);
+       }
+
+out:
+       rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
+
+       sdata->vif.bss_conf.idle = !conf;
+
+       if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) {
+               ieee80211_recalc_chanctx_chantype(local, curr_ctx);
+               ieee80211_recalc_smps_chanctx(local, curr_ctx);
+               ieee80211_recalc_radar_chanctx(local, curr_ctx);
+               ieee80211_recalc_chanctx_min_def(local, curr_ctx);
+       }
 
-       if (ctx->refcount > 0) {
-               ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
-               ieee80211_recalc_smps_chanctx(local, ctx);
-               ieee80211_recalc_radar_chanctx(local, ctx);
-               ieee80211_recalc_chanctx_min_def(local, ctx);
+       if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
+               ieee80211_recalc_txpower(sdata);
+               ieee80211_recalc_chanctx_min_def(local, new_ctx);
        }
+
+       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+           sdata->vif.type != NL80211_IFTYPE_MONITOR)
+               ieee80211_bss_info_change_notify(sdata,
+                                                BSS_CHANGED_IDLE);
+
+       return ret;
 }
 
 static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
@@ -425,8 +632,11 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 
        ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
-       ieee80211_unassign_vif_chanctx(sdata, ctx);
-       if (ctx->refcount == 0)
+       if (sdata->reserved_chanctx)
+               ieee80211_vif_unreserve_chanctx(sdata);
+
+       ieee80211_assign_vif_chanctx(sdata, NULL);
+       if (ieee80211_chanctx_refcount(local, ctx) == 0)
                ieee80211_free_chanctx(local, ctx);
 }
 
@@ -526,6 +736,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx *ctx;
+       u8 radar_detect_width = 0;
        int ret;
 
        lockdep_assert_held(&local->mtx);
@@ -533,6 +744,22 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
 
        mutex_lock(&local->chanctx_mtx);
+
+       ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
+                                           chandef,
+                                           sdata->wdev.iftype);
+       if (ret < 0)
+               goto out;
+       if (ret > 0)
+               radar_detect_width = BIT(chandef->width);
+
+       sdata->radar_required = ret;
+
+       ret = ieee80211_check_combinations(sdata, chandef, mode,
+                                          radar_detect_width);
+       if (ret < 0)
+               goto out;
+
        __ieee80211_vif_release_channel(sdata);
 
        ctx = ieee80211_find_chanctx(local, chandef, mode);
@@ -548,7 +775,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        ret = ieee80211_assign_vif_chanctx(sdata, ctx);
        if (ret) {
                /* if assign fails refcount stays the same */
-               if (ctx->refcount == 0)
+               if (ieee80211_chanctx_refcount(local, ctx) == 0)
                        ieee80211_free_chanctx(local, ctx);
                goto out;
        }
@@ -560,15 +787,47 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        return ret;
 }
 
+static int __ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+                                         struct ieee80211_chanctx *ctx,
+                                         u32 *changed)
+{
+       struct ieee80211_local *local = sdata->local;
+       const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
+       u32 chanctx_changed = 0;
+
+       if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+                                    IEEE80211_CHAN_DISABLED))
+               return -EINVAL;
+
+       if (ieee80211_chanctx_refcount(local, ctx) != 1)
+               return -EINVAL;
+
+       if (sdata->vif.bss_conf.chandef.width != chandef->width) {
+               chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
+               *changed |= BSS_CHANGED_BANDWIDTH;
+       }
+
+       sdata->vif.bss_conf.chandef = *chandef;
+       ctx->conf.def = *chandef;
+
+       chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
+       drv_change_chanctx(local, ctx, chanctx_changed);
+
+       ieee80211_recalc_chanctx_chantype(local, ctx);
+       ieee80211_recalc_smps_chanctx(local, ctx);
+       ieee80211_recalc_radar_chanctx(local, ctx);
+       ieee80211_recalc_chanctx_min_def(local, ctx);
+
+       return 0;
+}
+
 int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
                                 u32 *changed)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;
-       const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
        int ret;
-       u32 chanctx_changed = 0;
 
        lockdep_assert_held(&local->mtx);
 
@@ -576,11 +835,94 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
        if (WARN_ON(!sdata->vif.csa_active))
                return -EINVAL;
 
-       if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-                                    IEEE80211_CHAN_DISABLED))
+       mutex_lock(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ctx = container_of(conf, struct ieee80211_chanctx, conf);
+
+       ret = __ieee80211_vif_change_channel(sdata, ctx, changed);
+ out:
+       mutex_unlock(&local->chanctx_mtx);
+       return ret;
+}
+
+static void
+__ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
+                                     bool clear)
+{
+       struct ieee80211_local *local __maybe_unused = sdata->local;
+       struct ieee80211_sub_if_data *vlan;
+       struct ieee80211_chanctx_conf *conf;
+
+       if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
+               return;
+
+       lockdep_assert_held(&local->mtx);
+
+       /* Check that conf exists; even when clearing, this function
+        * must be called with the AP's channel context still there,
+        * as it would otherwise cause VLANs to have an invalid
+        * channel context pointer for a while, possibly pointing
+        * to a channel context that has already been freed.
+        */
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       WARN_ON(!conf);
+
+       if (clear)
+               conf = NULL;
+
+       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+               rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
+}
+
+void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
+                                        bool clear)
+{
+       struct ieee80211_local *local = sdata->local;
+
+       mutex_lock(&local->chanctx_mtx);
+
+       __ieee80211_vif_copy_chanctx_to_vlans(sdata, clear);
+
+       mutex_unlock(&local->chanctx_mtx);
+}
+
+int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_chanctx *ctx = sdata->reserved_chanctx;
+
+       lockdep_assert_held(&sdata->local->chanctx_mtx);
+
+       if (WARN_ON(!ctx))
                return -EINVAL;
 
+       list_del(&sdata->reserved_chanctx_list);
+       sdata->reserved_chanctx = NULL;
+
+       if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0)
+               ieee80211_free_chanctx(sdata->local, ctx);
+
+       return 0;
+}
+
+int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
+                                 const struct cfg80211_chan_def *chandef,
+                                 enum ieee80211_chanctx_mode mode,
+                                 bool radar_required)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *conf;
+       struct ieee80211_chanctx *new_ctx, *curr_ctx;
+       int ret = 0;
+
        mutex_lock(&local->chanctx_mtx);
+
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (!conf) {
@@ -588,30 +930,108 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       ctx = container_of(conf, struct ieee80211_chanctx, conf);
-       if (ctx->refcount != 1) {
+       curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
+
+       new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode);
+       if (!new_ctx) {
+               if (ieee80211_chanctx_refcount(local, curr_ctx) == 1 &&
+                   (local->hw.flags & IEEE80211_HW_CHANGE_RUNNING_CHANCTX)) {
+                       /* if we're the only user of the chanctx and
+                        * the driver supports changing a running
+                        * context, reserve our current context
+                        */
+                       new_ctx = curr_ctx;
+               } else if (ieee80211_can_create_new_chanctx(local)) {
+                       /* create a new context and reserve it */
+                       new_ctx = ieee80211_new_chanctx(local, chandef, mode);
+                       if (IS_ERR(new_ctx)) {
+                               ret = PTR_ERR(new_ctx);
+                               goto out;
+                       }
+               } else {
+                       ret = -EBUSY;
+                       goto out;
+               }
+       }
+
+       list_add(&sdata->reserved_chanctx_list, &new_ctx->reserved_vifs);
+       sdata->reserved_chanctx = new_ctx;
+       sdata->reserved_chandef = *chandef;
+       sdata->reserved_radar_required = radar_required;
+out:
+       mutex_unlock(&local->chanctx_mtx);
+       return ret;
+}
+
+int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
+                                      u32 *changed)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx *ctx;
+       struct ieee80211_chanctx *old_ctx;
+       struct ieee80211_chanctx_conf *conf;
+       int ret;
+       u32 tmp_changed = *changed;
+
+       /* TODO: need to recheck if the chandef is usable etc.? */
+
+       lockdep_assert_held(&local->mtx);
+
+       mutex_lock(&local->chanctx_mtx);
+
+       ctx = sdata->reserved_chanctx;
+       if (WARN_ON(!ctx)) {
                ret = -EINVAL;
                goto out;
        }
 
-       if (sdata->vif.bss_conf.chandef.width != chandef->width) {
-               chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
-               *changed |= BSS_CHANGED_BANDWIDTH;
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf) {
+               ret = -EINVAL;
+               goto out;
        }
 
-       sdata->vif.bss_conf.chandef = *chandef;
-       ctx->conf.def = *chandef;
+       old_ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
-       chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
-       drv_change_chanctx(local, ctx, chanctx_changed);
+       if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
+               tmp_changed |= BSS_CHANGED_BANDWIDTH;
+
+       sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
+
+       /* unref our reservation */
+       sdata->reserved_chanctx = NULL;
+       sdata->radar_required = sdata->reserved_radar_required;
+       list_del(&sdata->reserved_chanctx_list);
+
+       if (old_ctx == ctx) {
+               /* This is our own context, just change it */
+               ret = __ieee80211_vif_change_channel(sdata, old_ctx,
+                                                    &tmp_changed);
+               if (ret)
+                       goto out;
+       } else {
+               ret = ieee80211_assign_vif_chanctx(sdata, ctx);
+               if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
+                       ieee80211_free_chanctx(local, old_ctx);
+               if (ret) {
+                       /* if assign fails refcount stays the same */
+                       if (ieee80211_chanctx_refcount(local, ctx) == 0)
+                               ieee80211_free_chanctx(local, ctx);
+                       goto out;
+               }
+
+               if (sdata->vif.type == NL80211_IFTYPE_AP)
+                       __ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
+       }
+
+       *changed = tmp_changed;
 
        ieee80211_recalc_chanctx_chantype(local, ctx);
        ieee80211_recalc_smps_chanctx(local, ctx);
        ieee80211_recalc_radar_chanctx(local, ctx);
        ieee80211_recalc_chanctx_min_def(local, ctx);
-
-       ret = 0;
- out:
+out:
        mutex_unlock(&local->chanctx_mtx);
        return ret;
 }
@@ -695,40 +1115,6 @@ void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&local->chanctx_mtx);
 }
 
-void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
-                                        bool clear)
-{
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_sub_if_data *vlan;
-       struct ieee80211_chanctx_conf *conf;
-
-       ASSERT_RTNL();
-
-       if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
-               return;
-
-       mutex_lock(&local->chanctx_mtx);
-
-       /*
-        * Check that conf exists, even when clearing this function
-        * must be called with the AP's channel context still there
-        * as it would otherwise cause VLANs to have an invalid
-        * channel context pointer for a while, possibly pointing
-        * to a channel context that has already been freed.
-        */
-       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
-                               lockdep_is_held(&local->chanctx_mtx));
-       WARN_ON(!conf);
-
-       if (clear)
-               conf = NULL;
-
-       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
-               rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
-
-       mutex_unlock(&local->chanctx_mtx);
-}
-
 void ieee80211_iter_chan_contexts_atomic(
        struct ieee80211_hw *hw,
        void (*iter)(struct ieee80211_hw *hw,
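
The bulk of the chan.c rework replaces the integer refcount on a channel context with two lists (assigned_vifs and reserved_vifs) whose lengths are summed by ieee80211_chanctx_refcount(), and introduces a two-step reservation API. Below is a sketch of the intended call order for that API, assuming the caller holds local->mtx as ieee80211_vif_use_reserved_context() asserts; example_switch_via_reservation and its error handling are illustrative, not a caller added by this patch, and the real users land in later channel-switch work.

static int example_switch_via_reservation(struct ieee80211_sub_if_data *sdata,
                                          const struct cfg80211_chan_def *chandef,
                                          bool radar_required)
{
        u32 changed = 0;
        int err;

        /* Step 1: reserve a compatible (or newly created) chanctx for
         * the target chandef; the vif keeps using its current context
         * and is only parked on the new context's reserved_vifs list. */
        err = ieee80211_vif_reserve_chanctx(sdata, chandef,
                                            IEEE80211_CHANCTX_SHARED,
                                            radar_required);
        if (err)
                return err;

        /* ... announce the switch, run the CSA countdown, etc.  If we
         * had to abort at this point, ieee80211_vif_unreserve_chanctx()
         * would drop the reservation again. */

        /* Step 2: move the vif onto the reserved context (or reprogram
         * the current one if it was reserved in place) and collect the
         * resulting BSS_CHANGED_* bits, e.g. BSS_CHANGED_BANDWIDTH. */
        err = ieee80211_vif_use_reserved_context(sdata, &changed);
        if (err)
                return err;

        ieee80211_bss_info_change_notify(sdata, changed);
        return 0;
}
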
index fa16e54980a1d3e76ce2f85fcb3253eb2599e838..0e963bc1ceac3109f378431a617b853a7166df37 100644 (file)
@@ -128,7 +128,7 @@ static ssize_t sta_tx_latency_stat_write(struct file *file,
        if (!strcmp(buf, TX_LATENCY_DISABLED)) {
                if (!tx_latency)
                        goto unlock;
-               rcu_assign_pointer(local->tx_latency, NULL);
+               RCU_INIT_POINTER(local->tx_latency, NULL);
                synchronize_rcu();
                kfree(tx_latency);
                goto unlock;
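
The one-liner above (and a matching change in ibss.c further down) swaps rcu_assign_pointer(ptr, NULL) for RCU_INIT_POINTER(ptr, NULL): publishing NULL gives readers nothing whose initialization must be ordered, so the publish barrier that rcu_assign_pointer() implies is unnecessary. A sketch of the surrounding teardown idiom, using a made-up struct foo rather than the tx_latency structure touched here:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
};

static struct foo __rcu *example_ptr;
static DEFINE_MUTEX(example_lock);

static void example_clear(void)
{
        struct foo *old;

        mutex_lock(&example_lock);
        old = rcu_dereference_protected(example_ptr,
                                        lockdep_is_held(&example_lock));
        RCU_INIT_POINTER(example_ptr, NULL);    /* NULL: no barrier needed */
        mutex_unlock(&example_lock);

        synchronize_rcu();                      /* wait for readers of "old" */
        kfree(old);
}
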
index 214ed4ecd739f10ae201e6dfa9112c0dd943f5b8..60c35afee29d551727a2969b25b84998ca5501c4 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __MAC80211_DEBUGFS_H
 #define __MAC80211_DEBUGFS_H
 
+#include "ieee80211_i.h"
+
 #ifdef CONFIG_MAC80211_DEBUGFS
 void debugfs_hw_add(struct ieee80211_local *local);
 int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count,
index 79025e79f4d6459dd99de5ad496e351e123f53b7..9f5501a9a79506266decdb83d0ad78c9d6e9bdf9 100644 (file)
@@ -3,6 +3,8 @@
 #ifndef __IEEE80211_DEBUGFS_NETDEV_H
 #define __IEEE80211_DEBUGFS_NETDEV_H
 
+#include "ieee80211_i.h"
+
 #ifdef CONFIG_MAC80211_DEBUGFS
 void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
 void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
index fc689f5d971e259381f0a26e13079f1a727fe704..696ef78b1fb754fa37305a2c77b870d05cdbcbec 100644 (file)
@@ -5,11 +5,11 @@
 #include "ieee80211_i.h"
 #include "trace.h"
 
-static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
+static inline bool check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
 {
-       WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
-            "%s:  Failed check-sdata-in-driver check, flags: 0x%x\n",
-            sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
+       return !WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
+                    "%s:  Failed check-sdata-in-driver check, flags: 0x%x\n",
+                    sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
 }
 
 static inline struct ieee80211_sub_if_data *
@@ -168,7 +168,8 @@ static inline int drv_change_interface(struct ieee80211_local *local,
 
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_change_interface(local, sdata, type, p2p);
        ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
@@ -181,7 +182,8 @@ static inline void drv_remove_interface(struct ieee80211_local *local,
 {
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_remove_interface(local, sdata);
        local->ops->remove_interface(&local->hw, &sdata->vif);
@@ -219,7 +221,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
                         sdata->vif.type == NL80211_IFTYPE_MONITOR))
                return;
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_bss_info_changed(local, sdata, info, changed);
        if (local->ops->bss_info_changed)
@@ -278,7 +281,8 @@ static inline int drv_set_key(struct ieee80211_local *local,
        might_sleep();
 
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_set_key(local, cmd, sdata, sta, key);
        ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
@@ -298,7 +302,8 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
                ista = &sta->sta;
 
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
        if (local->ops->update_tkip_key)
@@ -315,7 +320,8 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
 
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_hw_scan(local, sdata);
        ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
@@ -328,7 +334,8 @@ static inline void drv_cancel_hw_scan(struct ieee80211_local *local,
 {
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_cancel_hw_scan(local, sdata);
        local->ops->cancel_hw_scan(&local->hw, &sdata->vif);
@@ -345,7 +352,8 @@ drv_sched_scan_start(struct ieee80211_local *local,
 
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_sched_scan_start(local, sdata);
        ret = local->ops->sched_scan_start(&local->hw, &sdata->vif,
@@ -361,7 +369,8 @@ static inline int drv_sched_scan_stop(struct ieee80211_local *local,
 
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_sched_scan_stop(local, sdata);
        ret = local->ops->sched_scan_stop(&local->hw, &sdata->vif);
@@ -462,7 +471,8 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
                                  struct ieee80211_sta *sta)
 {
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_sta_notify(local, sdata, cmd, sta);
        if (local->ops->sta_notify)
@@ -479,7 +489,8 @@ static inline int drv_sta_add(struct ieee80211_local *local,
        might_sleep();
 
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_sta_add(local, sdata, sta);
        if (local->ops->sta_add)
@@ -497,7 +508,8 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
        might_sleep();
 
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_sta_remove(local, sdata, sta);
        if (local->ops->sta_remove)
@@ -515,7 +527,8 @@ static inline void drv_sta_add_debugfs(struct ieee80211_local *local,
        might_sleep();
 
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        if (local->ops->sta_add_debugfs)
                local->ops->sta_add_debugfs(&local->hw, &sdata->vif,
@@ -545,7 +558,8 @@ static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
        might_sleep();
 
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_sta_pre_rcu_remove(local, sdata, &sta->sta);
        if (local->ops->sta_pre_rcu_remove)
@@ -566,7 +580,8 @@ int drv_sta_state(struct ieee80211_local *local,
        might_sleep();
 
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
        if (local->ops->sta_state) {
@@ -590,7 +605,8 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
                                     struct ieee80211_sta *sta, u32 changed)
 {
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
                (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
@@ -612,7 +628,8 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
 
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_conf_tx(local, sdata, ac, params);
        if (local->ops->conf_tx)
@@ -629,7 +646,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local,
 
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return ret;
 
        trace_drv_get_tsf(local, sdata);
        if (local->ops->get_tsf)
@@ -644,7 +662,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local,
 {
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_set_tsf(local, sdata, tsf);
        if (local->ops->set_tsf)
@@ -657,7 +676,8 @@ static inline void drv_reset_tsf(struct ieee80211_local *local,
 {
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_reset_tsf(local, sdata);
        if (local->ops->reset_tsf)
@@ -689,7 +709,8 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
        might_sleep();
 
        sdata = get_bss_sdata(sdata);
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
 
@@ -726,13 +747,19 @@ static inline void drv_rfkill_poll(struct ieee80211_local *local)
 }
 
 static inline void drv_flush(struct ieee80211_local *local,
+                            struct ieee80211_sub_if_data *sdata,
                             u32 queues, bool drop)
 {
+       struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
+
        might_sleep();
 
+       if (sdata && !check_sdata_in_driver(sdata))
+               return;
+
        trace_drv_flush(local, queues, drop);
        if (local->ops->flush)
-               local->ops->flush(&local->hw, queues, drop);
+               local->ops->flush(&local->hw, vif, queues, drop);
        trace_drv_return_void(local);
 }
 
@@ -848,7 +875,8 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
 
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_set_bitrate_mask(local, sdata, mask);
        if (local->ops->set_bitrate_mask)
@@ -863,7 +891,8 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
                                      struct ieee80211_sub_if_data *sdata,
                                      struct cfg80211_gtk_rekey_data *data)
 {
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_set_rekey_data(local, sdata, data);
        if (local->ops->set_rekey_data)
@@ -931,7 +960,8 @@ static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
 {
        might_sleep();
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
        WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
 
        trace_drv_mgd_prepare_tx(local, sdata);
@@ -958,6 +988,9 @@ static inline int drv_add_chanctx(struct ieee80211_local *local,
 static inline void drv_remove_chanctx(struct ieee80211_local *local,
                                      struct ieee80211_chanctx *ctx)
 {
+       if (WARN_ON(!ctx->driver_present))
+               return;
+
        trace_drv_remove_chanctx(local, ctx);
        if (local->ops->remove_chanctx)
                local->ops->remove_chanctx(&local->hw, &ctx->conf);
@@ -983,7 +1016,8 @@ static inline int drv_assign_vif_chanctx(struct ieee80211_local *local,
 {
        int ret = 0;
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_assign_vif_chanctx(local, sdata, ctx);
        if (local->ops->assign_vif_chanctx) {
@@ -1001,7 +1035,8 @@ static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
                                            struct ieee80211_sub_if_data *sdata,
                                            struct ieee80211_chanctx *ctx)
 {
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_unassign_vif_chanctx(local, sdata, ctx);
        if (local->ops->unassign_vif_chanctx) {
@@ -1018,7 +1053,8 @@ static inline int drv_start_ap(struct ieee80211_local *local,
 {
        int ret = 0;
 
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_start_ap(local, sdata, &sdata->vif.bss_conf);
        if (local->ops->start_ap)
@@ -1030,7 +1066,8 @@ static inline int drv_start_ap(struct ieee80211_local *local,
 static inline void drv_stop_ap(struct ieee80211_local *local,
                               struct ieee80211_sub_if_data *sdata)
 {
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_stop_ap(local, sdata);
        if (local->ops->stop_ap)
@@ -1053,7 +1090,8 @@ drv_set_default_unicast_key(struct ieee80211_local *local,
                            struct ieee80211_sub_if_data *sdata,
                            int key_idx)
 {
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        WARN_ON_ONCE(key_idx < -1 || key_idx > 3);
 
@@ -1095,7 +1133,8 @@ static inline int drv_join_ibss(struct ieee80211_local *local,
        int ret = 0;
 
        might_sleep();
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
 
        trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf);
        if (local->ops->join_ibss)
@@ -1108,7 +1147,8 @@ static inline void drv_leave_ibss(struct ieee80211_local *local,
                                  struct ieee80211_sub_if_data *sdata)
 {
        might_sleep();
-       check_sdata_in_driver(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return;
 
        trace_drv_leave_ibss(local, sdata);
        if (local->ops->leave_ibss)
@@ -1116,4 +1156,17 @@ static inline void drv_leave_ibss(struct ieee80211_local *local,
        trace_drv_return_void(local);
 }
 
+static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
+                                             struct ieee80211_sta *sta)
+{
+       u32 ret = 0;
+
+       trace_drv_get_expected_throughput(sta);
+       if (local->ops->get_expected_throughput)
+               ret = local->ops->get_expected_throughput(sta);
+       trace_drv_return_u32(local, ret);
+
+       return ret;
+}
+
 #endif /* __MAC80211_DRIVER_OPS */
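
The driver-ops.h hunks all follow one mechanical pattern: check_sdata_in_driver() now returns a bool (still WARNing on failure), and every drv_*() wrapper turns a failed check into an early return, -EIO for the int-returning ops, instead of calling into the driver with an interface it was never told about. A sketch of the resulting wrapper shape, with the tracing calls omitted and example_op standing in for a hypothetical ieee80211_ops member:

static inline int drv_example_op(struct ieee80211_local *local,
                                 struct ieee80211_sub_if_data *sdata)
{
        int ret = 0;

        might_sleep();

        /* bail out instead of only warning when the driver does not
         * know about this interface */
        if (!check_sdata_in_driver(sdata))
                return -EIO;

        if (local->ops->example_op)     /* hypothetical op */
                ret = local->ops->example_op(&local->hw, &sdata->vif);

        return ret;
}
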
index c150b68436d78ada5bfbb0825d128d8e89f916e3..15702ff64a4c89fb89541f620b9da7dcccc2a7bd 100644 (file)
@@ -31,6 +31,18 @@ static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa,
        }
 }
 
+static void __check_htcap_enable(struct ieee80211_ht_cap *ht_capa,
+                                 struct ieee80211_ht_cap *ht_capa_mask,
+                                 struct ieee80211_sta_ht_cap *ht_cap,
+                                 u16 flag)
+{
+       __le16 le_flag = cpu_to_le16(flag);
+
+       if ((ht_capa_mask->cap_info & le_flag) &&
+           (ht_capa->cap_info & le_flag))
+               ht_cap->cap |= flag;
+}
+
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
                                     struct ieee80211_sta_ht_cap *ht_cap)
 {
@@ -59,7 +71,7 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
        smask = (u8 *)(&ht_capa_mask->mcs.rx_mask);
 
        /* NOTE:  If you add more over-rides here, update register_hw
-        * ht_capa_mod_msk logic in main.c as well.
+        * ht_capa_mod_mask logic in main.c as well.
         * And, if this method can ever change ht_cap.ht_supported, fix
         * the check in ieee80211_add_ht_ie.
         */
@@ -86,6 +98,14 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
        __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
                              IEEE80211_HT_CAP_MAX_AMSDU);
 
+       /* Allow user to disable LDPC */
+       __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+                             IEEE80211_HT_CAP_LDPC_CODING);
+
+       /* Allow user to enable 40 MHz intolerant bit. */
+       __check_htcap_enable(ht_capa, ht_capa_mask, ht_cap,
+                            IEEE80211_HT_CAP_40MHZ_INTOLERANT);
+
        /* Allow user to decrease AMPDU factor */
        if (ht_capa_mask->ampdu_params_info &
            IEEE80211_HT_AMPDU_PARM_FACTOR) {
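
ht.c gains __check_htcap_enable() as the counterpart of the existing __check_htcap_disable(): a capability bit is forced on only when the override mask selects it and the override value has it set, and this is used to let userspace assert the 40 MHz intolerant bit, while LDPC joins the list of bits that can be forced off. A short sketch of how the two helpers compose; example_apply_overrides is illustrative only, the real caller is ieee80211_apply_htcap_overrides() above:

static void example_apply_overrides(struct ieee80211_ht_cap *ht_capa,
                                    struct ieee80211_ht_cap *ht_capa_mask,
                                    struct ieee80211_sta_ht_cap *ht_cap)
{
        /* mask bit set, value bit clear -> force the capability off */
        __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
                              IEEE80211_HT_CAP_LDPC_CODING);

        /* mask bit set, value bit set -> force the capability on */
        __check_htcap_enable(ht_capa, ht_capa_mask, ht_cap,
                             IEEE80211_HT_CAP_40MHZ_INTOLERANT);
}
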
index 06d28787945b513e6672457a1e6990da0fd644d8..1bbac94da58d6a33218e8dc7eb474a7a5d26122a 100644 (file)
@@ -143,7 +143,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
                *pos++ = csa_settings->block_tx ? 1 : 0;
                *pos++ = ieee80211_frequency_to_channel(
                                csa_settings->chandef.chan->center_freq);
-               sdata->csa_counter_offset_beacon = (pos - presp->head);
+               sdata->csa_counter_offset_beacon[0] = (pos - presp->head);
                *pos++ = csa_settings->count;
        }
 
@@ -228,7 +228,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        struct beacon_data *presp;
        enum nl80211_bss_scan_width scan_width;
        bool have_higher_than_11mbit;
-       bool radar_required = false;
+       bool radar_required;
        int err;
 
        sdata_assert_lock(sdata);
@@ -253,7 +253,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        presp = rcu_dereference_protected(ifibss->presp,
                                          lockdep_is_held(&sdata->wdev.mtx));
-       rcu_assign_pointer(ifibss->presp, NULL);
+       RCU_INIT_POINTER(ifibss->presp, NULL);
        if (presp)
                kfree_rcu(presp, rcu_head);
 
@@ -262,7 +262,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        /* make a copy of the chandef, it could be modified below. */
        chandef = *req_chandef;
        chan = chandef.chan;
-       if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+       if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+                                    NL80211_IFTYPE_ADHOC)) {
                if (chandef.width == NL80211_CHAN_WIDTH_5 ||
                    chandef.width == NL80211_CHAN_WIDTH_10 ||
                    chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
@@ -274,7 +275,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                chandef.width = NL80211_CHAN_WIDTH_20;
                chandef.center_freq1 = chan->center_freq;
                /* check again for downgraded chandef */
-               if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+               if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+                                            NL80211_IFTYPE_ADHOC)) {
                        sdata_info(sdata,
                                   "Failed to join IBSS, beacons forbidden\n");
                        return;
@@ -282,21 +284,20 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        }
 
        err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-                                           &chandef);
+                                           &chandef, NL80211_IFTYPE_ADHOC);
        if (err < 0) {
                sdata_info(sdata,
                           "Failed to join IBSS, invalid chandef\n");
                return;
        }
-       if (err > 0) {
-               if (!ifibss->userspace_handles_dfs) {
-                       sdata_info(sdata,
-                                  "Failed to join IBSS, DFS channel without control program\n");
-                       return;
-               }
-               radar_required = true;
+       if (err > 0 && !ifibss->userspace_handles_dfs) {
+               sdata_info(sdata,
+                          "Failed to join IBSS, DFS channel without control program\n");
+               return;
        }
 
+       radar_required = err;
+
        mutex_lock(&local->mtx);
        if (ieee80211_vif_use_channel(sdata, &chandef,
                                      ifibss->fixed_channel ?
@@ -775,7 +776,8 @@ static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
         * unavailable.
         */
        err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-                                           &ifibss->chandef);
+                                           &ifibss->chandef,
+                                           NL80211_IFTYPE_ADHOC);
        if (err > 0)
                cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef,
                                     GFP_ATOMIC);
@@ -861,7 +863,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                goto disconnect;
        }
 
-       if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef)) {
+       if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef,
+                                    NL80211_IFTYPE_ADHOC)) {
                sdata_info(sdata,
                           "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
                           ifibss->bssid,
@@ -873,17 +876,17 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        }
 
        err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-                                           &params.chandef);
+                                           &params.chandef,
+                                           NL80211_IFTYPE_ADHOC);
        if (err < 0)
                goto disconnect;
-       if (err) {
+       if (err > 0 && !ifibss->userspace_handles_dfs) {
                /* IBSS-DFS only allowed with a control program */
-               if (!ifibss->userspace_handles_dfs)
-                       goto disconnect;
-
-               params.radar_required = true;
+               goto disconnect;
        }
 
+       params.radar_required = err;
+
        if (cfg80211_chandef_identical(&params.chandef,
                                       &sdata->vif.bss_conf.chandef)) {
                ibss_dbg(sdata,
@@ -1636,7 +1639,33 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        u32 changed = 0;
        u32 rate_flags;
        struct ieee80211_supported_band *sband;
+       enum ieee80211_chanctx_mode chanmode;
+       struct ieee80211_local *local = sdata->local;
+       int radar_detect_width = 0;
        int i;
+       int ret;
+
+       ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
+                                           &params->chandef,
+                                           sdata->wdev.iftype);
+       if (ret < 0)
+               return ret;
+
+       if (ret > 0) {
+               if (!params->userspace_handles_dfs)
+                       return -EINVAL;
+               radar_detect_width = BIT(params->chandef.width);
+       }
+
+       chanmode = (params->channel_fixed && !ret) ?
+               IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE;
+
+       mutex_lock(&local->chanctx_mtx);
+       ret = ieee80211_check_combinations(sdata, &params->chandef, chanmode,
+                                          radar_detect_width);
+       mutex_unlock(&local->chanctx_mtx);
+       if (ret < 0)
+               return ret;
 
        if (params->bssid) {
                memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
@@ -1651,7 +1680,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 
        /* fix basic_rates if channel does not support these rates */
        rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
-       sband = sdata->local->hw.wiphy->bands[params->chandef.chan->band];
+       sband = local->hw.wiphy->bands[params->chandef.chan->band];
        for (i = 0; i < sband->n_bitrates; i++) {
                if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
                        sdata->u.ibss.basic_rates &= ~BIT(i);
@@ -1700,9 +1729,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        ieee80211_bss_info_change_notify(sdata, changed);
 
        sdata->smps_mode = IEEE80211_SMPS_OFF;
-       sdata->needed_rx_chains = sdata->local->rx_chains;
+       sdata->needed_rx_chains = local->rx_chains;
 
-       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+       ieee80211_queue_work(&local->hw, &sdata->work);
 
        return 0;
 }
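
The recurring change in ibss.c is the new contract of cfg80211_chandef_dfs_required(): it now takes the interface type and returns a negative error, 0 when no radar detection is needed, or a positive value when it is. Callers derive radar_required and radar_detect_width from that result and feed them into ieee80211_check_combinations() under chanctx_mtx. A condensed sketch of that prologue, modelled on the ieee80211_ibss_join() hunk above; example_join_checks is a made-up helper and the chanctx mode choice is simplified to SHARED:

static int example_join_checks(struct ieee80211_sub_if_data *sdata,
                               struct cfg80211_chan_def *chandef,
                               bool userspace_handles_dfs)
{
        struct ieee80211_local *local = sdata->local;
        int radar_detect_width = 0;
        int ret;

        ret = cfg80211_chandef_dfs_required(local->hw.wiphy, chandef,
                                            sdata->wdev.iftype);
        if (ret < 0)
                return ret;             /* invalid chandef */
        if (ret > 0) {
                if (!userspace_handles_dfs)
                        return -EINVAL; /* DFS needs a control program */
                radar_detect_width = BIT(chandef->width);
        }

        mutex_lock(&local->chanctx_mtx);
        ret = ieee80211_check_combinations(sdata, chandef,
                                           IEEE80211_CHANCTX_SHARED,
                                           radar_detect_width);
        mutex_unlock(&local->chanctx_mtx);

        return ret;
}
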
index 222c28b75315f1ab43226e08566a5f911c6bacc7..ac9836e0aab335ddf75295b5ad020d77e0e3dd0a 100644 (file)
@@ -260,7 +260,7 @@ struct ieee80211_if_ap {
 
        /* to be used after channel switch. */
        struct cfg80211_beacon_data *next_beacon;
-       struct list_head vlans;
+       struct list_head vlans; /* write-protected with RTNL and local->mtx */
 
        struct ps_data ps;
        atomic_t num_mcast_sta; /* number of stations receiving multicast */
@@ -276,7 +276,7 @@ struct ieee80211_if_wds {
 };
 
 struct ieee80211_if_vlan {
-       struct list_head list;
+       struct list_head list; /* write-protected with RTNL and local->mtx */
 
        /* used for all tx if the VLAN is configured to 4-addr mode */
        struct sta_info __rcu *sta;
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
 
        bool started, abort, hw_begun, notified;
        bool to_be_freed;
+       bool on_channel;
 
        unsigned long hw_start_time;
 
@@ -691,8 +692,10 @@ struct ieee80211_chanctx {
        struct list_head list;
        struct rcu_head rcu_head;
 
+       struct list_head assigned_vifs;
+       struct list_head reserved_vifs;
+
        enum ieee80211_chanctx_mode mode;
-       int refcount;
        bool driver_present;
 
        struct ieee80211_chanctx_conf conf;
@@ -751,11 +754,21 @@ struct ieee80211_sub_if_data {
        struct mac80211_qos_map __rcu *qos_map;
 
        struct work_struct csa_finalize_work;
-       int csa_counter_offset_beacon;
-       int csa_counter_offset_presp;
+       u16 csa_counter_offset_beacon[IEEE80211_MAX_CSA_COUNTERS_NUM];
+       u16 csa_counter_offset_presp[IEEE80211_MAX_CSA_COUNTERS_NUM];
        bool csa_radar_required;
+       bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
        struct cfg80211_chan_def csa_chandef;
 
+       struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
+       struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */
+
+       /* context reservation -- protected with chanctx_mtx */
+       struct ieee80211_chanctx *reserved_chanctx;
+       struct cfg80211_chan_def reserved_chandef;
+       bool reserved_radar_required;
+       u8 csa_current_counter;
+
        /* used to reconfigure hardware SM PS */
        struct work_struct recalc_smps;
 
@@ -1448,6 +1461,7 @@ __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
 int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                                       struct cfg80211_sched_scan_request *req);
 int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sched_scan_end(struct ieee80211_local *local);
 void ieee80211_sched_scan_stopped_work(struct work_struct *work);
 
 /* off-channel helpers */
@@ -1462,6 +1476,7 @@ void ieee80211_sw_roc_work(struct work_struct *work);
 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
 
 /* channel switch handling */
+bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local);
 void ieee80211_csa_finalize_work(struct work_struct *work);
 int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                             struct cfg80211_csa_settings *params);
@@ -1770,6 +1785,16 @@ int __must_check
 ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                          const struct cfg80211_chan_def *chandef,
                          enum ieee80211_chanctx_mode mode);
+int __must_check
+ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
+                             const struct cfg80211_chan_def *chandef,
+                             enum ieee80211_chanctx_mode mode,
+                             bool radar_required);
+int __must_check
+ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
+                                  u32 *changed);
+int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);
+
 int __must_check
 ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
                               const struct cfg80211_chan_def *chandef,
@@ -1782,6 +1807,8 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
                                         bool clear);
+int ieee80211_chanctx_refcount(struct ieee80211_local *local,
+                              struct ieee80211_chanctx *ctx);
 
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx);
@@ -1805,6 +1832,20 @@ int ieee80211_cs_headroom(struct ieee80211_local *local,
                          enum nl80211_iftype iftype);
 void ieee80211_recalc_dtim(struct ieee80211_local *local,
                           struct ieee80211_sub_if_data *sdata);
+int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
+                                const struct cfg80211_chan_def *chandef,
+                                enum ieee80211_chanctx_mode chanmode,
+                                u8 radar_detect);
+int ieee80211_max_num_channels(struct ieee80211_local *local);
+
+/* TDLS */
+int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+                       const u8 *peer, u8 action_code, u8 dialog_token,
+                       u16 status_code, u32 peer_capability,
+                       const u8 *extra_ies, size_t extra_ies_len);
+int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+                       const u8 *peer, enum nl80211_tdls_operation oper);
+
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
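
The combination check declared above is called under chanctx_mtx by the callers touched in ibss.c above and iface.c below. A minimal sketch of that calling pattern, where chandef, chanmode and radar_detect_width stand in for the caller's own values:

	mutex_lock(&local->chanctx_mtx);
	ret = ieee80211_check_combinations(sdata, chandef, chanmode,
					   radar_detect_width);
	mutex_unlock(&local->chanctx_mtx);
	if (ret < 0)
		return ret;	/* requested interface combination not supported */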
index b8d331e7d883d50fd4fc3adf12c2869ac3beedb7..79fc98815da8600511316113e52223de05911d6a 100644 (file)
@@ -250,6 +250,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *nsdata;
+       int ret;
 
        ASSERT_RTNL();
 
@@ -300,7 +301,10 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       return 0;
+       mutex_lock(&local->chanctx_mtx);
+       ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+       mutex_unlock(&local->chanctx_mtx);
+       return ret;
 }
 
 static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
@@ -423,7 +427,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
        mutex_unlock(&local->mtx);
        if (ret) {
                mutex_lock(&local->iflist_mtx);
-               rcu_assign_pointer(local->monitor_sdata, NULL);
+               RCU_INIT_POINTER(local->monitor_sdata, NULL);
                mutex_unlock(&local->iflist_mtx);
                synchronize_net();
                drv_remove_interface(local, sdata);
@@ -452,7 +456,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
                return;
        }
 
-       rcu_assign_pointer(local->monitor_sdata, NULL);
+       RCU_INIT_POINTER(local->monitor_sdata, NULL);
        mutex_unlock(&local->iflist_mtx);
 
        synchronize_net();
@@ -492,7 +496,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                if (!sdata->bss)
                        return -ENOLINK;
 
+               mutex_lock(&local->mtx);
                list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+               mutex_unlock(&local->mtx);
 
                master = container_of(sdata->bss,
                                      struct ieee80211_sub_if_data, u.ap);
@@ -722,8 +728,11 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                drv_stop(local);
  err_del_bss:
        sdata->bss = NULL;
-       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+               mutex_lock(&local->mtx);
                list_del(&sdata->u.vlan.list);
+               mutex_unlock(&local->mtx);
+       }
        /* might already be clear but that doesn't matter */
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
        return res;
@@ -829,8 +838,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
        cancel_work_sync(&sdata->recalc_smps);
        sdata_lock(sdata);
+       mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
+       if (!ieee80211_csa_needs_block_tx(local))
+               ieee80211_wake_queues_by_reason(&local->hw,
+                                       IEEE80211_MAX_QUEUE_MAP,
+                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       mutex_unlock(&local->mtx);
        sdata_unlock(sdata);
+
        cancel_work_sync(&sdata->csa_finalize_work);
 
        cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
@@ -875,8 +891,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
+               mutex_lock(&local->mtx);
                list_del(&sdata->u.vlan.list);
-               rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
+               mutex_unlock(&local->mtx);
+               RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
                /* no need to tell driver */
                break;
        case NL80211_IFTYPE_MONITOR:
@@ -895,7 +913,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* relies on synchronize_rcu() below */
-               rcu_assign_pointer(local->p2p_sdata, NULL);
+               RCU_INIT_POINTER(local->p2p_sdata, NULL);
                /* fall through */
        default:
                cancel_work_sync(&sdata->work);
@@ -1280,6 +1298,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        INIT_WORK(&sdata->work, ieee80211_iface_work);
        INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
        INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
+       INIT_LIST_HEAD(&sdata->assigned_chanctx_list);
+       INIT_LIST_HEAD(&sdata->reserved_chanctx_list);
 
        switch (type) {
        case NL80211_IFTYPE_P2P_GO:
@@ -1774,20 +1794,19 @@ static int netdev_notify(struct notifier_block *nb,
        struct ieee80211_sub_if_data *sdata;
 
        if (state != NETDEV_CHANGENAME)
-               return 0;
+               return NOTIFY_DONE;
 
        if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
-               return 0;
+               return NOTIFY_DONE;
 
        if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
-               return 0;
+               return NOTIFY_DONE;
 
        sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
        memcpy(sdata->name, dev->name, IFNAMSIZ);
-
        ieee80211_debugfs_rename_netdev(sdata);
-       return 0;
+
+       return NOTIFY_OK;
 }
 
 static struct notifier_block mac80211_netdev_notifier = {
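
The rcu_assign_pointer(..., NULL) call sites converted to RCU_INIT_POINTER() above, and the similar ones in mesh.c and scan.c further down, rely on the barrier in rcu_assign_pointer() only being needed to order initialization of the pointed-to object before publication; storing NULL publishes nothing, so a plain store suffices. A minimal sketch of the two idioms, with struct foo and the example_* names purely illustrative:

struct foo __rcu *example_ptr;

static void example_publish(struct foo *p)
{
	/* order initialization of *p before making it visible to readers */
	rcu_assign_pointer(example_ptr, p);
}

static void example_retract(struct foo *old)
{
	/* NULL carries no data to order, so no barrier is required */
	RCU_INIT_POINTER(example_ptr, NULL);
	synchronize_rcu();	/* wait out existing readers before freeing */
	kfree(old);
}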
index 6ff65a1ebaa905ffecfbdedebc7428882aecc551..16d97f044a202e61020f3ef563941e4c9adace0a 100644 (file)
@@ -325,7 +325,8 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
        struct ieee80211_key *key;
        int i, j, err;
 
-       BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS);
+       if (WARN_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS))
+               return ERR_PTR(-EINVAL);
 
        key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
        if (!key)
@@ -481,8 +482,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
        int idx, ret;
        bool pairwise;
 
-       BUG_ON(!sdata);
-       BUG_ON(!key);
+       if (WARN_ON(!sdata || !key))
+               return -EINVAL;
 
        pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
        idx = key->conf.keyidx;
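
The BUG_ON() conversions here, and the ones in mesh_pathtbl.c, mesh_sync.c and sta_info.c further down, rely on WARN_ON(cond) evaluating to cond, so a violated precondition now produces a backtrace plus an error return instead of halting the machine. A sketch of the pattern, using a hypothetical helper:

static int example_check_idx(int idx, int max)
{
	/* was BUG_ON(idx < 0 || idx >= max), which would panic */
	if (WARN_ON(idx < 0 || idx >= max))
		return -EINVAL;
	return 0;
}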
index 4c1bf61bc778683dc352ebb32a585d45649168a6..d17c26d6e369f8db71061f3c73f4f27196ed9e3d 100644 (file)
@@ -340,7 +340,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
 
        sdata_unlock(sdata);
 
-       return NOTIFY_DONE;
+       return NOTIFY_OK;
 }
 #endif
 
@@ -371,7 +371,7 @@ static int ieee80211_ifa6_changed(struct notifier_block *nb,
 
        drv_ipv6_addr_change(local, sdata, idev);
 
-       return NOTIFY_DONE;
+       return NOTIFY_OK;
 }
 #endif
 
@@ -446,7 +446,9 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
        .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                                IEEE80211_HT_CAP_MAX_AMSDU |
                                IEEE80211_HT_CAP_SGI_20 |
-                               IEEE80211_HT_CAP_SGI_40),
+                               IEEE80211_HT_CAP_SGI_40 |
+                               IEEE80211_HT_CAP_LDPC_CODING |
+                               IEEE80211_HT_CAP_40MHZ_INTOLERANT),
        .mcs = {
                .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
                             0xff, 0xff, 0xff, 0xff, 0xff, },
@@ -954,6 +956,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
                local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
 
+       local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
+
        result = wiphy_register(local->hw.wiphy);
        if (result < 0)
                goto fail_wiphy_register;
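
The notifier callbacks touched in this series (ieee80211_ifa_changed() and ieee80211_ifa6_changed() above, netdev_notify() in iface.c, and ieee80211_max_network_latency() in mlme.c further down) now return the NOTIFY_* codes from linux/notifier.h instead of a bare 0. NOTIFY_DONE is numerically 0, so behaviour is unchanged; it means the event was not of interest, while NOTIFY_OK means it was handled. A minimal sketch with a hypothetical callback:

static int example_notify(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	if (event != NETDEV_CHANGENAME)
		return NOTIFY_DONE;	/* event not of interest */

	/* ... react to the rename ... */
	return NOTIFY_OK;		/* event handled */
}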
index f70e9cd10552dac6729d703edd2e9ae12750a3a6..6495a3f0428dae6a93bafea04da26b7390f45599 100644 (file)
@@ -366,20 +366,15 @@ int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
                return 0;
 
        /* find RSN IE */
-       data = ifmsh->ie;
-       while (data < ifmsh->ie + ifmsh->ie_len) {
-               if (*data == WLAN_EID_RSN) {
-                       len = data[1] + 2;
-                       break;
-               }
-               data++;
-       }
+       data = cfg80211_find_ie(WLAN_EID_RSN, ifmsh->ie, ifmsh->ie_len);
+       if (!data)
+               return 0;
 
-       if (len) {
-               if (skb_tailroom(skb) < len)
-                       return -ENOMEM;
-               memcpy(skb_put(skb, len), data, len);
-       }
+       len = data[1] + 2;
+
+       if (skb_tailroom(skb) < len)
+               return -ENOMEM;
+       memcpy(skb_put(skb, len), data, len);
 
        return 0;
 }
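
cfg80211_find_ie() walks the buffer element by element (EID, length octet, payload), so unlike the removed byte-wise scan it cannot match a WLAN_EID_RSN byte that merely occurs inside another element's payload. A usage sketch, with ies and ies_len standing in for the caller's buffer:

	const u8 *ie = cfg80211_find_ie(WLAN_EID_RSN, ies, ies_len);

	if (ie) {
		size_t elem_len = ie[1] + 2;	/* EID + length octet + payload */
		/* ie points at the EID byte; copy elem_len bytes from it */
	}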
@@ -684,7 +679,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
                *pos++ = 0x0;
                *pos++ = ieee80211_frequency_to_channel(
                                csa->settings.chandef.chan->center_freq);
-               sdata->csa_counter_offset_beacon = hdr_len + 6;
+               sdata->csa_counter_offset_beacon[0] = hdr_len + 6;
                *pos++ = csa->settings.count;
                *pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
                *pos++ = 6;
@@ -829,7 +824,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
        bcn = rcu_dereference_protected(ifmsh->beacon,
                                        lockdep_is_held(&sdata->wdev.mtx));
-       rcu_assign_pointer(ifmsh->beacon, NULL);
+       RCU_INIT_POINTER(ifmsh->beacon, NULL);
        kfree_rcu(bcn, rcu_head);
 
        /* flush STAs and mpaths on this iface */
@@ -903,14 +898,15 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
        }
 
        err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-                                           &params.chandef);
+                                           &params.chandef,
+                                           NL80211_IFTYPE_MESH_POINT);
        if (err < 0)
                return false;
-       if (err) {
-               params.radar_required = true;
+       if (err > 0)
                /* TODO: DFS not (yet) supported */
                return false;
-       }
+
+       params.radar_required = err;
 
        if (cfg80211_chandef_identical(&params.chandef,
                                       &sdata->vif.bss_conf.chandef)) {
@@ -1068,7 +1064,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
 
        /* Remove the CSA and MCSP elements from the beacon */
        tmp_csa_settings = rcu_dereference(ifmsh->csa);
-       rcu_assign_pointer(ifmsh->csa, NULL);
+       RCU_INIT_POINTER(ifmsh->csa, NULL);
        if (tmp_csa_settings)
                kfree_rcu(tmp_csa_settings, rcu_head);
        ret = ieee80211_mesh_rebuild_beacon(sdata);
@@ -1102,7 +1098,7 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
        ret = ieee80211_mesh_rebuild_beacon(sdata);
        if (ret) {
                tmp_csa_settings = rcu_dereference(ifmsh->csa);
-               rcu_assign_pointer(ifmsh->csa, NULL);
+               RCU_INIT_POINTER(ifmsh->csa, NULL);
                kfree_rcu(tmp_csa_settings, rcu_head);
                return ret;
        }
index f9514685d45a54802cb0f21dce0953ccbe9b78f4..94758b9c9ed48a5d1ea9921f4f9f0da40e34bd0b 100644 (file)
@@ -37,7 +37,7 @@ static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
        return get_unaligned_le32(preq_elem + offset);
 }
 
-static inline u32 u16_field_get(const u8 *preq_elem, int offset, bool ae)
+static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
 {
        if (ae)
                offset += 6;
@@ -544,9 +544,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                if (time_after(jiffies, ifmsh->last_sn_update +
                                        net_traversal_jiffies(sdata)) ||
                    time_before(jiffies, ifmsh->last_sn_update)) {
-                       target_sn = ++ifmsh->sn;
+                       ++ifmsh->sn;
                        ifmsh->last_sn_update = jiffies;
                }
+               target_sn = ifmsh->sn;
        } else if (is_broadcast_ether_addr(target_addr) &&
                   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
                rcu_read_lock();
index 7d050ed6fe5a4ec879bc9711c088db8360154448..cf032a8db9d78e9c13fe9df1ee2f2c35dbe8ec62 100644 (file)
@@ -287,8 +287,10 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
        struct sk_buff_head failq;
        unsigned long flags;
 
-       BUG_ON(gate_mpath == from_mpath);
-       BUG_ON(!gate_mpath->next_hop);
+       if (WARN_ON(gate_mpath == from_mpath))
+               return;
+       if (WARN_ON(!gate_mpath->next_hop))
+               return;
 
        __skb_queue_head_init(&failq);
 
index 2bc5dc25d5adc79a92cd29bc94d058e141d011c6..09625d6205c31418edba53a62ee027245f232050 100644 (file)
@@ -171,7 +171,7 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
        u8 cap;
 
        WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
-       BUG_ON(!rcu_read_lock_held());
+       WARN_ON(!rcu_read_lock_held());
        cap = beacon->meshconf->meshconf_cap;
 
        spin_lock_bh(&ifmsh->sync_offset_lock);
index 3b848dad958762ccfc48732623d9ddf1f56aa8ae..0e4886f881f1e49f438fa0f61d5c2b021877bf30 100644 (file)
@@ -11,6 +11,7 @@
 #define MICHAEL_H
 
 #include <linux/types.h>
+#include <linux/ieee80211.h>
 
 #define MICHAEL_MIC_LEN 8
 
index dee50aefd6e868e247ba869e9e9883d4640330e3..3345401be1b3c26744cb2ab6e384672a0cab0d6b 100644 (file)
@@ -975,16 +975,23 @@ static void ieee80211_chswitch_work(struct work_struct *work)
        /* XXX: shouldn't really modify cfg80211-owned data! */
        ifmgd->associated->channel = sdata->csa_chandef.chan;
 
+       ieee80211_bss_info_change_notify(sdata, changed);
+
+       mutex_lock(&local->mtx);
+       sdata->vif.csa_active = false;
        /* XXX: wait for a beacon first? */
-       ieee80211_wake_queues_by_reason(&local->hw,
+       if (!ieee80211_csa_needs_block_tx(local))
+               ieee80211_wake_queues_by_reason(&local->hw,
                                        IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
+       mutex_unlock(&local->mtx);
 
-       ieee80211_bss_info_change_notify(sdata, changed);
-
- out:
-       sdata->vif.csa_active = false;
        ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
+
+       ieee80211_sta_reset_beacon_monitor(sdata);
+       ieee80211_sta_reset_conn_monitor(sdata);
+
+out:
        sdata_unlock(sdata);
 }
 
@@ -1089,7 +1096,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        }
        chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf),
                               struct ieee80211_chanctx, conf);
-       if (chanctx->refcount > 1) {
+       if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
                sdata_info(sdata,
                           "channel switch with multiple interfaces on the same channel, disconnecting\n");
                ieee80211_queue_work(&local->hw,
@@ -1100,12 +1107,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        mutex_unlock(&local->chanctx_mtx);
 
        sdata->csa_chandef = csa_ie.chandef;
+
+       mutex_lock(&local->mtx);
        sdata->vif.csa_active = true;
+       sdata->csa_block_tx = csa_ie.mode;
 
-       if (csa_ie.mode)
+       if (sdata->csa_block_tx)
                ieee80211_stop_queues_by_reason(&local->hw,
-                               IEEE80211_MAX_QUEUE_MAP,
-                               IEEE80211_QUEUE_STOP_REASON_CSA);
+                                       IEEE80211_MAX_QUEUE_MAP,
+                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       mutex_unlock(&local->mtx);
 
        if (local->ops->channel_switch) {
                /* use driver's channel switch callback */
@@ -1817,6 +1828,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        ifmgd->flags = 0;
        mutex_lock(&local->mtx);
        ieee80211_vif_release_channel(sdata);
+
+       sdata->vif.csa_active = false;
+       if (!ieee80211_csa_needs_block_tx(local))
+               ieee80211_wake_queues_by_reason(&local->hw,
+                                       IEEE80211_MAX_QUEUE_MAP,
+                                       IEEE80211_QUEUE_STOP_REASON_CSA);
        mutex_unlock(&local->mtx);
 
        sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
@@ -2045,6 +2062,7 @@ EXPORT_SYMBOL(ieee80211_ap_probereq_get);
 
 static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 {
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
@@ -2058,10 +2076,14 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
                               true, frame_buf);
        ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
+
+       mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
-       ieee80211_wake_queues_by_reason(&sdata->local->hw,
+       if (!ieee80211_csa_needs_block_tx(local))
+               ieee80211_wake_queues_by_reason(&local->hw,
                                        IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
+       mutex_unlock(&local->mtx);
 
        cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
                              IEEE80211_DEAUTH_FRAME_LEN);
@@ -3546,6 +3568,9 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
        if (local->quiescing)
                return;
 
+       if (sdata->vif.csa_active)
+               return;
+
        sdata->u.mgd.connection_loss = false;
        ieee80211_queue_work(&sdata->local->hw,
                             &sdata->u.mgd.beacon_connection_loss_work);
@@ -3561,6 +3586,9 @@ static void ieee80211_sta_conn_mon_timer(unsigned long data)
        if (local->quiescing)
                return;
 
+       if (sdata->vif.csa_active)
+               return;
+
        ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
 }
 
@@ -3598,18 +3626,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
 
        sdata_lock(sdata);
 
-       if (ifmgd->auth_data) {
+       if (ifmgd->auth_data || ifmgd->assoc_data) {
+               const u8 *bssid = ifmgd->auth_data ?
+                               ifmgd->auth_data->bss->bssid :
+                               ifmgd->assoc_data->bss->bssid;
+
                /*
-                * If we are trying to authenticate while suspending, cfg80211
-                * won't know and won't actually abort those attempts, thus we
-                * need to do that ourselves.
+                * If we are trying to authenticate / associate while suspending,
+                * cfg80211 won't know and won't actually abort those attempts,
+                * thus we need to do that ourselves.
                 */
-               ieee80211_send_deauth_disassoc(sdata,
-                                              ifmgd->auth_data->bss->bssid,
+               ieee80211_send_deauth_disassoc(sdata, bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               WLAN_REASON_DEAUTH_LEAVING,
                                               false, frame_buf);
-               ieee80211_destroy_auth_data(sdata, false);
+               if (ifmgd->assoc_data)
+                       ieee80211_destroy_assoc_data(sdata, false);
+               if (ifmgd->auth_data)
+                       ieee80211_destroy_auth_data(sdata, false);
                cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
                                      IEEE80211_DEAUTH_FRAME_LEN);
        }
@@ -3701,7 +3735,7 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
        ieee80211_recalc_ps(local, latency_usec);
        mutex_unlock(&local->iflist_mtx);
 
-       return 0;
+       return NOTIFY_OK;
 }
 
 static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
index 6fb38558a5e6c79d81fc6ba4a4b03ff4313a701a..7a17decd27f91af8646da20b9ab75fc3e303e3c4 100644 (file)
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
                container_of(work, struct ieee80211_roc_work, work.work);
        struct ieee80211_sub_if_data *sdata = roc->sdata;
        struct ieee80211_local *local = sdata->local;
-       bool started;
+       bool started, on_channel;
 
        mutex_lock(&local->mtx);
 
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
        if (!roc->started) {
                struct ieee80211_roc_work *dep;
 
-               /* start this ROC */
-               ieee80211_offchannel_stop_vifs(local);
+               WARN_ON(local->use_chanctx);
+
+               /* If we are actually operating on the desired channel (with at
+                * least 20 MHz channel width), don't stop all operations but
+                * still treat it as though the ROC operation started properly,
+                * so other ROC operations won't interfere with this one.
+                */
+               roc->on_channel = roc->chan == local->_oper_chandef.chan &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
+                                 local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
 
-               /* switch channel etc */
+               /* start this ROC */
                ieee80211_recalc_idle(local);
 
-               local->tmp_channel = roc->chan;
-               ieee80211_hw_config(local, 0);
+               if (!roc->on_channel) {
+                       ieee80211_offchannel_stop_vifs(local);
+
+                       local->tmp_channel = roc->chan;
+                       ieee80211_hw_config(local, 0);
+               }
 
                /* tell userspace or send frame */
                ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
  finish:
                list_del(&roc->list);
                started = roc->started;
+               on_channel = roc->on_channel;
                ieee80211_roc_notify_destroy(roc, !roc->abort);
 
-               if (started) {
+               if (started && !on_channel) {
                        ieee80211_flush_queues(local, NULL);
 
                        local->tmp_channel = NULL;
index 26fd94fa0aedb86e43382781f791b1f36de44954..1c1469c36dca05cbdf55e2aba55dc296962aba55 100644 (file)
@@ -657,6 +657,17 @@ minstrel_free(void *priv)
        kfree(priv);
 }
 
+static u32 minstrel_get_expected_throughput(void *priv_sta)
+{
+       struct minstrel_sta_info *mi = priv_sta;
+       int idx = mi->max_tp_rate[0];
+
+       /* convert pkt per sec to kbps (1200 is the average pkt size used
+        * for computing cur_tp)
+        */
+       return MINSTREL_TRUNC(mi->r[idx].cur_tp) * 1200 * 8 / 1024;
+}
+
 const struct rate_control_ops mac80211_minstrel = {
        .name = "minstrel",
        .tx_status = minstrel_tx_status,
@@ -670,6 +681,7 @@ const struct rate_control_ops mac80211_minstrel = {
        .add_sta_debugfs = minstrel_add_sta_debugfs,
        .remove_sta_debugfs = minstrel_remove_sta_debugfs,
 #endif
+       .get_expected_throughput = minstrel_get_expected_throughput,
 };
 
 int __init
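
minstrel stores cur_tp as packets per second in its fixed-point format, so the new minstrel_get_expected_throughput() hook converts it using the same 1200-byte average packet size assumed when cur_tp was computed. With an illustrative value of 1000 pkt/s after MINSTREL_TRUNC():

	1000 pkt/s * 1200 bytes * 8 bits / 1024 = 9375 kbps  (about 9.4 Mbit/s)

which is the figure reported through .get_expected_throughput.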
index bccaf854a309e9434fb9044d0e98082e3de287de..85c1e74b7714c2160ffd6976bbec4322bb7e428d 100644 (file)
@@ -22,7 +22,7 @@
 #define MCS_NBITS (AVG_PKT_SIZE << 3)
 
 /* Number of symbols for a packet with (bps) bits per symbol */
-#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps))
+#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
 
 /* Transmission time (nanoseconds) for a packet containing (syms) symbols */
 #define MCS_SYMBOL_TIME(sgi, syms)                                     \
@@ -226,8 +226,9 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
                nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
 
        nsecs += minstrel_mcs_groups[group].duration[rate];
-       tp = 1000000 * ((prob * 1000) / nsecs);
 
+       /* prob is scaled - see MINSTREL_FRAC above */
+       tp = 1000000 * ((prob * 1000) / nsecs);
        mr->cur_tp = MINSTREL_TRUNC(tp);
 }
 
@@ -1031,6 +1032,22 @@ minstrel_ht_free(void *priv)
        mac80211_minstrel.free(priv);
 }
 
+static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
+{
+       struct minstrel_ht_sta_priv *msp = priv_sta;
+       struct minstrel_ht_sta *mi = &msp->ht;
+       int i, j;
+
+       if (!msp->is_ht)
+               return mac80211_minstrel.get_expected_throughput(priv_sta);
+
+       i = mi->max_tp_rate / MCS_GROUP_RATES;
+       j = mi->max_tp_rate % MCS_GROUP_RATES;
+
+       /* convert cur_tp from pkt per second to kbps */
+       return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
+}
+
 static const struct rate_control_ops mac80211_minstrel_ht = {
        .name = "minstrel_ht",
        .tx_status = minstrel_ht_tx_status,
@@ -1045,6 +1062,7 @@ static const struct rate_control_ops mac80211_minstrel_ht = {
        .add_sta_debugfs = minstrel_ht_add_sta_debugfs,
        .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
 #endif
+       .get_expected_throughput = minstrel_ht_get_expected_throughput,
 };
 
 
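
DIV_ROUND_UP(n, d) from linux/kernel.h expands to ((n) + (d) - 1) / (d), so the new MCS_NSYMS() definition is equivalent to the old open-coded expression. For example, assuming the file's 1200-byte average packet (MCS_NBITS = 9600) and a rate carrying 26 data bits per symbol:

	MCS_NSYMS(26) = DIV_ROUND_UP(9600, 26)
	              = (9600 + 26 - 1) / 26
	              = 370 symbols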
index 216c45b949e513382447050eb560098a5edaa4b3..394e201cde6d3b6d4375f973937df55395547fea 100644 (file)
@@ -54,24 +54,25 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
        return skb;
 }
 
-static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len)
+static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len)
 {
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-       struct ieee80211_hdr *hdr;
-
-       hdr = (void *)(skb->data);
+       struct ieee80211_hdr *hdr = (void *)skb->data;
 
        if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
                            RX_FLAG_FAILED_PLCP_CRC |
                            RX_FLAG_AMPDU_IS_ZEROLEN))
-               return 1;
+               return true;
+
        if (unlikely(skb->len < 16 + present_fcs_len))
-               return 1;
+               return true;
+
        if (ieee80211_is_ctl(hdr->frame_control) &&
            !ieee80211_is_pspoll(hdr->frame_control) &&
            !ieee80211_is_back_req(hdr->frame_control))
-               return 1;
-       return 0;
+               return true;
+
+       return false;
 }
 
 static int
@@ -1231,7 +1232,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
                    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
                        sta->last_rx = jiffies;
-                       if (ieee80211_is_data(hdr->frame_control)) {
+                       if (ieee80211_is_data(hdr->frame_control) &&
+                           !is_multicast_ether_addr(hdr->addr1)) {
                                sta->last_rx_rate_idx = status->rate_idx;
                                sta->last_rx_rate_flag = status->flag;
                                sta->last_rx_rate_vht_flag = status->vht_flag;
@@ -3190,7 +3192,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
 }
 
 /*
- * This is the actual Rx frames handler. as it blongs to Rx path it must
+ * This is the actual Rx frames handler. As it belongs to the Rx path it must
  * be called with rcu_read_lock protection.
  */
 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
index 3ce7f2c8539a1f626f7488833ee966fb3af5d502..f40661eb75b578dd2e409757331c085d36461723 100644 (file)
@@ -309,7 +309,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
        if (local->scan_req != local->int_scan_req)
                cfg80211_scan_done(local->scan_req, aborted);
        local->scan_req = NULL;
-       rcu_assign_pointer(local->scan_sdata, NULL);
+       RCU_INIT_POINTER(local->scan_sdata, NULL);
 
        local->scanning = 0;
        local->scan_chandef.chan = NULL;
@@ -559,7 +559,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
                ieee80211_recalc_idle(local);
 
                local->scan_req = NULL;
-               rcu_assign_pointer(local->scan_sdata, NULL);
+               RCU_INIT_POINTER(local->scan_sdata, NULL);
        }
 
        return rc;
@@ -773,7 +773,7 @@ void ieee80211_scan_work(struct work_struct *work)
                int rc;
 
                local->scan_req = NULL;
-               rcu_assign_pointer(local->scan_sdata, NULL);
+               RCU_INIT_POINTER(local->scan_sdata, NULL);
 
                rc = __ieee80211_start_scan(sdata, req);
                if (rc) {
@@ -1014,7 +1014,7 @@ out_free:
 
        if (ret) {
                /* Clean in case of failure after HW restart or upon resume. */
-               rcu_assign_pointer(local->sched_scan_sdata, NULL);
+               RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
                local->sched_scan_req = NULL;
        }
 
@@ -1076,12 +1076,8 @@ void ieee80211_sched_scan_results(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL(ieee80211_sched_scan_results);
 
-void ieee80211_sched_scan_stopped_work(struct work_struct *work)
+void ieee80211_sched_scan_end(struct ieee80211_local *local)
 {
-       struct ieee80211_local *local =
-               container_of(work, struct ieee80211_local,
-                            sched_scan_stopped_work);
-
        mutex_lock(&local->mtx);
 
        if (!rcu_access_pointer(local->sched_scan_sdata)) {
@@ -1089,7 +1085,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
                return;
        }
 
-       rcu_assign_pointer(local->sched_scan_sdata, NULL);
+       RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
 
        /* If sched scan was aborted by the driver. */
        local->sched_scan_req = NULL;
@@ -1099,6 +1095,15 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
        cfg80211_sched_scan_stopped(local->hw.wiphy);
 }
 
+void ieee80211_sched_scan_stopped_work(struct work_struct *work)
+{
+       struct ieee80211_local *local =
+               container_of(work, struct ieee80211_local,
+                            sched_scan_stopped_work);
+
+       ieee80211_sched_scan_end(local);
+}
+
 void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
index 137a192e64bc3c2aa61cc9c5912a89bd3008cbe3..632d372bb5117fa464dff4d5cb587f56a226647a 100644 (file)
@@ -552,7 +552,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
 {
        struct ieee80211_local *local = sta->local;
-       int err = 0;
+       int err;
 
        might_sleep();
 
@@ -570,7 +570,6 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
 
        return 0;
  out_free:
-       BUG_ON(!err);
        sta_info_free(local, sta);
        return err;
 }
@@ -1148,7 +1147,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        atomic_dec(&ps->num_sta_ps);
 
        /* This station just woke up and isn't aware of our SMPS state */
-       if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+       if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+           !ieee80211_smps_is_restrictive(sta->known_smps_mode,
                                           sdata->smps_mode) &&
            sta->known_smps_mode != sdata->bss->req_smps &&
            sta_info_tx_streams(sta) != 1) {
index 00ba90b02ab2ab79c01d58dc5ba25785993f8dd6..60cb7a665976e10e7a909a9545b7643cf34e67a4 100644 (file)
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
            !is_multicast_ether_addr(hdr->addr1))
                txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
-       if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
-           (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
                txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
-       else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
                txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
        put_unaligned_le16(txflags, pos);
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
new file mode 100644 (file)
index 0000000..652813b
--- /dev/null
+++ b/net/mac80211/tdls.c
@@ -0,0 +1,325 @@
+/*
+ * mac80211 TDLS handling code
+ *
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2014, Intel Corporation
+ *
+ * This file is GPLv2 as found in COPYING.
+ */
+
+#include <linux/ieee80211.h>
+#include "ieee80211_i.h"
+
+static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
+{
+       u8 *pos = (void *)skb_put(skb, 7);
+
+       *pos++ = WLAN_EID_EXT_CAPABILITY;
+       *pos++ = 5; /* len */
+       *pos++ = 0x0;
+       *pos++ = 0x0;
+       *pos++ = 0x0;
+       *pos++ = 0x0;
+       *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+}
+
+static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       u16 capab;
+
+       capab = 0;
+       if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
+               return capab;
+
+       if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+               capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
+       if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
+               capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+
+       return capab;
+}
+
+static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
+                                      const u8 *peer, const u8 *bssid)
+{
+       struct ieee80211_tdls_lnkie *lnkid;
+
+       lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+
+       lnkid->ie_type = WLAN_EID_LINK_ID;
+       lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
+
+       memcpy(lnkid->bssid, bssid, ETH_ALEN);
+       memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
+       memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+}
+
+static int
+ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
+                              const u8 *peer, u8 action_code, u8 dialog_token,
+                              u16 status_code, struct sk_buff *skb)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       struct ieee80211_tdls_data *tf;
+
+       tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+
+       memcpy(tf->da, peer, ETH_ALEN);
+       memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
+       tf->ether_type = cpu_to_be16(ETH_P_TDLS);
+       tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+
+       switch (action_code) {
+       case WLAN_TDLS_SETUP_REQUEST:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_SETUP_REQUEST;
+
+               skb_put(skb, sizeof(tf->u.setup_req));
+               tf->u.setup_req.dialog_token = dialog_token;
+               tf->u.setup_req.capability =
+                       cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+               ieee80211_add_srates_ie(sdata, skb, false, band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false, band);
+               ieee80211_tdls_add_ext_capab(skb);
+               break;
+       case WLAN_TDLS_SETUP_RESPONSE:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
+
+               skb_put(skb, sizeof(tf->u.setup_resp));
+               tf->u.setup_resp.status_code = cpu_to_le16(status_code);
+               tf->u.setup_resp.dialog_token = dialog_token;
+               tf->u.setup_resp.capability =
+                       cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+               ieee80211_add_srates_ie(sdata, skb, false, band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false, band);
+               ieee80211_tdls_add_ext_capab(skb);
+               break;
+       case WLAN_TDLS_SETUP_CONFIRM:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
+
+               skb_put(skb, sizeof(tf->u.setup_cfm));
+               tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
+               tf->u.setup_cfm.dialog_token = dialog_token;
+               break;
+       case WLAN_TDLS_TEARDOWN:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_TEARDOWN;
+
+               skb_put(skb, sizeof(tf->u.teardown));
+               tf->u.teardown.reason_code = cpu_to_le16(status_code);
+               break;
+       case WLAN_TDLS_DISCOVERY_REQUEST:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+
+               skb_put(skb, sizeof(tf->u.discover_req));
+               tf->u.discover_req.dialog_token = dialog_token;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
+                          const u8 *peer, u8 action_code, u8 dialog_token,
+                          u16 status_code, struct sk_buff *skb)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       struct ieee80211_mgmt *mgmt;
+
+       mgmt = (void *)skb_put(skb, 24);
+       memset(mgmt, 0, 24);
+       memcpy(mgmt->da, peer, ETH_ALEN);
+       memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+       memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+
+       mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                                         IEEE80211_STYPE_ACTION);
+
+       switch (action_code) {
+       case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+               skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
+               mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+               mgmt->u.action.u.tdls_discover_resp.action_code =
+                       WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+               mgmt->u.action.u.tdls_discover_resp.dialog_token =
+                       dialog_token;
+               mgmt->u.action.u.tdls_discover_resp.capability =
+                       cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+               ieee80211_add_srates_ie(sdata, skb, false, band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false, band);
+               ieee80211_tdls_add_ext_capab(skb);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+                       const u8 *peer, u8 action_code, u8 dialog_token,
+                       u16 status_code, u32 peer_capability,
+                       const u8 *extra_ies, size_t extra_ies_len)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       struct sk_buff *skb = NULL;
+       bool send_direct;
+       int ret;
+
+       if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+               return -ENOTSUPP;
+
+       /* make sure we are in managed mode, and associated */
+       if (sdata->vif.type != NL80211_IFTYPE_STATION ||
+           !sdata->u.mgd.associated)
+               return -EINVAL;
+
+       tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
+                action_code, peer);
+
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+                           max(sizeof(struct ieee80211_mgmt),
+                               sizeof(struct ieee80211_tdls_data)) +
+                           50 + /* supported rates */
+                           7 + /* ext capab */
+                           extra_ies_len +
+                           sizeof(struct ieee80211_tdls_lnkie));
+       if (!skb)
+               return -ENOMEM;
+
+       skb_reserve(skb, local->hw.extra_tx_headroom);
+
+       switch (action_code) {
+       case WLAN_TDLS_SETUP_REQUEST:
+       case WLAN_TDLS_SETUP_RESPONSE:
+       case WLAN_TDLS_SETUP_CONFIRM:
+       case WLAN_TDLS_TEARDOWN:
+       case WLAN_TDLS_DISCOVERY_REQUEST:
+               ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
+                                                    action_code, dialog_token,
+                                                    status_code, skb);
+               send_direct = false;
+               break;
+       case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+               ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
+                                                dialog_token, status_code,
+                                                skb);
+               send_direct = true;
+               break;
+       default:
+               ret = -ENOTSUPP;
+               break;
+       }
+
+       if (ret < 0)
+               goto fail;
+
+       if (extra_ies_len)
+               memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+       /* the TDLS link IE is always added last */
+       switch (action_code) {
+       case WLAN_TDLS_SETUP_REQUEST:
+       case WLAN_TDLS_SETUP_CONFIRM:
+       case WLAN_TDLS_TEARDOWN:
+       case WLAN_TDLS_DISCOVERY_REQUEST:
+               /* we are the initiator */
+               ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
+                                          sdata->u.mgd.bssid);
+               break;
+       case WLAN_TDLS_SETUP_RESPONSE:
+       case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+               /* we are the responder */
+               ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
+                                          sdata->u.mgd.bssid);
+               break;
+       default:
+               ret = -ENOTSUPP;
+               goto fail;
+       }
+
+       if (send_direct) {
+               ieee80211_tx_skb(sdata, skb);
+               return 0;
+       }
+
+       /*
+        * According to 802.11z, setup request/response frames are sent in
+        * AC_BK; everything else defaults to AC_VI.
+        */
+       switch (action_code) {
+       case WLAN_TDLS_SETUP_REQUEST:
+       case WLAN_TDLS_SETUP_RESPONSE:
+               skb_set_queue_mapping(skb, IEEE80211_AC_BK);
+               skb->priority = 2;
+               break;
+       default:
+               skb_set_queue_mapping(skb, IEEE80211_AC_VI);
+               skb->priority = 5;
+               break;
+       }
+
+       /* disable bottom halves when entering the Tx path */
+       local_bh_disable();
+       ret = ieee80211_subif_start_xmit(skb, dev);
+       local_bh_enable();
+
+       return ret;
+
+fail:
+       dev_kfree_skb(skb);
+       return ret;
+}
+
+int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+                       const u8 *peer, enum nl80211_tdls_operation oper)
+{
+       struct sta_info *sta;
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+       if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+               return -ENOTSUPP;
+
+       if (sdata->vif.type != NL80211_IFTYPE_STATION)
+               return -EINVAL;
+
+       tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
+
+       switch (oper) {
+       case NL80211_TDLS_ENABLE_LINK:
+               rcu_read_lock();
+               sta = sta_info_get(sdata, peer);
+               if (!sta) {
+                       rcu_read_unlock();
+                       return -ENOLINK;
+               }
+
+               set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
+               rcu_read_unlock();
+               break;
+       case NL80211_TDLS_DISABLE_LINK:
+               return sta_info_destroy_addr(sdata, peer);
+       case NL80211_TDLS_TEARDOWN:
+       case NL80211_TDLS_SETUP:
+       case NL80211_TDLS_DISCOVERY_REQ:
+               /* We don't support in-driver setup/teardown/discovery */
+               return -ENOTSUPP;
+       default:
+               return -ENOTSUPP;
+       }
+
+       return 0;
+}
index a0b0aea76525c341711c129519a1c3f89a704bb9..762e4cd163869e58baedae86751534020ebc0d5c 100644 (file)
 
 #define VIF_ENTRY      __field(enum nl80211_iftype, vif_type) __field(void *, sdata)   \
                        __field(bool, p2p)                                              \
-                       __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+                       __string(vif_name, sdata->name)
 #define VIF_ASSIGN     __entry->vif_type = sdata->vif.type; __entry->sdata = sdata;    \
                        __entry->p2p = sdata->vif.p2p;                                  \
-                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
+                       __assign_str(vif_name, sdata->name)
 #define VIF_PR_FMT     " vif:%s(%d%s)"
 #define VIF_PR_ARG     __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
@@ -184,6 +184,20 @@ TRACE_EVENT(drv_return_bool,
                  "true" : "false")
 );
 
+TRACE_EVENT(drv_return_u32,
+       TP_PROTO(struct ieee80211_local *local, u32 ret),
+       TP_ARGS(local, ret),
+       TP_STRUCT__entry(
+               LOCAL_ENTRY
+               __field(u32, ret)
+       ),
+       TP_fast_assign(
+               LOCAL_ASSIGN;
+               __entry->ret = ret;
+       ),
+       TP_printk(LOCAL_PR_FMT " - %u", LOCAL_PR_ARG, __entry->ret)
+);
+
 TRACE_EVENT(drv_return_u64,
        TP_PROTO(struct ieee80211_local *local, u64 ret),
        TP_ARGS(local, ret),
@@ -1499,6 +1513,24 @@ DEFINE_EVENT(local_sdata_evt, drv_leave_ibss,
        TP_ARGS(local, sdata)
 );
 
+TRACE_EVENT(drv_get_expected_throughput,
+       TP_PROTO(struct ieee80211_sta *sta),
+
+       TP_ARGS(sta),
+
+       TP_STRUCT__entry(
+               STA_ENTRY
+       ),
+
+       TP_fast_assign(
+               STA_ASSIGN;
+       ),
+
+       TP_printk(
+               STA_PR_FMT, STA_PR_ARG
+       )
+);
+
 /*
  * Tracing for API calls that drivers call.
  */
index 19d36d4117e0da0b5524b7e3f8102a86810dceed..5214686d9fd1ec9ab4bc1e2a466532bd3c829c10 100644 (file)
@@ -2328,7 +2328,8 @@ void ieee80211_tx_pending(unsigned long data)
 /* functions for drivers to get certain frames */
 
 static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
-                                      struct ps_data *ps, struct sk_buff *skb)
+                                      struct ps_data *ps, struct sk_buff *skb,
+                                      bool is_template)
 {
        u8 *pos, *tim;
        int aid0 = 0;
@@ -2341,11 +2342,12 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
                 * checking byte-for-byte */
                have_bits = !bitmap_empty((unsigned long *)ps->tim,
                                          IEEE80211_MAX_AID+1);
-
-       if (ps->dtim_count == 0)
-               ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
-       else
-               ps->dtim_count--;
+       if (!is_template) {
+               if (ps->dtim_count == 0)
+                       ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
+               else
+                       ps->dtim_count--;
+       }
 
        tim = pos = (u8 *) skb_put(skb, 6);
        *pos++ = WLAN_EID_TIM;
@@ -2391,7 +2393,8 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
 }
 
 static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
-                                   struct ps_data *ps, struct sk_buff *skb)
+                                   struct ps_data *ps, struct sk_buff *skb,
+                                   bool is_template)
 {
        struct ieee80211_local *local = sdata->local;
 
@@ -2403,24 +2406,24 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
         * of the tim bitmap in mac80211 and the driver.
         */
        if (local->tim_in_locked_section) {
-               __ieee80211_beacon_add_tim(sdata, ps, skb);
+               __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
        } else {
                spin_lock_bh(&local->tim_lock);
-               __ieee80211_beacon_add_tim(sdata, ps, skb);
+               __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
                spin_unlock_bh(&local->tim_lock);
        }
 
        return 0;
 }
 
-static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
-                                struct beacon_data *beacon)
+static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
+                             struct beacon_data *beacon)
 {
        struct probe_resp *resp;
-       int counter_offset_beacon = sdata->csa_counter_offset_beacon;
-       int counter_offset_presp = sdata->csa_counter_offset_presp;
        u8 *beacon_data;
        size_t beacon_data_len;
+       int i;
+       u8 count = sdata->csa_current_counter;
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP:
@@ -2438,40 +2441,57 @@ static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
        default:
                return;
        }
-       if (WARN_ON(counter_offset_beacon >= beacon_data_len))
-               return;
 
-       /* Warn if the driver did not check for/react to csa
-        * completeness.  A beacon with CSA counter set to 0 should
-        * never occur, because a counter of 1 means switch just
-        * before the next beacon.
-        */
-       if (WARN_ON(beacon_data[counter_offset_beacon] == 1))
-               return;
+       for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; ++i) {
+               u16 counter_offset_beacon =
+                       sdata->csa_counter_offset_beacon[i];
+               u16 counter_offset_presp = sdata->csa_counter_offset_presp[i];
 
-       beacon_data[counter_offset_beacon]--;
+               if (counter_offset_beacon) {
+                       if (WARN_ON(counter_offset_beacon >= beacon_data_len))
+                               return;
 
-       if (sdata->vif.type == NL80211_IFTYPE_AP && counter_offset_presp) {
-               rcu_read_lock();
-               resp = rcu_dereference(sdata->u.ap.probe_resp);
+                       beacon_data[counter_offset_beacon] = count;
+               }
+
+               if (sdata->vif.type == NL80211_IFTYPE_AP &&
+                   counter_offset_presp) {
+                       rcu_read_lock();
+                       resp = rcu_dereference(sdata->u.ap.probe_resp);
 
-               /* if nl80211 accepted the offset, this should not happen. */
-               if (WARN_ON(!resp)) {
+                       /* If nl80211 accepted the offset, this should
+                        * not happen.
+                        */
+                       if (WARN_ON(!resp)) {
+                               rcu_read_unlock();
+                               return;
+                       }
+                       resp->data[counter_offset_presp] = count;
                        rcu_read_unlock();
-                       return;
                }
-               resp->data[counter_offset_presp]--;
-               rcu_read_unlock();
        }
 }
 
+u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
+{
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+       sdata->csa_current_counter--;
+
+       /* the counter should never reach 0 */
+       WARN_ON(!sdata->csa_current_counter);
+
+       return sdata->csa_current_counter;
+}
+EXPORT_SYMBOL(ieee80211_csa_update_counter);
+
 bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct beacon_data *beacon = NULL;
        u8 *beacon_data;
        size_t beacon_data_len;
-       int counter_beacon = sdata->csa_counter_offset_beacon;
+       int counter_beacon = sdata->csa_counter_offset_beacon[0];
        int ret = false;
 
        if (!ieee80211_sdata_running(sdata))
@@ -2521,9 +2541,11 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
 }
 EXPORT_SYMBOL(ieee80211_csa_is_complete);
 
-struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
-                                        struct ieee80211_vif *vif,
-                                        u16 *tim_offset, u16 *tim_length)
+static struct sk_buff *
+__ieee80211_beacon_get(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_mutable_offsets *offs,
+                      bool is_template)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct sk_buff *skb = NULL;
@@ -2532,6 +2554,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
        enum ieee80211_band band;
        struct ieee80211_tx_rate_control txrc;
        struct ieee80211_chanctx_conf *chanctx_conf;
+       int csa_off_base = 0;
 
        rcu_read_lock();
 
@@ -2541,18 +2564,20 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
        if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
                goto out;
 
-       if (tim_offset)
-               *tim_offset = 0;
-       if (tim_length)
-               *tim_length = 0;
+       if (offs)
+               memset(offs, 0, sizeof(*offs));
 
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                struct ieee80211_if_ap *ap = &sdata->u.ap;
                struct beacon_data *beacon = rcu_dereference(ap->beacon);
 
                if (beacon) {
-                       if (sdata->vif.csa_active)
-                               ieee80211_update_csa(sdata, beacon);
+                       if (sdata->vif.csa_active) {
+                               if (!is_template)
+                                       ieee80211_csa_update_counter(vif);
+
+                               ieee80211_set_csa(sdata, beacon);
+                       }
 
                        /*
                         * headroom, head length,
@@ -2569,12 +2594,16 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                        memcpy(skb_put(skb, beacon->head_len), beacon->head,
                               beacon->head_len);
 
-                       ieee80211_beacon_add_tim(sdata, &ap->ps, skb);
+                       ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
+                                                is_template);
 
-                       if (tim_offset)
-                               *tim_offset = beacon->head_len;
-                       if (tim_length)
-                               *tim_length = skb->len - beacon->head_len;
+                       if (offs) {
+                               offs->tim_offset = beacon->head_len;
+                               offs->tim_length = skb->len - beacon->head_len;
+
+                               /* For AP, the CSA offsets are counted from the tail */
+                               csa_off_base = skb->len;
+                       }
 
                        if (beacon->tail)
                                memcpy(skb_put(skb, beacon->tail_len),
@@ -2589,9 +2618,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                if (!presp)
                        goto out;
 
-               if (sdata->vif.csa_active)
-                       ieee80211_update_csa(sdata, presp);
+               if (sdata->vif.csa_active) {
+                       if (!is_template)
+                               ieee80211_csa_update_counter(vif);
 
+                       ieee80211_set_csa(sdata, presp);
+               }
 
                skb = dev_alloc_skb(local->tx_headroom + presp->head_len +
                                    local->hw.extra_beacon_tailroom);
@@ -2611,8 +2643,17 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                if (!bcn)
                        goto out;
 
-               if (sdata->vif.csa_active)
-                       ieee80211_update_csa(sdata, bcn);
+               if (sdata->vif.csa_active) {
+                       if (!is_template)
+                               /* TODO: For mesh, csa_counter is in TUs, so
+                                * decrementing it by one isn't correct; for
+                                * now we keep it consistent with the overall
+                                * mac80211 behavior.
+                                */
+                               ieee80211_csa_update_counter(vif);
+
+                       ieee80211_set_csa(sdata, bcn);
+               }
 
                if (ifmsh->sync_ops)
                        ifmsh->sync_ops->adjust_tbtt(sdata, bcn);
@@ -2626,13 +2667,33 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                        goto out;
                skb_reserve(skb, local->tx_headroom);
                memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len);
-               ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb);
+               ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
+
+               if (offs) {
+                       offs->tim_offset = bcn->head_len;
+                       offs->tim_length = skb->len - bcn->head_len;
+               }
+
                memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len);
        } else {
                WARN_ON(1);
                goto out;
        }
 
+       /* CSA offsets */
+       if (offs) {
+               int i;
+
+               for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; i++) {
+                       u16 csa_off = sdata->csa_counter_offset_beacon[i];
+
+                       if (!csa_off)
+                               continue;
+
+                       offs->csa_counter_offs[i] = csa_off_base + csa_off;
+               }
+       }
+
        band = chanctx_conf->def.chan->band;
 
        info = IEEE80211_SKB_CB(skb);
@@ -2663,6 +2724,32 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
  out:
        rcu_read_unlock();
        return skb;
+
+}
+
+struct sk_buff *
+ieee80211_beacon_get_template(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_mutable_offsets *offs)
+{
+       return __ieee80211_beacon_get(hw, vif, offs, true);
+}
+EXPORT_SYMBOL(ieee80211_beacon_get_template);
+
+struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
+                                        struct ieee80211_vif *vif,
+                                        u16 *tim_offset, u16 *tim_length)
+{
+       struct ieee80211_mutable_offsets offs = {};
+       struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
+
+       if (tim_offset)
+               *tim_offset = offs.tim_offset;
+
+       if (tim_length)
+               *tim_length = offs.tim_length;
+
+       return bcn;
 }
 EXPORT_SYMBOL(ieee80211_beacon_get_tim);
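For illustration only, not part of the patch: a driver that offloads beaconing could consume the new template API roughly as sketched below. The callback name and the upload helper are invented; only ieee80211_beacon_get_template(), struct ieee80211_mutable_offsets and ieee80211_csa_update_counter() come from the hunks above.

/* Hypothetical driver code, only to show the intended call flow. */
static int drv_example_update_beacon(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif)
{
        struct ieee80211_mutable_offsets offs = {};
        struct sk_buff *skb;

        /* The template path leaves the CSA counters in the frame alone... */
        skb = ieee80211_beacon_get_template(hw, vif, &offs);
        if (!skb)
                return -ENOENT;

        /* ...and offs reports where the device must keep updating them:
         * offs.tim_offset/offs.tim_length for the TIM element and
         * offs.csa_counter_offs[] for the CSA counters.
         */
        example_upload_beacon(hw, skb, &offs);  /* assumed hardware helper */
        dev_kfree_skb(skb);

        /* __ieee80211_beacon_get() only decrements the counter on the
         * non-template path, so advance mac80211's csa_current_counter
         * here; a real driver would do this once per transmitted beacon.
         */
        ieee80211_csa_update_counter(vif);

        return 0;
}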
 
index 275c94f995f7c8401749cbbafb249bb52a418be7..6886601afe1c731c3cc7b5409745307b2f48e67c 100644 (file)
@@ -554,7 +554,7 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
        ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_FLUSH);
 
-       drv_flush(local, queues, false);
+       drv_flush(local, sdata, queues, false);
 
        ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_FLUSH);
@@ -1457,6 +1457,44 @@ void ieee80211_stop_device(struct ieee80211_local *local)
        drv_stop(local);
 }
 
+static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
+{
+       struct ieee80211_sub_if_data *sdata;
+       struct ieee80211_chanctx *ctx;
+
+       /*
+        * We get here if the device can't be restarted properly during
+        * resume.  We might also get here during a HW reset, which is a
+        * slightly different situation in which we need to drop all
+        * connections.
+        *
+        * Ask cfg80211 to turn off all interfaces; this will result in more
+        * warnings, but at least we'll then get into a clean stopped state.
+        */
+
+       local->resuming = false;
+       local->suspended = false;
+       local->started = false;
+
+       /* scheduled scan clearly can't be running any more, but tell
+        * cfg80211 and clear local state
+        */
+       ieee80211_sched_scan_end(local);
+
+       list_for_each_entry(sdata, &local->interfaces, list)
+               sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
+
+       /* Mark channel contexts as not being in the driver any more to avoid
+        * removing them from the driver during the shutdown process...
+        */
+       mutex_lock(&local->chanctx_mtx);
+       list_for_each_entry(ctx, &local->chanctx_list, list)
+               ctx->driver_present = false;
+       mutex_unlock(&local->chanctx_mtx);
+
+       cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+}
+
 static void ieee80211_assign_chanctx(struct ieee80211_local *local,
                                     struct ieee80211_sub_if_data *sdata)
 {
@@ -1520,9 +1558,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
         */
        res = drv_start(local);
        if (res) {
-               WARN(local->suspended, "Hardware became unavailable "
-                    "upon resume. This could be a software issue "
-                    "prior to suspend or a hardware issue.\n");
+               if (local->suspended)
+                       WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
+               else
+                       WARN(1, "Hardware became unavailable during restart.\n");
+               ieee80211_handle_reconfig_failure(local);
                return res;
        }
 
@@ -1546,7 +1586,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                WARN_ON(local->resuming);
                res = drv_add_interface(local, sdata);
                if (WARN_ON(res)) {
-                       rcu_assign_pointer(local->monitor_sdata, NULL);
+                       RCU_INIT_POINTER(local->monitor_sdata, NULL);
                        synchronize_net();
                        kfree(sdata);
                }
@@ -1565,17 +1605,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                list_for_each_entry(ctx, &local->chanctx_list, list)
                        WARN_ON(drv_add_chanctx(local, ctx));
                mutex_unlock(&local->chanctx_mtx);
-       }
 
-       list_for_each_entry(sdata, &local->interfaces, list) {
-               if (!ieee80211_sdata_running(sdata))
-                       continue;
-               ieee80211_assign_chanctx(local, sdata);
-       }
+               list_for_each_entry(sdata, &local->interfaces, list) {
+                       if (!ieee80211_sdata_running(sdata))
+                               continue;
+                       ieee80211_assign_chanctx(local, sdata);
+               }
 
-       sdata = rtnl_dereference(local->monitor_sdata);
-       if (sdata && ieee80211_sdata_running(sdata))
-               ieee80211_assign_chanctx(local, sdata);
+               sdata = rtnl_dereference(local->monitor_sdata);
+               if (sdata && ieee80211_sdata_running(sdata))
+                       ieee80211_assign_chanctx(local, sdata);
+       }
 
        /* add STAs back */
        mutex_lock(&local->sta_mtx);
@@ -1671,13 +1711,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                        }
                        break;
                case NL80211_IFTYPE_WDS:
-                       break;
                case NL80211_IFTYPE_AP_VLAN:
                case NL80211_IFTYPE_MONITOR:
-                       /* ignore virtual */
-                       break;
                case NL80211_IFTYPE_P2P_DEVICE:
-                       changed = BSS_CHANGED_IDLE;
+                       /* nothing to do */
                        break;
                case NL80211_IFTYPE_UNSPECIFIED:
                case NUM_NL80211_IFTYPES:
@@ -1780,7 +1817,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mutex_unlock(&local->mtx);
 
        if (sched_scan_stopped)
-               cfg80211_sched_scan_stopped(local->hw.wiphy);
+               cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
        /*
         * If this is for hw restart things are still running.
@@ -2797,3 +2834,121 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
 
        ps->dtim_count = dtim_count;
 }
+
+int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
+                                const struct cfg80211_chan_def *chandef,
+                                enum ieee80211_chanctx_mode chanmode,
+                                u8 radar_detect)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sub_if_data *sdata_iter;
+       enum nl80211_iftype iftype = sdata->wdev.iftype;
+       int num[NUM_NL80211_IFTYPES];
+       struct ieee80211_chanctx *ctx;
+       int num_different_channels = 0;
+       int total = 1;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       if (WARN_ON(hweight32(radar_detect) > 1))
+               return -EINVAL;
+
+       if (WARN_ON(chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
+                   !chandef->chan))
+               return -EINVAL;
+
+       if (chandef)
+               num_different_channels = 1;
+
+       if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
+               return -EINVAL;
+
+       /* Always allow software iftypes */
+       if (local->hw.wiphy->software_iftypes & BIT(iftype)) {
+               if (radar_detect)
+                       return -EINVAL;
+               return 0;
+       }
+
+       memset(num, 0, sizeof(num));
+
+       if (iftype != NL80211_IFTYPE_UNSPECIFIED)
+               num[iftype] = 1;
+
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->conf.radar_enabled)
+                       radar_detect |= BIT(ctx->conf.def.width);
+               if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
+                       num_different_channels++;
+                       continue;
+               }
+               if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
+                   cfg80211_chandef_compatible(chandef,
+                                               &ctx->conf.def))
+                       continue;
+               num_different_channels++;
+       }
+
+       list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
+               struct wireless_dev *wdev_iter;
+
+               wdev_iter = &sdata_iter->wdev;
+
+               if (sdata_iter == sdata ||
+                   rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL ||
+                   local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
+                       continue;
+
+               num[wdev_iter->iftype]++;
+               total++;
+       }
+
+       if (total == 1 && !radar_detect)
+               return 0;
+
+       return cfg80211_check_combinations(local->hw.wiphy,
+                                          num_different_channels,
+                                          radar_detect, num);
+}
+
+static void
+ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c,
+                        void *data)
+{
+       u32 *max_num_different_channels = data;
+
+       *max_num_different_channels = max(*max_num_different_channels,
+                                         c->num_different_channels);
+}
+
+int ieee80211_max_num_channels(struct ieee80211_local *local)
+{
+       struct ieee80211_sub_if_data *sdata;
+       int num[NUM_NL80211_IFTYPES] = {};
+       struct ieee80211_chanctx *ctx;
+       int num_different_channels = 0;
+       u8 radar_detect = 0;
+       u32 max_num_different_channels = 1;
+       int err;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               num_different_channels++;
+
+               if (ctx->conf.radar_enabled)
+                       radar_detect |= BIT(ctx->conf.def.width);
+       }
+
+       list_for_each_entry_rcu(sdata, &local->interfaces, list)
+               num[sdata->wdev.iftype]++;
+
+       err = cfg80211_iter_combinations(local->hw.wiphy,
+                                        num_different_channels, radar_detect,
+                                        num, ieee80211_iter_max_chans,
+                                        &max_num_different_channels);
+       if (err < 0)
+               return err;
+
+       return max_num_different_channels;
+}
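For illustration only, not part of the patch: ieee80211_check_combinations() expects chanctx_mtx to be held and returns a negative error if the requested set-up exceeds what the wiphy's interface combinations allow; a hypothetical caller might look like this.

/* Hypothetical caller of the new helper; the function name is invented. */
static int example_may_use_chandef(struct ieee80211_sub_if_data *sdata,
                                   const struct cfg80211_chan_def *chandef)
{
        struct ieee80211_local *local = sdata->local;
        int ret;

        lockdep_assert_held(&local->chanctx_mtx);

        /* Shared channel context, no radar detection width requested. */
        ret = ieee80211_check_combinations(sdata, chandef,
                                           IEEE80211_CHANCTX_SHARED, 0);
        if (ret < 0)
                return ret;     /* combination not allowed by the hardware */

        return 0;
}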
index e9e36a256165842ac112e35e612ba79c8f86d305..9265adfdabfcf99acbdf1598067884188c500a49 100644 (file)
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        if (!vht_cap_ie || !sband->vht_cap.vht_supported)
                return;
 
-       /* A VHT STA must support 40 MHz */
-       if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-               return;
+       /*
+        * A VHT STA must support 40 MHz, but if we verify that here
+        * then we break a few things - some APs (e.g. Netgear R6300v2
+        * and others based on the BCM4360 chipset) will unset this
+        * capability bit when operating in 20 MHz.
+        */
 
        vht_cap->vht_supported = true;
 
index b8600e3c29c828d918b3676397f73a4d0fe7892c..9b3dcc201145dd3942bf30771e1638ea973759f7 100644 (file)
@@ -406,7 +406,10 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
 
        if (info->control.hw_key &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
-           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
+           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
+           !((info->control.hw_key->flags &
+              IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
+             ieee80211_is_mgmt(hdr->frame_control))) {
                /*
                 * hwaccel has no need for preallocated room for CCMP
                 * header or MIC fields
index b33dd76d4307309bb02477606f8e7ccdefaa870d..1818a99b3081e5a87a5c1ed72e6dba1760f2c1bc 100644 (file)
@@ -2,6 +2,10 @@ config MAC802154
        tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
        depends on IEEE802154
        select CRC_CCITT
+       select CRYPTO_AUTHENC
+       select CRYPTO_CCM
+       select CRYPTO_CTR
+       select CRYPTO_AES
        ---help---
          This option enables the hardware independent IEEE 802.15.4
          networking stack for SoftMAC devices (the ones implementing
index 15d62df521825c8581fc25ec25a9caebc8902a72..9723d6f3f3e5b742e1d105b2667627949f1d7c7d 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_MAC802154)        += mac802154.o
-mac802154-objs         := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o wpan.o
+mac802154-objs         := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o \
+                          monitor.o wpan.o llsec.o
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
new file mode 100644 (file)
index 0000000..e4a2558
--- /dev/null
@@ -0,0 +1,1069 @@
+/*
+ * Copyright (C) 2014 Fraunhofer ITWM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
+ */
+
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <net/ieee802154.h>
+#include <crypto/algapi.h>
+
+#include "mac802154.h"
+#include "llsec.h"
+
+static void llsec_key_put(struct mac802154_llsec_key *key);
+static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
+                              const struct ieee802154_llsec_key_id *b);
+
+static void llsec_dev_free(struct mac802154_llsec_device *dev);
+
+void mac802154_llsec_init(struct mac802154_llsec *sec)
+{
+       memset(sec, 0, sizeof(*sec));
+
+       memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN);
+
+       INIT_LIST_HEAD(&sec->table.security_levels);
+       INIT_LIST_HEAD(&sec->table.devices);
+       INIT_LIST_HEAD(&sec->table.keys);
+       hash_init(sec->devices_short);
+       hash_init(sec->devices_hw);
+       rwlock_init(&sec->lock);
+}
+
+void mac802154_llsec_destroy(struct mac802154_llsec *sec)
+{
+       struct ieee802154_llsec_seclevel *sl, *sn;
+       struct ieee802154_llsec_device *dev, *dn;
+       struct ieee802154_llsec_key_entry *key, *kn;
+
+       list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
+               struct mac802154_llsec_seclevel *msl;
+
+               msl = container_of(sl, struct mac802154_llsec_seclevel, level);
+               list_del(&sl->list);
+               kfree(msl);
+       }
+
+       list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
+               struct mac802154_llsec_device *mdev;
+
+               mdev = container_of(dev, struct mac802154_llsec_device, dev);
+               list_del(&dev->list);
+               llsec_dev_free(mdev);
+       }
+
+       list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
+               struct mac802154_llsec_key *mkey;
+
+               mkey = container_of(key->key, struct mac802154_llsec_key, key);
+               list_del(&key->list);
+               llsec_key_put(mkey);
+               kfree(key);
+       }
+}
+
+
+
+int mac802154_llsec_get_params(struct mac802154_llsec *sec,
+                              struct ieee802154_llsec_params *params)
+{
+       read_lock_bh(&sec->lock);
+       *params = sec->params;
+       read_unlock_bh(&sec->lock);
+
+       return 0;
+}
+
+int mac802154_llsec_set_params(struct mac802154_llsec *sec,
+                              const struct ieee802154_llsec_params *params,
+                              int changed)
+{
+       write_lock_bh(&sec->lock);
+
+       if (changed & IEEE802154_LLSEC_PARAM_ENABLED)
+               sec->params.enabled = params->enabled;
+       if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER)
+               sec->params.frame_counter = params->frame_counter;
+       if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL)
+               sec->params.out_level = params->out_level;
+       if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY)
+               sec->params.out_key = params->out_key;
+       if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE)
+               sec->params.default_key_source = params->default_key_source;
+       if (changed & IEEE802154_LLSEC_PARAM_PAN_ID)
+               sec->params.pan_id = params->pan_id;
+       if (changed & IEEE802154_LLSEC_PARAM_HWADDR)
+               sec->params.hwaddr = params->hwaddr;
+       if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR)
+               sec->params.coord_hwaddr = params->coord_hwaddr;
+       if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR)
+               sec->params.coord_shortaddr = params->coord_shortaddr;
+
+       write_unlock_bh(&sec->lock);
+
+       return 0;
+}
+
+
+
+static struct mac802154_llsec_key*
+llsec_key_alloc(const struct ieee802154_llsec_key *template)
+{
+       const int authsizes[3] = { 4, 8, 16 };
+       struct mac802154_llsec_key *key;
+       int i;
+
+       key = kzalloc(sizeof(*key), GFP_KERNEL);
+       if (!key)
+               return NULL;
+
+       kref_init(&key->ref);
+       key->key = *template;
+
+       BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm));
+
+       for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
+               key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
+                                               CRYPTO_ALG_ASYNC);
+               if (!key->tfm[i])
+                       goto err_tfm;
+               if (crypto_aead_setkey(key->tfm[i], template->key,
+                                      IEEE802154_LLSEC_KEY_SIZE))
+                       goto err_tfm;
+               if (crypto_aead_setauthsize(key->tfm[i], authsizes[i]))
+                       goto err_tfm;
+       }
+
+       key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (!key->tfm0)
+               goto err_tfm;
+
+       if (crypto_blkcipher_setkey(key->tfm0, template->key,
+                                   IEEE802154_LLSEC_KEY_SIZE))
+               goto err_tfm0;
+
+       return key;
+
+err_tfm0:
+       crypto_free_blkcipher(key->tfm0);
+err_tfm:
+       for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
+               if (key->tfm[i])
+                       crypto_free_aead(key->tfm[i]);
+
+       kfree(key);
+       return NULL;
+}
+
+static void llsec_key_release(struct kref *ref)
+{
+       struct mac802154_llsec_key *key;
+       int i;
+
+       key = container_of(ref, struct mac802154_llsec_key, ref);
+
+       for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
+               crypto_free_aead(key->tfm[i]);
+
+       crypto_free_blkcipher(key->tfm0);
+       kfree(key);
+}
+
+static struct mac802154_llsec_key*
+llsec_key_get(struct mac802154_llsec_key *key)
+{
+       kref_get(&key->ref);
+       return key;
+}
+
+static void llsec_key_put(struct mac802154_llsec_key *key)
+{
+       kref_put(&key->ref, llsec_key_release);
+}
+
+static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
+                              const struct ieee802154_llsec_key_id *b)
+{
+       if (a->mode != b->mode)
+               return false;
+
+       if (a->mode == IEEE802154_SCF_KEY_IMPLICIT)
+               return ieee802154_addr_equal(&a->device_addr, &b->device_addr);
+
+       if (a->id != b->id)
+               return false;
+
+       switch (a->mode) {
+       case IEEE802154_SCF_KEY_INDEX:
+               return true;
+       case IEEE802154_SCF_KEY_SHORT_INDEX:
+               return a->short_source == b->short_source;
+       case IEEE802154_SCF_KEY_HW_INDEX:
+               return a->extended_source == b->extended_source;
+       }
+
+       return false;
+}
+
+int mac802154_llsec_key_add(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_key_id *id,
+                           const struct ieee802154_llsec_key *key)
+{
+       struct mac802154_llsec_key *mkey = NULL;
+       struct ieee802154_llsec_key_entry *pos, *new;
+
+       if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) &&
+           key->cmd_frame_ids)
+               return -EINVAL;
+
+       list_for_each_entry(pos, &sec->table.keys, list) {
+               if (llsec_key_id_equal(&pos->id, id))
+                       return -EEXIST;
+
+               if (memcmp(pos->key->key, key->key,
+                          IEEE802154_LLSEC_KEY_SIZE))
+                       continue;
+
+               mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+
+               /* Don't allow multiple instances of the same AES key to have
+                * different allowed frame types/command frame ids, as this is
+                * not possible in the 802.15.4 PIB.
+                */
+               if (pos->key->frame_types != key->frame_types ||
+                   pos->key->cmd_frame_ids != key->cmd_frame_ids)
+                       return -EEXIST;
+
+               break;
+       }
+
+       new = kzalloc(sizeof(*new), GFP_KERNEL);
+       if (!new)
+               return -ENOMEM;
+
+       if (!mkey)
+               mkey = llsec_key_alloc(key);
+       else
+               mkey = llsec_key_get(mkey);
+
+       if (!mkey)
+               goto fail;
+
+       new->id = *id;
+       new->key = &mkey->key;
+
+       list_add_rcu(&new->list, &sec->table.keys);
+
+       return 0;
+
+fail:
+       kfree(new);
+       return -ENOMEM;
+}
+
+int mac802154_llsec_key_del(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_key_id *key)
+{
+       struct ieee802154_llsec_key_entry *pos;
+
+       list_for_each_entry(pos, &sec->table.keys, list) {
+               struct mac802154_llsec_key *mkey;
+
+               mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+
+               if (llsec_key_id_equal(&pos->id, key)) {
+                       llsec_key_put(mkey);
+                       return 0;
+               }
+       }
+
+       return -ENOENT;
+}
+
+
+
+static bool llsec_dev_use_shortaddr(__le16 short_addr)
+{
+       return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) &&
+               short_addr != cpu_to_le16(0xffff);
+}
+
+static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
+{
+       return ((__force u16) short_addr) << 16 | (__force u16) pan_id;
+}
+
+static u64 llsec_dev_hash_long(__le64 hwaddr)
+{
+       return (__force u64) hwaddr;
+}
+
+static struct mac802154_llsec_device*
+llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr,
+                    __le16 pan_id)
+{
+       struct mac802154_llsec_device *dev;
+       u32 key = llsec_dev_hash_short(short_addr, pan_id);
+
+       hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) {
+               if (dev->dev.short_addr == short_addr &&
+                   dev->dev.pan_id == pan_id)
+                       return dev;
+       }
+
+       return NULL;
+}
+
+static struct mac802154_llsec_device*
+llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr)
+{
+       struct mac802154_llsec_device *dev;
+       u64 key = llsec_dev_hash_long(hwaddr);
+
+       hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) {
+               if (dev->dev.hwaddr == hwaddr)
+                       return dev;
+       }
+
+       return NULL;
+}
+
+static void llsec_dev_free(struct mac802154_llsec_device *dev)
+{
+       struct ieee802154_llsec_device_key *pos, *pn;
+       struct mac802154_llsec_device_key *devkey;
+
+       list_for_each_entry_safe(pos, pn, &dev->dev.keys, list) {
+               devkey = container_of(pos, struct mac802154_llsec_device_key,
+                                     devkey);
+
+               list_del(&pos->list);
+               kfree(devkey);
+       }
+
+       kfree(dev);
+}
+
+int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_device *dev)
+{
+       struct mac802154_llsec_device *entry;
+       u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id);
+       u64 hwkey = llsec_dev_hash_long(dev->hwaddr);
+
+       BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN);
+
+       if ((llsec_dev_use_shortaddr(dev->short_addr) &&
+            llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) ||
+            llsec_dev_find_long(sec, dev->hwaddr))
+               return -EEXIST;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->dev = *dev;
+       spin_lock_init(&entry->lock);
+       INIT_LIST_HEAD(&entry->dev.keys);
+
+       if (llsec_dev_use_shortaddr(dev->short_addr))
+               hash_add_rcu(sec->devices_short, &entry->bucket_s, skey);
+       else
+               INIT_HLIST_NODE(&entry->bucket_s);
+
+       hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey);
+       list_add_tail_rcu(&entry->dev.list, &sec->table.devices);
+
+       return 0;
+}
+
+static void llsec_dev_free_rcu(struct rcu_head *rcu)
+{
+       llsec_dev_free(container_of(rcu, struct mac802154_llsec_device, rcu));
+}
+
+int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
+{
+       struct mac802154_llsec_device *pos;
+
+       pos = llsec_dev_find_long(sec, device_addr);
+       if (!pos)
+               return -ENOENT;
+
+       hash_del_rcu(&pos->bucket_s);
+       hash_del_rcu(&pos->bucket_hw);
+       call_rcu(&pos->rcu, llsec_dev_free_rcu);
+
+       return 0;
+}
+
+
+
+static struct mac802154_llsec_device_key*
+llsec_devkey_find(struct mac802154_llsec_device *dev,
+                 const struct ieee802154_llsec_key_id *key)
+{
+       struct ieee802154_llsec_device_key *devkey;
+
+       list_for_each_entry_rcu(devkey, &dev->dev.keys, list) {
+               if (!llsec_key_id_equal(key, &devkey->key_id))
+                       continue;
+
+               return container_of(devkey, struct mac802154_llsec_device_key,
+                                   devkey);
+       }
+
+       return NULL;
+}
+
+int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
+                              __le64 dev_addr,
+                              const struct ieee802154_llsec_device_key *key)
+{
+       struct mac802154_llsec_device *dev;
+       struct mac802154_llsec_device_key *devkey;
+
+       dev = llsec_dev_find_long(sec, dev_addr);
+
+       if (!dev)
+               return -ENOENT;
+
+       if (llsec_devkey_find(dev, &key->key_id))
+               return -EEXIST;
+
+       devkey = kmalloc(sizeof(*devkey), GFP_KERNEL);
+       if (!devkey)
+               return -ENOMEM;
+
+       devkey->devkey = *key;
+       list_add_tail_rcu(&devkey->devkey.list, &dev->dev.keys);
+       return 0;
+}
+
+int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
+                              __le64 dev_addr,
+                              const struct ieee802154_llsec_device_key *key)
+{
+       struct mac802154_llsec_device *dev;
+       struct mac802154_llsec_device_key *devkey;
+
+       dev = llsec_dev_find_long(sec, dev_addr);
+
+       if (!dev)
+               return -ENOENT;
+
+       devkey = llsec_devkey_find(dev, &key->key_id);
+       if (!devkey)
+               return -ENOENT;
+
+       list_del_rcu(&devkey->devkey.list);
+       kfree_rcu(devkey, rcu);
+       return 0;
+}
+
+
+
+static struct mac802154_llsec_seclevel*
+llsec_find_seclevel(const struct mac802154_llsec *sec,
+                   const struct ieee802154_llsec_seclevel *sl)
+{
+       struct ieee802154_llsec_seclevel *pos;
+
+       list_for_each_entry(pos, &sec->table.security_levels, list) {
+               if (pos->frame_type != sl->frame_type ||
+                   (pos->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
+                    pos->cmd_frame_id != sl->cmd_frame_id) ||
+                   pos->device_override != sl->device_override ||
+                   pos->sec_levels != sl->sec_levels)
+                       continue;
+
+               return container_of(pos, struct mac802154_llsec_seclevel,
+                                   level);
+       }
+
+       return NULL;
+}
+
+int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
+                                const struct ieee802154_llsec_seclevel *sl)
+{
+       struct mac802154_llsec_seclevel *entry;
+
+       if (llsec_find_seclevel(sec, sl))
+               return -EEXIST;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->level = *sl;
+
+       list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);
+
+       return 0;
+}
+
+int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
+                                const struct ieee802154_llsec_seclevel *sl)
+{
+       struct mac802154_llsec_seclevel *pos;
+
+       pos = llsec_find_seclevel(sec, sl);
+       if (!pos)
+               return -ENOENT;
+
+       list_del_rcu(&pos->level.list);
+       kfree_rcu(pos, rcu);
+
+       return 0;
+}
+
+
+
+static int llsec_recover_addr(struct mac802154_llsec *sec,
+                             struct ieee802154_addr *addr)
+{
+       __le16 caddr = sec->params.coord_shortaddr;
+       addr->pan_id = sec->params.pan_id;
+
+       if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
+               return -EINVAL;
+       } else if (caddr == cpu_to_le16(IEEE802154_ADDR_UNDEF)) {
+               addr->extended_addr = sec->params.coord_hwaddr;
+               addr->mode = IEEE802154_ADDR_LONG;
+       } else {
+               addr->short_addr = sec->params.coord_shortaddr;
+               addr->mode = IEEE802154_ADDR_SHORT;
+       }
+
+       return 0;
+}
+
+static struct mac802154_llsec_key*
+llsec_lookup_key(struct mac802154_llsec *sec,
+                const struct ieee802154_hdr *hdr,
+                const struct ieee802154_addr *addr,
+                struct ieee802154_llsec_key_id *key_id)
+{
+       struct ieee802154_addr devaddr = *addr;
+       u8 key_id_mode = hdr->sec.key_id_mode;
+       struct ieee802154_llsec_key_entry *key_entry;
+       struct mac802154_llsec_key *key;
+
+       if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT &&
+           devaddr.mode == IEEE802154_ADDR_NONE) {
+               if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) {
+                       devaddr.extended_addr = sec->params.coord_hwaddr;
+                       devaddr.mode = IEEE802154_ADDR_LONG;
+               } else if (llsec_recover_addr(sec, &devaddr) < 0) {
+                       return NULL;
+               }
+       }
+
+       list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
+               const struct ieee802154_llsec_key_id *id = &key_entry->id;
+
+               if (!(key_entry->key->frame_types & BIT(hdr->fc.type)))
+                       continue;
+
+               if (id->mode != key_id_mode)
+                       continue;
+
+               if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) {
+                       if (ieee802154_addr_equal(&devaddr, &id->device_addr))
+                               goto found;
+               } else {
+                       if (id->id != hdr->sec.key_id)
+                               continue;
+
+                       if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) ||
+                           (key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
+                            id->short_source == hdr->sec.short_src) ||
+                           (key_id_mode == IEEE802154_SCF_KEY_HW_INDEX &&
+                            id->extended_source == hdr->sec.extended_src))
+                               goto found;
+               }
+       }
+
+       return NULL;
+
+found:
+       key = container_of(key_entry->key, struct mac802154_llsec_key, key);
+       if (key_id)
+               *key_id = key_entry->id;
+       return llsec_key_get(key);
+}
+
+
+static void llsec_geniv(u8 iv[16], __le64 addr,
+                       const struct ieee802154_sechdr *sec)
+{
+       __be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
+       __be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);
+
+       iv[0] = 1; /* L' = L - 1 = 1 */
+       memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
+       memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
+       iv[13] = sec->level;
+       iv[14] = 0;
+       iv[15] = 1;
+}
+
+static int
+llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                       const struct ieee802154_hdr *hdr,
+                       struct mac802154_llsec_key *key)
+{
+       u8 iv[16];
+       struct scatterlist src;
+       struct blkcipher_desc req = {
+               .tfm = key->tfm0,
+               .info = iv,
+               .flags = 0,
+       };
+
+       llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
+       sg_init_one(&src, skb->data, skb->len);
+       return crypto_blkcipher_encrypt_iv(&req, &src, &src, skb->len);
+}
+
+static struct crypto_aead*
+llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
+               if (crypto_aead_authsize(key->tfm[i]) == authlen)
+                       return key->tfm[i];
+
+       BUG();
+}
+
+static int
+llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                     const struct ieee802154_hdr *hdr,
+                     struct mac802154_llsec_key *key)
+{
+       u8 iv[16];
+       unsigned char *data;
+       int authlen, assoclen, datalen, rc;
+       struct scatterlist src, assoc[2], dst[2];
+       struct aead_request *req;
+
+       authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
+       llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
+
+       req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
+       if (!req)
+               return -ENOMEM;
+
+       sg_init_table(assoc, 2);
+       sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
+       assoclen = skb->mac_len;
+
+       data = skb_mac_header(skb) + skb->mac_len;
+       datalen = skb_tail_pointer(skb) - data;
+
+       if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
+               sg_set_buf(&assoc[1], data, 0);
+       } else {
+               sg_set_buf(&assoc[1], data, datalen);
+               assoclen += datalen;
+               datalen = 0;
+       }
+
+       sg_init_one(&src, data, datalen);
+
+       sg_init_table(dst, 2);
+       sg_set_buf(&dst[0], data, datalen);
+       sg_set_buf(&dst[1], skb_put(skb, authlen), authlen);
+
+       aead_request_set_callback(req, 0, NULL, NULL);
+       aead_request_set_assoc(req, assoc, assoclen);
+       aead_request_set_crypt(req, &src, dst, datalen, iv);
+
+       rc = crypto_aead_encrypt(req);
+
+       kfree(req);
+
+       return rc;
+}
+
+static int llsec_do_encrypt(struct sk_buff *skb,
+                           const struct mac802154_llsec *sec,
+                           const struct ieee802154_hdr *hdr,
+                           struct mac802154_llsec_key *key)
+{
+       if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
+               return llsec_do_encrypt_unauth(skb, sec, hdr, key);
+       else
+               return llsec_do_encrypt_auth(skb, sec, hdr, key);
+}
+
+int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
+{
+       struct ieee802154_hdr hdr;
+       int rc, authlen, hlen;
+       struct mac802154_llsec_key *key;
+       u32 frame_ctr;
+
+       hlen = ieee802154_hdr_pull(skb, &hdr);
+
+       if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA)
+               return -EINVAL;
+
+       if (!hdr.fc.security_enabled || hdr.sec.level == 0) {
+               skb_push(skb, hlen);
+               return 0;
+       }
+
+       authlen = ieee802154_sechdr_authtag_len(&hdr.sec);
+
+       if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
+               return -EMSGSIZE;
+
+       rcu_read_lock();
+
+       read_lock_bh(&sec->lock);
+
+       if (!sec->params.enabled) {
+               rc = -EINVAL;
+               goto fail_read;
+       }
+
+       key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL);
+       if (!key) {
+               rc = -ENOKEY;
+               goto fail_read;
+       }
+
+       read_unlock_bh(&sec->lock);
+
+       write_lock_bh(&sec->lock);
+
+       frame_ctr = be32_to_cpu(sec->params.frame_counter);
+       hdr.sec.frame_counter = cpu_to_le32(frame_ctr);
+       if (frame_ctr == 0xFFFFFFFF) {
+               write_unlock_bh(&sec->lock);
+               llsec_key_put(key);
+               rc = -EOVERFLOW;
+               goto fail;
+       }
+
+       sec->params.frame_counter = cpu_to_be32(frame_ctr + 1);
+
+       write_unlock_bh(&sec->lock);
+
+       rcu_read_unlock();
+
+       skb->mac_len = ieee802154_hdr_push(skb, &hdr);
+       skb_reset_mac_header(skb);
+
+       rc = llsec_do_encrypt(skb, sec, &hdr, key);
+       llsec_key_put(key);
+
+       return rc;
+
+fail_read:
+       read_unlock_bh(&sec->lock);
+fail:
+       rcu_read_unlock();
+       return rc;
+}
+
+
+
+static struct mac802154_llsec_device*
+llsec_lookup_dev(struct mac802154_llsec *sec,
+                const struct ieee802154_addr *addr)
+{
+       struct ieee802154_addr devaddr = *addr;
+       struct mac802154_llsec_device *dev = NULL;
+
+       if (devaddr.mode == IEEE802154_ADDR_NONE &&
+           llsec_recover_addr(sec, &devaddr) < 0)
+               return NULL;
+
+       if (devaddr.mode == IEEE802154_ADDR_SHORT) {
+               u32 key = llsec_dev_hash_short(devaddr.short_addr,
+                                              devaddr.pan_id);
+
+               hash_for_each_possible_rcu(sec->devices_short, dev,
+                                          bucket_s, key) {
+                       if (dev->dev.pan_id == devaddr.pan_id &&
+                           dev->dev.short_addr == devaddr.short_addr)
+                               return dev;
+               }
+       } else {
+               u64 key = llsec_dev_hash_long(devaddr.extended_addr);
+
+               hash_for_each_possible_rcu(sec->devices_hw, dev,
+                                          bucket_hw, key) {
+                       if (dev->dev.hwaddr == devaddr.extended_addr)
+                               return dev;
+               }
+       }
+
+       return NULL;
+}
+
+static int
+llsec_lookup_seclevel(const struct mac802154_llsec *sec,
+                     u8 frame_type, u8 cmd_frame_id,
+                     struct ieee802154_llsec_seclevel *rlevel)
+{
+       struct ieee802154_llsec_seclevel *level;
+
+       list_for_each_entry_rcu(level, &sec->table.security_levels, list) {
+               if (level->frame_type == frame_type &&
+                   (frame_type != IEEE802154_FC_TYPE_MAC_CMD ||
+                    level->cmd_frame_id == cmd_frame_id)) {
+                       *rlevel = *level;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
+static int
+llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                       const struct ieee802154_hdr *hdr,
+                       struct mac802154_llsec_key *key, __le64 dev_addr)
+{
+       u8 iv[16];
+       unsigned char *data;
+       int datalen;
+       struct scatterlist src;
+       struct blkcipher_desc req = {
+               .tfm = key->tfm0,
+               .info = iv,
+               .flags = 0,
+       };
+
+       llsec_geniv(iv, dev_addr, &hdr->sec);
+       data = skb_mac_header(skb) + skb->mac_len;
+       datalen = skb_tail_pointer(skb) - data;
+
+       sg_init_one(&src, data, datalen);
+
+       return crypto_blkcipher_decrypt_iv(&req, &src, &src, datalen);
+}
+
+static int
+llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                     const struct ieee802154_hdr *hdr,
+                     struct mac802154_llsec_key *key, __le64 dev_addr)
+{
+       u8 iv[16];
+       unsigned char *data;
+       int authlen, datalen, assoclen, rc;
+       struct scatterlist src, assoc[2];
+       struct aead_request *req;
+
+       authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
+       llsec_geniv(iv, dev_addr, &hdr->sec);
+
+       req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
+       if (!req)
+               return -ENOMEM;
+
+       sg_init_table(assoc, 2);
+       sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
+       assoclen = skb->mac_len;
+
+       data = skb_mac_header(skb) + skb->mac_len;
+       datalen = skb_tail_pointer(skb) - data;
+
+       if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
+               sg_set_buf(&assoc[1], data, 0);
+       } else {
+               sg_set_buf(&assoc[1], data, datalen - authlen);
+               assoclen += datalen - authlen;
+               data += datalen - authlen;
+               datalen = authlen;
+       }
+
+       sg_init_one(&src, data, datalen);
+
+       aead_request_set_callback(req, 0, NULL, NULL);
+       aead_request_set_assoc(req, assoc, assoclen);
+       aead_request_set_crypt(req, &src, &src, datalen, iv);
+
+       rc = crypto_aead_decrypt(req);
+
+       kfree(req);
+       skb_trim(skb, skb->len - authlen);
+
+       return rc;
+}
+
+static int
+llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                const struct ieee802154_hdr *hdr,
+                struct mac802154_llsec_key *key, __le64 dev_addr)
+{
+       if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
+               return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
+       else
+               return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);
+}
+
+static int
+llsec_update_devkey_record(struct mac802154_llsec_device *dev,
+                          const struct ieee802154_llsec_key_id *in_key)
+{
+       struct mac802154_llsec_device_key *devkey;
+
+       devkey = llsec_devkey_find(dev, in_key);
+
+       if (!devkey) {
+               struct mac802154_llsec_device_key *next;
+
+               next = kzalloc(sizeof(*devkey), GFP_ATOMIC);
+               if (!next)
+                       return -ENOMEM;
+
+               next->devkey.key_id = *in_key;
+
+               spin_lock_bh(&dev->lock);
+
+               devkey = llsec_devkey_find(dev, in_key);
+               if (!devkey)
+                       list_add_rcu(&next->devkey.list, &dev->dev.keys);
+               else
+                       kfree(next);
+
+               spin_unlock_bh(&dev->lock);
+       }
+
+       return 0;
+}
+
+static int
+llsec_update_devkey_info(struct mac802154_llsec_device *dev,
+                        const struct ieee802154_llsec_key_id *in_key,
+                        u32 frame_counter)
+{
+       struct mac802154_llsec_device_key *devkey = NULL;
+
+       if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) {
+               devkey = llsec_devkey_find(dev, in_key);
+               if (!devkey)
+                       return -ENOENT;
+       }
+
+       if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RECORD) {
+               int rc = llsec_update_devkey_record(dev, in_key);
+
+               if (rc < 0)
+                       return rc;
+       }
+
+       spin_lock_bh(&dev->lock);
+
+       if ((!devkey && frame_counter < dev->dev.frame_counter) ||
+           (devkey && frame_counter < devkey->devkey.frame_counter)) {
+               spin_unlock_bh(&dev->lock);
+               return -EINVAL;
+       }
+
+       if (devkey)
+               devkey->devkey.frame_counter = frame_counter + 1;
+       else
+               dev->dev.frame_counter = frame_counter + 1;
+
+       spin_unlock_bh(&dev->lock);
+
+       return 0;
+}
+
+int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
+{
+       struct ieee802154_hdr hdr;
+       struct mac802154_llsec_key *key;
+       struct ieee802154_llsec_key_id key_id;
+       struct mac802154_llsec_device *dev;
+       struct ieee802154_llsec_seclevel seclevel;
+       int err;
+       __le64 dev_addr;
+       u32 frame_ctr;
+
+       if (ieee802154_hdr_peek(skb, &hdr) < 0)
+               return -EINVAL;
+       if (!hdr.fc.security_enabled)
+               return 0;
+       if (hdr.fc.version == 0)
+               return -EINVAL;
+
+       read_lock_bh(&sec->lock);
+       if (!sec->params.enabled) {
+               read_unlock_bh(&sec->lock);
+               return -EINVAL;
+       }
+       read_unlock_bh(&sec->lock);
+
+       rcu_read_lock();
+
+       key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id);
+       if (!key) {
+               err = -ENOKEY;
+               goto fail;
+       }
+
+       dev = llsec_lookup_dev(sec, &hdr.source);
+       if (!dev) {
+               err = -EINVAL;
+               goto fail_dev;
+       }
+
+       if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) {
+               err = -EINVAL;
+               goto fail_dev;
+       }
+
+       if (!(seclevel.sec_levels & BIT(hdr.sec.level)) &&
+           (hdr.sec.level == 0 && seclevel.device_override &&
+            !dev->dev.seclevel_exempt)) {
+               err = -EINVAL;
+               goto fail_dev;
+       }
+
+       frame_ctr = le32_to_cpu(hdr.sec.frame_counter);
+
+       if (frame_ctr == 0xffffffff) {
+               err = -EOVERFLOW;
+               goto fail_dev;
+       }
+
+       err = llsec_update_devkey_info(dev, &key_id, frame_ctr);
+       if (err)
+               goto fail_dev;
+
+       dev_addr = dev->dev.hwaddr;
+
+       rcu_read_unlock();
+
+       err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);
+       llsec_key_put(key);
+       return err;
+
+fail_dev:
+       llsec_key_put(key);
+fail:
+       rcu_read_unlock();
+       return err;
+}
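For illustration only, not from the patch: the two entry points above are meant to sit on the data path, so every outgoing and incoming data frame passes through them. A minimal sketch, assuming a sub-interface 'priv' that embeds the new 'sec' state (see the mac802154.h hunk further down):

/* Hypothetical glue showing where encrypt/decrypt would be called. */
static int example_tx_secure(struct mac802154_sub_if_data *priv,
                             struct sk_buff *skb)
{
        /* Fills in the frame counter, pushes the security header and
         * encrypts/authenticates in place; a negative result means the
         * frame must be dropped. */
        int rc = mac802154_llsec_encrypt(&priv->sec, skb);

        if (rc < 0)
                kfree_skb(skb);
        return rc;
}

static int example_rx_secure(struct mac802154_sub_if_data *priv,
                             struct sk_buff *skb)
{
        /* Looks up key and device, checks the frame counter and
         * decrypts/verifies the frame. */
        return mac802154_llsec_decrypt(&priv->sec, skb);
}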
diff --git a/net/mac802154/llsec.h b/net/mac802154/llsec.h
new file mode 100644 (file)
index 0000000..950578e
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2014 Fraunhofer ITWM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
+ */
+
+#ifndef MAC802154_LLSEC_H
+#define MAC802154_LLSEC_H
+
+#include <linux/slab.h>
+#include <linux/hashtable.h>
+#include <linux/crypto.h>
+#include <linux/kref.h>
+#include <linux/spinlock.h>
+#include <net/af_ieee802154.h>
+#include <net/ieee802154_netdev.h>
+
+struct mac802154_llsec_key {
+       struct ieee802154_llsec_key key;
+
+       /* one tfm for each authsize (4/8/16) */
+       struct crypto_aead *tfm[3];
+       struct crypto_blkcipher *tfm0;
+
+       struct kref ref;
+};
+
+struct mac802154_llsec_device_key {
+       struct ieee802154_llsec_device_key devkey;
+
+       struct rcu_head rcu;
+};
+
+struct mac802154_llsec_device {
+       struct ieee802154_llsec_device dev;
+
+       struct hlist_node bucket_s;
+       struct hlist_node bucket_hw;
+
+       /* protects dev.frame_counter and the elements of dev.keys */
+       spinlock_t lock;
+
+       struct rcu_head rcu;
+};
+
+struct mac802154_llsec_seclevel {
+       struct ieee802154_llsec_seclevel level;
+
+       struct rcu_head rcu;
+};
+
+struct mac802154_llsec {
+       struct ieee802154_llsec_params params;
+       struct ieee802154_llsec_table table;
+
+       DECLARE_HASHTABLE(devices_short, 6);
+       DECLARE_HASHTABLE(devices_hw, 6);
+
+       /* protects params, all other fields are fine with RCU */
+       rwlock_t lock;
+};
+
+void mac802154_llsec_init(struct mac802154_llsec *sec);
+void mac802154_llsec_destroy(struct mac802154_llsec *sec);
+
+int mac802154_llsec_get_params(struct mac802154_llsec *sec,
+                              struct ieee802154_llsec_params *params);
+int mac802154_llsec_set_params(struct mac802154_llsec *sec,
+                              const struct ieee802154_llsec_params *params,
+                              int changed);
+
+int mac802154_llsec_key_add(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_key_id *id,
+                           const struct ieee802154_llsec_key *key);
+int mac802154_llsec_key_del(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_key_id *key);
+
+int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_device *dev);
+int mac802154_llsec_dev_del(struct mac802154_llsec *sec,
+                           __le64 device_addr);
+
+int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
+                              __le64 dev_addr,
+                              const struct ieee802154_llsec_device_key *key);
+int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
+                              __le64 dev_addr,
+                              const struct ieee802154_llsec_device_key *key);
+
+int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
+                                const struct ieee802154_llsec_seclevel *sl);
+int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
+                                const struct ieee802154_llsec_seclevel *sl);
+
+int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
+int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
+
+#endif /* MAC802154_LLSEC_H */
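
struct mac802154_llsec_key above carries three AEAD transforms, one per supported MIC length (4, 8 or 16 bytes). Purely as an illustration of indexing a fixed set of per-authsize slots, a direct-mapping helper could look like the sketch below; the helper name and the mapping scheme are assumptions for the example, not the driver's actual lookup.

#include <stdio.h>

/* hypothetical helper: map a MIC length to one of the three tfm slots */
static int authsize_to_slot(int authsize)
{
        switch (authsize) {
        case 4:  return 0;
        case 8:  return 1;
        case 16: return 2;
        default: return -1;     /* unsupported tag length */
        }
}

int main(void)
{
        int sizes[] = { 4, 8, 16, 12 };

        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("authsize %2d -> slot %d\n", sizes[i], authsize_to_slot(sizes[i]));
        return 0;
}
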
index 28ef59c566e6ee78df73edd1246d84d4c2c4d912..762a6f849c6b7d3edf3e8677ee6f2081c10c8fac 100644 (file)
 #ifndef MAC802154_H
 #define MAC802154_H
 
+#include <linux/mutex.h>
+#include <net/mac802154.h>
 #include <net/ieee802154_netdev.h>
 
+#include "llsec.h"
+
 /* mac802154 device private data */
 struct mac802154_priv {
        struct ieee802154_dev hw;
@@ -90,6 +94,13 @@ struct mac802154_sub_if_data {
        u8 bsn;
        /* MAC DSN field */
        u8 dsn;
+
+       /* protects sec from concurrent access by netlink. Access by
+        * encrypt/decrypt/header_create is safe without additional protection.
+        */
+       struct mutex sec_mtx;
+
+       struct mac802154_llsec sec;
 };
 
 #define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
@@ -125,4 +136,37 @@ int mac802154_set_mac_params(struct net_device *dev,
 void mac802154_get_mac_params(struct net_device *dev,
                              struct ieee802154_mac_params *params);
 
+int mac802154_get_params(struct net_device *dev,
+                        struct ieee802154_llsec_params *params);
+int mac802154_set_params(struct net_device *dev,
+                        const struct ieee802154_llsec_params *params,
+                        int changed);
+
+int mac802154_add_key(struct net_device *dev,
+                     const struct ieee802154_llsec_key_id *id,
+                     const struct ieee802154_llsec_key *key);
+int mac802154_del_key(struct net_device *dev,
+                     const struct ieee802154_llsec_key_id *id);
+
+int mac802154_add_dev(struct net_device *dev,
+                     const struct ieee802154_llsec_device *llsec_dev);
+int mac802154_del_dev(struct net_device *dev, __le64 dev_addr);
+
+int mac802154_add_devkey(struct net_device *dev,
+                        __le64 device_addr,
+                        const struct ieee802154_llsec_device_key *key);
+int mac802154_del_devkey(struct net_device *dev,
+                        __le64 device_addr,
+                        const struct ieee802154_llsec_device_key *key);
+
+int mac802154_add_seclevel(struct net_device *dev,
+                          const struct ieee802154_llsec_seclevel *sl);
+int mac802154_del_seclevel(struct net_device *dev,
+                          const struct ieee802154_llsec_seclevel *sl);
+
+void mac802154_lock_table(struct net_device *dev);
+void mac802154_get_table(struct net_device *dev,
+                        struct ieee802154_llsec_table **t);
+void mac802154_unlock_table(struct net_device *dev);
+
 #endif /* MAC802154_H */
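
The prototypes added to mac802154.h are the per-interface entry points that a later file in this diff collects into an ieee802154_llsec_ops table of function pointers, and callers test that ops pointer before dereferencing it. A small user-space sketch of such an optional ops table follows; all names in it are invented for the example.

#include <stdio.h>

struct llsec_ops {
        int (*set_params)(int pan_id);
        int (*add_key)(int key_id);
};

static int demo_set_params(int pan_id)
{
        printf("set_params: pan_id=0x%04x\n", pan_id);
        return 0;
}

/* only set_params is provided; add_key is deliberately left NULL */
static const struct llsec_ops demo_ops = { .set_params = demo_set_params };

static int start_device(const struct llsec_ops *ops)
{
        int rc = 0;

        /* mirror the "if (ops->llsec)" style guard: only call what exists */
        if (ops && ops->set_params)
                rc = ops->set_params(0x1234);
        return rc;
}

int main(void)
{
        return start_device(&demo_ops);
}
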
index d40c0928bc622d5802c9dcd1bdb740150ee06744..bf809131eef776209040a8496ed1e2214e6bdebe 100644 (file)
@@ -40,6 +40,9 @@ static int mac802154_mlme_start_req(struct net_device *dev,
                                    u8 pan_coord, u8 blx,
                                    u8 coord_realign)
 {
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       int rc = 0;
+
        BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
 
        mac802154_dev_set_pan_id(dev, addr->pan_id);
@@ -47,12 +50,31 @@ static int mac802154_mlme_start_req(struct net_device *dev,
        mac802154_dev_set_ieee_addr(dev);
        mac802154_dev_set_page_channel(dev, page, channel);
 
+       if (ops->llsec) {
+               struct ieee802154_llsec_params params;
+               int changed = 0;
+
+               params.coord_shortaddr = addr->short_addr;
+               changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
+
+               params.pan_id = addr->pan_id;
+               changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+
+               params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
+               changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+
+               params.coord_hwaddr = params.hwaddr;
+               changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
+
+               rc = ops->llsec->set_params(dev, &params, changed);
+       }
+
        /* FIXME: add validation that unused parameters are sane
         * for SoftMAC
         */
        ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
 
-       return 0;
+       return rc;
 }
 
 static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
@@ -64,6 +86,22 @@ static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
        return to_phy(get_device(&priv->hw->phy->dev));
 }
 
+static struct ieee802154_llsec_ops mac802154_llsec_ops = {
+       .get_params = mac802154_get_params,
+       .set_params = mac802154_set_params,
+       .add_key = mac802154_add_key,
+       .del_key = mac802154_del_key,
+       .add_dev = mac802154_add_dev,
+       .del_dev = mac802154_del_dev,
+       .add_devkey = mac802154_add_devkey,
+       .del_devkey = mac802154_del_devkey,
+       .add_seclevel = mac802154_add_seclevel,
+       .del_seclevel = mac802154_del_seclevel,
+       .lock_table = mac802154_lock_table,
+       .get_table = mac802154_get_table,
+       .unlock_table = mac802154_unlock_table,
+};
+
 struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
        .get_phy = mac802154_get_phy,
 };
@@ -75,6 +113,8 @@ struct ieee802154_mlme_ops mac802154_mlme_wpan = {
        .get_short_addr = mac802154_dev_get_short_addr,
        .get_dsn = mac802154_dev_get_dsn,
 
+       .llsec = &mac802154_llsec_ops,
+
        .set_mac_params = mac802154_set_mac_params,
        .get_mac_params = mac802154_get_mac_params,
 };
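
mac802154_mlme_start_req above fills an ieee802154_llsec_params structure one field at a time and ORs the corresponding IEEE802154_LLSEC_PARAM_* bit into changed, so set_params() only touches the fields the caller actually provided. A user-space sketch of that partial-update pattern follows; the flag names and fields are illustrative only.

#include <stdio.h>
#include <stdint.h>

#define PARAM_PAN_ID     (1u << 0)
#define PARAM_SHORT_ADDR (1u << 1)

struct params {
        uint16_t pan_id;
        uint16_t short_addr;
};

static struct params current_params;

/* apply only the fields whose bit is set in 'changed' */
static void set_params(const struct params *p, unsigned int changed)
{
        if (changed & PARAM_PAN_ID)
                current_params.pan_id = p->pan_id;
        if (changed & PARAM_SHORT_ADDR)
                current_params.short_addr = p->short_addr;
}

int main(void)
{
        struct params p = { 0 };
        unsigned int changed = 0;

        p.pan_id = 0x1234;
        changed |= PARAM_PAN_ID;        /* only the PAN ID is being updated */

        set_params(&p, changed);
        printf("pan_id=0x%04x short_addr=0x%04x\n",
               current_params.pan_id, current_params.short_addr);
        return 0;
}
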
index f0991f2344d403f3b1ed3a1f44fc2aebf50c72b4..15aa2f2b03a78c29138db43c08073c4ba2817e54 100644 (file)
@@ -213,3 +213,190 @@ void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
        } else
                mutex_unlock(&priv->hw->phy->pib_lock);
 }
+
+
+int mac802154_get_params(struct net_device *dev,
+                        struct ieee802154_llsec_params *params)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_get_params(&priv->sec, params);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_set_params(struct net_device *dev,
+                        const struct ieee802154_llsec_params *params,
+                        int changed)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_set_params(&priv->sec, params, changed);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+int mac802154_add_key(struct net_device *dev,
+                     const struct ieee802154_llsec_key_id *id,
+                     const struct ieee802154_llsec_key *key)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_key_add(&priv->sec, id, key);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_del_key(struct net_device *dev,
+                     const struct ieee802154_llsec_key_id *id)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_key_del(&priv->sec, id);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+int mac802154_add_dev(struct net_device *dev,
+                     const struct ieee802154_llsec_device *llsec_dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_dev_add(&priv->sec, llsec_dev);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_del_dev(struct net_device *dev, __le64 dev_addr)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_dev_del(&priv->sec, dev_addr);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+int mac802154_add_devkey(struct net_device *dev,
+                        __le64 device_addr,
+                        const struct ieee802154_llsec_device_key *key)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_devkey_add(&priv->sec, device_addr, key);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_del_devkey(struct net_device *dev,
+                        __le64 device_addr,
+                        const struct ieee802154_llsec_device_key *key)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_devkey_del(&priv->sec, device_addr, key);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+int mac802154_add_seclevel(struct net_device *dev,
+                          const struct ieee802154_llsec_seclevel *sl)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_seclevel_add(&priv->sec, sl);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_del_seclevel(struct net_device *dev,
+                          const struct ieee802154_llsec_seclevel *sl)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_seclevel_del(&priv->sec, sl);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+void mac802154_lock_table(struct net_device *dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+}
+
+void mac802154_get_table(struct net_device *dev,
+                        struct ieee802154_llsec_table **t)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       *t = &priv->sec.table;
+}
+
+void mac802154_unlock_table(struct net_device *dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_unlock(&priv->sec_mtx);
+}
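
mac802154_get_table returns a pointer into priv->sec.table, so a caller is expected to bracket any walk of the table with mac802154_lock_table()/mac802154_unlock_table(). A user-space analogy of that contract, with a pthread mutex standing in for sec_mtx and made-up table contents:

#include <pthread.h>
#include <stdio.h>

struct table {
        int keys[4];
        int nkeys;
};

static pthread_mutex_t sec_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct table sec_table = { .keys = { 1, 2, 3 }, .nkeys = 3 };

static void lock_table(void)   { pthread_mutex_lock(&sec_mtx); }
static void unlock_table(void) { pthread_mutex_unlock(&sec_mtx); }

/* returns a pointer that is only valid while the table lock is held */
static struct table *get_table(void) { return &sec_table; }

int main(void)
{
        struct table *t;

        lock_table();
        t = get_table();
        for (int i = 0; i < t->nkeys; i++)
                printf("key[%d] = %d\n", i, t->keys[i]);
        unlock_table();
        return 0;
}
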
index 03855b0677ccf8efcb0819591bae63bd8609c693..0597b96dc9bac1e9150864d4e87c9566c60c149b 100644 (file)
@@ -59,8 +59,6 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
        skb->protocol = htons(ETH_P_IEEE802154);
        skb_reset_mac_header(skb);
 
-       BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
-
        if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
                u16 crc;
 
index 1df7a6a573865b4add87261300cc1d7ff01fa243..23bc91cf99c465232b0b2406f29a9bcd2b539b98 100644 (file)
 
 #include "mac802154.h"
 
+static int mac802154_wpan_update_llsec(struct net_device *dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       int rc = 0;
+
+       if (ops->llsec) {
+               struct ieee802154_llsec_params params;
+               int changed = 0;
+
+               params.pan_id = priv->pan_id;
+               changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+
+               params.hwaddr = priv->extended_addr;
+               changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+
+               rc = ops->llsec->set_params(dev, &params, changed);
+       }
+
+       return rc;
+}
+
 static int
 mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
@@ -81,7 +103,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                priv->pan_id = cpu_to_le16(sa->addr.pan_id);
                priv->short_addr = cpu_to_le16(sa->addr.short_addr);
 
-               err = 0;
+               err = mac802154_wpan_update_llsec(dev);
                break;
        }
 
@@ -99,7 +121,7 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
        /* FIXME: validate addr */
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        mac802154_dev_set_ieee_addr(dev);
-       return 0;
+       return mac802154_wpan_update_llsec(dev);
 }
 
 int mac802154_set_mac_params(struct net_device *dev,
@@ -124,7 +146,7 @@ void mac802154_get_mac_params(struct net_device *dev,
        mutex_unlock(&priv->hw->slaves_mtx);
 }
 
-int mac802154_wpan_open(struct net_device *dev)
+static int mac802154_wpan_open(struct net_device *dev)
 {
        int rc;
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -183,6 +205,38 @@ out:
        return rc;
 }
 
+static int mac802154_set_header_security(struct mac802154_sub_if_data *priv,
+                                        struct ieee802154_hdr *hdr,
+                                        const struct ieee802154_mac_cb *cb)
+{
+       struct ieee802154_llsec_params params;
+       u8 level;
+
+       mac802154_llsec_get_params(&priv->sec, &params);
+
+       if (!params.enabled && cb->secen_override && cb->secen)
+               return -EINVAL;
+       if (!params.enabled ||
+           (cb->secen_override && !cb->secen) ||
+           !params.out_level)
+               return 0;
+       if (cb->seclevel_override && !cb->seclevel)
+               return -EINVAL;
+
+       level = cb->seclevel_override ? cb->seclevel : params.out_level;
+
+       hdr->fc.security_enabled = 1;
+       hdr->sec.level = level;
+       hdr->sec.key_id_mode = params.out_key.mode;
+       if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX)
+               hdr->sec.short_src = params.out_key.short_source;
+       else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX)
+               hdr->sec.extended_src = params.out_key.extended_source;
+       hdr->sec.key_id = params.out_key.id;
+
+       return 0;
+}
+
 static int mac802154_header_create(struct sk_buff *skb,
                                   struct net_device *dev,
                                   unsigned short type,
@@ -192,15 +246,20 @@ static int mac802154_header_create(struct sk_buff *skb,
 {
        struct ieee802154_hdr hdr;
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       struct ieee802154_mac_cb *cb = mac_cb(skb);
        int hlen;
 
        if (!daddr)
                return -EINVAL;
 
        memset(&hdr.fc, 0, sizeof(hdr.fc));
-       hdr.fc.type = mac_cb_type(skb);
-       hdr.fc.security_enabled = mac_cb_is_secen(skb);
-       hdr.fc.ack_request = mac_cb_is_ackreq(skb);
+       hdr.fc.type = cb->type;
+       hdr.fc.security_enabled = cb->secen;
+       hdr.fc.ack_request = cb->ackreq;
+       hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+
+       if (mac802154_set_header_security(priv, &hdr, cb) < 0)
+               return -EINVAL;
 
        if (!saddr) {
                spin_lock_bh(&priv->mib_lock);
@@ -231,7 +290,7 @@ static int mac802154_header_create(struct sk_buff *skb,
        skb_reset_mac_header(skb);
        skb->mac_len = hlen;
 
-       if (hlen + len + 2 > dev->mtu)
+       if (len > ieee802154_max_payload(&hdr))
                return -EMSGSIZE;
 
        return hlen;
@@ -257,6 +316,7 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mac802154_sub_if_data *priv;
        u8 chan, page;
+       int rc;
 
        priv = netdev_priv(dev);
 
@@ -272,6 +332,13 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       rc = mac802154_llsec_encrypt(&priv->sec, skb);
+       if (rc) {
+               pr_warn("encryption failed: %i\n", rc);
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
        skb->skb_iif = dev->ifindex;
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
@@ -292,6 +359,15 @@ static const struct net_device_ops mac802154_wpan_ops = {
        .ndo_set_mac_address    = mac802154_wpan_mac_addr,
 };
 
+static void mac802154_wpan_free(struct net_device *dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+       mac802154_llsec_destroy(&priv->sec);
+
+       free_netdev(dev);
+}
+
 void mac802154_wpan_setup(struct net_device *dev)
 {
        struct mac802154_sub_if_data *priv;
@@ -301,14 +377,14 @@ void mac802154_wpan_setup(struct net_device *dev)
 
        dev->hard_header_len    = MAC802154_FRAME_HARD_HEADER_LEN;
        dev->header_ops         = &mac802154_header_ops;
-       dev->needed_tailroom    = 2; /* FCS */
+       dev->needed_tailroom    = 2 + 16; /* FCS + MIC */
        dev->mtu                = IEEE802154_MTU;
        dev->tx_queue_len       = 300;
        dev->type               = ARPHRD_IEEE802154;
        dev->flags              = IFF_NOARP | IFF_BROADCAST;
        dev->watchdog_timeo     = 0;
 
-       dev->destructor         = free_netdev;
+       dev->destructor         = mac802154_wpan_free;
        dev->netdev_ops         = &mac802154_wpan_ops;
        dev->ml_priv            = &mac802154_mlme_wpan;
 
@@ -319,6 +395,7 @@ void mac802154_wpan_setup(struct net_device *dev)
        priv->page = 0;
 
        spin_lock_init(&priv->mib_lock);
+       mutex_init(&priv->sec_mtx);
 
        get_random_bytes(&priv->bsn, 1);
        get_random_bytes(&priv->dsn, 1);
@@ -331,6 +408,8 @@ void mac802154_wpan_setup(struct net_device *dev)
 
        priv->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
        priv->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+
+       mac802154_llsec_init(&priv->sec);
 }
 
 static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
@@ -339,9 +418,11 @@ static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
 }
 
 static int
-mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
+mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
+                     const struct ieee802154_hdr *hdr)
 {
        __le16 span, sshort;
+       int rc;
 
        pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
 
@@ -388,15 +469,21 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
 
        skb->dev = sdata->dev;
 
+       rc = mac802154_llsec_decrypt(&sdata->sec, skb);
+       if (rc) {
+               pr_debug("decryption failed: %i\n", rc);
+               return NET_RX_DROP;
+       }
+
        sdata->dev->stats.rx_packets++;
        sdata->dev->stats.rx_bytes += skb->len;
 
-       switch (mac_cb_type(skb)) {
+       switch (mac_cb(skb)->type) {
        case IEEE802154_FC_TYPE_DATA:
                return mac802154_process_data(sdata->dev, skb);
        default:
                pr_warn("ieee802154: bad frame received (type = %d)\n",
-                       mac_cb_type(skb));
+                       mac_cb(skb)->type);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -419,62 +506,58 @@ static void mac802154_print_addr(const char *name,
        }
 }
 
-static int mac802154_parse_frame_start(struct sk_buff *skb)
+static int mac802154_parse_frame_start(struct sk_buff *skb,
+                                      struct ieee802154_hdr *hdr)
 {
        int hlen;
-       struct ieee802154_hdr hdr;
+       struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 
-       hlen = ieee802154_hdr_pull(skb, &hdr);
+       hlen = ieee802154_hdr_pull(skb, hdr);
        if (hlen < 0)
                return -EINVAL;
 
        skb->mac_len = hlen;
 
-       pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr.fc),
-                hdr.seq);
-
-       mac_cb(skb)->flags = hdr.fc.type;
+       pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc),
+                hdr->seq);
 
-       if (hdr.fc.ack_request)
-               mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
-       if (hdr.fc.security_enabled)
-               mac_cb(skb)->flags |= MAC_CB_FLAG_SECEN;
+       cb->type = hdr->fc.type;
+       cb->ackreq = hdr->fc.ack_request;
+       cb->secen = hdr->fc.security_enabled;
 
-       mac802154_print_addr("destination", &hdr.dest);
-       mac802154_print_addr("source", &hdr.source);
+       mac802154_print_addr("destination", &hdr->dest);
+       mac802154_print_addr("source", &hdr->source);
 
-       mac_cb(skb)->source = hdr.source;
-       mac_cb(skb)->dest = hdr.dest;
+       cb->source = hdr->source;
+       cb->dest = hdr->dest;
 
-       if (hdr.fc.security_enabled) {
+       if (hdr->fc.security_enabled) {
                u64 key;
 
-               pr_debug("seclevel %i\n", hdr.sec.level);
+               pr_debug("seclevel %i\n", hdr->sec.level);
 
-               switch (hdr.sec.key_id_mode) {
+               switch (hdr->sec.key_id_mode) {
                case IEEE802154_SCF_KEY_IMPLICIT:
                        pr_debug("implicit key\n");
                        break;
 
                case IEEE802154_SCF_KEY_INDEX:
-                       pr_debug("key %02x\n", hdr.sec.key_id);
+                       pr_debug("key %02x\n", hdr->sec.key_id);
                        break;
 
                case IEEE802154_SCF_KEY_SHORT_INDEX:
                        pr_debug("key %04x:%04x %02x\n",
-                                le32_to_cpu(hdr.sec.short_src) >> 16,
-                                le32_to_cpu(hdr.sec.short_src) & 0xffff,
-                                hdr.sec.key_id);
+                                le32_to_cpu(hdr->sec.short_src) >> 16,
+                                le32_to_cpu(hdr->sec.short_src) & 0xffff,
+                                hdr->sec.key_id);
                        break;
 
                case IEEE802154_SCF_KEY_HW_INDEX:
-                       key = swab64((__force u64) hdr.sec.extended_src);
+                       key = swab64((__force u64) hdr->sec.extended_src);
                        pr_debug("key source %8phC %02x\n", &key,
-                                hdr.sec.key_id);
+                                hdr->sec.key_id);
                        break;
                }
-
-               return -EINVAL;
        }
 
        return 0;
@@ -485,8 +568,9 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
        int ret;
        struct sk_buff *sskb;
        struct mac802154_sub_if_data *sdata;
+       struct ieee802154_hdr hdr;
 
-       ret = mac802154_parse_frame_start(skb);
+       ret = mac802154_parse_frame_start(skb, &hdr);
        if (ret) {
                pr_debug("got invalid frame\n");
                return;
@@ -499,7 +583,7 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
 
                sskb = skb_clone(skb, GFP_ATOMIC);
                if (sskb)
-                       mac802154_subif_frame(sdata, sskb);
+                       mac802154_subif_frame(sdata, sskb, &hdr);
        }
        rcu_read_unlock();
 }
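
mac802154_set_header_security, added above, decides per outgoing frame whether and at what level to secure the header: socket-level overrides may switch security off, but they may neither request security while the interface has it disabled nor ask for level 0. The same decision rules, restated as a small standalone function (structures trimmed to the fields the check uses; -1 stands in for -EINVAL):

#include <stdio.h>
#include <stdbool.h>

struct sec_params { bool enabled; int out_level; };
struct frame_cb   { bool secen_override, secen, seclevel_override; int seclevel; };

/* returns the level to use, 0 for "send unsecured", -1 for an invalid request */
static int pick_seclevel(const struct sec_params *p, const struct frame_cb *cb)
{
        if (!p->enabled && cb->secen_override && cb->secen)
                return -1;      /* security requested but disabled on the interface */
        if (!p->enabled || (cb->secen_override && !cb->secen) || !p->out_level)
                return 0;       /* send the frame unsecured */
        if (cb->seclevel_override && !cb->seclevel)
                return -1;      /* an explicit level 0 override is rejected */

        return cb->seclevel_override ? cb->seclevel : p->out_level;
}

int main(void)
{
        struct sec_params on  = { .enabled = true,  .out_level = 5 };
        struct sec_params off = { .enabled = false, .out_level = 5 };
        struct frame_cb plain = { 0 };
        struct frame_cb force = { .secen_override = true, .secen = true };

        printf("%d %d %d\n",
               pick_seclevel(&on, &plain),      /* 5: interface default */
               pick_seclevel(&on, &force),      /* 5: override agrees */
               pick_seclevel(&off, &force));    /* -1: invalid request */
        return 0;
}
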
index c47444e4cf8ccc9977fa0622689b4fb55799ff4b..487b55e04337b3f83b76eb7faa0b5d4e909d41c5 100644 (file)
@@ -562,7 +562,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_send_check(iph);
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
        rcu_read_unlock();
@@ -590,7 +590,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                goto tx_error;
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
        rcu_read_unlock();
@@ -684,7 +684,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
           MTU problem. */
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
        rcu_read_unlock();
@@ -774,7 +774,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
           MTU problem. */
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
        rcu_read_unlock();
@@ -886,7 +886,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_select_ident(skb, &rt->dst, NULL);
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ret = ip_vs_tunnel_xmit_prepare(skb, cp);
        if (ret == NF_ACCEPT)
@@ -974,7 +974,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        iph->hop_limit          =       old_iph->hop_limit;
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ret = ip_vs_tunnel_xmit_prepare(skb, cp);
        if (ret == NF_ACCEPT)
@@ -1023,7 +1023,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_send_check(ip_hdr(skb));
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
        rcu_read_unlock();
@@ -1060,7 +1060,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        }
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
        rcu_read_unlock();
@@ -1157,7 +1157,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_vs_nat_icmp(skb, pp, cp, 0);
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
        rcu_read_unlock();
@@ -1249,7 +1249,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_vs_nat_icmp_v6(skb, pp, cp, 0);
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
        rcu_read_unlock();
index ccc46fa5edbce5e52710a22ae502e49a0f59e0a5..58579634427d2fcbf7f35556424a959697b8655e 100644 (file)
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 #ifdef CONFIG_NF_NAT_NEEDED
        int ret;
 
+       if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+               return 0;
+
        ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
                                        cda[CTA_NAT_DST]);
        if (ret < 0)
index 3fd159db9f06bc31d3da81d8cb3e5e8f2bb3ebc2..047884776586db680a9b9547ceb94e9a2035bf9c 100644 (file)
@@ -88,6 +88,45 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
        return ERR_PTR(-EAFNOSUPPORT);
 }
 
+static void nft_ctx_init(struct nft_ctx *ctx,
+                        const struct sk_buff *skb,
+                        const struct nlmsghdr *nlh,
+                        struct nft_af_info *afi,
+                        struct nft_table *table,
+                        struct nft_chain *chain,
+                        const struct nlattr * const *nla)
+{
+       ctx->net        = sock_net(skb->sk);
+       ctx->afi        = afi;
+       ctx->table      = table;
+       ctx->chain      = chain;
+       ctx->nla        = nla;
+       ctx->portid     = NETLINK_CB(skb).portid;
+       ctx->report     = nlmsg_report(nlh);
+       ctx->seq        = nlh->nlmsg_seq;
+}
+
+static struct nft_trans *nft_trans_alloc(struct nft_ctx *ctx, int msg_type,
+                                        u32 size)
+{
+       struct nft_trans *trans;
+
+       trans = kzalloc(sizeof(struct nft_trans) + size, GFP_KERNEL);
+       if (trans == NULL)
+               return NULL;
+
+       trans->msg_type = msg_type;
+       trans->ctx      = *ctx;
+
+       return trans;
+}
+
+static void nft_trans_destroy(struct nft_trans *trans)
+{
+       list_del(&trans->list);
+       kfree(trans);
+}
+
 /*
  * Tables
  */
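
The nft_ctx/nft_trans helpers introduced above snapshot the request context (net, family, table, chain, netlink portid and sequence number) into each transaction object and queue it on a per-namespace commit list; nft_trans_destroy unlinks and frees a single entry. A user-space sketch of that allocate-snapshot-queue pattern, using a plain singly linked list instead of the kernel's list_head (the kernel appends at the tail to preserve ordering; the sketch prepends to stay short):

#include <stdlib.h>
#include <stdio.h>

struct ctx {                    /* snapshot of the request that created the change */
        int family;
        unsigned int portid, seq;
};

struct trans {
        struct trans *next;
        int msg_type;           /* e.g. NEWTABLE, DELTABLE, ... */
        struct ctx ctx;         /* copied, so the change can be replayed at commit */
};

static struct trans *commit_list;

static struct trans *trans_alloc(const struct ctx *ctx, int msg_type)
{
        struct trans *t = calloc(1, sizeof(*t));

        if (!t)
                return NULL;
        t->msg_type = msg_type;
        t->ctx = *ctx;                  /* snapshot, like trans->ctx = *ctx */
        t->next = commit_list;          /* queue for a later commit step */
        commit_list = t;
        return t;
}

static void trans_destroy(struct trans *t)
{
        struct trans **p;

        for (p = &commit_list; *p; p = &(*p)->next) {
                if (*p == t) {
                        *p = t->next;   /* unlink ... */
                        free(t);        /* ... and free */
                        return;
                }
        }
}

int main(void)
{
        struct ctx c = { .family = 2, .portid = 100, .seq = 7 };
        struct trans *t = trans_alloc(&c, 1);

        if (!t)
                return 1;
        printf("queued msg_type=%d portid=%u\n", t->msg_type, t->ctx.portid);
        trans_destroy(t);
        printf("list empty: %s\n", commit_list ? "no" : "yes");
        return 0;
}
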
@@ -197,20 +236,13 @@ nla_put_failure:
        return -1;
 }
 
-static int nf_tables_table_notify(const struct sk_buff *oskb,
-                                 const struct nlmsghdr *nlh,
-                                 const struct nft_table *table,
-                                 int event, int family)
+static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
 {
        struct sk_buff *skb;
-       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
-       u32 seq = nlh ? nlh->nlmsg_seq : 0;
-       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
-       bool report;
        int err;
 
-       report = nlh ? nlmsg_report(nlh) : false;
-       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+       if (!ctx->report &&
+           !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
                return 0;
 
        err = -ENOBUFS;
@@ -218,18 +250,20 @@ static int nf_tables_table_notify(const struct sk_buff *oskb,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_table_info(skb, portid, seq, event, 0,
-                                       family, table);
+       err = nf_tables_fill_table_info(skb, ctx->portid, ctx->seq, event, 0,
+                                       ctx->afi->family, ctx->table);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
        }
 
-       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
-                            GFP_KERNEL);
+       err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                            ctx->report, GFP_KERNEL);
 err:
-       if (err < 0)
-               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       if (err < 0) {
+               nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                                 err);
+       }
        return err;
 }
 
@@ -269,6 +303,9 @@ done:
        return skb->len;
 }
 
+/* Internal table flags */
+#define NFT_TABLE_INACTIVE     (1 << 15)
+
 static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
@@ -295,6 +332,8 @@ static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb2)
@@ -343,7 +382,7 @@ err:
        return err;
 }
 
-static int nf_tables_table_disable(const struct nft_af_info *afi,
+static void nf_tables_table_disable(const struct nft_af_info *afi,
                                   struct nft_table *table)
 {
        struct nft_chain *chain;
@@ -353,45 +392,63 @@ static int nf_tables_table_disable(const struct nft_af_info *afi,
                        nf_unregister_hooks(nft_base_chain(chain)->ops,
                                            afi->nops);
        }
-
-       return 0;
 }
 
-static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
-                             const struct nlmsghdr *nlh,
-                             const struct nlattr * const nla[],
-                             struct nft_af_info *afi, struct nft_table *table)
+static int nf_tables_updtable(struct nft_ctx *ctx)
 {
-       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       int family = nfmsg->nfgen_family, ret = 0;
+       struct nft_trans *trans;
+       u32 flags;
+       int ret = 0;
 
-       if (nla[NFTA_TABLE_FLAGS]) {
-               u32 flags;
+       if (!ctx->nla[NFTA_TABLE_FLAGS])
+               return 0;
 
-               flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
-               if (flags & ~NFT_TABLE_F_DORMANT)
-                       return -EINVAL;
+       flags = ntohl(nla_get_be32(ctx->nla[NFTA_TABLE_FLAGS]));
+       if (flags & ~NFT_TABLE_F_DORMANT)
+               return -EINVAL;
+
+       trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
+                               sizeof(struct nft_trans_table));
+       if (trans == NULL)
+               return -ENOMEM;
 
-               if ((flags & NFT_TABLE_F_DORMANT) &&
-                   !(table->flags & NFT_TABLE_F_DORMANT)) {
-                       ret = nf_tables_table_disable(afi, table);
-                       if (ret >= 0)
-                               table->flags |= NFT_TABLE_F_DORMANT;
-               } else if (!(flags & NFT_TABLE_F_DORMANT) &&
-                          table->flags & NFT_TABLE_F_DORMANT) {
-                       ret = nf_tables_table_enable(afi, table);
-                       if (ret >= 0)
-                               table->flags &= ~NFT_TABLE_F_DORMANT;
+       if ((flags & NFT_TABLE_F_DORMANT) &&
+           !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
+               nft_trans_table_enable(trans) = false;
+       } else if (!(flags & NFT_TABLE_F_DORMANT) &&
+                  ctx->table->flags & NFT_TABLE_F_DORMANT) {
+               ret = nf_tables_table_enable(ctx->afi, ctx->table);
+               if (ret >= 0) {
+                       ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+                       nft_trans_table_enable(trans) = true;
                }
-               if (ret < 0)
-                       goto err;
        }
+       if (ret < 0)
+               goto err;
 
-       nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+       nft_trans_table_update(trans) = true;
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+       return 0;
 err:
+       nft_trans_destroy(trans);
        return ret;
 }
 
+static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
+{
+       struct nft_trans *trans;
+
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table));
+       if (trans == NULL)
+               return -ENOMEM;
+
+       if (msg_type == NFT_MSG_NEWTABLE)
+               ctx->table->flags |= NFT_TABLE_INACTIVE;
+
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+       return 0;
+}
+
 static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
@@ -403,6 +460,8 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
        u32 flags = 0;
+       struct nft_ctx ctx;
+       int err;
 
        afi = nf_tables_afinfo_lookup(net, family, true);
        if (IS_ERR(afi))
@@ -417,11 +476,15 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        }
 
        if (table != NULL) {
+               if (table->flags & NFT_TABLE_INACTIVE)
+                       return -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_EXCL)
                        return -EEXIST;
                if (nlh->nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;
-               return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table);
+
+               nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+               return nf_tables_updtable(&ctx);
        }
 
        if (nla[NFTA_TABLE_FLAGS]) {
@@ -444,8 +507,14 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        INIT_LIST_HEAD(&table->sets);
        table->flags = flags;
 
+       nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+       err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
+       if (err < 0) {
+               kfree(table);
+               module_put(afi->owner);
+               return err;
+       }
        list_add_tail(&table->list, &afi->tables);
-       nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
        return 0;
 }
 
@@ -457,7 +526,8 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
        struct nft_af_info *afi;
        struct nft_table *table;
        struct net *net = sock_net(skb->sk);
-       int family = nfmsg->nfgen_family;
+       int family = nfmsg->nfgen_family, err;
+       struct nft_ctx ctx;
 
        afi = nf_tables_afinfo_lookup(net, family, false);
        if (IS_ERR(afi))
@@ -466,17 +536,27 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        if (!list_empty(&table->chains) || !list_empty(&table->sets))
                return -EBUSY;
 
+       nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+       err = nft_trans_table_add(&ctx, NFT_MSG_DELTABLE);
+       if (err < 0)
+               return err;
+
        list_del(&table->list);
-       nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
-       kfree(table);
-       module_put(afi->owner);
        return 0;
 }
 
+static void nf_tables_table_destroy(struct nft_ctx *ctx)
+{
+       kfree(ctx->table);
+       module_put(ctx->afi->owner);
+}
+
 int nft_register_chain_type(const struct nf_chain_type *ctype)
 {
        int err = 0;
@@ -541,7 +621,7 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
                                    .len = NFT_CHAIN_MAXNAMELEN - 1 },
        [NFTA_CHAIN_HOOK]       = { .type = NLA_NESTED },
        [NFTA_CHAIN_POLICY]     = { .type = NLA_U32 },
-       [NFTA_CHAIN_TYPE]       = { .type = NLA_NUL_STRING },
+       [NFTA_CHAIN_TYPE]       = { .type = NLA_STRING },
        [NFTA_CHAIN_COUNTERS]   = { .type = NLA_NESTED },
 };
 
@@ -637,21 +717,13 @@ nla_put_failure:
        return -1;
 }
 
-static int nf_tables_chain_notify(const struct sk_buff *oskb,
-                                 const struct nlmsghdr *nlh,
-                                 const struct nft_table *table,
-                                 const struct nft_chain *chain,
-                                 int event, int family)
+static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
 {
        struct sk_buff *skb;
-       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
-       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
-       u32 seq = nlh ? nlh->nlmsg_seq : 0;
-       bool report;
        int err;
 
-       report = nlh ? nlmsg_report(nlh) : false;
-       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+       if (!ctx->report &&
+           !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
                return 0;
 
        err = -ENOBUFS;
@@ -659,18 +731,21 @@ static int nf_tables_chain_notify(const struct sk_buff *oskb,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_chain_info(skb, portid, seq, event, 0, family,
-                                       table, chain);
+       err = nf_tables_fill_chain_info(skb, ctx->portid, ctx->seq, event, 0,
+                                       ctx->afi->family, ctx->table,
+                                       ctx->chain);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
        }
 
-       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
-                            GFP_KERNEL);
+       err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                            ctx->report, GFP_KERNEL);
 err:
-       if (err < 0)
-               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       if (err < 0) {
+               nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                                 err);
+       }
        return err;
 }
 
@@ -740,10 +815,14 @@ static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
        if (IS_ERR(chain))
                return PTR_ERR(chain);
+       if (chain->flags & NFT_CHAIN_INACTIVE)
+               return -ENOENT;
 
        skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb2)
@@ -767,8 +846,7 @@ static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
        [NFTA_COUNTER_BYTES]    = { .type = NLA_U64 },
 };
 
-static int
-nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
+static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 {
        struct nlattr *tb[NFTA_COUNTER_MAX+1];
        struct nft_stats __percpu *newstats;
@@ -777,14 +855,14 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
 
        err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
        if (err < 0)
-               return err;
+               return ERR_PTR(err);
 
        if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
        newstats = alloc_percpu(struct nft_stats);
        if (newstats == NULL)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        /* Restore old counters on this cpu, no problem. Per-cpu statistics
         * are not exposed to userspace.
@@ -793,6 +871,12 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
        stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
        stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
 
+       return newstats;
+}
+
+static void nft_chain_stats_replace(struct nft_base_chain *chain,
+                                   struct nft_stats __percpu *newstats)
+{
        if (chain->stats) {
                struct nft_stats __percpu *oldstats =
                                nft_dereference(chain->stats);
@@ -802,17 +886,43 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
                free_percpu(oldstats);
        } else
                rcu_assign_pointer(chain->stats, newstats);
+}
+
+static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+{
+       struct nft_trans *trans;
 
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
+       if (trans == NULL)
+               return -ENOMEM;
+
+       if (msg_type == NFT_MSG_NEWCHAIN)
+               ctx->chain->flags |= NFT_CHAIN_INACTIVE;
+
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
        return 0;
 }
 
+static void nf_tables_chain_destroy(struct nft_chain *chain)
+{
+       BUG_ON(chain->use > 0);
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               module_put(nft_base_chain(chain)->type->owner);
+               free_percpu(nft_base_chain(chain)->stats);
+               kfree(nft_base_chain(chain));
+       } else {
+               kfree(chain);
+       }
+}
+
 static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        const struct nlattr * uninitialized_var(name);
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct nft_table *table;
        struct nft_chain *chain;
        struct nft_base_chain *basechain = NULL;
@@ -822,8 +932,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
        u8 policy = NF_ACCEPT;
        u64 handle = 0;
        unsigned int i;
+       struct nft_stats __percpu *stats;
        int err;
        bool create;
+       struct nft_ctx ctx;
 
        create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
 
@@ -869,6 +981,11 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
        }
 
        if (chain != NULL) {
+               struct nft_stats *stats = NULL;
+               struct nft_trans *trans;
+
+               if (chain->flags & NFT_CHAIN_INACTIVE)
+                       return -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_EXCL)
                        return -EEXIST;
                if (nlh->nlmsg_flags & NLM_F_REPLACE)
@@ -882,19 +999,31 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        if (!(chain->flags & NFT_BASE_CHAIN))
                                return -EOPNOTSUPP;
 
-                       err = nf_tables_counters(nft_base_chain(chain),
-                                                nla[NFTA_CHAIN_COUNTERS]);
-                       if (err < 0)
-                               return err;
+                       stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
+                       if (IS_ERR(stats))
+                               return PTR_ERR(stats);
                }
 
-               if (nla[NFTA_CHAIN_POLICY])
-                       nft_base_chain(chain)->policy = policy;
+               nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+               trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
+                                       sizeof(struct nft_trans_chain));
+               if (trans == NULL)
+                       return -ENOMEM;
+
+               nft_trans_chain_stats(trans) = stats;
+               nft_trans_chain_update(trans) = true;
 
-               if (nla[NFTA_CHAIN_HANDLE] && name)
-                       nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+               if (nla[NFTA_CHAIN_POLICY])
+                       nft_trans_chain_policy(trans) = policy;
+               else
+                       nft_trans_chain_policy(trans) = -1;
 
-               goto notify;
+               if (nla[NFTA_CHAIN_HANDLE] && name) {
+                       nla_strlcpy(nft_trans_chain_name(trans), name,
+                                   NFT_CHAIN_MAXNAMELEN);
+               }
+               list_add_tail(&trans->list, &net->nft.commit_list);
+               return 0;
        }
 
        if (table->use == UINT_MAX)
@@ -939,23 +1068,21 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        return -ENOMEM;
 
                if (nla[NFTA_CHAIN_COUNTERS]) {
-                       err = nf_tables_counters(basechain,
-                                                nla[NFTA_CHAIN_COUNTERS]);
-                       if (err < 0) {
+                       stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
+                       if (IS_ERR(stats)) {
                                module_put(type->owner);
                                kfree(basechain);
-                               return err;
+                               return PTR_ERR(stats);
                        }
+                       basechain->stats = stats;
                } else {
-                       struct nft_stats __percpu *newstats;
-
-                       newstats = alloc_percpu(struct nft_stats);
-                       if (newstats == NULL) {
+                       stats = alloc_percpu(struct nft_stats);
+                       if (IS_ERR(stats)) {
                                module_put(type->owner);
                                kfree(basechain);
-                               return -ENOMEM;
+                               return PTR_ERR(stats);
                        }
-                       rcu_assign_pointer(basechain->stats, newstats);
+                       rcu_assign_pointer(basechain->stats, stats);
                }
 
                basechain->type = type;
@@ -992,31 +1119,26 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
        if (!(table->flags & NFT_TABLE_F_DORMANT) &&
            chain->flags & NFT_BASE_CHAIN) {
                err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
-               if (err < 0) {
-                       module_put(basechain->type->owner);
-                       free_percpu(basechain->stats);
-                       kfree(basechain);
-                       return err;
-               }
+               if (err < 0)
+                       goto err1;
        }
-       list_add_tail(&chain->list, &table->chains);
-       table->use++;
-notify:
-       nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_NEWCHAIN,
-                              family);
-       return 0;
-}
 
-static void nf_tables_chain_destroy(struct nft_chain *chain)
-{
-       BUG_ON(chain->use > 0);
+       nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+       err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
+       if (err < 0)
+               goto err2;
 
-       if (chain->flags & NFT_BASE_CHAIN) {
-               module_put(nft_base_chain(chain)->type->owner);
-               free_percpu(nft_base_chain(chain)->stats);
-               kfree(nft_base_chain(chain));
-       } else
-               kfree(chain);
+       list_add_tail(&chain->list, &table->chains);
+       return 0;
+err2:
+       if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+           chain->flags & NFT_BASE_CHAIN) {
+               nf_unregister_hooks(nft_base_chain(chain)->ops,
+                                   afi->nops);
+       }
+err1:
+       nf_tables_chain_destroy(chain);
+       return err;
 }
 
 static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
@@ -1024,11 +1146,13 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct nft_table *table;
        struct nft_chain *chain;
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
+       struct nft_ctx ctx;
+       int err;
 
        afi = nf_tables_afinfo_lookup(net, family, false);
        if (IS_ERR(afi))
@@ -1037,48 +1161,26 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
        if (IS_ERR(chain))
                return PTR_ERR(chain);
-
+       if (chain->flags & NFT_CHAIN_INACTIVE)
+               return -ENOENT;
        if (!list_empty(&chain->rules) || chain->use > 0)
                return -EBUSY;
 
-       list_del(&chain->list);
-       table->use--;
-
-       if (!(table->flags & NFT_TABLE_F_DORMANT) &&
-           chain->flags & NFT_BASE_CHAIN)
-               nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
-
-       nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
-                              family);
-
-       /* Make sure all rule references are gone before this is released */
-       synchronize_rcu();
+       nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+       err = nft_trans_chain_add(&ctx, NFT_MSG_DELCHAIN);
+       if (err < 0)
+               return err;
 
-       nf_tables_chain_destroy(chain);
+       list_del(&chain->list);
        return 0;
 }
 
-static void nft_ctx_init(struct nft_ctx *ctx,
-                        const struct sk_buff *skb,
-                        const struct nlmsghdr *nlh,
-                        const struct nft_af_info *afi,
-                        const struct nft_table *table,
-                        const struct nft_chain *chain,
-                        const struct nlattr * const *nla)
-{
-       ctx->net   = sock_net(skb->sk);
-       ctx->skb   = skb;
-       ctx->nlh   = nlh;
-       ctx->afi   = afi;
-       ctx->table = table;
-       ctx->chain = chain;
-       ctx->nla   = nla;
-}
-
 /*
  * Expressions
  */
@@ -1093,7 +1195,10 @@ static void nft_ctx_init(struct nft_ctx *ctx,
 int nft_register_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&type->list, &nf_tables_expressions);
+       if (type->family == NFPROTO_UNSPEC)
+               list_add_tail(&type->list, &nf_tables_expressions);
+       else
+               list_add(&type->list, &nf_tables_expressions);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1361,22 +1466,15 @@ nla_put_failure:
        return -1;
 }
 
-static int nf_tables_rule_notify(const struct sk_buff *oskb,
-                                const struct nlmsghdr *nlh,
-                                const struct nft_table *table,
-                                const struct nft_chain *chain,
+static int nf_tables_rule_notify(const struct nft_ctx *ctx,
                                 const struct nft_rule *rule,
-                                int event, u32 flags, int family)
+                                int event)
 {
        struct sk_buff *skb;
-       u32 portid = NETLINK_CB(oskb).portid;
-       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
-       u32 seq = nlh->nlmsg_seq;
-       bool report;
        int err;
 
-       report = nlmsg_report(nlh);
-       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+       if (!ctx->report &&
+           !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
                return 0;
 
        err = -ENOBUFS;
@@ -1384,18 +1482,21 @@ static int nf_tables_rule_notify(const struct sk_buff *oskb,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_rule_info(skb, portid, seq, event, flags,
-                                      family, table, chain, rule);
+       err = nf_tables_fill_rule_info(skb, ctx->portid, ctx->seq, event, 0,
+                                      ctx->afi->family, ctx->table,
+                                      ctx->chain, rule);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
        }
 
-       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
-                            GFP_KERNEL);
+       err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                            ctx->report, GFP_KERNEL);
 err:
-       if (err < 0)
-               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       if (err < 0) {
+               nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                                 err);
+       }
        return err;
 }
 
@@ -1511,10 +1612,14 @@ static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
        if (IS_ERR(chain))
                return PTR_ERR(chain);
+       if (chain->flags & NFT_CHAIN_INACTIVE)
+               return -ENOENT;
 
        rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
        if (IS_ERR(rule))
@@ -1554,37 +1659,36 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
        kfree(rule);
 }
 
-#define NFT_RULE_MAXEXPRS      128
-
-static struct nft_expr_info *info;
-
-static struct nft_rule_trans *
-nf_tables_trans_add(struct nft_ctx *ctx, struct nft_rule *rule)
+static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
+                                           struct nft_rule *rule)
 {
-       struct nft_rule_trans *rupd;
+       struct nft_trans *trans;
 
-       rupd = kmalloc(sizeof(struct nft_rule_trans), GFP_KERNEL);
-       if (rupd == NULL)
-              return NULL;
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule));
+       if (trans == NULL)
+               return NULL;
 
-       rupd->ctx = *ctx;
-       rupd->rule = rule;
-       list_add_tail(&rupd->list, &ctx->net->nft.commit_list);
+       nft_trans_rule(trans) = rule;
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
-       return rupd;
+       return trans;
 }
 
+#define NFT_RULE_MAXEXPRS      128
+
+static struct nft_expr_info *info;
+
 static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
                             const struct nlmsghdr *nlh,
                             const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct net *net = sock_net(skb->sk);
        struct nft_table *table;
        struct nft_chain *chain;
        struct nft_rule *rule, *old_rule = NULL;
-       struct nft_rule_trans *repl = NULL;
+       struct nft_trans *trans = NULL;
        struct nft_expr *expr;
        struct nft_ctx ctx;
        struct nlattr *tmp;
@@ -1682,8 +1786,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
        if (nlh->nlmsg_flags & NLM_F_REPLACE) {
                if (nft_rule_is_active_next(net, old_rule)) {
-                       repl = nf_tables_trans_add(&ctx, old_rule);
-                       if (repl == NULL) {
+                       trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE,
+                                                  old_rule);
+                       if (trans == NULL) {
                                err = -ENOMEM;
                                goto err2;
                        }
@@ -1705,7 +1810,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
                        list_add_rcu(&rule->list, &chain->rules);
        }
 
-       if (nf_tables_trans_add(&ctx, rule) == NULL) {
+       if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
                err = -ENOMEM;
                goto err3;
        }
@@ -1713,11 +1818,10 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
 err3:
        list_del_rcu(&rule->list);
-       if (repl) {
-               list_del_rcu(&repl->rule->list);
-               list_del(&repl->list);
-               nft_rule_clear(net, repl->rule);
-               kfree(repl);
+       if (trans) {
+               list_del_rcu(&nft_trans_rule(trans)->list);
+               nft_rule_clear(net, nft_trans_rule(trans));
+               nft_trans_destroy(trans);
        }
 err2:
        nf_tables_rule_destroy(&ctx, rule);
@@ -1734,7 +1838,7 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
 {
        /* You cannot delete the same rule twice */
        if (nft_rule_is_active_next(ctx->net, rule)) {
-               if (nf_tables_trans_add(ctx, rule) == NULL)
+               if (nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule) == NULL)
                        return -ENOMEM;
                nft_rule_disactivate_next(ctx->net, rule);
                return 0;
@@ -1760,9 +1864,9 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
                             const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct net *net = sock_net(skb->sk);
-       const struct nft_table *table;
+       struct nft_table *table;
        struct nft_chain *chain = NULL;
        struct nft_rule *rule;
        int family = nfmsg->nfgen_family, err = 0;
@@ -1775,6 +1879,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        if (nla[NFTA_RULE_CHAIN]) {
                chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
@@ -1807,88 +1913,6 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
        return err;
 }
 
-static int nf_tables_commit(struct sk_buff *skb)
-{
-       struct net *net = sock_net(skb->sk);
-       struct nft_rule_trans *rupd, *tmp;
-
-       /* Bump generation counter, invalidate any dump in progress */
-       net->nft.genctr++;
-
-       /* A new generation has just started */
-       net->nft.gencursor = gencursor_next(net);
-
-       /* Make sure all packets have left the previous generation before
-        * purging old rules.
-        */
-       synchronize_rcu();
-
-       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               /* This rule was inactive in the past and just became active.
-                * Clear the next bit of the genmask since its meaning has
-                * changed, now it is the future.
-                */
-               if (nft_rule_is_active(net, rupd->rule)) {
-                       nft_rule_clear(net, rupd->rule);
-                       nf_tables_rule_notify(skb, rupd->ctx.nlh,
-                                             rupd->ctx.table, rupd->ctx.chain,
-                                             rupd->rule, NFT_MSG_NEWRULE, 0,
-                                             rupd->ctx.afi->family);
-                       list_del(&rupd->list);
-                       kfree(rupd);
-                       continue;
-               }
-
-               /* This rule is in the past, get rid of it */
-               list_del_rcu(&rupd->rule->list);
-               nf_tables_rule_notify(skb, rupd->ctx.nlh,
-                                     rupd->ctx.table, rupd->ctx.chain,
-                                     rupd->rule, NFT_MSG_DELRULE, 0,
-                                     rupd->ctx.afi->family);
-       }
-
-       /* Make sure we don't see any packet traversing old rules */
-       synchronize_rcu();
-
-       /* Now we can safely release unused old rules */
-       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
-               list_del(&rupd->list);
-               kfree(rupd);
-       }
-
-       return 0;
-}
-
-static int nf_tables_abort(struct sk_buff *skb)
-{
-       struct net *net = sock_net(skb->sk);
-       struct nft_rule_trans *rupd, *tmp;
-
-       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               if (!nft_rule_is_active_next(net, rupd->rule)) {
-                       nft_rule_clear(net, rupd->rule);
-                       list_del(&rupd->list);
-                       kfree(rupd);
-                       continue;
-               }
-
-               /* This rule is inactive, get rid of it */
-               list_del_rcu(&rupd->rule->list);
-       }
-
-       /* Make sure we don't see any packet accessing aborted rules */
-       synchronize_rcu();
-
-       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
-               list_del(&rupd->list);
-               kfree(rupd);
-       }
-
-       return 0;
-}
-
 /*
  * Sets
  */
@@ -1912,9 +1936,18 @@ void nft_unregister_set(struct nft_set_ops *ops)
 }
 EXPORT_SYMBOL_GPL(nft_unregister_set);
 
-static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const nla[])
+/*
+ * Select a set implementation based on the data characteristics and the
+ * given policy. The total memory use might not be known if no size is
+ * given; in that case, the amount of memory per element is used.
+ */
+static const struct nft_set_ops *
+nft_select_set_ops(const struct nlattr * const nla[],
+                  const struct nft_set_desc *desc,
+                  enum nft_set_policies policy)
 {
-       const struct nft_set_ops *ops;
+       const struct nft_set_ops *ops, *bops;
+       struct nft_set_estimate est, best;
        u32 features;
 
 #ifdef CONFIG_MODULES
@@ -1932,15 +1965,45 @@ static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const
                features &= NFT_SET_INTERVAL | NFT_SET_MAP;
        }
 
-       // FIXME: implement selection properly
+       bops       = NULL;
+       best.size  = ~0;
+       best.class = ~0;
+
        list_for_each_entry(ops, &nf_tables_set_ops, list) {
                if ((ops->features & features) != features)
                        continue;
+               if (!ops->estimate(desc, features, &est))
+                       continue;
+
+               switch (policy) {
+               case NFT_SET_POL_PERFORMANCE:
+                       if (est.class < best.class)
+                               break;
+                       if (est.class == best.class && est.size < best.size)
+                               break;
+                       continue;
+               case NFT_SET_POL_MEMORY:
+                       if (est.size < best.size)
+                               break;
+                       if (est.size == best.size && est.class < best.class)
+                               break;
+                       continue;
+               default:
+                       break;
+               }
+
                if (!try_module_get(ops->owner))
                        continue;
-               return ops;
+               if (bops != NULL)
+                       module_put(bops->owner);
+
+               bops = ops;
+               best = est;
        }
 
+       if (bops != NULL)
+               return bops;
+
        return ERR_PTR(-EOPNOTSUPP);
 }
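
The selection loop above walks the registered set backends, asks each for an estimate and keeps the candidate that scores best for the requested policy (lookup performance by default, memory use otherwise). A minimal standalone sketch of the same comparison, with a simplified stand-in for nft_set_estimate and made-up candidates and numbers:

/* Standalone sketch (not kernel code) of the policy-driven backend choice:
 * keep the candidate whose estimate is best for the requested policy. */
#include <stdio.h>

enum set_policy { POL_PERFORMANCE, POL_MEMORY };

struct estimate {
	unsigned int size;	/* estimated memory use */
	unsigned int class;	/* lookup complexity class, lower is better */
};

/* Return 1 if "est" beats "best" under the given policy. */
static int better(enum set_policy policy, struct estimate est,
		  struct estimate best)
{
	switch (policy) {
	case POL_PERFORMANCE:
		if (est.class < best.class)
			return 1;
		return est.class == best.class && est.size < best.size;
	case POL_MEMORY:
		if (est.size < best.size)
			return 1;
		return est.size == best.size && est.class < best.class;
	}
	return 0;
}

int main(void)
{
	struct {
		const char *name;
		struct estimate est;
	} cand[] = {
		{ "hash",   { .size = 4096, .class = 1 } },
		{ "rbtree", { .size = 1024, .class = 2 } },
	};
	struct estimate best = { .size = ~0U, .class = ~0U };
	const char *pick = NULL;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		if (better(POL_PERFORMANCE, cand[i].est, best)) {
			best = cand[i].est;
			pick = cand[i].name;
		}
	}
	/* POL_PERFORMANCE picks "hash" here; POL_MEMORY would pick "rbtree" */
	printf("selected backend: %s\n", pick);
	return 0;
}
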
 
@@ -1953,6 +2016,13 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
        [NFTA_SET_KEY_LEN]              = { .type = NLA_U32 },
        [NFTA_SET_DATA_TYPE]            = { .type = NLA_U32 },
        [NFTA_SET_DATA_LEN]             = { .type = NLA_U32 },
+       [NFTA_SET_POLICY]               = { .type = NLA_U32 },
+       [NFTA_SET_DESC]                 = { .type = NLA_NESTED },
+       [NFTA_SET_ID]                   = { .type = NLA_U32 },
+};
+
+static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
+       [NFTA_SET_DESC_SIZE]            = { .type = NLA_U32 },
 };
 
 static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
@@ -1962,8 +2032,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
 {
        struct net *net = sock_net(skb->sk);
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi = NULL;
-       const struct nft_table *table = NULL;
+       struct nft_af_info *afi = NULL;
+       struct nft_table *table = NULL;
 
        if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
                afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
@@ -1978,6 +2048,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
                table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
                if (IS_ERR(table))
                        return PTR_ERR(table);
+               if (table->flags & NFT_TABLE_INACTIVE)
+                       return -ENOENT;
        }
 
        nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
@@ -1999,13 +2071,27 @@ struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
        return ERR_PTR(-ENOENT);
 }
 
+struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
+                                         const struct nlattr *nla)
+{
+       struct nft_trans *trans;
+       u32 id = ntohl(nla_get_be32(nla));
+
+       list_for_each_entry(trans, &net->nft.commit_list, list) {
+               if (trans->msg_type == NFT_MSG_NEWSET &&
+                   id == nft_trans_set_id(trans))
+                       return nft_trans_set(trans);
+       }
+       return ERR_PTR(-ENOENT);
+}
+
 static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
                                    const char *name)
 {
        const struct nft_set *i;
        const char *p;
        unsigned long *inuse;
-       unsigned int n = 0;
+       unsigned int n = 0, min = 0;
 
        p = strnchr(name, IFNAMSIZ, '%');
        if (p != NULL) {
@@ -2015,23 +2101,28 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
                inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
                if (inuse == NULL)
                        return -ENOMEM;
-
+cont:
                list_for_each_entry(i, &ctx->table->sets, list) {
                        int tmp;
 
                        if (!sscanf(i->name, name, &tmp))
                                continue;
-                       if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE)
+                       if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE)
                                continue;
 
-                       set_bit(tmp, inuse);
+                       set_bit(tmp - min, inuse);
                }
 
                n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
+               if (n >= BITS_PER_BYTE * PAGE_SIZE) {
+                       min += BITS_PER_BYTE * PAGE_SIZE;
+                       memset(inuse, 0, PAGE_SIZE);
+                       goto cont;
+               }
                free_page((unsigned long)inuse);
        }
 
-       snprintf(set->name, sizeof(set->name), name, n);
+       snprintf(set->name, sizeof(set->name), name, min + n);
        list_for_each_entry(i, &ctx->table->sets, list) {
                if (!strcmp(set->name, i->name))
                        return -ENFILE;
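
With this change, nf_tables_set_alloc_name() scans the numeric suffix space of "set%d"-style names one page worth of bits at a time, advancing min whenever the current window is exhausted, instead of giving up once the first page of bits is full. A minimal userspace sketch of that windowed scan, using a fixed-size window and a hypothetical suffix_in_use() predicate in place of the real per-table set list:

/* Standalone sketch: find the lowest unused numeric suffix by scanning
 * fixed-size bitmap windows, similar to the "set%d" name allocation. */
#include <stdio.h>
#include <string.h>

#define WINDOW 64	/* stand-in for BITS_PER_BYTE * PAGE_SIZE */

static int suffix_in_use(unsigned int n)
{
	/* hypothetical: pretend suffixes 0..99 are already taken */
	return n < 100;
}

static unsigned int alloc_suffix(void)
{
	unsigned char inuse[WINDOW];
	unsigned int min = 0, i;

	for (;;) {
		memset(inuse, 0, sizeof(inuse));
		for (i = 0; i < WINDOW; i++)
			if (suffix_in_use(min + i))
				inuse[i] = 1;
		for (i = 0; i < WINDOW; i++)
			if (!inuse[i])
				return min + i;
		min += WINDOW;	/* window full, try the next range */
	}
}

int main(void)
{
	printf("first free suffix: %u\n", alloc_suffix());	/* prints 100 */
	return 0;
}
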
@@ -2044,8 +2135,9 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 {
        struct nfgenmsg *nfmsg;
        struct nlmsghdr *nlh;
-       u32 portid = NETLINK_CB(ctx->skb).portid;
-       u32 seq = ctx->nlh->nlmsg_seq;
+       struct nlattr *desc;
+       u32 portid = ctx->portid;
+       u32 seq = ctx->seq;
 
        event |= NFNL_SUBSYS_NFTABLES << 8;
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
@@ -2077,6 +2169,14 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
                        goto nla_put_failure;
        }
 
+       desc = nla_nest_start(skb, NFTA_SET_DESC);
+       if (desc == NULL)
+               goto nla_put_failure;
+       if (set->size &&
+           nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
+               goto nla_put_failure;
+       nla_nest_end(skb, desc);
+
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -2089,12 +2189,11 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
                                int event)
 {
        struct sk_buff *skb;
-       u32 portid = NETLINK_CB(ctx->skb).portid;
-       bool report;
+       u32 portid = ctx->portid;
        int err;
 
-       report = nlmsg_report(ctx->nlh);
-       if (!report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+       if (!ctx->report &&
+           !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
                return 0;
 
        err = -ENOBUFS;
@@ -2108,8 +2207,8 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
                goto err;
        }
 
-       err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, report,
-                            GFP_KERNEL);
+       err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES,
+                            ctx->report, GFP_KERNEL);
 err:
        if (err < 0)
                nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
@@ -2183,7 +2282,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 {
        const struct nft_set *set;
        unsigned int idx, s_idx = cb->args[0];
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
        struct net *net = sock_net(skb->sk);
        int cur_family = cb->args[3];
@@ -2260,6 +2359,8 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
        return ret;
 }
 
+#define NFT_SET_INACTIVE       (1 << 15)       /* Internal set flag */
+
 static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
                            const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
@@ -2289,6 +2390,8 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
        if (IS_ERR(set))
                return PTR_ERR(set);
+       if (set->flags & NFT_SET_INACTIVE)
+               return -ENOENT;
 
        skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (skb2 == NULL)
@@ -2305,13 +2408,50 @@ err:
        return err;
 }
 
+static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
+                                   struct nft_set_desc *desc,
+                                   const struct nlattr *nla)
+{
+       struct nlattr *da[NFTA_SET_DESC_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(da, NFTA_SET_DESC_MAX, nla, nft_set_desc_policy);
+       if (err < 0)
+               return err;
+
+       if (da[NFTA_SET_DESC_SIZE] != NULL)
+               desc->size = ntohl(nla_get_be32(da[NFTA_SET_DESC_SIZE]));
+
+       return 0;
+}
+
+static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
+                            struct nft_set *set)
+{
+       struct nft_trans *trans;
+
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set));
+       if (trans == NULL)
+               return -ENOMEM;
+
+       if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
+               nft_trans_set_id(trans) =
+                       ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
+               set->flags |= NFT_SET_INACTIVE;
+       }
+       nft_trans_set(trans) = set;
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+
+       return 0;
+}
+
 static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
                            const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        const struct nft_set_ops *ops;
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct net *net = sock_net(skb->sk);
        struct nft_table *table;
        struct nft_set *set;
@@ -2319,14 +2459,18 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        char name[IFNAMSIZ];
        unsigned int size;
        bool create;
-       u32 ktype, klen, dlen, dtype, flags;
+       u32 ktype, dtype, flags, policy;
+       struct nft_set_desc desc;
        int err;
 
        if (nla[NFTA_SET_TABLE] == NULL ||
            nla[NFTA_SET_NAME] == NULL ||
-           nla[NFTA_SET_KEY_LEN] == NULL)
+           nla[NFTA_SET_KEY_LEN] == NULL ||
+           nla[NFTA_SET_ID] == NULL)
                return -EINVAL;
 
+       memset(&desc, 0, sizeof(desc));
+
        ktype = NFT_DATA_VALUE;
        if (nla[NFTA_SET_KEY_TYPE] != NULL) {
                ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
@@ -2334,8 +2478,8 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
                        return -EINVAL;
        }
 
-       klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
-       if (klen == 0 || klen > FIELD_SIZEOF(struct nft_data, data))
+       desc.klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
+       if (desc.klen == 0 || desc.klen > FIELD_SIZEOF(struct nft_data, data))
                return -EINVAL;
 
        flags = 0;
@@ -2347,7 +2491,6 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        }
 
        dtype = 0;
-       dlen  = 0;
        if (nla[NFTA_SET_DATA_TYPE] != NULL) {
                if (!(flags & NFT_SET_MAP))
                        return -EINVAL;
@@ -2360,15 +2503,25 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
                if (dtype != NFT_DATA_VERDICT) {
                        if (nla[NFTA_SET_DATA_LEN] == NULL)
                                return -EINVAL;
-                       dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
-                       if (dlen == 0 ||
-                           dlen > FIELD_SIZEOF(struct nft_data, data))
+                       desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
+                       if (desc.dlen == 0 ||
+                           desc.dlen > FIELD_SIZEOF(struct nft_data, data))
                                return -EINVAL;
                } else
-                       dlen = sizeof(struct nft_data);
+                       desc.dlen = sizeof(struct nft_data);
        } else if (flags & NFT_SET_MAP)
                return -EINVAL;
 
+       policy = NFT_SET_POL_PERFORMANCE;
+       if (nla[NFTA_SET_POLICY] != NULL)
+               policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
+
+       if (nla[NFTA_SET_DESC] != NULL) {
+               err = nf_tables_set_desc_parse(&ctx, &desc, nla[NFTA_SET_DESC]);
+               if (err < 0)
+                       return err;
+       }
+
        create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
 
        afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
@@ -2399,7 +2552,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        if (!(nlh->nlmsg_flags & NLM_F_CREATE))
                return -ENOENT;
 
-       ops = nft_select_set_ops(nla);
+       ops = nft_select_set_ops(nla, &desc, policy);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
 
@@ -2420,17 +2573,21 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        INIT_LIST_HEAD(&set->bindings);
        set->ops   = ops;
        set->ktype = ktype;
-       set->klen  = klen;
+       set->klen  = desc.klen;
        set->dtype = dtype;
-       set->dlen  = dlen;
+       set->dlen  = desc.dlen;
        set->flags = flags;
+       set->size  = desc.size;
 
-       err = ops->init(set, nla);
+       err = ops->init(set, &desc, nla);
+       if (err < 0)
+               goto err2;
+
+       err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
        if (err < 0)
                goto err2;
 
        list_add_tail(&set->list, &table->sets);
-       nf_tables_set_notify(&ctx, set, NFT_MSG_NEWSET);
        return 0;
 
 err2:
@@ -2440,16 +2597,20 @@ err1:
        return err;
 }
 
-static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+static void nft_set_destroy(struct nft_set *set)
 {
-       list_del(&set->list);
-       nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
-
        set->ops->destroy(set);
        module_put(set->ops->owner);
        kfree(set);
 }
 
+static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+{
+       list_del(&set->list);
+       nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
+       nft_set_destroy(set);
+}
+
 static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
                            const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
@@ -2471,10 +2632,16 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
        if (IS_ERR(set))
                return PTR_ERR(set);
+       if (set->flags & NFT_SET_INACTIVE)
+               return -ENOENT;
        if (!list_empty(&set->bindings))
                return -EBUSY;
 
-       nf_tables_set_destroy(&ctx, set);
+       err = nft_trans_set_add(&ctx, NFT_MSG_DELSET, set);
+       if (err < 0)
+               return err;
+
+       list_del(&set->list);
        return 0;
 }
 
@@ -2534,7 +2701,8 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 {
        list_del(&binding->list);
 
-       if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+       if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
+           !(set->flags & NFT_SET_INACTIVE))
                nf_tables_set_destroy(ctx, set);
 }
 
@@ -2552,16 +2720,18 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX +
        [NFTA_SET_ELEM_LIST_TABLE]      = { .type = NLA_STRING },
        [NFTA_SET_ELEM_LIST_SET]        = { .type = NLA_STRING },
        [NFTA_SET_ELEM_LIST_ELEMENTS]   = { .type = NLA_NESTED },
+       [NFTA_SET_ELEM_LIST_SET_ID]     = { .type = NLA_U32 },
 };
 
 static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
                                      const struct sk_buff *skb,
                                      const struct nlmsghdr *nlh,
-                                     const struct nlattr * const nla[])
+                                     const struct nlattr * const nla[],
+                                     bool trans)
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi;
-       const struct nft_table *table;
+       struct nft_af_info *afi;
+       struct nft_table *table;
        struct net *net = sock_net(skb->sk);
 
        afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
@@ -2571,6 +2741,8 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
        table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (!trans && (table->flags & NFT_TABLE_INACTIVE))
+               return -ENOENT;
 
        nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
        return 0;
@@ -2644,13 +2816,16 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
        if (err < 0)
                return err;
 
-       err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+       err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla,
+                                        false);
        if (err < 0)
                return err;
 
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
        if (IS_ERR(set))
                return PTR_ERR(set);
+       if (set->flags & NFT_SET_INACTIVE)
+               return -ENOENT;
 
        event  = NFT_MSG_NEWSETELEM;
        event |= NFNL_SUBSYS_NFTABLES << 8;
@@ -2707,13 +2882,15 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
        struct nft_ctx ctx;
        int err;
 
-       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
        if (err < 0)
                return err;
 
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
        if (IS_ERR(set))
                return PTR_ERR(set);
+       if (set->flags & NFT_SET_INACTIVE)
+               return -ENOENT;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
@@ -2724,7 +2901,98 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
        return -EOPNOTSUPP;
 }
 
-static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
+static int nf_tables_fill_setelem_info(struct sk_buff *skb,
+                                      const struct nft_ctx *ctx, u32 seq,
+                                      u32 portid, int event, u16 flags,
+                                      const struct nft_set *set,
+                                      const struct nft_set_elem *elem)
+{
+       struct nfgenmsg *nfmsg;
+       struct nlmsghdr *nlh;
+       struct nlattr *nest;
+       int err;
+
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+                       flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = ctx->afi->family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_SET_NAME, set->name))
+               goto nla_put_failure;
+
+       nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       err = nf_tables_fill_setelem(skb, set, elem);
+       if (err < 0)
+               goto nla_put_failure;
+
+       nla_nest_end(skb, nest);
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
+                                   const struct nft_set *set,
+                                   const struct nft_set_elem *elem,
+                                   int event, u16 flags)
+{
+       struct net *net = ctx->net;
+       u32 portid = ctx->portid;
+       struct sk_buff *skb;
+       int err;
+
+       if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
+                                         set, elem);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
+static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
+                                             int msg_type,
+                                             struct nft_set *set)
+{
+       struct nft_trans *trans;
+
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_elem));
+       if (trans == NULL)
+               return NULL;
+
+       nft_trans_elem_set(trans) = set;
+       return trans;
+}
+
+static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                            const struct nlattr *attr)
 {
        struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
@@ -2732,8 +3000,12 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
        struct nft_set_elem elem;
        struct nft_set_binding *binding;
        enum nft_registers dreg;
+       struct nft_trans *trans;
        int err;
 
+       if (set->size && set->nelems == set->size)
+               return -ENFILE;
+
        err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
                               nft_set_elem_policy);
        if (err < 0)
@@ -2786,7 +3058,7 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
                        struct nft_ctx bind_ctx = {
                                .afi    = ctx->afi,
                                .table  = ctx->table,
-                               .chain  = binding->chain,
+                               .chain  = (struct nft_chain *)binding->chain,
                        };
 
                        err = nft_validate_data_load(&bind_ctx, dreg,
@@ -2796,12 +3068,20 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
                }
        }
 
+       trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
+       if (trans == NULL)
+               goto err3;
+
        err = set->ops->insert(set, &elem);
        if (err < 0)
-               goto err3;
+               goto err4;
 
+       nft_trans_elem(trans) = elem;
+       list_add(&trans->list, &ctx->net->nft.commit_list);
        return 0;
 
+err4:
+       kfree(trans);
 err3:
        if (nla[NFTA_SET_ELEM_DATA] != NULL)
                nft_data_uninit(&elem.data, d2.type);
@@ -2815,35 +3095,44 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
                                const struct nlmsghdr *nlh,
                                const struct nlattr * const nla[])
 {
+       struct net *net = sock_net(skb->sk);
        const struct nlattr *attr;
        struct nft_set *set;
        struct nft_ctx ctx;
-       int rem, err;
+       int rem, err = 0;
 
-       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true);
        if (err < 0)
                return err;
 
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
-       if (IS_ERR(set))
-               return PTR_ERR(set);
+       if (IS_ERR(set)) {
+               if (nla[NFTA_SET_ELEM_LIST_SET_ID]) {
+                       set = nf_tables_set_lookup_byid(net,
+                                       nla[NFTA_SET_ELEM_LIST_SET_ID]);
+               }
+               if (IS_ERR(set))
+                       return PTR_ERR(set);
+       }
+
        if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
                return -EBUSY;
 
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
                err = nft_add_set_elem(&ctx, set, attr);
                if (err < 0)
-                       return err;
+                       break;
        }
-       return 0;
+       return err;
 }
 
-static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
+static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
                           const struct nlattr *attr)
 {
        struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
        struct nft_data_desc desc;
        struct nft_set_elem elem;
+       struct nft_trans *trans;
        int err;
 
        err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
@@ -2867,7 +3156,12 @@ static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
        if (err < 0)
                goto err2;
 
-       set->ops->remove(set, &elem);
+       trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
+       if (trans == NULL)
+               goto err2;
+
+       nft_trans_elem(trans) = elem;
+       list_add(&trans->list, &ctx->net->nft.commit_list);
 
        nft_data_uninit(&elem.key, NFT_DATA_VALUE);
        if (set->flags & NFT_SET_MAP)
@@ -2886,9 +3180,9 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
        const struct nlattr *attr;
        struct nft_set *set;
        struct nft_ctx ctx;
-       int rem, err;
+       int rem, err = 0;
 
-       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
        if (err < 0)
                return err;
 
@@ -2901,14 +3195,14 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
                err = nft_del_setelem(&ctx, set, attr);
                if (err < 0)
-                       return err;
+                       break;
        }
-       return 0;
+       return err;
 }
 
 static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
        [NFT_MSG_NEWTABLE] = {
-               .call           = nf_tables_newtable,
+               .call_batch     = nf_tables_newtable,
                .attr_count     = NFTA_TABLE_MAX,
                .policy         = nft_table_policy,
        },
@@ -2918,12 +3212,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_table_policy,
        },
        [NFT_MSG_DELTABLE] = {
-               .call           = nf_tables_deltable,
+               .call_batch     = nf_tables_deltable,
                .attr_count     = NFTA_TABLE_MAX,
                .policy         = nft_table_policy,
        },
        [NFT_MSG_NEWCHAIN] = {
-               .call           = nf_tables_newchain,
+               .call_batch     = nf_tables_newchain,
                .attr_count     = NFTA_CHAIN_MAX,
                .policy         = nft_chain_policy,
        },
@@ -2933,7 +3227,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_chain_policy,
        },
        [NFT_MSG_DELCHAIN] = {
-               .call           = nf_tables_delchain,
+               .call_batch     = nf_tables_delchain,
                .attr_count     = NFTA_CHAIN_MAX,
                .policy         = nft_chain_policy,
        },
@@ -2953,7 +3247,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_rule_policy,
        },
        [NFT_MSG_NEWSET] = {
-               .call           = nf_tables_newset,
+               .call_batch     = nf_tables_newset,
                .attr_count     = NFTA_SET_MAX,
                .policy         = nft_set_policy,
        },
@@ -2963,12 +3257,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_set_policy,
        },
        [NFT_MSG_DELSET] = {
-               .call           = nf_tables_delset,
+               .call_batch     = nf_tables_delset,
                .attr_count     = NFTA_SET_MAX,
                .policy         = nft_set_policy,
        },
        [NFT_MSG_NEWSETELEM] = {
-               .call           = nf_tables_newsetelem,
+               .call_batch     = nf_tables_newsetelem,
                .attr_count     = NFTA_SET_ELEM_LIST_MAX,
                .policy         = nft_set_elem_list_policy,
        },
@@ -2978,12 +3272,270 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_set_elem_list_policy,
        },
        [NFT_MSG_DELSETELEM] = {
-               .call           = nf_tables_delsetelem,
+               .call_batch     = nf_tables_delsetelem,
                .attr_count     = NFTA_SET_ELEM_LIST_MAX,
                .policy         = nft_set_elem_list_policy,
        },
 };
 
+static void nft_chain_commit_update(struct nft_trans *trans)
+{
+       struct nft_base_chain *basechain;
+
+       if (nft_trans_chain_name(trans)[0])
+               strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
+
+       if (!(trans->ctx.chain->flags & NFT_BASE_CHAIN))
+               return;
+
+       basechain = nft_base_chain(trans->ctx.chain);
+       nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
+
+       switch (nft_trans_chain_policy(trans)) {
+       case NF_DROP:
+       case NF_ACCEPT:
+               basechain->policy = nft_trans_chain_policy(trans);
+               break;
+       }
+}
+
+/* Schedule objects for release via rcu to make sure no packets are accessing
+ * removed rules.
+ */
+static void nf_tables_commit_release_rcu(struct rcu_head *rt)
+{
+       struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
+
+       switch (trans->msg_type) {
+       case NFT_MSG_DELTABLE:
+               nf_tables_table_destroy(&trans->ctx);
+               break;
+       case NFT_MSG_DELCHAIN:
+               nf_tables_chain_destroy(trans->ctx.chain);
+               break;
+       case NFT_MSG_DELRULE:
+               nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+               break;
+       case NFT_MSG_DELSET:
+               nft_set_destroy(nft_trans_set(trans));
+               break;
+       }
+       kfree(trans);
+}
+
+static int nf_tables_commit(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nft_trans *trans, *next;
+       struct nft_set *set;
+
+       /* Bump generation counter, invalidate any dump in progress */
+       net->nft.genctr++;
+
+       /* A new generation has just started */
+       net->nft.gencursor = gencursor_next(net);
+
+       /* Make sure all packets have left the previous generation before
+        * purging old rules.
+        */
+       synchronize_rcu();
+
+       list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+               switch (trans->msg_type) {
+               case NFT_MSG_NEWTABLE:
+                       if (nft_trans_table_update(trans)) {
+                               if (!nft_trans_table_enable(trans)) {
+                                       nf_tables_table_disable(trans->ctx.afi,
+                                                               trans->ctx.table);
+                                       trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+                               }
+                       } else {
+                               trans->ctx.table->flags &= ~NFT_TABLE_INACTIVE;
+                       }
+                       nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELTABLE:
+                       nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
+                       break;
+               case NFT_MSG_NEWCHAIN:
+                       if (nft_trans_chain_update(trans))
+                               nft_chain_commit_update(trans);
+                       else {
+                               trans->ctx.chain->flags &= ~NFT_CHAIN_INACTIVE;
+                               trans->ctx.table->use++;
+                       }
+                       nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELCHAIN:
+                       trans->ctx.table->use--;
+                       nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
+                       if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
+                           trans->ctx.chain->flags & NFT_BASE_CHAIN) {
+                               nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
+                                                   trans->ctx.afi->nops);
+                       }
+                       break;
+               case NFT_MSG_NEWRULE:
+                       nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
+                       nf_tables_rule_notify(&trans->ctx,
+                                             nft_trans_rule(trans),
+                                             NFT_MSG_NEWRULE);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELRULE:
+                       list_del_rcu(&nft_trans_rule(trans)->list);
+                       nf_tables_rule_notify(&trans->ctx,
+                                             nft_trans_rule(trans),
+                                             NFT_MSG_DELRULE);
+                       break;
+               case NFT_MSG_NEWSET:
+                       nft_trans_set(trans)->flags &= ~NFT_SET_INACTIVE;
+                       nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+                                            NFT_MSG_NEWSET);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELSET:
+                       nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+                                            NFT_MSG_DELSET);
+                       break;
+               case NFT_MSG_NEWSETELEM:
+                       nft_trans_elem_set(trans)->nelems++;
+                       nf_tables_setelem_notify(&trans->ctx,
+                                                nft_trans_elem_set(trans),
+                                                &nft_trans_elem(trans),
+                                                NFT_MSG_NEWSETELEM, 0);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELSETELEM:
+                       nft_trans_elem_set(trans)->nelems--;
+                       nf_tables_setelem_notify(&trans->ctx,
+                                                nft_trans_elem_set(trans),
+                                                &nft_trans_elem(trans),
+                                                NFT_MSG_DELSETELEM, 0);
+                       set = nft_trans_elem_set(trans);
+                       set->ops->get(set, &nft_trans_elem(trans));
+                       set->ops->remove(set, &nft_trans_elem(trans));
+                       nft_trans_destroy(trans);
+                       break;
+               }
+       }
+
+       list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+               list_del(&trans->list);
+               trans->ctx.nla = NULL;
+               call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu);
+       }
+
+       return 0;
+}
+
+/* Schedule objects for release via rcu to make sure no packets are accessing
+ * aborted rules.
+ */
+static void nf_tables_abort_release_rcu(struct rcu_head *rt)
+{
+       struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
+
+       switch (trans->msg_type) {
+       case NFT_MSG_NEWTABLE:
+               nf_tables_table_destroy(&trans->ctx);
+               break;
+       case NFT_MSG_NEWCHAIN:
+               nf_tables_chain_destroy(trans->ctx.chain);
+               break;
+       case NFT_MSG_NEWRULE:
+               nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+               break;
+       case NFT_MSG_NEWSET:
+               nft_set_destroy(nft_trans_set(trans));
+               break;
+       }
+       kfree(trans);
+}
+
+static int nf_tables_abort(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nft_trans *trans, *next;
+       struct nft_set *set;
+
+       list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+               switch (trans->msg_type) {
+               case NFT_MSG_NEWTABLE:
+                       if (nft_trans_table_update(trans)) {
+                               if (nft_trans_table_enable(trans)) {
+                                       nf_tables_table_disable(trans->ctx.afi,
+                                                               trans->ctx.table);
+                                       trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+                               }
+                               nft_trans_destroy(trans);
+                       } else {
+                               list_del(&trans->ctx.table->list);
+                       }
+                       break;
+               case NFT_MSG_DELTABLE:
+                       list_add_tail(&trans->ctx.table->list,
+                                     &trans->ctx.afi->tables);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWCHAIN:
+                       if (nft_trans_chain_update(trans)) {
+                               if (nft_trans_chain_stats(trans))
+                                       free_percpu(nft_trans_chain_stats(trans));
+
+                               nft_trans_destroy(trans);
+                       } else {
+                               list_del(&trans->ctx.chain->list);
+                               if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
+                                   trans->ctx.chain->flags & NFT_BASE_CHAIN) {
+                                       nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
+                                                           trans->ctx.afi->nops);
+                               }
+                       }
+                       break;
+               case NFT_MSG_DELCHAIN:
+                       list_add_tail(&trans->ctx.chain->list,
+                                     &trans->ctx.table->chains);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWRULE:
+                       list_del_rcu(&nft_trans_rule(trans)->list);
+                       break;
+               case NFT_MSG_DELRULE:
+                       nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWSET:
+                       list_del(&nft_trans_set(trans)->list);
+                       break;
+               case NFT_MSG_DELSET:
+                       list_add_tail(&nft_trans_set(trans)->list,
+                                     &trans->ctx.table->sets);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWSETELEM:
+                       set = nft_trans_elem_set(trans);
+                       set->ops->get(set, &nft_trans_elem(trans));
+                       set->ops->remove(set, &nft_trans_elem(trans));
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELSETELEM:
+                       nft_trans_destroy(trans);
+                       break;
+               }
+       }
+
+       list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+               list_del(&trans->list);
+               trans->ctx.nla = NULL;
+               call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu);
+       }
+
+       return 0;
+}
+
 static const struct nfnetlink_subsystem nf_tables_subsys = {
        .name           = "nf_tables",
        .subsys_id      = NFNL_SUBSYS_NFTABLES,
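
The rewritten commit and abort paths above walk a single per-netns transaction list: on commit the queued objects become visible, notifications are sent, and replaced objects are released only after an RCU grace period; on abort the queued changes are undone instead. A heavily reduced userspace sketch of that two-outcome transaction log follows; only the NEWRULE/DELRULE cases are modeled, the deferred call_rcu release is collapsed into a direct free, and list ordering is ignored:

/* Standalone sketch (not kernel code) of a two-outcome transaction log:
 * each queued entry is either applied (commit) or rolled back (abort). */
#include <stdio.h>
#include <stdlib.h>

enum msg_type { MSG_NEWRULE, MSG_DELRULE };

struct rule { int id; int active; };

struct trans {
	enum msg_type msg_type;
	struct rule *rule;
	struct trans *next;
};

static struct trans *commit_list;

static void queue_trans(enum msg_type type, struct rule *rule)
{
	struct trans *t = malloc(sizeof(*t));

	t->msg_type = type;
	t->rule = rule;
	t->next = commit_list;
	commit_list = t;
}

static void run(int commit)
{
	struct trans *t, *next;

	for (t = commit_list; t; t = next) {
		next = t->next;
		switch (t->msg_type) {
		case MSG_NEWRULE:
			/* commit: activate; abort: drop the pending rule */
			if (commit)
				t->rule->active = 1;
			else
				free(t->rule);
			break;
		case MSG_DELRULE:
			/* commit: release the old rule; abort: keep it */
			if (commit)
				free(t->rule);
			else
				t->rule->active = 1;
			break;
		}
		free(t);
	}
	commit_list = NULL;
}

int main(void)
{
	struct rule *r = malloc(sizeof(*r));

	r->id = 1;
	r->active = 0;
	queue_trans(MSG_NEWRULE, r);
	run(1);					/* commit */
	printf("rule %d active=%d\n", r->id, r->active);
	free(r);
	return 0;
}
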
index 804105391b9a903354ae9517d602c8c4638a8879..345acfb1720b14f00aae0e5937ab07bfb90e9482 100644 (file)
@@ -66,20 +66,6 @@ struct nft_jumpstack {
        int                     rulenum;
 };
 
-static inline void
-nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
-               struct nft_jumpstack *jumpstack, unsigned int stackptr)
-{
-       struct nft_stats __percpu *stats;
-       const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
-
-       rcu_read_lock_bh();
-       stats = rcu_dereference(nft_base_chain(chain)->stats);
-       __this_cpu_inc(stats->pkts);
-       __this_cpu_add(stats->bytes, pkt->skb->len);
-       rcu_read_unlock_bh();
-}
-
 enum nft_trace {
        NFT_TRACE_RULE,
        NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
 unsigned int
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
-       const struct nft_chain *chain = ops->priv;
+       const struct nft_chain *chain = ops->priv, *basechain = chain;
        const struct nft_rule *rule;
        const struct nft_expr *expr, *last;
        struct nft_data data[NFT_REG_MAX + 1];
        unsigned int stackptr = 0;
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-       int rulenum = 0;
+       struct nft_stats __percpu *stats;
+       int rulenum;
        /*
         * Cache cursor to avoid problems in case that the cursor is updated
         * while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
 
 do_chain:
+       rulenum = 0;
        rule = list_entry(&chain->rules, struct nft_rule, list);
 next_rule:
        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
                switch (data[NFT_REG_VERDICT].verdict) {
                case NFT_BREAK:
                        data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
-                       /* fall through */
+                       continue;
                case NFT_CONTINUE:
+                       if (unlikely(pkt->skb->nf_trace))
+                               nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
                        continue;
                }
                break;
@@ -183,37 +173,44 @@ next_rule:
                jumpstack[stackptr].rule  = rule;
                jumpstack[stackptr].rulenum = rulenum;
                stackptr++;
-               /* fall through */
+               chain = data[NFT_REG_VERDICT].chain;
+               goto do_chain;
        case NFT_GOTO:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
                chain = data[NFT_REG_VERDICT].chain;
                goto do_chain;
        case NFT_RETURN:
                if (unlikely(pkt->skb->nf_trace))
                        nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
-
-               /* fall through */
+               break;
        case NFT_CONTINUE:
+               if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
+                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
                break;
        default:
                WARN_ON(1);
        }
 
        if (stackptr > 0) {
-               if (unlikely(pkt->skb->nf_trace))
-                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
-
                stackptr--;
                chain = jumpstack[stackptr].chain;
                rule  = jumpstack[stackptr].rule;
                rulenum = jumpstack[stackptr].rulenum;
                goto next_rule;
        }
-       nft_chain_stats(chain, pkt, jumpstack, stackptr);
 
        if (unlikely(pkt->skb->nf_trace))
-               nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+               nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
+
+       rcu_read_lock_bh();
+       stats = rcu_dereference(nft_base_chain(basechain)->stats);
+       __this_cpu_inc(stats->pkts);
+       __this_cpu_add(stats->bytes, pkt->skb->len);
+       rcu_read_unlock_bh();
 
-       return nft_base_chain(chain)->policy;
+       return nft_base_chain(basechain)->policy;
 }
 EXPORT_SYMBOL_GPL(nft_do_chain);
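
In the reworked nft_do_chain() above, NFT_JUMP pushes the current chain and rule position onto a fixed-size jump stack and evaluation restarts in the target chain; reaching the end of a jumped-to chain pops that entry and resumes after the jump; and the per-cpu counters are now bumped once, on the base chain only. A minimal standalone sketch of just the jump/return traversal, where verdict names and the data layout are simplified stand-ins:

/* Standalone sketch (not kernel code) of chain traversal with a jump stack:
 * a jump records where to resume, reaching the end of a jumped-to chain
 * returns to that point, and falling off the base chain yields its policy. */
#include <stdio.h>

#define JUMP_STACK_SIZE 16

struct chain;

enum verdict { V_CONTINUE, V_ACCEPT, V_JUMP };

struct rule {
	enum verdict verdict;
	const struct chain *jump;	/* target when verdict == V_JUMP */
};

struct chain {
	const struct rule *rules;
	unsigned int nrules;
	enum verdict policy;		/* base chain policy */
};

static enum verdict do_chain(const struct chain *basechain)
{
	struct {
		const struct chain *chain;
		unsigned int idx;
	} stack[JUMP_STACK_SIZE];
	const struct chain *chain = basechain;
	unsigned int idx = 0, sp = 0;

	for (;;) {
		if (idx == chain->nrules) {
			if (sp == 0)
				return basechain->policy;
			sp--;			/* implicit return to caller */
			chain = stack[sp].chain;
			idx = stack[sp].idx;
			continue;
		}
		switch (chain->rules[idx].verdict) {
		case V_ACCEPT:
			return V_ACCEPT;
		case V_JUMP:
			if (sp == JUMP_STACK_SIZE)	/* depth limit */
				return basechain->policy;
			stack[sp].chain = chain;
			stack[sp].idx = idx + 1;
			sp++;
			chain = chain->rules[idx].jump;
			idx = 0;
			continue;
		case V_CONTINUE:
			idx++;
			continue;
		}
	}
}

int main(void)
{
	static const struct rule sub_rules[] = { { V_CONTINUE, NULL } };
	static const struct chain sub = { sub_rules, 1, V_CONTINUE };
	static const struct rule base_rules[] = {
		{ V_JUMP, &sub },
		{ V_CONTINUE, NULL },
	};
	static const struct chain base = { base_rules, 2, V_ACCEPT };

	printf("verdict: %d\n", do_chain(&base));	/* prints 1 (V_ACCEPT) */
	return 0;
}
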
 
index 6e42dcfad40ac783f5d941955ede1090989dcc06..c138b8fbe280af6886693421a7fe8d9a288156cf 100644 (file)
@@ -256,15 +256,15 @@ replay:
 #endif
                {
                        nfnl_unlock(subsys_id);
-                       kfree_skb(nskb);
-                       return netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       return kfree_skb(nskb);
                }
        }
 
        if (!ss->commit || !ss->abort) {
                nfnl_unlock(subsys_id);
-               kfree_skb(nskb);
-               return netlink_ack(skb, nlh, -EOPNOTSUPP);
+               netlink_ack(skb, nlh, -EOPNOTSUPP);
+               return kfree_skb(skb);
        }
 
        while (skb->len >= nlmsg_total_size(0)) {
@@ -368,14 +368,13 @@ done:
 static void nfnetlink_rcv(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh = nlmsg_hdr(skb);
-       struct net *net = sock_net(skb->sk);
        int msglen;
 
        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
            skb->len < nlh->nlmsg_len)
                return;
 
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+       if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
                netlink_ack(skb, nlh, -EPERM);
                return;
        }
index bd0d41e693416167b4f149f64117e440a5134496..cc5603016242ea8e1f5cdce1d633e3a2687276ae 100644 (file)
@@ -215,22 +215,14 @@ static void nft_ct_l3proto_module_put(uint8_t family)
                nf_ct_l3proto_module_put(family);
 }
 
-static int nft_ct_init_validate_get(const struct nft_expr *expr,
-                                   const struct nlattr * const tb[])
+static int nft_ct_get_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
 {
        struct nft_ct *priv = nft_expr_priv(expr);
+       int err;
 
-       if (tb[NFTA_CT_DIRECTION] != NULL) {
-               priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
-               switch (priv->dir) {
-               case IP_CT_DIR_ORIGINAL:
-               case IP_CT_DIR_REPLY:
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       }
-
+       priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
        switch (priv->key) {
        case NFT_CT_STATE:
        case NFT_CT_DIRECTION:
@@ -262,55 +254,55 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr,
                return -EOPNOTSUPP;
        }
 
-       return 0;
-}
-
-static int nft_ct_init_validate_set(uint32_t key)
-{
-       switch (key) {
-       case NFT_CT_MARK:
-               break;
-       default:
-               return -EOPNOTSUPP;
+       if (tb[NFTA_CT_DIRECTION] != NULL) {
+               priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
+               switch (priv->dir) {
+               case IP_CT_DIR_ORIGINAL:
+               case IP_CT_DIR_REPLY:
+                       break;
+               default:
+                       return -EINVAL;
+               }
        }
 
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       err = nft_ct_l3proto_try_module_get(ctx->afi->family);
+       if (err < 0)
+               return err;
+
        return 0;
 }
 
-static int nft_ct_init(const struct nft_ctx *ctx,
-                      const struct nft_expr *expr,
-                      const struct nlattr * const tb[])
+static int nft_ct_set_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
 {
        struct nft_ct *priv = nft_expr_priv(expr);
        int err;
 
        priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
-
-       if (tb[NFTA_CT_DREG]) {
-               err = nft_ct_init_validate_get(expr, tb);
-               if (err < 0)
-                       return err;
-
-               priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
-               err = nft_validate_output_register(priv->dreg);
-               if (err < 0)
-                       return err;
-
-               err = nft_validate_data_load(ctx, priv->dreg, NULL,
-                                            NFT_DATA_VALUE);
-               if (err < 0)
-                       return err;
-       } else {
-               err = nft_ct_init_validate_set(priv->key);
-               if (err < 0)
-                       return err;
-
-               priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
-               err = nft_validate_input_register(priv->sreg);
-               if (err < 0)
-                       return err;
+       switch (priv->key) {
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       case NFT_CT_MARK:
+               break;
+#endif
+       default:
+               return -EOPNOTSUPP;
        }
 
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
+       err = nft_validate_input_register(priv->sreg);
+       if (err < 0)
+               return err;
+
        err = nft_ct_l3proto_try_module_get(ctx->afi->family);
        if (err < 0)
                return err;
@@ -370,7 +362,7 @@ static const struct nft_expr_ops nft_ct_get_ops = {
        .type           = &nft_ct_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
        .eval           = nft_ct_get_eval,
-       .init           = nft_ct_init,
+       .init           = nft_ct_get_init,
        .destroy        = nft_ct_destroy,
        .dump           = nft_ct_get_dump,
 };
@@ -379,7 +371,7 @@ static const struct nft_expr_ops nft_ct_set_ops = {
        .type           = &nft_ct_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
        .eval           = nft_ct_set_eval,
-       .init           = nft_ct_init,
+       .init           = nft_ct_set_init,
        .destroy        = nft_ct_destroy,
        .dump           = nft_ct_set_dump,
 };
index 3b1ad876d6b028f987ccf8e78ceb9639d767e6bf..1dfeb6786832e83670968f577672e364baa7f039 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/list.h>
+#include <linux/log2.h>
 #include <linux/jhash.h>
 #include <linux/netlink.h>
 #include <linux/vmalloc.h>
@@ -19,7 +20,7 @@
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 
-#define NFT_HASH_MIN_SIZE      4
+#define NFT_HASH_MIN_SIZE      4UL
 
 struct nft_hash {
        struct nft_hash_table __rcu     *tbl;
@@ -27,7 +28,6 @@ struct nft_hash {
 
 struct nft_hash_table {
        unsigned int                    size;
-       unsigned int                    elements;
        struct nft_hash_elem __rcu      *buckets[];
 };
 
@@ -82,6 +82,11 @@ static void nft_hash_tbl_free(const struct nft_hash_table *tbl)
                kfree(tbl);
 }
 
+static unsigned int nft_hash_tbl_size(unsigned int nelem)
+{
+       return max(roundup_pow_of_two(nelem * 4 / 3), NFT_HASH_MIN_SIZE);
+}
+
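
nft_hash_tbl_size() above turns the element-count hint into a bucket count: aim for at most 75% load (nelem * 4 / 3), round up to a power of two, and never go below NFT_HASH_MIN_SIZE. A standalone check of that arithmetic; roundup_pow_of_two() is reimplemented below only because it is a kernel helper:

#include <stdio.h>

#define NFT_HASH_MIN_SIZE 4UL

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned long tbl_size(unsigned long nelem)
{
	unsigned long want = roundup_pow_of_two(nelem * 4 / 3);

	return want > NFT_HASH_MIN_SIZE ? want : NFT_HASH_MIN_SIZE;
}

int main(void)
{
	unsigned long hints[] = { 1, 3, 6, 100, 1000 };

	for (unsigned i = 0; i < sizeof(hints) / sizeof(hints[0]); i++)
		printf("%5lu elements -> %5lu buckets\n",
		       hints[i], tbl_size(hints[i]));
	return 0;	/* e.g. 100 elements -> 133 -> 256 buckets */
}
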
 static struct nft_hash_table *nft_hash_tbl_alloc(unsigned int nbuckets)
 {
        struct nft_hash_table *tbl;
@@ -161,7 +166,6 @@ static int nft_hash_tbl_expand(const struct nft_set *set, struct nft_hash *priv)
                        break;
                }
        }
-       ntbl->elements = tbl->elements;
 
        /* Publish new table */
        rcu_assign_pointer(priv->tbl, ntbl);
@@ -201,7 +205,6 @@ static int nft_hash_tbl_shrink(const struct nft_set *set, struct nft_hash *priv)
                        ;
                RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
        }
-       ntbl->elements = tbl->elements;
 
        /* Publish new table */
        rcu_assign_pointer(priv->tbl, ntbl);
@@ -237,10 +240,9 @@ static int nft_hash_insert(const struct nft_set *set,
        h = nft_hash_data(&he->key, tbl->size, set->klen);
        RCU_INIT_POINTER(he->next, tbl->buckets[h]);
        rcu_assign_pointer(tbl->buckets[h], he);
-       tbl->elements++;
 
        /* Expand table when exceeding 75% load */
-       if (tbl->elements > tbl->size / 4 * 3)
+       if (set->nelems + 1 > tbl->size / 4 * 3)
                nft_hash_tbl_expand(set, priv);
 
        return 0;
@@ -268,10 +270,9 @@ static void nft_hash_remove(const struct nft_set *set,
        RCU_INIT_POINTER(*pprev, he->next);
        synchronize_rcu();
        kfree(he);
-       tbl->elements--;
 
        /* Shrink table beneath 30% load */
-       if (tbl->elements < tbl->size * 3 / 10 &&
+       if (set->nelems - 1 < tbl->size * 3 / 10 &&
            tbl->size > NFT_HASH_MIN_SIZE)
                nft_hash_tbl_shrink(set, priv);
 }
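
With the per-table element counter removed, the resize decisions above key off set->nelems instead: grow when the load after an insert would exceed 75% of the bucket count, shrink when it would fall below 30% and the table is still above the minimum size. The same threshold arithmetic, checked standalone (the sample nelems/size values are arbitrary):

#include <stdbool.h>
#include <stdio.h>

#define NFT_HASH_MIN_SIZE 4UL

static bool should_expand(unsigned int nelems, unsigned int size)
{
	return nelems + 1 > size / 4 * 3;	/* > 75% after the insert */
}

static bool should_shrink(unsigned int nelems, unsigned int size)
{
	return nelems - 1 < size * 3 / 10 &&	/* < 30% after the removal */
	       size > NFT_HASH_MIN_SIZE;
}

int main(void)
{
	printf("expand at 97/128: %d\n", should_expand(97, 128)); /* 98 > 96 */
	printf("expand at 95/128: %d\n", should_expand(95, 128)); /* 96 > 96 is false */
	printf("shrink at 38/128: %d\n", should_shrink(38, 128)); /* 37 < 38 */
	printf("shrink at 40/128: %d\n", should_shrink(40, 128)); /* 39 < 38 is false */
	return 0;
}
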
@@ -335,17 +336,23 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
 }
 
 static int nft_hash_init(const struct nft_set *set,
+                        const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
 {
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_table *tbl;
+       unsigned int size;
 
        if (unlikely(!nft_hash_rnd_initted)) {
                get_random_bytes(&nft_hash_rnd, 4);
                nft_hash_rnd_initted = true;
        }
 
-       tbl = nft_hash_tbl_alloc(NFT_HASH_MIN_SIZE);
+       size = NFT_HASH_MIN_SIZE;
+       if (desc->size)
+               size = nft_hash_tbl_size(desc->size);
+
+       tbl = nft_hash_tbl_alloc(size);
        if (tbl == NULL)
                return -ENOMEM;
        RCU_INIT_POINTER(priv->tbl, tbl);
@@ -369,8 +376,37 @@ static void nft_hash_destroy(const struct nft_set *set)
        kfree(tbl);
 }
 
+static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
+                             struct nft_set_estimate *est)
+{
+       unsigned int esize;
+
+       esize = sizeof(struct nft_hash_elem);
+       if (features & NFT_SET_MAP)
+               esize += FIELD_SIZEOF(struct nft_hash_elem, data[0]);
+
+       if (desc->size) {
+               est->size = sizeof(struct nft_hash) +
+                           nft_hash_tbl_size(desc->size) *
+                           sizeof(struct nft_hash_elem *) +
+                           desc->size * esize;
+       } else {
+               /* Resizing happens when the load drops below 30% or goes
+                * above 75%. The average of 52.5% load (approximated by 50%)
+                * is used for the size estimation of the hash buckets,
+                * meaning we calculate two buckets per element.
+                */
+               est->size = esize + 2 * sizeof(struct nft_hash_elem *);
+       }
+
+       est->class = NFT_SET_CLASS_O_1;
+
+       return true;
+}
+
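
nft_hash_estimate() reports the expected memory footprint to the set selection code: with a size hint it charges the whole bucket array plus one element per entry, without one it assumes the steady-state load midway between the 30% and 75% resize bounds (treated as 50%), i.e. two bucket pointers per element. The same arithmetic standalone; the esize and hdr values passed below are made-up placeholders for sizeof(struct nft_hash_elem) and sizeof(struct nft_hash):

#include <stdio.h>

/* Mirror of nft_hash_tbl_size(): buckets for ~75% load, power of two, min 4. */
static unsigned long tbl_size(unsigned long nelem)
{
	unsigned long r = 4;

	while (r < nelem * 4 / 3)
		r <<= 1;
	return r;
}

/* esize: per-element size; hdr: fixed per-set overhead (both placeholders). */
static unsigned long estimate(unsigned long size_hint, unsigned long esize,
			      unsigned long hdr)
{
	if (size_hint)
		return hdr + tbl_size(size_hint) * sizeof(void *) +
		       size_hint * esize;
	/* No hint: assume ~50% load, charge two bucket pointers per element. */
	return esize + 2 * sizeof(void *);
}

int main(void)
{
	printf("hinted, 1000 x 48-byte elements: %lu bytes total\n",
	       estimate(1000, 48, 16));
	printf("unhinted, 48-byte elements: %lu bytes per element\n",
	       estimate(0, 48, 16));
	return 0;
}
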
 static struct nft_set_ops nft_hash_ops __read_mostly = {
        .privsize       = nft_hash_privsize,
+       .estimate       = nft_hash_estimate,
        .init           = nft_hash_init,
        .destroy        = nft_hash_destroy,
        .get            = nft_hash_get,
index 7fd2bea8aa239f347dc461c7bc45869dac405573..6404a726d17b78fc6db6f411216195e68db63950 100644 (file)
@@ -56,8 +56,14 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
                return -EINVAL;
 
        set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);
-       if (IS_ERR(set))
-               return PTR_ERR(set);
+       if (IS_ERR(set)) {
+               if (tb[NFTA_LOOKUP_SET_ID]) {
+                       set = nf_tables_set_lookup_byid(ctx->net,
+                                                       tb[NFTA_LOOKUP_SET_ID]);
+               }
+               if (IS_ERR(set))
+                       return PTR_ERR(set);
+       }
 
        priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG]));
        err = nft_validate_input_register(priv->sreg);
index 425cf39af8907f1d0618b0904121073ea4dc116a..852b178c6ae7fa2f7dbd6f7887404033da45971f 100644 (file)
 #include <net/sock.h>
 #include <net/tcp_states.h> /* for TCP_TIME_WAIT */
 #include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_meta.h>
 
-struct nft_meta {
-       enum nft_meta_keys      key:8;
-       union {
-               enum nft_registers      dreg:8;
-               enum nft_registers      sreg:8;
-       };
-};
-
-static void nft_meta_get_eval(const struct nft_expr *expr,
-                             struct nft_data data[NFT_REG_MAX + 1],
-                             const struct nft_pktinfo *pkt)
+void nft_meta_get_eval(const struct nft_expr *expr,
+                      struct nft_data data[NFT_REG_MAX + 1],
+                      const struct nft_pktinfo *pkt)
 {
        const struct nft_meta *priv = nft_expr_priv(expr);
        const struct sk_buff *skb = pkt->skb;
@@ -140,10 +133,11 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
 err:
        data[NFT_REG_VERDICT].verdict = NFT_BREAK;
 }
+EXPORT_SYMBOL_GPL(nft_meta_get_eval);
 
-static void nft_meta_set_eval(const struct nft_expr *expr,
-                             struct nft_data data[NFT_REG_MAX + 1],
-                             const struct nft_pktinfo *pkt)
+void nft_meta_set_eval(const struct nft_expr *expr,
+                      struct nft_data data[NFT_REG_MAX + 1],
+                      const struct nft_pktinfo *pkt)
 {
        const struct nft_meta *meta = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
@@ -163,28 +157,24 @@ static void nft_meta_set_eval(const struct nft_expr *expr,
                WARN_ON(1);
        }
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_eval);
 
-static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
        [NFTA_META_DREG]        = { .type = NLA_U32 },
        [NFTA_META_KEY]         = { .type = NLA_U32 },
        [NFTA_META_SREG]        = { .type = NLA_U32 },
 };
+EXPORT_SYMBOL_GPL(nft_meta_policy);
 
-static int nft_meta_init_validate_set(uint32_t key)
+int nft_meta_get_init(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nlattr * const tb[])
 {
-       switch (key) {
-       case NFT_META_MARK:
-       case NFT_META_PRIORITY:
-       case NFT_META_NFTRACE:
-               return 0;
-       default:
-               return -EOPNOTSUPP;
-       }
-}
+       struct nft_meta *priv = nft_expr_priv(expr);
+       int err;
 
-static int nft_meta_init_validate_get(uint32_t key)
-{
-       switch (key) {
+       priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+       switch (priv->key) {
        case NFT_META_LEN:
        case NFT_META_PROTOCOL:
        case NFT_META_NFPROTO:
@@ -205,39 +195,41 @@ static int nft_meta_init_validate_get(uint32_t key)
 #ifdef CONFIG_NETWORK_SECMARK
        case NFT_META_SECMARK:
 #endif
-               return 0;
+               break;
        default:
                return -EOPNOTSUPP;
        }
 
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       return 0;
 }
+EXPORT_SYMBOL_GPL(nft_meta_get_init);
 
-static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                        const struct nlattr * const tb[])
+int nft_meta_set_init(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nlattr * const tb[])
 {
        struct nft_meta *priv = nft_expr_priv(expr);
        int err;
 
        priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
-
-       if (tb[NFTA_META_DREG]) {
-               err = nft_meta_init_validate_get(priv->key);
-               if (err < 0)
-                       return err;
-
-               priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
-               err = nft_validate_output_register(priv->dreg);
-               if (err < 0)
-                       return err;
-
-               return nft_validate_data_load(ctx, priv->dreg, NULL,
-                                             NFT_DATA_VALUE);
+       switch (priv->key) {
+       case NFT_META_MARK:
+       case NFT_META_PRIORITY:
+       case NFT_META_NFTRACE:
+               break;
+       default:
+               return -EOPNOTSUPP;
        }
 
-       err = nft_meta_init_validate_set(priv->key);
-       if (err < 0)
-               return err;
-
        priv->sreg = ntohl(nla_get_be32(tb[NFTA_META_SREG]));
        err = nft_validate_input_register(priv->sreg);
        if (err < 0)
@@ -245,9 +237,10 @@ static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_init);
 
-static int nft_meta_get_dump(struct sk_buff *skb,
-                            const struct nft_expr *expr)
+int nft_meta_get_dump(struct sk_buff *skb,
+                     const struct nft_expr *expr)
 {
        const struct nft_meta *priv = nft_expr_priv(expr);
 
@@ -260,9 +253,10 @@ static int nft_meta_get_dump(struct sk_buff *skb,
 nla_put_failure:
        return -1;
 }
+EXPORT_SYMBOL_GPL(nft_meta_get_dump);
 
-static int nft_meta_set_dump(struct sk_buff *skb,
-                            const struct nft_expr *expr)
+int nft_meta_set_dump(struct sk_buff *skb,
+                     const struct nft_expr *expr)
 {
        const struct nft_meta *priv = nft_expr_priv(expr);
 
@@ -276,13 +270,14 @@ static int nft_meta_set_dump(struct sk_buff *skb,
 nla_put_failure:
        return -1;
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_dump);
 
 static struct nft_expr_type nft_meta_type;
 static const struct nft_expr_ops nft_meta_get_ops = {
        .type           = &nft_meta_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
        .eval           = nft_meta_get_eval,
-       .init           = nft_meta_init,
+       .init           = nft_meta_get_init,
        .dump           = nft_meta_get_dump,
 };
 
@@ -290,7 +285,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
        .type           = &nft_meta_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
        .eval           = nft_meta_set_eval,
-       .init           = nft_meta_init,
+       .init           = nft_meta_set_init,
        .dump           = nft_meta_set_dump,
 };
 
index e21d69d13506b95946820f24641fe7e48d885866..072e611e9f712299b0ac049d2ca4d94c1b85cd86 100644 (file)
@@ -201,6 +201,7 @@ static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
 }
 
 static int nft_rbtree_init(const struct nft_set *set,
+                          const struct nft_set_desc *desc,
                           const struct nlattr * const nla[])
 {
        struct nft_rbtree *priv = nft_set_priv(set);
@@ -222,8 +223,28 @@ static void nft_rbtree_destroy(const struct nft_set *set)
        }
 }
 
+static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
+                               struct nft_set_estimate *est)
+{
+       unsigned int nsize;
+
+       nsize = sizeof(struct nft_rbtree_elem);
+       if (features & NFT_SET_MAP)
+               nsize += FIELD_SIZEOF(struct nft_rbtree_elem, data[0]);
+
+       if (desc->size)
+               est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
+       else
+               est->size = nsize;
+
+       est->class = NFT_SET_CLASS_O_LOG_N;
+
+       return true;
+}
+
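
nft_rbtree_estimate() is the counterpart for the rbtree backend: no bucket array, just a fixed header plus one node per element, and lookups reported as NFT_SET_CLASS_O_LOG_N rather than O(1). The same estimate standalone; nsize and hdr below are placeholders for sizeof(struct nft_rbtree_elem) and sizeof(struct nft_rbtree):

#include <stdio.h>

/* Same shape as nft_rbtree_estimate(); lookup class would be O(log n). */
static unsigned long rbtree_estimate(unsigned long size_hint,
				     unsigned long nsize, unsigned long hdr)
{
	return size_hint ? hdr + size_hint * nsize : nsize;
}

int main(void)
{
	printf("hinted (1000 x 56 bytes + 24): %lu bytes\n",
	       rbtree_estimate(1000, 56, 24));
	printf("unhinted: %lu bytes per element\n",
	       rbtree_estimate(0, 56, 24));
	return 0;
}
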
 static struct nft_set_ops nft_rbtree_ops __read_mostly = {
        .privsize       = nft_rbtree_privsize,
+       .estimate       = nft_rbtree_estimate,
        .init           = nft_rbtree_init,
        .destroy        = nft_rbtree_destroy,
        .insert         = nft_rbtree_insert,
index 12d4da8e6c7728ed6bcc723f5b583fbfd0dc895c..bbffdbdaf6031bef784042c064dded271d6a406f 100644 (file)
@@ -23,10 +23,11 @@ MODULE_ALIAS("ip6t_bpf");
 static int bpf_mt_check(const struct xt_mtchk_param *par)
 {
        struct xt_bpf_info *info = par->matchinfo;
-       struct sock_fprog program;
+       struct sock_fprog_kern program;
 
        program.len = info->bpf_program_num_elem;
-       program.filter = (struct sock_filter __user *) info->bpf_program;
+       program.filter = info->bpf_program;
+
        if (sk_unattached_filter_create(&info->filter, &program)) {
                pr_info("bpf: check failed: parse error\n");
                return -EINVAL;
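
The hunk above stops abusing struct sock_fprog (whose filter member is a __user pointer) for a kernel-built program and uses struct sock_fprog_kern instead. Plain struct sock_fprog remains the userspace-facing type; for contrast, a self-contained userspace sketch that builds a one-instruction classic BPF program and attaches it with SO_ATTACH_FILTER (the filter simply accepts everything; assumes a Linux host with <linux/filter.h> available):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/filter.h>

int main(void)
{
	/* Classic BPF: unconditionally accept the whole packet. */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,		/* userspace pointer, as the UAPI expects */
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
		       &prog, sizeof(prog)) < 0) {
		perror("SO_ATTACH_FILTER");
		return 1;
	}
	printf("classic BPF filter attached\n");
	return 0;
}
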
index 92f4b6915e8989ee6cb504dd3c84d505bd12b395..e0ccd84d4d6781ab761349e0ac913f6ddc3e8994 100644 (file)
@@ -1364,7 +1364,72 @@ retry:
        return err;
 }
 
-static inline int netlink_capable(const struct socket *sock, unsigned int flag)
+/**
+ * __netlink_ns_capable - General netlink message capability test
+ * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test whether the opener of the socket we received the message from had
+ * the capability @cap when the netlink socket was created and whether the
+ * sender of the message has the capability @cap in @user_ns.
+ */
+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
+                       struct user_namespace *user_ns, int cap)
+{
+       return sk_ns_capable(nsp->sk, user_ns, cap);
+}
+EXPORT_SYMBOL(__netlink_ns_capable);
+
+/**
+ * netlink_ns_capable - General netlink message capability test
+ * @skb: socket buffer holding a netlink command from userspace
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test whether the opener of the socket we received the message from had
+ * the capability @cap when the netlink socket was created and whether the
+ * sender of the message has the capability @cap in @user_ns.
+ */
+bool netlink_ns_capable(const struct sk_buff *skb,
+                       struct user_namespace *user_ns, int cap)
+{
+       return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
+}
+EXPORT_SYMBOL(netlink_ns_capable);
+
+/**
+ * netlink_capable - Netlink global message capability test
+ * @skb: socket buffer holding a netlink command from userspace
+ * @cap: The capability to use
+ *
+ * Test whether the opener of the socket we received the message from had
+ * the capability @cap when the netlink socket was created and whether the
+ * sender of the message has the capability @cap in all user namespaces.
+ */
+bool netlink_capable(const struct sk_buff *skb, int cap)
+{
+       return netlink_ns_capable(skb, &init_user_ns, cap);
+}
+EXPORT_SYMBOL(netlink_capable);
+
+/**
+ * netlink_net_capable - Netlink network namespace message capability test
+ * @skb: socket buffer holding a netlink command from userspace
+ * @cap: The capability to use
+ *
+ * Test whether the opener of the socket we received the message from had
+ * the capability @cap when the netlink socket was created and whether the
+ * sender of the message has the capability @cap over the network namespace
+ * of the socket we received the message from.
+ */
+bool netlink_net_capable(const struct sk_buff *skb, int cap)
+{
+       return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
+}
+EXPORT_SYMBOL(netlink_net_capable);
+
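
The four helpers added above are thin layers over one primitive, sk_ns_capable() on the socket that opened the netlink connection, parameterised only by which user namespace the capability must be held in (an explicit one, the initial one, or the owner of the socket's network namespace). A compilable userspace model of that layering; struct sock, struct sk_buff, struct user_namespace and sk_ns_capable() are stubs here, since the real ones only exist in the kernel:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel objects; the real primitive is sk_ns_capable(). */
struct user_namespace { const char *name; };
struct sock { struct user_namespace *net_user_ns; bool opener_was_admin; };
struct sk_buff { struct sock *sk; };

static struct user_namespace init_user_ns = { "init_user_ns" };

static bool sk_ns_capable(const struct sock *sk,
			  struct user_namespace *user_ns, int cap)
{
	/* Stub: pretend the opener's saved credentials grant cap in user_ns. */
	printf("checking cap %d in %s\n", cap, user_ns->name);
	return sk->opener_was_admin;
}

/* The same delegation chain the patch adds to af_netlink.c. */
static bool netlink_ns_capable(const struct sk_buff *skb,
			       struct user_namespace *user_ns, int cap)
{
	return sk_ns_capable(skb->sk, user_ns, cap);
}

static bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}

static bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	/* In the kernel this is sock_net(skb->sk)->user_ns. */
	return netlink_ns_capable(skb, skb->sk->net_user_ns, cap);
}

int main(void)
{
	struct user_namespace container_ns = { "container_user_ns" };
	struct sock sk = { .net_user_ns = &container_ns,
			   .opener_was_admin = true };
	struct sk_buff skb = { .sk = &sk };

	/* 12 == CAP_NET_ADMIN */
	printf("global: %d, per-netns: %d\n",
	       netlink_capable(&skb, 12), netlink_net_capable(&skb, 12));
	return 0;
}
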
+static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
 {
        return (nl_table[sock->sk->sk_protocol].flags & flag) ||
                ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
@@ -1446,7 +1511,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        /* Only superuser is allowed to listen multicasts */
        if (groups) {
-               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
+               if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
@@ -1516,7 +1581,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                return -EINVAL;
 
        if ((nladdr->nl_groups || nladdr->nl_pid) &&
-           !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+           !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
                return -EPERM;
 
        if (!nlk->portid)
@@ -2122,7 +2187,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                break;
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP: {
-               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
+               if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
@@ -2277,7 +2342,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
                if ((dst_group || dst_portid) &&
-                   !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+                   !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
                        goto out;
        } else {
                dst_portid = nlk->dst_portid;
index b1dcdb932a86ee919642f47f58dd3b716995ad76..a3ba3ca0ff9281dec15c0b4d42002394583c1d8d 100644 (file)
@@ -561,7 +561,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
                return -EOPNOTSUPP;
 
        if ((ops->flags & GENL_ADMIN_PERM) &&
-           !capable(CAP_NET_ADMIN))
+           !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
index 3759add68b1b8af4b4eae50af6eb4a3cd98fd345..71ad7eefddd4dfa69319ca06b3595e1bebca459f 100644 (file)
@@ -71,6 +71,7 @@ static inline int digital_in_send_cmd(struct nfc_digital_dev *ddev,
 void digital_poll_next_tech(struct nfc_digital_dev *ddev);
 
 int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+int digital_in_send_sensb_req(struct nfc_digital_dev *ddev, u8 rf_tech);
 int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech);
 int digital_in_send_iso15693_inv_req(struct nfc_digital_dev *ddev, u8 rf_tech);
 
index e01e15dbf1abe1541aeb2643cf5521e8e1419e75..b105cfb00e76bafe4fa6a3f1af379bd25094a06d 100644 (file)
@@ -22,6 +22,8 @@
 #define DIGITAL_PROTO_NFCA_RF_TECH \
        (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_NFC_DEP_MASK)
 
+#define DIGITAL_PROTO_NFCB_RF_TECH     NFC_PROTO_ISO14443_B_MASK
+
 #define DIGITAL_PROTO_NFCF_RF_TECH \
        (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK)
 
@@ -345,6 +347,12 @@ int digital_target_found(struct nfc_digital_dev *ddev,
                add_crc = digital_skb_add_crc_a;
                break;
 
+       case NFC_PROTO_ISO14443_B:
+               framing = NFC_DIGITAL_FRAMING_NFCB_T4T;
+               check_crc = digital_skb_check_crc_b;
+               add_crc = digital_skb_add_crc_b;
+               break;
+
        default:
                pr_err("Invalid protocol %d\n", protocol);
                return -EINVAL;
@@ -475,6 +483,10 @@ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols,
                digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
                                      digital_in_send_sens_req);
 
+       if (matching_im_protocols & DIGITAL_PROTO_NFCB_RF_TECH)
+               digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106B,
+                                     digital_in_send_sensb_req);
+
        if (matching_im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) {
                digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
                                      digital_in_send_sensf_req);
@@ -635,7 +647,8 @@ static void digital_in_send_complete(struct nfc_digital_dev *ddev, void *arg,
                goto done;
        }
 
-       if (ddev->curr_protocol == NFC_PROTO_ISO14443) {
+       if ((ddev->curr_protocol == NFC_PROTO_ISO14443) ||
+           (ddev->curr_protocol == NFC_PROTO_ISO14443_B)) {
                rc = digital_in_iso_dep_pull_sod(ddev, resp);
                if (rc)
                        goto done;
@@ -676,7 +689,8 @@ static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target,
                goto exit;
        }
 
-       if (ddev->curr_protocol == NFC_PROTO_ISO14443) {
+       if ((ddev->curr_protocol == NFC_PROTO_ISO14443) ||
+           (ddev->curr_protocol == NFC_PROTO_ISO14443_B)) {
                rc = digital_in_iso_dep_push_sod(ddev, skb);
                if (rc)
                        goto exit;
@@ -747,6 +761,8 @@ struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
                ddev->protocols |= NFC_PROTO_ISO15693_MASK;
        if (supported_protocols & NFC_PROTO_ISO14443_MASK)
                ddev->protocols |= NFC_PROTO_ISO14443_MASK;
+       if (supported_protocols & NFC_PROTO_ISO14443_B_MASK)
+               ddev->protocols |= NFC_PROTO_ISO14443_B_MASK;
 
        ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN;
        ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN;
index 278c3fed27e01f255374713ab2c0a545752671ee..12a233e9ece593d89e7f6df2b63c13c5e6d21275 100644 (file)
 #define DIGITAL_MIFARE_READ_RES_LEN 16
 #define DIGITAL_MIFARE_ACK_RES 0x0A
 
+#define DIGITAL_CMD_SENSB_REQ                  0x05
+#define DIGITAL_SENSB_ADVANCED                 BIT(5)
+#define DIGITAL_SENSB_EXTENDED                 BIT(4)
+#define DIGITAL_SENSB_ALLB_REQ                 BIT(3)
+#define DIGITAL_SENSB_N(n)                     ((n) & 0x7)
+
+#define DIGITAL_CMD_SENSB_RES                  0x50
+
+#define DIGITAL_CMD_ATTRIB_REQ                 0x1D
+#define DIGITAL_ATTRIB_P1_TR0_DEFAULT          (0x0 << 6)
+#define DIGITAL_ATTRIB_P1_TR1_DEFAULT          (0x0 << 4)
+#define DIGITAL_ATTRIB_P1_SUPRESS_EOS          BIT(3)
+#define DIGITAL_ATTRIB_P1_SUPRESS_SOS          BIT(2)
+#define DIGITAL_ATTRIB_P2_LISTEN_POLL_1                (0x0 << 6)
+#define DIGITAL_ATTRIB_P2_POLL_LISTEN_1                (0x0 << 4)
+#define DIGITAL_ATTRIB_P2_MAX_FRAME_256                0x8
+#define DIGITAL_ATTRIB_P4_DID(n)               ((n) & 0xf)
+
 #define DIGITAL_CMD_SENSF_REQ  0x00
 #define DIGITAL_CMD_SENSF_RES  0x01
 
@@ -75,6 +93,7 @@ static const u8 digital_ats_fsc[] = {
 };
 
 #define DIGITAL_ATS_FSCI(t0) ((t0) & 0x0F)
+#define DIGITAL_SENSB_FSCI(pi2) (((pi2) & 0xF0) >> 4)
 #define DIGITAL_ATS_MAX_FSC  256
 
 #define DIGITAL_RATS_BYTE1 0xE0
@@ -92,6 +111,32 @@ struct digital_sel_req {
        u8 bcc;
 } __packed;
 
+struct digital_sensb_req {
+       u8 cmd;
+       u8 afi;
+       u8 param;
+} __packed;
+
+struct digital_sensb_res {
+       u8 cmd;
+       u8 nfcid0[4];
+       u8 app_data[4];
+       u8 proto_info[3];
+} __packed;
+
+struct digital_attrib_req {
+       u8 cmd;
+       u8 nfcid0[4];
+       u8 param1;
+       u8 param2;
+       u8 param3;
+       u8 param4;
+} __packed;
+
+struct digital_attrib_res {
+       u8 mbli_did;
+} __packed;
+
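
The NFC-B additions map the wire frames directly onto __packed structs, so sizeof() is the exact on-air length (SENSB_REQ 3 bytes, SENSB_RES 12, ATTRIB_REQ 9, the ATTRIB response header 1) and length checks such as resp->len < sizeof(*attrib_res) stay honest. The same structs compiled standalone with the sizes asserted; the u8 typedef and __packed macro below are local substitutes for the kernel's:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
#define __packed __attribute__((packed))

struct digital_sensb_req {
	u8 cmd;
	u8 afi;
	u8 param;
} __packed;

struct digital_sensb_res {
	u8 cmd;
	u8 nfcid0[4];
	u8 app_data[4];
	u8 proto_info[3];
} __packed;

struct digital_attrib_req {
	u8 cmd;
	u8 nfcid0[4];
	u8 param1;
	u8 param2;
	u8 param3;
	u8 param4;
} __packed;

struct digital_attrib_res {
	u8 mbli_did;
} __packed;

int main(void)
{
	/* Wire lengths the parsing code relies on. */
	_Static_assert(sizeof(struct digital_sensb_req) == 3, "SENSB_REQ");
	_Static_assert(sizeof(struct digital_sensb_res) == 12, "SENSB_RES");
	_Static_assert(sizeof(struct digital_attrib_req) == 9, "ATTRIB_REQ");
	_Static_assert(sizeof(struct digital_attrib_res) == 1, "ATTRIB_RES");
	printf("frame sizes: %zu %zu %zu %zu\n",
	       sizeof(struct digital_sensb_req),
	       sizeof(struct digital_sensb_res),
	       sizeof(struct digital_attrib_req),
	       sizeof(struct digital_attrib_res));
	return 0;
}
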
 struct digital_sensf_req {
        u8 cmd;
        u8 sc1;
@@ -531,6 +576,175 @@ int digital_in_recv_mifare_res(struct sk_buff *resp)
        return -EIO;
 }
 
+static void digital_in_recv_attrib_res(struct nfc_digital_dev *ddev, void *arg,
+                                      struct sk_buff *resp)
+{
+       struct nfc_target *target = arg;
+       struct digital_attrib_res *attrib_res;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (resp->len < sizeof(*attrib_res)) {
+               PROTOCOL_ERR("12.6.2");
+               rc = -EIO;
+               goto exit;
+       }
+
+       attrib_res = (struct digital_attrib_res *)resp->data;
+
+       if (attrib_res->mbli_did & 0x0f) {
+               PROTOCOL_ERR("12.6.2.1");
+               rc = -EIO;
+               goto exit;
+       }
+
+       rc = digital_target_found(ddev, target, NFC_PROTO_ISO14443_B);
+
+exit:
+       dev_kfree_skb(resp);
+       kfree(target);
+
+       if (rc)
+               digital_poll_next_tech(ddev);
+}
+
+int digital_in_send_attrib_req(struct nfc_digital_dev *ddev,
+                              struct nfc_target *target,
+                              struct digital_sensb_res *sensb_res)
+{
+       struct digital_attrib_req *attrib_req;
+       struct sk_buff *skb;
+       int rc;
+
+       skb = digital_skb_alloc(ddev, sizeof(*attrib_req));
+       if (!skb)
+               return -ENOMEM;
+
+       attrib_req = (struct digital_attrib_req *)skb_put(skb,
+                                                         sizeof(*attrib_req));
+
+       attrib_req->cmd = DIGITAL_CMD_ATTRIB_REQ;
+       memcpy(attrib_req->nfcid0, sensb_res->nfcid0,
+              sizeof(attrib_req->nfcid0));
+       attrib_req->param1 = DIGITAL_ATTRIB_P1_TR0_DEFAULT |
+                            DIGITAL_ATTRIB_P1_TR1_DEFAULT;
+       attrib_req->param2 = DIGITAL_ATTRIB_P2_LISTEN_POLL_1 |
+                            DIGITAL_ATTRIB_P2_POLL_LISTEN_1 |
+                            DIGITAL_ATTRIB_P2_MAX_FRAME_256;
+       attrib_req->param3 = sensb_res->proto_info[1] & 0x07;
+       attrib_req->param4 = DIGITAL_ATTRIB_P4_DID(0);
+
+       rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_attrib_res,
+                                target);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
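
digital_in_send_attrib_req() above assembles the four ATTRIB parameter bytes purely from the bit macros plus the low three bits of the SENSB_RES protocol info, so the resulting octets are easy to check by hand. A standalone reproduction of that byte assembly; the proto_info value used below is an arbitrary example, not taken from a real tag:

#include <stdio.h>

#define DIGITAL_ATTRIB_P1_TR0_DEFAULT		(0x0 << 6)
#define DIGITAL_ATTRIB_P1_TR1_DEFAULT		(0x0 << 4)
#define DIGITAL_ATTRIB_P2_LISTEN_POLL_1		(0x0 << 6)
#define DIGITAL_ATTRIB_P2_POLL_LISTEN_1		(0x0 << 4)
#define DIGITAL_ATTRIB_P2_MAX_FRAME_256		0x8
#define DIGITAL_ATTRIB_P4_DID(n)		((n) & 0xf)

int main(void)
{
	unsigned int proto_info1 = 0x85;	/* example PI byte from a SENSB_RES */
	unsigned int param1, param2, param3, param4;

	param1 = DIGITAL_ATTRIB_P1_TR0_DEFAULT | DIGITAL_ATTRIB_P1_TR1_DEFAULT;
	param2 = DIGITAL_ATTRIB_P2_LISTEN_POLL_1 |
		 DIGITAL_ATTRIB_P2_POLL_LISTEN_1 |
		 DIGITAL_ATTRIB_P2_MAX_FRAME_256;
	param3 = proto_info1 & 0x07;	/* same masking as the patch */
	param4 = DIGITAL_ATTRIB_P4_DID(0);

	printf("ATTRIB params: %02x %02x %02x %02x\n",
	       param1, param2, param3, param4);	/* prints 00 08 05 00 */
	return 0;
}
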
+static void digital_in_recv_sensb_res(struct nfc_digital_dev *ddev, void *arg,
+                                     struct sk_buff *resp)
+{
+       struct nfc_target *target = NULL;
+       struct digital_sensb_res *sensb_res;
+       u8 fsci;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (resp->len != sizeof(*sensb_res)) {
+               PROTOCOL_ERR("5.6.2.1");
+               rc = -EIO;
+               goto exit;
+       }
+
+       sensb_res = (struct digital_sensb_res *)resp->data;
+
+       if (sensb_res->cmd != DIGITAL_CMD_SENSB_RES) {
+               PROTOCOL_ERR("5.6.2");
+               rc = -EIO;
+               goto exit;
+       }
+
+       if (!(sensb_res->proto_info[1] & BIT(0))) {
+               PROTOCOL_ERR("5.6.2.12");
+               rc = -EIO;
+               goto exit;
+       }
+
+       if (sensb_res->proto_info[1] & BIT(3)) {
+               PROTOCOL_ERR("5.6.2.16");
+               rc = -EIO;
+               goto exit;
+       }
+
+       fsci = DIGITAL_SENSB_FSCI(sensb_res->proto_info[1]);
+       if (fsci >= 8)
+               ddev->target_fsc = DIGITAL_ATS_MAX_FSC;
+       else
+               ddev->target_fsc = digital_ats_fsc[fsci];
+
+       target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+       if (!target) {
+               rc = -ENOMEM;
+               goto exit;
+       }
+
+       rc = digital_in_send_attrib_req(ddev, target, sensb_res);
+
+exit:
+       dev_kfree_skb(resp);
+
+       if (rc) {
+               kfree(target);
+               digital_poll_next_tech(ddev);
+       }
+}
+
+int digital_in_send_sensb_req(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+       struct digital_sensb_req *sensb_req;
+       struct sk_buff *skb;
+       int rc;
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+                                    NFC_DIGITAL_RF_TECH_106B);
+       if (rc)
+               return rc;
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                                    NFC_DIGITAL_FRAMING_NFCB);
+       if (rc)
+               return rc;
+
+       skb = digital_skb_alloc(ddev, sizeof(*sensb_req));
+       if (!skb)
+               return -ENOMEM;
+
+       sensb_req = (struct digital_sensb_req *)skb_put(skb,
+                                                       sizeof(*sensb_req));
+
+       sensb_req->cmd = DIGITAL_CMD_SENSB_REQ;
+       sensb_req->afi = 0x00; /* All families and sub-families */
+       sensb_req->param = DIGITAL_SENSB_N(0);
+
+       rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensb_res,
+                                NULL);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
 static void digital_in_recv_sensf_res(struct nfc_digital_dev *ddev, void *arg,
                                   struct sk_buff *resp)
 {
@@ -877,6 +1091,18 @@ exit:
        dev_kfree_skb(resp);
 }
 
+static void digital_tg_recv_atr_or_sensf_req(struct nfc_digital_dev *ddev,
+               void *arg, struct sk_buff *resp)
+{
+       if (!IS_ERR(resp) && (resp->len >= 2) &&
+                       (resp->data[1] == DIGITAL_CMD_SENSF_REQ))
+               digital_tg_recv_sensf_req(ddev, arg, resp);
+       else
+               digital_tg_recv_atr_req(ddev, arg, resp);
+
+       return;
+}
+
 static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
                              struct digital_sensf_req *sensf_req)
 {
@@ -887,7 +1113,7 @@ static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
 
        size = sizeof(struct digital_sensf_res);
 
-       if (sensf_req->rc != DIGITAL_SENSF_REQ_RC_NONE)
+       if (sensf_req->rc == DIGITAL_SENSF_REQ_RC_NONE)
                size -= sizeof(sensf_res->rd);
 
        skb = digital_skb_alloc(ddev, size);
@@ -922,7 +1148,7 @@ static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
                digital_skb_add_crc_f(skb);
 
        rc = digital_tg_send_cmd(ddev, skb, 300,
-                                digital_tg_recv_atr_req, NULL);
+                                digital_tg_recv_atr_or_sensf_req, NULL);
        if (rc)
                kfree_skb(skb);
 
index a9f4d2e62d8de267a94a689fe3fab53a1a79339d..677d24bb70f8af24569ef574404ba4b1b9e1c9f0 100644 (file)
@@ -26,6 +26,8 @@
 
 #include "hci.h"
 
+#define MAX_FWI 4949
+
 static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                               const u8 *param, size_t param_len,
                               data_exchange_cb_t cb, void *cb_context)
@@ -37,7 +39,7 @@ static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
         * for all commands?
         */
        return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd,
-                                     param, param_len, cb, cb_context, 3000);
+                                     param, param_len, cb, cb_context, MAX_FWI);
 }
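
The patch does not say where 4949 comes from, but it matches the largest ISO/IEC 14443-4 frame waiting time: FWT = (256 * 16 / fc) * 2^FWI with fc = 13.56 MHz and FWI capped at 14 gives roughly 4949 ms, which would explain why a flat 3000 ms command timeout could expire before a slow tag answers. That derivation is an assumption about the constant's origin; a quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	const double fc = 13.56e6;	/* ISO/IEC 14443 carrier frequency, Hz */
	const int fwi_max = 14;		/* largest allowed frame waiting integer */
	double fwt_s = (256.0 * 16.0 / fc) * (double)(1 << fwi_max);

	printf("max FWT = %.1f ms\n", fwt_s * 1000.0);	/* ~4949.0 ms */
	return 0;
}
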
 
 /*
@@ -82,7 +84,7 @@ static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                                                    NFC_HCI_HCP_COMMAND, cmd,
                                                    param, param_len,
                                                    nfc_hci_execute_cb, &hcp_ew,
-                                                   3000);
+                                                   MAX_FWI);
        if (hcp_ew.exec_result < 0)
                return hcp_ew.exec_result;
 
index d45b638e77c78ec25b8228eacf2ea0ac754ad8d7..47403705197e85930d9f3e480d2e2d5dbd2012fa 100644 (file)
@@ -225,7 +225,7 @@ int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
                        goto exit;
                }
 
-               targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data);
+               targets->sens_res = be16_to_cpu(*(__be16 *)atqa_skb->data);
                targets->sel_res = sak_skb->data[0];
 
                r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
@@ -380,34 +380,31 @@ static int hci_dev_session_init(struct nfc_hci_dev *hdev)
        if (r < 0)
                goto disconnect_all;
 
-       if (skb->len && skb->len == strlen(hdev->init_data.session_id))
-               if (memcmp(hdev->init_data.session_id, skb->data,
-                          skb->len) == 0) {
-                       /* TODO ELa: restore gate<->pipe table from
-                        * some TBD location.
-                        * note: it doesn't seem possible to get the chip
-                        * currently open gate/pipe table.
-                        * It is only possible to obtain the supported
-                        * gate list.
-                        */
+       if (skb->len && skb->len == strlen(hdev->init_data.session_id) &&
+               (memcmp(hdev->init_data.session_id, skb->data,
+                          skb->len) == 0) && hdev->ops->load_session) {
+               /* Restore gate<->pipe table from some proprietary location. */
 
-                       /* goto exit
-                        * For now, always do a full initialization */
-               }
+               r = hdev->ops->load_session(hdev);
 
-       r = nfc_hci_disconnect_all_gates(hdev);
-       if (r < 0)
-               goto exit;
+               if (r < 0)
+                       goto disconnect_all;
+       } else {
 
-       r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
-                                 hdev->init_data.gates);
-       if (r < 0)
-               goto disconnect_all;
+               r = nfc_hci_disconnect_all_gates(hdev);
+               if (r < 0)
+                       goto exit;
 
-       r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
-                             NFC_HCI_ADMIN_SESSION_IDENTITY,
-                             hdev->init_data.session_id,
-                             strlen(hdev->init_data.session_id));
+               r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
+                                         hdev->init_data.gates);
+               if (r < 0)
+                       goto disconnect_all;
+
+               r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+                               NFC_HCI_ADMIN_SESSION_IDENTITY,
+                               hdev->init_data.session_id,
+                               strlen(hdev->init_data.session_id));
+       }
        if (r == 0)
                goto exit;
 
index bec6ed15f5037ef9ffbc75f93e450e9ef1fd81b7..a3ad69a4c648c76779bf496153bfa7eb01f5354c 100644 (file)
@@ -387,7 +387,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
 
        __net_timestamp(skb);
 
-       nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_TX);
+       nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);
 
        return nfc_data_exchange(dev, local->target_idx, skb,
                                 nfc_llcp_recv, local);
index b4671958fcf935c3c3d984afc18f473bc882eae4..f6278da68763d2545af32d614278ccbe579a3a25 100644 (file)
@@ -680,16 +680,17 @@ void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
                        continue;
 
                if (skb_copy == NULL) {
-                       skb_copy = __pskb_copy(skb, NFC_LLCP_RAW_HEADER_SIZE,
+                       skb_copy = __pskb_copy(skb, NFC_RAW_HEADER_SIZE,
                                               GFP_ATOMIC);
 
                        if (skb_copy == NULL)
                                continue;
 
-                       data = skb_push(skb_copy, NFC_LLCP_RAW_HEADER_SIZE);
+                       data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE);
 
                        data[0] = local->dev ? local->dev->idx : 0xFF;
-                       data[1] = direction;
+                       data[1] = direction & 0x01;
+                       data[1] |= (RAW_PAYLOAD_LLCP << 1);
                }
 
                nskb = skb_clone(skb_copy, GFP_ATOMIC);
@@ -747,7 +748,7 @@ static void nfc_llcp_tx_work(struct work_struct *work)
                        __net_timestamp(skb);
 
                        nfc_llcp_send_to_raw_sock(local, skb,
-                                                 NFC_LLCP_DIRECTION_TX);
+                                                 NFC_DIRECTION_TX);
 
                        ret = nfc_data_exchange(local->dev, local->target_idx,
                                                skb, nfc_llcp_recv, local);
@@ -1476,7 +1477,7 @@ static void nfc_llcp_rx_work(struct work_struct *work)
 
        __net_timestamp(skb);
 
-       nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_RX);
+       nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_RX);
 
        nfc_llcp_rx_skb(local, skb);
 
index 6c34ac978501705a0c743289b34b2f62750bc441..2b400e1a869522b9ecbca787a0ff2938f6109c26 100644 (file)
@@ -861,6 +861,10 @@ static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);
 
+       /* Send copy to sniffer */
+       nfc_send_to_raw_sock(ndev->nfc_dev, skb,
+                            RAW_PAYLOAD_NCI, NFC_DIRECTION_TX);
+
        return ndev->ops->send(ndev, skb);
 }
 
@@ -935,6 +939,11 @@ static void nci_rx_work(struct work_struct *work)
        struct sk_buff *skb;
 
        while ((skb = skb_dequeue(&ndev->rx_q))) {
+
+               /* Send copy to sniffer */
+               nfc_send_to_raw_sock(ndev->nfc_dev, skb,
+                                    RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
+
                /* Process frame */
                switch (nci_mt(skb->data)) {
                case NCI_MT_RSP_PKT:
index 1e905097456b77501bc0a950aa932c45205a8eb1..f8f6af231381b336b111a9c01ce0ccb2dfc72856 100644 (file)
@@ -366,7 +366,6 @@ static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
                        struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
 {
        struct activation_params_poll_nfc_dep *poll;
-       int i;
 
        switch (ntf->activation_rf_tech_and_mode) {
        case NCI_NFC_A_PASSIVE_POLL_MODE:
@@ -374,10 +373,8 @@ static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
                poll = &ntf->activation_params.poll_nfc_dep;
                poll->atr_res_len = min_t(__u8, *data++, 63);
                pr_debug("atr_res_len %d\n", poll->atr_res_len);
-               if (poll->atr_res_len > 0) {
-                       for (i = 0; i < poll->atr_res_len; i++)
-                               poll->atr_res[poll->atr_res_len-1-i] = data[i];
-               }
+               if (poll->atr_res_len > 0)
+                       memcpy(poll->atr_res, data, poll->atr_res_len);
                break;
 
        default:
index 9d6e74f7e6b34ba5ed0d9022a96fdc8061b479c7..88d60064890e3a9ca93db4c88fdcf840e077fabe 100644 (file)
@@ -40,6 +40,12 @@ struct nfc_rawsock {
        struct work_struct tx_work;
        bool tx_work_scheduled;
 };
+
+struct nfc_sock_list {
+       struct hlist_head head;
+       rwlock_t          lock;
+};
+
 #define nfc_rawsock(sk) ((struct nfc_rawsock *) sk)
 #define to_rawsock_sk(_tx_work) \
        ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))
index c27a6e86cae459f0f70c2c4875614edac3518f98..8627c75063e232d218d813344067347711cb367e 100644 (file)
 
 #include "nfc.h"
 
+static struct nfc_sock_list raw_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(raw_sk_list.lock)
+};
+
+void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk)
+{
+       write_lock(&l->lock);
+       sk_add_node(sk, &l->head);
+       write_unlock(&l->lock);
+}
+
+void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk)
+{
+       write_lock(&l->lock);
+       sk_del_node_init(sk);
+       write_unlock(&l->lock);
+}
+
 static void rawsock_write_queue_purge(struct sock *sk)
 {
        pr_debug("sk=%p\n", sk);
@@ -57,6 +75,9 @@ static int rawsock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       if (sock->type == SOCK_RAW)
+               nfc_sock_unlink(&raw_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
 
@@ -275,6 +296,26 @@ static const struct proto_ops rawsock_ops = {
        .mmap           = sock_no_mmap,
 };
 
+static const struct proto_ops rawsock_raw_ops = {
+       .family         = PF_NFC,
+       .owner          = THIS_MODULE,
+       .release        = rawsock_release,
+       .bind           = sock_no_bind,
+       .connect        = sock_no_connect,
+       .socketpair     = sock_no_socketpair,
+       .accept         = sock_no_accept,
+       .getname        = sock_no_getname,
+       .poll           = datagram_poll,
+       .ioctl          = sock_no_ioctl,
+       .listen         = sock_no_listen,
+       .shutdown       = sock_no_shutdown,
+       .setsockopt     = sock_no_setsockopt,
+       .getsockopt     = sock_no_getsockopt,
+       .sendmsg        = sock_no_sendmsg,
+       .recvmsg        = rawsock_recvmsg,
+       .mmap           = sock_no_mmap,
+};
+
 static void rawsock_destruct(struct sock *sk)
 {
        pr_debug("sk=%p\n", sk);
@@ -300,10 +341,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
 
        pr_debug("sock=%p\n", sock);
 
-       if (sock->type != SOCK_SEQPACKET)
+       if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
                return -ESOCKTNOSUPPORT;
 
-       sock->ops = &rawsock_ops;
+       if (sock->type == SOCK_RAW)
+               sock->ops = &rawsock_raw_ops;
+       else
+               sock->ops = &rawsock_ops;
 
        sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
        if (!sk)
@@ -313,13 +357,53 @@ static int rawsock_create(struct net *net, struct socket *sock,
        sk->sk_protocol = nfc_proto->id;
        sk->sk_destruct = rawsock_destruct;
        sock->state = SS_UNCONNECTED;
-
-       INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
-       nfc_rawsock(sk)->tx_work_scheduled = false;
+       if (sock->type == SOCK_RAW)
+               nfc_sock_link(&raw_sk_list, sk);
+       else {
+               INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
+               nfc_rawsock(sk)->tx_work_scheduled = false;
+       }
 
        return 0;
 }
 
+void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
+                         u8 payload_type, u8 direction)
+{
+       struct sk_buff *skb_copy = NULL, *nskb;
+       struct sock *sk;
+       u8 *data;
+
+       read_lock(&raw_sk_list.lock);
+
+       sk_for_each(sk, &raw_sk_list.head) {
+               if (!skb_copy) {
+                       skb_copy = __pskb_copy(skb, NFC_RAW_HEADER_SIZE,
+                                    GFP_ATOMIC);
+                       if (!skb_copy)
+                               continue;
+
+                       data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE);
+
+                       data[0] = dev ? dev->idx : 0xFF;
+                       data[1] = direction & 0x01;
+                       data[1] |= (payload_type << 1);
+               }
+
+               nskb = skb_clone(skb_copy, GFP_ATOMIC);
+               if (!nskb)
+                       continue;
+
+               if (sock_queue_rcv_skb(sk, nskb))
+                       kfree_skb(nskb);
+       }
+
+       read_unlock(&raw_sk_list.lock);
+
+       kfree_skb(skb_copy);
+}
+EXPORT_SYMBOL(nfc_send_to_raw_sock);
+
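
Every frame copied to a raw NFC socket is prefixed with the two-byte pseudo-header built above: byte 0 is the device index (0xFF if unknown), byte 1 carries the direction in bit 0 and the payload type (RAW_PAYLOAD_LLCP, RAW_PAYLOAD_NCI, ...) in the remaining bits. A hedged userspace sketch of a sniffer that reads such a socket and decodes that header; it assumes an AF_NFC/SOCK_RAW socket with protocol NFC_SOCKPROTO_RAW from <linux/nfc.h> on a kernel carrying this patch, and the AF_NFC fallback value 39 is taken from the kernel's socket family numbering:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/nfc.h>

#ifndef AF_NFC
#define AF_NFC 39	/* PF_NFC in linux/socket.h */
#endif

int main(void)
{
	unsigned char buf[1024];
	int fd = socket(AF_NFC, SOCK_RAW, NFC_SOCKPROTO_RAW);

	if (fd < 0) {
		perror("socket(AF_NFC, SOCK_RAW)");
		return 1;
	}

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);

		if (len < 2)
			break;
		/* Pseudo-header added by nfc_send_to_raw_sock(). */
		printf("dev=%d dir_bit=%d payload_type=%d payload=%zd bytes\n",
		       buf[0], buf[1] & 0x01, buf[1] >> 1, len - 2);
	}
	close(fd);
	return 0;
}
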
 static struct proto rawsock_proto = {
        .name     = "NFC_RAW",
        .owner    = THIS_MODULE,
index 2c77e7b1a913241d85f11d19c25b5e182b5757f1..c36856a457ca963c735e89e36478a53ba60bb453 100644 (file)
@@ -134,8 +134,8 @@ static int set_eth_addr(struct sk_buff *skb,
 
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
-       memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
-       memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
+       ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
+       ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);
 
        ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
index a3276e3c4feb065278b5195652378157619ff94c..0d407bca81e3573983bc47791dddf5561d4f8ee1 100644 (file)
 #include <linux/netfilter_ipv4.h>
 #include <linux/inetdevice.h>
 #include <linux/list.h>
-#include <linux/lockdep.h>
 #include <linux/openvswitch.h>
 #include <linux/rculist.h>
 #include <linux/dmi.h>
-#include <linux/workqueue.h>
+#include <linux/genetlink.h>
+#include <net/genetlink.h>
 #include <net/genetlink.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
 int ovs_net_id __read_mostly;
 
+static struct genl_family dp_packet_genl_family;
+static struct genl_family dp_flow_genl_family;
+static struct genl_family dp_datapath_genl_family;
+
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
+       .name = OVS_FLOW_MCGROUP
+};
+
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
+       .name = OVS_DATAPATH_MCGROUP
+};
+
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
+       .name = OVS_VPORT_MCGROUP
+};
+
+/* Check if we need to build a reply message.
+ * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
+static bool ovs_must_notify(struct genl_info *info,
+                           const struct genl_multicast_group *grp)
+{
+       return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
+               netlink_has_listeners(genl_info_net(info)->genl_sock, 0);
+}
+
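
ovs_must_notify() above lets the flow ops skip allocating a reply skb unless userspace explicitly asked for an echo (NLM_F_ECHO) or someone is listening on the multicast group. From the userspace side, requesting that echo is just a flag in the request header; a minimal sketch of such a header, with the family id as a placeholder and the generic netlink payload omitted:

#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>

int main(void)
{
	struct nlmsghdr nlh;

	memset(&nlh, 0, sizeof(nlh));
	nlh.nlmsg_len = NLMSG_LENGTH(0);	/* header only in this sketch */
	nlh.nlmsg_type = 0x10;			/* placeholder for the OVS flow family id */
	nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO;
	nlh.nlmsg_seq = 1;

	printf("flags=%#x, echo requested: %s\n", (unsigned)nlh.nlmsg_flags,
	       (nlh.nlmsg_flags & NLM_F_ECHO) ? "yes" : "no");
	return 0;
}
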
 static void ovs_notify(struct genl_family *family,
                       struct sk_buff *skb, struct genl_info *info)
 {
@@ -173,6 +198,7 @@ static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
 }
 
+/* Called with ovs_mutex or RCU read lock. */
 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
 {
        struct vport *vport;
@@ -262,16 +288,6 @@ out:
        u64_stats_update_end(&stats->syncp);
 }
 
-static struct genl_family dp_packet_genl_family = {
-       .id = GENL_ID_GENERATE,
-       .hdrsize = sizeof(struct ovs_header),
-       .name = OVS_PACKET_FAMILY,
-       .version = OVS_PACKET_VERSION,
-       .maxattr = OVS_PACKET_ATTR_MAX,
-       .netnsok = true,
-       .parallel_ops = true,
-};
-
 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
 {
@@ -524,7 +540,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
                packet->protocol = htons(ETH_P_802_2);
 
        /* Build an sw_flow for sending this packet. */
-       flow = ovs_flow_alloc(false);
+       flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;
@@ -590,6 +606,18 @@ static const struct genl_ops dp_packet_genl_ops[] = {
        }
 };
 
+static struct genl_family dp_packet_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_PACKET_FAMILY,
+       .version = OVS_PACKET_VERSION,
+       .maxattr = OVS_PACKET_ATTR_MAX,
+       .netnsok = true,
+       .parallel_ops = true,
+       .ops = dp_packet_genl_ops,
+       .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
+};
+
 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
                         struct ovs_dp_megaflow_stats *mega_stats)
 {
@@ -621,26 +649,6 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
        }
 }
 
-static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
-       [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
-       [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
-       [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
-};
-
-static struct genl_family dp_flow_genl_family = {
-       .id = GENL_ID_GENERATE,
-       .hdrsize = sizeof(struct ovs_header),
-       .name = OVS_FLOW_FAMILY,
-       .version = OVS_FLOW_VERSION,
-       .maxattr = OVS_FLOW_ATTR_MAX,
-       .netnsok = true,
-       .parallel_ops = true,
-};
-
-static struct genl_multicast_group ovs_dp_flow_multicast_group = {
-       .name = OVS_FLOW_MCGROUP
-};
-
 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 {
        return NLMSG_ALIGN(sizeof(struct ovs_header))
@@ -652,8 +660,8 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
                + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
 }
 
-/* Called with ovs_mutex. */
-static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
 {
@@ -670,7 +678,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
        if (!ovs_header)
                return -EMSGSIZE;
 
-       ovs_header->dp_ifindex = get_dpifindex(dp);
+       ovs_header->dp_ifindex = dp_ifindex;
 
        /* Fill flow key. */
        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
@@ -693,6 +701,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
        nla_nest_end(skb, nla);
 
        ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
+
        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
                goto nla_put_failure;
@@ -720,9 +729,9 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                const struct sw_flow_actions *sf_acts;
 
                sf_acts = rcu_dereference_ovsl(flow->sf_acts);
-
                err = ovs_nla_put_actions(sf_acts->actions,
                                          sf_acts->actions_len, skb);
+
                if (!err)
                        nla_nest_end(skb, start);
                else {
@@ -743,113 +752,128 @@ error:
        return err;
 }
 
-static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow,
-                                              struct genl_info *info)
+/* May not be called with RCU read lock. */
+static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
+                                              struct genl_info *info,
+                                              bool always)
 {
-       size_t len;
+       struct sk_buff *skb;
+
+       if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
+               return NULL;
 
-       len = ovs_flow_cmd_msg_size(ovsl_dereference(flow->sf_acts));
+       skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
 
-       return genlmsg_new_unicast(len, info, GFP_KERNEL);
+       return skb;
 }
 
-static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
-                                              struct datapath *dp,
-                                              struct genl_info *info,
-                                              u8 cmd)
+/* Called with ovs_mutex. */
+static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
+                                              int dp_ifindex,
+                                              struct genl_info *info, u8 cmd,
+                                              bool always)
 {
        struct sk_buff *skb;
        int retval;
 
-       skb = ovs_flow_cmd_alloc_info(flow, info);
-       if (!skb)
-               return ERR_PTR(-ENOMEM);
+       skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
+                                     always);
+       if (!skb || IS_ERR(skb))
+               return skb;
 
-       retval = ovs_flow_cmd_fill_info(flow, dp, skb, info->snd_portid,
-                                       info->snd_seq, 0, cmd);
+       retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
+                                       info->snd_portid, info->snd_seq, 0,
+                                       cmd);
        BUG_ON(retval < 0);
        return skb;
 }
 
-static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
+static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
 {
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
-       struct sw_flow_key key, masked_key;
-       struct sw_flow *flow = NULL;
+       struct sw_flow *flow, *new_flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply;
        struct datapath *dp;
-       struct sw_flow_actions *acts = NULL;
+       struct sw_flow_actions *acts;
        struct sw_flow_match match;
-       bool exact_5tuple;
        int error;
 
-       /* Extract key. */
+       /* Must have key and actions. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;
+       if (!a[OVS_FLOW_ATTR_ACTIONS])
+               goto error;
 
-       ovs_match_init(&match, &key, &mask);
-       error = ovs_nla_get_match(&match, &exact_5tuple,
+       /* Most of the time we need to allocate a new flow, do it before
+        * locking.
+        */
+       new_flow = ovs_flow_alloc();
+       if (IS_ERR(new_flow)) {
+               error = PTR_ERR(new_flow);
+               goto error;
+       }
+
+       /* Extract key. */
+       ovs_match_init(&match, &new_flow->unmasked_key, &mask);
+       error = ovs_nla_get_match(&match,
                                  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
        if (error)
-               goto error;
+               goto err_kfree_flow;
+
+       ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
 
        /* Validate actions. */
-       if (a[OVS_FLOW_ATTR_ACTIONS]) {
-               acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
-               error = PTR_ERR(acts);
-               if (IS_ERR(acts))
-                       goto error;
+       acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+       error = PTR_ERR(acts);
+       if (IS_ERR(acts))
+               goto err_kfree_flow;
 
-               ovs_flow_mask_key(&masked_key, &key, &mask);
-               error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
-                                            &masked_key, 0, &acts);
-               if (error) {
-                       OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
-                       goto err_kfree;
-               }
-       } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
-               error = -EINVAL;
-               goto error;
+       error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
+                                    0, &acts);
+       if (error) {
+               OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
+               goto err_kfree_acts;
+       }
+
+       reply = ovs_flow_cmd_alloc_info(acts, info, false);
+       if (IS_ERR(reply)) {
+               error = PTR_ERR(reply);
+               goto err_kfree_acts;
        }
 
        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-       error = -ENODEV;
-       if (!dp)
+       if (unlikely(!dp)) {
+               error = -ENODEV;
                goto err_unlock_ovs;
-
+       }
        /* Check if this is a duplicate flow */
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (!flow) {
-               /* Bail out if we're not allowed to create a new flow. */
-               error = -ENOENT;
-               if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
-                       goto err_unlock_ovs;
-
-               /* Allocate flow. */
-               flow = ovs_flow_alloc(!exact_5tuple);
-               if (IS_ERR(flow)) {
-                       error = PTR_ERR(flow);
-                       goto err_unlock_ovs;
-               }
-
-               flow->key = masked_key;
-               flow->unmasked_key = key;
-               rcu_assign_pointer(flow->sf_acts, acts);
+       flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
+       if (likely(!flow)) {
+               rcu_assign_pointer(new_flow->sf_acts, acts);
 
                /* Put flow in bucket. */
-               error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
-               if (error) {
+               error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
+               if (unlikely(error)) {
                        acts = NULL;
-                       goto err_flow_free;
+                       goto err_unlock_ovs;
                }
 
-               reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
+               if (unlikely(reply)) {
+                       error = ovs_flow_cmd_fill_info(new_flow,
+                                                      ovs_header->dp_ifindex,
+                                                      reply, info->snd_portid,
+                                                      info->snd_seq, 0,
+                                                      OVS_FLOW_CMD_NEW);
+                       BUG_ON(error < 0);
+               }
+               ovs_unlock();
        } else {
-               /* We found a matching flow. */
                struct sw_flow_actions *old_acts;
 
                /* Bail out if we're not allowed to modify an existing flow.
@@ -858,40 +882,154 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
-               error = -EEXIST;
-               if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
-                   info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
+               if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
+                                                        | NLM_F_EXCL))) {
+                       error = -EEXIST;
                        goto err_unlock_ovs;
-
+               }
                /* The unmasked key has to be the same for flow updates. */
-               if (!ovs_flow_cmp_unmasked_key(flow, &match))
+               if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
+                       error = -EEXIST;
                        goto err_unlock_ovs;
-
+               }
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);
+
+               if (unlikely(reply)) {
+                       error = ovs_flow_cmd_fill_info(flow,
+                                                      ovs_header->dp_ifindex,
+                                                      reply, info->snd_portid,
+                                                      info->snd_seq, 0,
+                                                      OVS_FLOW_CMD_NEW);
+                       BUG_ON(error < 0);
+               }
+               ovs_unlock();
+
                ovs_nla_free_flow_actions(old_acts);
+               ovs_flow_free(new_flow, false);
+       }
+
+       if (reply)
+               ovs_notify(&dp_flow_genl_family, reply, info);
+       return 0;
+
+err_unlock_ovs:
+       ovs_unlock();
+       kfree_skb(reply);
+err_kfree_acts:
+       kfree(acts);
+err_kfree_flow:
+       ovs_flow_free(new_flow, false);
+error:
+       return error;
+}
+
+static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct sw_flow_key key, masked_key;
+       struct sw_flow *flow;
+       struct sw_flow_mask mask;
+       struct sk_buff *reply = NULL;
+       struct datapath *dp;
+       struct sw_flow_actions *old_acts = NULL, *acts = NULL;
+       struct sw_flow_match match;
+       int error;
+
+       /* Extract key. */
+       error = -EINVAL;
+       if (!a[OVS_FLOW_ATTR_KEY])
+               goto error;
+
+       ovs_match_init(&match, &key, &mask);
+       error = ovs_nla_get_match(&match,
+                                 a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
+       if (error)
+               goto error;
+
+       /* Validate actions. */
+       if (a[OVS_FLOW_ATTR_ACTIONS]) {
+               acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+               error = PTR_ERR(acts);
+               if (IS_ERR(acts))
+                       goto error;
+
+               ovs_flow_mask_key(&masked_key, &key, &mask);
+               error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+                                            &masked_key, 0, &acts);
+               if (error) {
+                       OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
+                       goto err_kfree_acts;
+               }
+       }
+
+       /* Can allocate before locking if have acts. */
+       if (acts) {
+               reply = ovs_flow_cmd_alloc_info(acts, info, false);
+               if (IS_ERR(reply)) {
+                       error = PTR_ERR(reply);
+                       goto err_kfree_acts;
+               }
+       }
 
-               reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
+       ovs_lock();
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+       if (unlikely(!dp)) {
+               error = -ENODEV;
+               goto err_unlock_ovs;
+       }
+       /* Check that the flow exists. */
+       flow = ovs_flow_tbl_lookup(&dp->table, &key);
+       if (unlikely(!flow)) {
+               error = -ENOENT;
+               goto err_unlock_ovs;
+       }
+       /* The unmasked key has to be the same for flow updates. */
+       if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
+               error = -EEXIST;
+               goto err_unlock_ovs;
+       }
+       /* Update actions, if present. */
+       if (likely(acts)) {
+               old_acts = ovsl_dereference(flow->sf_acts);
+               rcu_assign_pointer(flow->sf_acts, acts);
 
-               /* Clear stats. */
-               if (a[OVS_FLOW_ATTR_CLEAR])
-                       ovs_flow_stats_clear(flow);
+               if (unlikely(reply)) {
+                       error = ovs_flow_cmd_fill_info(flow,
+                                                      ovs_header->dp_ifindex,
+                                                      reply, info->snd_portid,
+                                                      info->snd_seq, 0,
+                                                      OVS_FLOW_CMD_NEW);
+                       BUG_ON(error < 0);
+               }
+       } else {
+               /* Could not alloc without acts before locking. */
+               reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
+                                               info, OVS_FLOW_CMD_NEW, false);
+               if (unlikely(IS_ERR(reply))) {
+                       error = PTR_ERR(reply);
+                       goto err_unlock_ovs;
+               }
        }
+
+       /* Clear stats. */
+       if (a[OVS_FLOW_ATTR_CLEAR])
+               ovs_flow_stats_clear(flow);
        ovs_unlock();
 
-       if (!IS_ERR(reply))
+       if (reply)
                ovs_notify(&dp_flow_genl_family, reply, info);
-       else
-               genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
-                            0, PTR_ERR(reply));
+       if (old_acts)
+               ovs_nla_free_flow_actions(old_acts);
+
        return 0;
 
-err_flow_free:
-       ovs_flow_free(flow, false);
 err_unlock_ovs:
        ovs_unlock();
-err_kfree:
+       kfree_skb(reply);
+err_kfree_acts:
        kfree(acts);
 error:
        return error;
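
The split of ovs_flow_cmd_new_or_set() above follows one ordering throughout: everything that can fail or sleep (the flow, the actions copy, the optional reply skb) is allocated before ovs_mutex is taken, only the table update and the reply fill happen inside the critical section, and the notification is sent after unlocking. A minimal user-space sketch of that ordering, with a pthread mutex standing in for ovs_mutex and malloc for the skb allocators (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table_value;                         /* stands in for the flow table */

/* Allocate first, keep the locked region short, notify after unlocking. */
static int cmd_new(int new_value, char **notification)
{
        char *reply = malloc(64);               /* 1. allocate before locking */
        if (!reply)
                return -1;

        pthread_mutex_lock(&table_lock);        /* 2. short critical section */
        table_value = new_value;
        snprintf(reply, 64, "flow set to %d", table_value);
        pthread_mutex_unlock(&table_lock);

        *notification = reply;                  /* 3. deliver outside the lock */
        return 0;
}

int main(void)
{
        char *msg;

        if (cmd_new(42, &msg) == 0) {
                puts(msg);
                free(msg);
        }
        return 0;
}
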
@@ -914,7 +1052,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
        }
 
        ovs_match_init(&match, &key, NULL);
-       err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
+       err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
        if (err)
                return err;
 
@@ -931,7 +1069,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
+       reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
+                                       OVS_FLOW_CMD_NEW, true);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
@@ -955,45 +1094,53 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
        struct sw_flow_match match;
        int err;
 
+       if (likely(a[OVS_FLOW_ATTR_KEY])) {
+               ovs_match_init(&match, &key, NULL);
+               err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+               if (unlikely(err))
+                       return err;
+       }
+
        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-       if (!dp) {
+       if (unlikely(!dp)) {
                err = -ENODEV;
                goto unlock;
        }
 
-       if (!a[OVS_FLOW_ATTR_KEY]) {
+       if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
                err = ovs_flow_tbl_flush(&dp->table);
                goto unlock;
        }
 
-       ovs_match_init(&match, &key, NULL);
-       err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
-       if (err)
-               goto unlock;
-
        flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+       if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
                err = -ENOENT;
                goto unlock;
        }
 
-       reply = ovs_flow_cmd_alloc_info(flow, info);
-       if (!reply) {
-               err = -ENOMEM;
-               goto unlock;
-       }
-
        ovs_flow_tbl_remove(&dp->table, flow);
+       ovs_unlock();
 
-       err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
-                                    info->snd_seq, 0, OVS_FLOW_CMD_DEL);
-       BUG_ON(err < 0);
+       reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
+                                       info, false);
+       if (likely(reply)) {
+               if (likely(!IS_ERR(reply))) {
+                       rcu_read_lock();        /* To keep RCU checker happy. */
+                       err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
+                                                    reply, info->snd_portid,
+                                                    info->snd_seq, 0,
+                                                    OVS_FLOW_CMD_DEL);
+                       rcu_read_unlock();
+                       BUG_ON(err < 0);
+
+                       ovs_notify(&dp_flow_genl_family, reply, info);
+               } else {
+                       netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
+               }
+       }
 
        ovs_flow_free(flow, true);
-       ovs_unlock();
-
-       ovs_notify(&dp_flow_genl_family, reply, info);
        return 0;
 unlock:
        ovs_unlock();
@@ -1024,7 +1171,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (!flow)
                        break;
 
-               if (ovs_flow_cmd_fill_info(flow, dp, skb,
+               if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
@@ -1037,11 +1184,17 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        return skb->len;
 }
 
-static const struct genl_ops dp_flow_genl_ops[] = {
+static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
+       [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+       [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+       [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+};
+
+static struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
-         .doit = ovs_flow_cmd_new_or_set
+         .doit = ovs_flow_cmd_new
        },
        { .cmd = OVS_FLOW_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
@@ -1057,28 +1210,22 @@ static const struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
-         .doit = ovs_flow_cmd_new_or_set,
+         .doit = ovs_flow_cmd_set,
        },
 };
 
-static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
-       [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
-       [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
-       [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
-};
-
-static struct genl_family dp_datapath_genl_family = {
+static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
-       .name = OVS_DATAPATH_FAMILY,
-       .version = OVS_DATAPATH_VERSION,
-       .maxattr = OVS_DP_ATTR_MAX,
+       .name = OVS_FLOW_FAMILY,
+       .version = OVS_FLOW_VERSION,
+       .maxattr = OVS_FLOW_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
-};
-
-static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
-       .name = OVS_DATAPATH_MCGROUP
+       .ops = dp_flow_genl_ops,
+       .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
+       .mcgrps = &ovs_dp_flow_multicast_group,
+       .n_mcgrps = 1,
 };
 
 static size_t ovs_dp_cmd_msg_size(void)
@@ -1093,6 +1240,7 @@ static size_t ovs_dp_cmd_msg_size(void)
        return msgsize;
 }
 
+/* Called with ovs_mutex or RCU read lock. */
 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 portid, u32 seq, u32 flags, u8 cmd)
 {
@@ -1108,9 +1256,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 
        ovs_header->dp_ifindex = get_dpifindex(dp);
 
-       rcu_read_lock();
        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
-       rcu_read_unlock();
        if (err)
                goto nla_put_failure;
 
@@ -1135,25 +1281,12 @@ error:
        return -EMSGSIZE;
 }
 
-static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp,
-                                            struct genl_info *info, u8 cmd)
+static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
 {
-       struct sk_buff *skb;
-       int retval;
-
-       skb = genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
-       if (!skb)
-               return ERR_PTR(-ENOMEM);
-
-       retval = ovs_dp_cmd_fill_info(dp, skb, info->snd_portid, info->snd_seq, 0, cmd);
-       if (retval < 0) {
-               kfree_skb(skb);
-               return ERR_PTR(retval);
-       }
-       return skb;
+       return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
 }
 
-/* Called with ovs_mutex. */
+/* Called with rcu_read_lock or ovs_mutex. */
 static struct datapath *lookup_datapath(struct net *net,
                                        struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
@@ -1165,10 +1298,8 @@ static struct datapath *lookup_datapath(struct net *net,
        else {
                struct vport *vport;
 
-               rcu_read_lock();
                vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
-               rcu_read_unlock();
        }
        return dp ? dp : ERR_PTR(-ENODEV);
 }
@@ -1205,12 +1336,14 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;
 
-       ovs_lock();
+       reply = ovs_dp_cmd_alloc_info(info);
+       if (!reply)
+               return -ENOMEM;
 
        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
-               goto err_unlock_ovs;
+               goto err_free_reply;
 
        ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
 
@@ -1245,6 +1378,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 
        ovs_dp_change(dp, a);
 
+       /* So far only local changes have been made, now need the lock. */
+       ovs_lock();
+
        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
@@ -1263,10 +1399,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                goto err_destroy_ports_array;
        }
 
-       reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
-       err = PTR_ERR(reply);
-       if (IS_ERR(reply))
-               goto err_destroy_local_port;
+       err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
+                                  info->snd_seq, 0, OVS_DP_CMD_NEW);
+       BUG_ON(err < 0);
 
        ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
        list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
@@ -1276,9 +1411,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        ovs_notify(&dp_datapath_genl_family, reply, info);
        return 0;
 
-err_destroy_local_port:
-       ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 err_destroy_ports_array:
+       ovs_unlock();
        kfree(dp->ports);
 err_destroy_percpu:
        free_percpu(dp->stats_percpu);
@@ -1287,8 +1421,8 @@ err_destroy_table:
 err_free_dp:
        release_net(ovs_dp_get_net(dp));
        kfree(dp);
-err_unlock_ovs:
-       ovs_unlock();
+err_free_reply:
+       kfree_skb(reply);
 err:
        return err;
 }
@@ -1326,16 +1460,19 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        int err;
 
+       reply = ovs_dp_cmd_alloc_info(info);
+       if (!reply)
+               return -ENOMEM;
+
        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
-               goto unlock;
+               goto err_unlock_free;
 
-       reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_DEL);
-       err = PTR_ERR(reply);
-       if (IS_ERR(reply))
-               goto unlock;
+       err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
+                                  info->snd_seq, 0, OVS_DP_CMD_DEL);
+       BUG_ON(err < 0);
 
        __dp_destroy(dp);
        ovs_unlock();
@@ -1343,8 +1480,10 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
        ovs_notify(&dp_datapath_genl_family, reply, info);
 
        return 0;
-unlock:
+
+err_unlock_free:
        ovs_unlock();
+       kfree_skb(reply);
        return err;
 }
 
@@ -1354,29 +1493,30 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        int err;
 
+       reply = ovs_dp_cmd_alloc_info(info);
+       if (!reply)
+               return -ENOMEM;
+
        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
-               goto unlock;
+               goto err_unlock_free;
 
        ovs_dp_change(dp, info->attrs);
 
-       reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
-       if (IS_ERR(reply)) {
-               err = PTR_ERR(reply);
-               genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
-                            0, err);
-               err = 0;
-               goto unlock;
-       }
+       err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
+                                  info->snd_seq, 0, OVS_DP_CMD_NEW);
+       BUG_ON(err < 0);
 
        ovs_unlock();
        ovs_notify(&dp_datapath_genl_family, reply, info);
 
        return 0;
-unlock:
+
+err_unlock_free:
        ovs_unlock();
+       kfree_skb(reply);
        return err;
 }
 
@@ -1386,24 +1526,26 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        int err;
 
-       ovs_lock();
+       reply = ovs_dp_cmd_alloc_info(info);
+       if (!reply)
+               return -ENOMEM;
+
+       rcu_read_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp)) {
                err = PTR_ERR(dp);
-               goto unlock;
-       }
-
-       reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
-       if (IS_ERR(reply)) {
-               err = PTR_ERR(reply);
-               goto unlock;
+               goto err_unlock_free;
        }
+       err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
+                                  info->snd_seq, 0, OVS_DP_CMD_NEW);
+       BUG_ON(err < 0);
+       rcu_read_unlock();
 
-       ovs_unlock();
        return genlmsg_reply(reply, info);
 
-unlock:
-       ovs_unlock();
+err_unlock_free:
+       rcu_read_unlock();
+       kfree_skb(reply);
        return err;
 }
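
ovs_dp_cmd_get() now performs the lookup and the reply fill under rcu_read_lock() instead of ovs_mutex, so read-only requests no longer serialize against datapath modifications. As a loose user-space analogue of letting readers take a cheap shared path while writers keep exclusion, the same split can be expressed with a pthread rwlock (RCU has no blocking reader lock, so this mirrors only the access pattern, not the mechanism):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t dp_lock = PTHREAD_RWLOCK_INITIALIZER;
static int dp_config = 1;                       /* stands in for datapath state */

static int cmd_get(void)
{
        int val;

        pthread_rwlock_rdlock(&dp_lock);        /* read-only: shared lock */
        val = dp_config;
        pthread_rwlock_unlock(&dp_lock);
        return val;
}

static void cmd_set(int val)
{
        pthread_rwlock_wrlock(&dp_lock);        /* modification: exclusive lock */
        dp_config = val;
        pthread_rwlock_unlock(&dp_lock);
}

int main(void)
{
        cmd_set(2);
        printf("dp config: %d\n", cmd_get());
        return 0;
}
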
 
@@ -1430,7 +1572,13 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        return skb->len;
 }
 
-static const struct genl_ops dp_datapath_genl_ops[] = {
+static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
+       [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+       [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+       [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
+};
+
+static struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
@@ -1454,27 +1602,18 @@ static const struct genl_ops dp_datapath_genl_ops[] = {
        },
 };
 
-static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
-       [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
-       [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
-       [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
-       [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
-       [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
-       [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
-};
-
-struct genl_family dp_vport_genl_family = {
+static struct genl_family dp_datapath_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
-       .name = OVS_VPORT_FAMILY,
-       .version = OVS_VPORT_VERSION,
-       .maxattr = OVS_VPORT_ATTR_MAX,
+       .name = OVS_DATAPATH_FAMILY,
+       .version = OVS_DATAPATH_VERSION,
+       .maxattr = OVS_DP_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
-};
-
-static struct genl_multicast_group ovs_dp_vport_multicast_group = {
-       .name = OVS_VPORT_MCGROUP
+       .ops = dp_datapath_genl_ops,
+       .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
+       .mcgrps = &ovs_dp_datapath_multicast_group,
+       .n_mcgrps = 1,
 };
 
 /* Called with ovs_mutex or RCU read lock. */
@@ -1516,7 +1655,12 @@ error:
        return err;
 }
 
-/* Called with ovs_mutex or RCU read lock. */
+static struct sk_buff *ovs_vport_cmd_alloc_info(void)
+{
+       return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+}
+
+/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                                         u32 seq, u8 cmd)
 {
@@ -1578,33 +1722,35 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
        u32 port_no;
        int err;
 
-       err = -EINVAL;
        if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
            !a[OVS_VPORT_ATTR_UPCALL_PID])
-               goto exit;
+               return -EINVAL;
+
+       port_no = a[OVS_VPORT_ATTR_PORT_NO]
+               ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
+       if (port_no >= DP_MAX_PORTS)
+               return -EFBIG;
+
+       reply = ovs_vport_cmd_alloc_info();
+       if (!reply)
+               return -ENOMEM;
 
        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
-               goto exit_unlock;
-
-       if (a[OVS_VPORT_ATTR_PORT_NO]) {
-               port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
-
-               err = -EFBIG;
-               if (port_no >= DP_MAX_PORTS)
-                       goto exit_unlock;
+               goto exit_unlock_free;
 
+       if (port_no) {
                vport = ovs_vport_ovsl(dp, port_no);
                err = -EBUSY;
                if (vport)
-                       goto exit_unlock;
+                       goto exit_unlock_free;
        } else {
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
-                               goto exit_unlock;
+                               goto exit_unlock_free;
                        }
                        vport = ovs_vport_ovsl(dp, port_no);
                        if (!vport)
@@ -1622,22 +1768,19 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
-               goto exit_unlock;
+               goto exit_unlock_free;
 
-       err = 0;
-       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-                                        OVS_VPORT_CMD_NEW);
-       if (IS_ERR(reply)) {
-               err = PTR_ERR(reply);
-               ovs_dp_detach_port(vport);
-               goto exit_unlock;
-       }
+       err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+                                     info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+       BUG_ON(err < 0);
+       ovs_unlock();
 
        ovs_notify(&dp_vport_genl_family, reply, info);
+       return 0;
 
-exit_unlock:
+exit_unlock_free:
        ovs_unlock();
-exit:
+       kfree_skb(reply);
        return err;
 }
 
@@ -1648,28 +1791,26 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
        struct vport *vport;
        int err;
 
+       reply = ovs_vport_cmd_alloc_info();
+       if (!reply)
+               return -ENOMEM;
+
        ovs_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
-               goto exit_unlock;
+               goto exit_unlock_free;
 
        if (a[OVS_VPORT_ATTR_TYPE] &&
            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
                err = -EINVAL;
-               goto exit_unlock;
-       }
-
-       reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!reply) {
-               err = -ENOMEM;
-               goto exit_unlock;
+               goto exit_unlock_free;
        }
 
        if (a[OVS_VPORT_ATTR_OPTIONS]) {
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
                if (err)
-                       goto exit_free;
+                       goto exit_unlock_free;
        }
 
        if (a[OVS_VPORT_ATTR_UPCALL_PID])
@@ -1683,10 +1824,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
        ovs_notify(&dp_vport_genl_family, reply, info);
        return 0;
 
-exit_free:
-       kfree_skb(reply);
-exit_unlock:
+exit_unlock_free:
        ovs_unlock();
+       kfree_skb(reply);
        return err;
 }
 
@@ -1697,30 +1837,33 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
        struct vport *vport;
        int err;
 
+       reply = ovs_vport_cmd_alloc_info();
+       if (!reply)
+               return -ENOMEM;
+
        ovs_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
-               goto exit_unlock;
+               goto exit_unlock_free;
 
        if (vport->port_no == OVSP_LOCAL) {
                err = -EINVAL;
-               goto exit_unlock;
+               goto exit_unlock_free;
        }
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
-                                        info->snd_seq, OVS_VPORT_CMD_DEL);
-       err = PTR_ERR(reply);
-       if (IS_ERR(reply))
-               goto exit_unlock;
-
-       err = 0;
+       err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+                                     info->snd_seq, 0, OVS_VPORT_CMD_DEL);
+       BUG_ON(err < 0);
        ovs_dp_detach_port(vport);
+       ovs_unlock();
 
        ovs_notify(&dp_vport_genl_family, reply, info);
+       return 0;
 
-exit_unlock:
+exit_unlock_free:
        ovs_unlock();
+       kfree_skb(reply);
        return err;
 }
 
@@ -1732,24 +1875,25 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
        struct vport *vport;
        int err;
 
+       reply = ovs_vport_cmd_alloc_info();
+       if (!reply)
+               return -ENOMEM;
+
        rcu_read_lock();
        vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
-               goto exit_unlock;
-
-       reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
-                                        info->snd_seq, OVS_VPORT_CMD_NEW);
-       err = PTR_ERR(reply);
-       if (IS_ERR(reply))
-               goto exit_unlock;
-
+               goto exit_unlock_free;
+       err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+                                     info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+       BUG_ON(err < 0);
        rcu_read_unlock();
 
        return genlmsg_reply(reply, info);
 
-exit_unlock:
+exit_unlock_free:
        rcu_read_unlock();
+       kfree_skb(reply);
        return err;
 }
 
@@ -1792,7 +1936,16 @@ out:
        return skb->len;
 }
 
-static const struct genl_ops dp_vport_genl_ops[] = {
+static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+       [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+       [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
+       [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+};
+
+static struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
@@ -1816,26 +1969,25 @@ static const struct genl_ops dp_vport_genl_ops[] = {
        },
 };
 
-struct genl_family_and_ops {
-       struct genl_family *family;
-       const struct genl_ops *ops;
-       int n_ops;
-       const struct genl_multicast_group *group;
+struct genl_family dp_vport_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_VPORT_FAMILY,
+       .version = OVS_VPORT_VERSION,
+       .maxattr = OVS_VPORT_ATTR_MAX,
+       .netnsok = true,
+       .parallel_ops = true,
+       .ops = dp_vport_genl_ops,
+       .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
+       .mcgrps = &ovs_dp_vport_multicast_group,
+       .n_mcgrps = 1,
 };
 
-static const struct genl_family_and_ops dp_genl_families[] = {
-       { &dp_datapath_genl_family,
-         dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
-         &ovs_dp_datapath_multicast_group },
-       { &dp_vport_genl_family,
-         dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
-         &ovs_dp_vport_multicast_group },
-       { &dp_flow_genl_family,
-         dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
-         &ovs_dp_flow_multicast_group },
-       { &dp_packet_genl_family,
-         dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
-         NULL },
+static struct genl_family * const dp_genl_families[] = {
+       &dp_datapath_genl_family,
+       &dp_vport_genl_family,
+       &dp_flow_genl_family,
+       &dp_packet_genl_family,
 };
 
 static void dp_unregister_genl(int n_families)
@@ -1843,33 +1995,25 @@ static void dp_unregister_genl(int n_families)
        int i;
 
        for (i = 0; i < n_families; i++)
-               genl_unregister_family(dp_genl_families[i].family);
+               genl_unregister_family(dp_genl_families[i]);
 }
 
 static int dp_register_genl(void)
 {
-       int n_registered;
        int err;
        int i;
 
-       n_registered = 0;
        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
-               const struct genl_family_and_ops *f = &dp_genl_families[i];
 
-               f->family->ops = f->ops;
-               f->family->n_ops = f->n_ops;
-               f->family->mcgrps = f->group;
-               f->family->n_mcgrps = f->group ? 1 : 0;
-               err = genl_register_family(f->family);
+               err = genl_register_family(dp_genl_families[i]);
                if (err)
                        goto error;
-               n_registered++;
        }
 
        return 0;
 
 error:
-       dp_unregister_genl(n_registered);
+       dp_unregister_genl(i);
        return err;
 }
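
With .ops, .n_ops, .mcgrps and .n_mcgrps now embedded in each genl_family definition, dp_register_genl() shrinks to a plain loop over an array of family pointers, and the rollback on partial failure can reuse the loop counter instead of a separate n_registered variable. The same register-or-roll-back shape in a self-contained sketch (register_one()/unregister_one() are placeholders, not kernel functions):

#include <stdio.h>

struct family { const char *name; };

static struct family fam_datapath = { "datapath" };
static struct family fam_vport    = { "vport" };
static struct family fam_flow     = { "flow" };

static struct family *const families[] = {
        &fam_datapath, &fam_vport, &fam_flow,
};
#define N_FAMILIES (sizeof(families) / sizeof(families[0]))

static int register_one(struct family *f)
{
        printf("registered %s\n", f->name);
        return 0;                               /* pretend it always succeeds */
}

static void unregister_one(struct family *f)
{
        printf("unregistered %s\n", f->name);
}

static void unregister_families(int n)
{
        int i;

        for (i = 0; i < n; i++)
                unregister_one(families[i]);
}

static int register_families(void)
{
        int err, i;

        for (i = 0; i < (int)N_FAMILIES; i++) {
                err = register_one(families[i]);
                if (err)
                        goto error;
        }
        return 0;

error:
        /* 'i' entries were registered before the failure. */
        unregister_families(i);
        return err;
}

int main(void)
{
        return register_families();
}
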
 
index 05317380fc03a03af708716f162738727cab5fff..7ede507500d7daa1cca3d352f9923122f5e63709 100644 (file)
@@ -194,7 +194,9 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
 void ovs_dp_notify_wq(struct work_struct *work);
 
-#define OVS_NLERR(fmt, ...) \
-       pr_info_once("netlink: " fmt, ##__VA_ARGS__)
-
+#define OVS_NLERR(fmt, ...)                                    \
+do {                                                           \
+       if (net_ratelimit())                                    \
+               pr_info("netlink: " fmt, ##__VA_ARGS__);        \
+} while (0)
 #endif /* datapath.h */
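
The OVS_NLERR() rewrite trades "print only the first occurrence" (pr_info_once) for "print at a bounded rate" (net_ratelimit() guarding pr_info), so repeated malformed netlink requests remain visible without flooding the log. A rough user-space analogue of a rate-limited logging macro (the one-message-per-second budget is an arbitrary choice for this sketch, not what net_ratelimit() enforces):

#include <stdio.h>
#include <time.h>

/* Allow at most one message per second; drop the rest. */
static int log_ratelimit(void)
{
        static time_t last;
        time_t now = time(NULL);

        if (now == last)
                return 0;
        last = now;
        return 1;
}

#define LOG_ERR(fmt, ...)                                               \
do {                                                                    \
        if (log_ratelimit())                                            \
                fprintf(stderr, "netlink: " fmt, ##__VA_ARGS__);        \
} while (0)

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)
                LOG_ERR("bad attribute %d\n", i);       /* only the first prints */
        return 0;
}
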
index 2998989e76db0a7ccb8e25ef11aa393180956593..334751cb15289c4f0ca00bedec960ee1cfe19ba0 100644 (file)
@@ -64,88 +64,110 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 {
        struct flow_stats *stats;
-       __be16 tcp_flags = 0;
-
-       if (!flow->stats.is_percpu)
-               stats = flow->stats.stat;
-       else
-               stats = this_cpu_ptr(flow->stats.cpu_stats);
-
-       if ((flow->key.eth.type == htons(ETH_P_IP) ||
-            flow->key.eth.type == htons(ETH_P_IPV6)) &&
-           flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
-           flow->key.ip.proto == IPPROTO_TCP &&
-           likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
-               tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
+       __be16 tcp_flags = flow->key.tp.flags;
+       int node = numa_node_id();
+
+       stats = rcu_dereference(flow->stats[node]);
+
+       /* Check if already have node-specific stats. */
+       if (likely(stats)) {
+               spin_lock(&stats->lock);
+               /* Mark if we write on the pre-allocated stats. */
+               if (node == 0 && unlikely(flow->stats_last_writer != node))
+                       flow->stats_last_writer = node;
+       } else {
+               stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
+               spin_lock(&stats->lock);
+
+               /* If the current NUMA-node is the only writer on the
+                * pre-allocated stats keep using them.
+                */
+               if (unlikely(flow->stats_last_writer != node)) {
+                       /* A previous locker may have already allocated the
+                        * stats, so we need to check again.  If node-specific
+                        * stats were already allocated, we update the pre-
+                        * allocated stats as we have already locked them.
+                        */
+                       if (likely(flow->stats_last_writer != NUMA_NO_NODE)
+                           && likely(!rcu_dereference(flow->stats[node]))) {
+                               /* Try to allocate node-specific stats. */
+                               struct flow_stats *new_stats;
+
+                               new_stats =
+                                       kmem_cache_alloc_node(flow_stats_cache,
+                                                             GFP_THISNODE |
+                                                             __GFP_NOMEMALLOC,
+                                                             node);
+                               if (likely(new_stats)) {
+                                       new_stats->used = jiffies;
+                                       new_stats->packet_count = 1;
+                                       new_stats->byte_count = skb->len;
+                                       new_stats->tcp_flags = tcp_flags;
+                                       spin_lock_init(&new_stats->lock);
+
+                                       rcu_assign_pointer(flow->stats[node],
+                                                          new_stats);
+                                       goto unlock;
+                               }
+                       }
+                       flow->stats_last_writer = node;
+               }
        }
 
-       spin_lock(&stats->lock);
        stats->used = jiffies;
        stats->packet_count++;
        stats->byte_count += skb->len;
        stats->tcp_flags |= tcp_flags;
+unlock:
        spin_unlock(&stats->lock);
 }
 
-static void stats_read(struct flow_stats *stats,
-                      struct ovs_flow_stats *ovs_stats,
-                      unsigned long *used, __be16 *tcp_flags)
-{
-       spin_lock(&stats->lock);
-       if (!*used || time_after(stats->used, *used))
-               *used = stats->used;
-       *tcp_flags |= stats->tcp_flags;
-       ovs_stats->n_packets += stats->packet_count;
-       ovs_stats->n_bytes += stats->byte_count;
-       spin_unlock(&stats->lock);
-}
-
-void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
+/* Must be called with rcu_read_lock or ovs_mutex. */
+void ovs_flow_stats_get(const struct sw_flow *flow,
+                       struct ovs_flow_stats *ovs_stats,
                        unsigned long *used, __be16 *tcp_flags)
 {
-       int cpu;
+       int node;
 
        *used = 0;
        *tcp_flags = 0;
        memset(ovs_stats, 0, sizeof(*ovs_stats));
 
-       local_bh_disable();
-       if (!flow->stats.is_percpu) {
-               stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
-       } else {
-               for_each_possible_cpu(cpu) {
-                       struct flow_stats *stats;
+       for_each_node(node) {
+               struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);
 
-                       stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-                       stats_read(stats, ovs_stats, used, tcp_flags);
+               if (stats) {
+                       /* Local CPU may write on non-local stats, so we must
+                        * block bottom-halves here.
+                        */
+                       spin_lock_bh(&stats->lock);
+                       if (!*used || time_after(stats->used, *used))
+                               *used = stats->used;
+                       *tcp_flags |= stats->tcp_flags;
+                       ovs_stats->n_packets += stats->packet_count;
+                       ovs_stats->n_bytes += stats->byte_count;
+                       spin_unlock_bh(&stats->lock);
                }
        }
-       local_bh_enable();
-}
-
-static void stats_reset(struct flow_stats *stats)
-{
-       spin_lock(&stats->lock);
-       stats->used = 0;
-       stats->packet_count = 0;
-       stats->byte_count = 0;
-       stats->tcp_flags = 0;
-       spin_unlock(&stats->lock);
 }
 
+/* Called with ovs_mutex. */
 void ovs_flow_stats_clear(struct sw_flow *flow)
 {
-       int cpu;
-
-       local_bh_disable();
-       if (!flow->stats.is_percpu) {
-               stats_reset(flow->stats.stat);
-       } else {
-               for_each_possible_cpu(cpu) {
-                       stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
+       int node;
+
+       for_each_node(node) {
+               struct flow_stats *stats = ovsl_dereference(flow->stats[node]);
+
+               if (stats) {
+                       spin_lock_bh(&stats->lock);
+                       stats->used = 0;
+                       stats->packet_count = 0;
+                       stats->byte_count = 0;
+                       stats->tcp_flags = 0;
+                       spin_unlock_bh(&stats->lock);
                }
        }
-       local_bh_enable();
 }
 
 static int check_header(struct sk_buff *skb, int len)
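
The rewritten ovs_flow_stats_update() above replaces the per-CPU/shared split with one lazily allocated flow_stats per NUMA node: a writer updates its node-local entry if it exists, otherwise it falls back to the always-present stats[0], and only when a second node starts writing does it allocate a node-local copy. A stripped-down user-space sketch of that "fall back, then allocate on cross-node contention" shape (node ids, locking and allocation are simplified, and there is no RCU here):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 4

struct stats {
        pthread_mutex_t lock;
        unsigned long packets;
        unsigned long bytes;
};

struct flow {
        int last_writer;                        /* node that last wrote stats[0] */
        struct stats *stats[MAX_NODES];         /* [0] preallocated, rest on demand */
};

static struct stats *alloc_stats(void)
{
        struct stats *s = calloc(1, sizeof(*s));

        if (s)
                pthread_mutex_init(&s->lock, NULL);
        return s;
}

static void stats_update(struct flow *flow, int node, unsigned int len)
{
        struct stats *s = flow->stats[node];

        if (!s) {
                /* No node-local entry yet: fall back to the shared one. */
                s = flow->stats[0];
                pthread_mutex_lock(&s->lock);
                if (flow->last_writer != node) {
                        /* A second node is writing: give it a private copy
                         * to use from the next packet on. */
                        flow->stats[node] = alloc_stats();
                        flow->last_writer = node;
                }
        } else {
                pthread_mutex_lock(&s->lock);
        }
        s->packets++;
        s->bytes += len;
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct flow flow = { .last_writer = 0 };

        flow.stats[0] = alloc_stats();
        if (!flow.stats[0])
                return 1;
        stats_update(&flow, 0, 100);            /* node 0 hits the shared entry */
        stats_update(&flow, 1, 200);            /* node 1 triggers a private copy */
        stats_update(&flow, 1, 300);            /* node 1 now uses its own entry */
        printf("shared: %lu packets, node 1: %lu packets\n",
               flow.stats[0]->packets,
               flow.stats[1] ? flow.stats[1]->packets : 0UL);
        return 0;
}
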
@@ -332,8 +354,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
        /* The ICMPv6 type and code fields use the 16-bit transport port
         * fields, so we need to store them in 16-bit network byte order.
         */
-       key->ipv6.tp.src = htons(icmp->icmp6_type);
-       key->ipv6.tp.dst = htons(icmp->icmp6_code);
+       key->tp.src = htons(icmp->icmp6_type);
+       key->tp.dst = htons(icmp->icmp6_code);
 
        if (icmp->icmp6_code == 0 &&
            (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -372,14 +394,14 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
                            && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
                                        goto invalid;
-                               memcpy(key->ipv6.nd.sll,
-                                   &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+                               ether_addr_copy(key->ipv6.nd.sll,
+                                               &nd->opt[offset+sizeof(*nd_opt)]);
                        } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
                                   && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
                                        goto invalid;
-                               memcpy(key->ipv6.nd.tll,
-                                   &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+                               ether_addr_copy(key->ipv6.nd.tll,
+                                               &nd->opt[offset+sizeof(*nd_opt)]);
                        }
 
                        icmp_len -= opt_len;
@@ -439,8 +461,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
         * header in the linear data area.
         */
        eth = eth_hdr(skb);
-       memcpy(key->eth.src, eth->h_source, ETH_ALEN);
-       memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
+       ether_addr_copy(key->eth.src, eth->h_source);
+       ether_addr_copy(key->eth.dst, eth->h_dest);
 
        __skb_pull(skb, 2 * ETH_ALEN);
        /* We are going to push all headers that we pull, so no need to
@@ -495,21 +517,21 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
                if (key->ip.proto == IPPROTO_TCP) {
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
-                               key->ipv4.tp.src = tcp->source;
-                               key->ipv4.tp.dst = tcp->dest;
-                               key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp);
+                               key->tp.src = tcp->source;
+                               key->tp.dst = tcp->dest;
+                               key->tp.flags = TCP_FLAGS_BE16(tcp);
                        }
                } else if (key->ip.proto == IPPROTO_UDP) {
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
-                               key->ipv4.tp.src = udp->source;
-                               key->ipv4.tp.dst = udp->dest;
+                               key->tp.src = udp->source;
+                               key->tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_SCTP) {
                        if (sctphdr_ok(skb)) {
                                struct sctphdr *sctp = sctp_hdr(skb);
-                               key->ipv4.tp.src = sctp->source;
-                               key->ipv4.tp.dst = sctp->dest;
+                               key->tp.src = sctp->source;
+                               key->tp.dst = sctp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_ICMP) {
                        if (icmphdr_ok(skb)) {
@@ -517,8 +539,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
                                /* The ICMP type and code fields use the 16-bit
                                 * transport port fields, so we need to store
                                 * them in 16-bit network byte order. */
-                               key->ipv4.tp.src = htons(icmp->type);
-                               key->ipv4.tp.dst = htons(icmp->code);
+                               key->tp.src = htons(icmp->type);
+                               key->tp.dst = htons(icmp->code);
                        }
                }
 
@@ -538,8 +560,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
                                key->ip.proto = ntohs(arp->ar_op);
                        memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
                        memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
-                       memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
-                       memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+                       ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
+                       ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
                }
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                int nh_len;             /* IPv6 Header + Extensions */
@@ -564,21 +586,21 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
                if (key->ip.proto == NEXTHDR_TCP) {
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
-                               key->ipv6.tp.src = tcp->source;
-                               key->ipv6.tp.dst = tcp->dest;
-                               key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp);
+                               key->tp.src = tcp->source;
+                               key->tp.dst = tcp->dest;
+                               key->tp.flags = TCP_FLAGS_BE16(tcp);
                        }
                } else if (key->ip.proto == NEXTHDR_UDP) {
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
-                               key->ipv6.tp.src = udp->source;
-                               key->ipv6.tp.dst = udp->dest;
+                               key->tp.src = udp->source;
+                               key->tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_SCTP) {
                        if (sctphdr_ok(skb)) {
                                struct sctphdr *sctp = sctp_hdr(skb);
-                               key->ipv6.tp.src = sctp->source;
-                               key->ipv6.tp.dst = sctp->dest;
+                               key->tp.src = sctp->source;
+                               key->tp.dst = sctp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_ICMP) {
                        if (icmp6hdr_ok(skb)) {
index 2d770e28a3a396f7da0a4c8e7baf4b29709ceb0c..ac395d2cd821631898116b89438af7eba5d40b2f 100644 (file)
@@ -47,7 +47,7 @@ struct ovs_key_ipv4_tunnel {
        __be16 tun_flags;
        u8   ipv4_tos;
        u8   ipv4_ttl;
-};
+} __packed __aligned(4); /* Minimize padding. */
 
 static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key,
                                         const struct iphdr *iph, __be64 tun_id,
@@ -71,7 +71,7 @@ struct sw_flow_key {
                u32     priority;       /* Packet QoS priority. */
                u32     skb_mark;       /* SKB mark. */
                u16     in_port;        /* Input switch port (or DP_MAX_PORTS). */
-       } phy;
+       } __packed phy; /* Safe when right after 'tun_key'. */
        struct {
                u8     src[ETH_ALEN];   /* Ethernet source address. */
                u8     dst[ETH_ALEN];   /* Ethernet destination address. */
@@ -84,23 +84,21 @@ struct sw_flow_key {
                u8     ttl;             /* IP TTL/hop limit. */
                u8     frag;            /* One of OVS_FRAG_TYPE_*. */
        } ip;
+       struct {
+               __be16 src;             /* TCP/UDP/SCTP source port. */
+               __be16 dst;             /* TCP/UDP/SCTP destination port. */
+               __be16 flags;           /* TCP flags. */
+       } tp;
        union {
                struct {
                        struct {
                                __be32 src;     /* IP source address. */
                                __be32 dst;     /* IP destination address. */
                        } addr;
-                       union {
-                               struct {
-                                       __be16 src;             /* TCP/UDP/SCTP source port. */
-                                       __be16 dst;             /* TCP/UDP/SCTP destination port. */
-                                       __be16 flags;           /* TCP flags. */
-                               } tp;
-                               struct {
-                                       u8 sha[ETH_ALEN];       /* ARP source hardware address. */
-                                       u8 tha[ETH_ALEN];       /* ARP target hardware address. */
-                               } arp;
-                       };
+                       struct {
+                               u8 sha[ETH_ALEN];       /* ARP source hardware address. */
+                               u8 tha[ETH_ALEN];       /* ARP target hardware address. */
+                       } arp;
                } ipv4;
                struct {
                        struct {
@@ -108,11 +106,6 @@ struct sw_flow_key {
                                struct in6_addr dst;    /* IPv6 destination address. */
                        } addr;
                        __be32 label;                   /* IPv6 flow label. */
-                       struct {
-                               __be16 src;             /* TCP/UDP/SCTP source port. */
-                               __be16 dst;             /* TCP/UDP/SCTP destination port. */
-                               __be16 flags;           /* TCP flags. */
-                       } tp;
                        struct {
                                struct in6_addr target; /* ND target address. */
                                u8 sll[ETH_ALEN];       /* ND source link layer address. */
@@ -155,24 +148,22 @@ struct flow_stats {
        __be16 tcp_flags;               /* Union of seen TCP flags. */
 };
 
-struct sw_flow_stats {
-       bool is_percpu;
-       union {
-               struct flow_stats *stat;
-               struct flow_stats __percpu *cpu_stats;
-       };
-};
-
 struct sw_flow {
        struct rcu_head rcu;
        struct hlist_node hash_node[2];
        u32 hash;
-
+       int stats_last_writer;          /* NUMA-node id of the last writer on
+                                        * 'stats[0]'.
+                                        */
        struct sw_flow_key key;
        struct sw_flow_key unmasked_key;
        struct sw_flow_mask *mask;
        struct sw_flow_actions __rcu *sf_acts;
-       struct sw_flow_stats stats;
+       struct flow_stats __rcu *stats[]; /* One for each NUMA node.  First one
+                                          * is allocated at flow creation time,
+                                          * the rest are allocated on demand
+                                          * while holding the 'stats[0].lock'.
+                                          */
 };
 
 struct arp_eth_header {
@@ -189,10 +180,10 @@ struct arp_eth_header {
        unsigned char       ar_tip[4];          /* target IP address        */
 } __packed;
 
-void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb);
-void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *stats,
+void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
                        unsigned long *used, __be16 *tcp_flags);
-void ovs_flow_stats_clear(struct sw_flow *flow);
+void ovs_flow_stats_clear(struct sw_flow *);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
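
Replacing the embedded sw_flow_stats with the trailing "struct flow_stats __rcu *stats[]" shown above turns sw_flow into a flexible-array allocation whose size depends on the number of possible NUMA nodes, with only stats[0] populated eagerly. Roughly how such an allocation is sized, in plain C with num_nodes standing in for nr_node_ids and ordinary pointers instead of __rcu (a sketch, not the kernel's ovs_flow_alloc()):

#include <stdio.h>
#include <stdlib.h>

struct flow_stats {
        unsigned long packets;
        unsigned long bytes;
};

struct sw_flow {
        int stats_last_writer;
        /* ... key, mask, actions would live here ... */
        struct flow_stats *stats[];             /* one slot per NUMA node */
};

static struct sw_flow *flow_alloc(int num_nodes)
{
        struct sw_flow *flow;

        /* One allocation: the struct plus num_nodes trailing pointers. */
        flow = calloc(1, sizeof(*flow) + num_nodes * sizeof(flow->stats[0]));
        if (!flow)
                return NULL;

        /* Only slot 0 is populated eagerly; the others stay NULL until
         * another node starts updating this flow. */
        flow->stats[0] = calloc(1, sizeof(struct flow_stats));
        if (!flow->stats[0]) {
                free(flow);
                return NULL;
        }
        return flow;
}

int main(void)
{
        struct sw_flow *flow = flow_alloc(4);

        if (!flow)
                return 1;
        printf("stats[0] %s, stats[1] %s\n",
               flow->stats[0] ? "allocated" : "NULL",
               flow->stats[1] ? "allocated" : "NULL");
        free(flow->stats[0]);
        free(flow);
        return 0;
}
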
index 4d000acaed0db5cc2052ae55f5b0cb7be6472b79..d757848da89ca734ac95cef806c2dd314f881bf6 100644 (file)
@@ -16,6 +16,8 @@
  * 02110-1301, USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "flow.h"
 #include "datapath.h"
 #include <linux/uaccess.h>
@@ -202,11 +204,11 @@ static bool match_validate(const struct sw_flow_match *match,
                                if (match->mask && (match->mask->key.ip.proto == 0xff))
                                        mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
 
-                               if (match->key->ipv6.tp.src ==
+                               if (match->key->tp.src ==
                                                htons(NDISC_NEIGHBOUR_SOLICITATION) ||
-                                   match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+                                   match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                                        key_expected |= 1 << OVS_KEY_ATTR_ND;
-                                       if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
+                                       if (match->mask && (match->mask->key.tp.src == htons(0xffff)))
                                                mask_allowed |= 1 << OVS_KEY_ATTR_ND;
                                }
                        }
@@ -216,14 +218,14 @@ static bool match_validate(const struct sw_flow_match *match,
        if ((key_attrs & key_expected) != key_expected) {
                /* Key attributes check failed. */
                OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
-                               key_attrs, key_expected);
+                               (unsigned long long)key_attrs, (unsigned long long)key_expected);
                return false;
        }
 
        if ((mask_attrs & mask_allowed) != mask_attrs) {
                /* Mask attributes check failed. */
                OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
-                               mask_attrs, mask_allowed);
+                               (unsigned long long)mask_attrs, (unsigned long long)mask_allowed);
                return false;
        }
 
@@ -266,20 +268,6 @@ static bool is_all_zero(const u8 *fp, size_t size)
        return true;
 }
 
-static bool is_all_set(const u8 *fp, size_t size)
-{
-       int i;
-
-       if (!fp)
-               return false;
-
-       for (i = 0; i < size; i++)
-               if (fp[i] != 0xff)
-                       return false;
-
-       return true;
-}
-
 static int __parse_flow_nlattrs(const struct nlattr *attr,
                                const struct nlattr *a[],
                                u64 *attrsp, bool nz)
@@ -501,9 +489,8 @@ static int metadata_from_nlattrs(struct sw_flow_match *match,  u64 *attrs,
        return 0;
 }
 
-static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple,
-                               u64 attrs, const struct nlattr **a,
-                               bool is_mask)
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
+                               const struct nlattr **a, bool is_mask)
 {
        int err;
        u64 orig_attrs = attrs;
@@ -560,11 +547,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
        }
 
-       if (is_mask && exact_5tuple) {
-               if (match->mask->key.eth.type != htons(0xffff))
-                       *exact_5tuple = false;
-       }
-
        if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
                const struct ovs_key_ipv4 *ipv4_key;
 
@@ -587,13 +569,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
                                ipv4_key->ipv4_dst, is_mask);
                attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
-
-               if (is_mask && exact_5tuple && *exact_5tuple) {
-                       if (ipv4_key->ipv4_proto != 0xff ||
-                           ipv4_key->ipv4_src != htonl(0xffffffff) ||
-                           ipv4_key->ipv4_dst != htonl(0xffffffff))
-                               *exact_5tuple = false;
-               }
        }
 
        if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
@@ -625,13 +600,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                                is_mask);
 
                attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
-
-               if (is_mask && exact_5tuple && *exact_5tuple) {
-                       if (ipv6_key->ipv6_proto != 0xff ||
-                           !is_all_set((u8 *)ipv6_key->ipv6_src, sizeof(match->key->ipv6.addr.src)) ||
-                           !is_all_set((u8 *)ipv6_key->ipv6_dst, sizeof(match->key->ipv6.addr.dst)))
-                               *exact_5tuple = false;
-               }
        }
 
        if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
@@ -662,32 +630,18 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                const struct ovs_key_tcp *tcp_key;
 
                tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
-               if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
-                       SW_FLOW_KEY_PUT(match, ipv4.tp.src,
-                                       tcp_key->tcp_src, is_mask);
-                       SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
-                                       tcp_key->tcp_dst, is_mask);
-               } else {
-                       SW_FLOW_KEY_PUT(match, ipv6.tp.src,
-                                       tcp_key->tcp_src, is_mask);
-                       SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
-                                       tcp_key->tcp_dst, is_mask);
-               }
+               SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
+               SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
                attrs &= ~(1 << OVS_KEY_ATTR_TCP);
-
-               if (is_mask && exact_5tuple && *exact_5tuple &&
-                   (tcp_key->tcp_src != htons(0xffff) ||
-                    tcp_key->tcp_dst != htons(0xffff)))
-                       *exact_5tuple = false;
        }
 
        if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
                if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
-                       SW_FLOW_KEY_PUT(match, ipv4.tp.flags,
+                       SW_FLOW_KEY_PUT(match, tp.flags,
                                        nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
                                        is_mask);
                } else {
-                       SW_FLOW_KEY_PUT(match, ipv6.tp.flags,
+                       SW_FLOW_KEY_PUT(match, tp.flags,
                                        nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
                                        is_mask);
                }
@@ -698,40 +652,17 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                const struct ovs_key_udp *udp_key;
 
                udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
-               if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
-                       SW_FLOW_KEY_PUT(match, ipv4.tp.src,
-                                       udp_key->udp_src, is_mask);
-                       SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
-                                       udp_key->udp_dst, is_mask);
-               } else {
-                       SW_FLOW_KEY_PUT(match, ipv6.tp.src,
-                                       udp_key->udp_src, is_mask);
-                       SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
-                                       udp_key->udp_dst, is_mask);
-               }
+               SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
+               SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
                attrs &= ~(1 << OVS_KEY_ATTR_UDP);
-
-               if (is_mask && exact_5tuple && *exact_5tuple &&
-                   (udp_key->udp_src != htons(0xffff) ||
-                    udp_key->udp_dst != htons(0xffff)))
-                       *exact_5tuple = false;
        }
 
        if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
                const struct ovs_key_sctp *sctp_key;
 
                sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
-               if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
-                       SW_FLOW_KEY_PUT(match, ipv4.tp.src,
-                                       sctp_key->sctp_src, is_mask);
-                       SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
-                                       sctp_key->sctp_dst, is_mask);
-               } else {
-                       SW_FLOW_KEY_PUT(match, ipv6.tp.src,
-                                       sctp_key->sctp_src, is_mask);
-                       SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
-                                       sctp_key->sctp_dst, is_mask);
-               }
+               SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
+               SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
                attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
        }
 
@@ -739,9 +670,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                const struct ovs_key_icmp *icmp_key;
 
                icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
-               SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+               SW_FLOW_KEY_PUT(match, tp.src,
                                htons(icmp_key->icmp_type), is_mask);
-               SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+               SW_FLOW_KEY_PUT(match, tp.dst,
                                htons(icmp_key->icmp_code), is_mask);
                attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
        }
@@ -750,9 +681,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                const struct ovs_key_icmpv6 *icmpv6_key;
 
                icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
-               SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+               SW_FLOW_KEY_PUT(match, tp.src,
                                htons(icmpv6_key->icmpv6_type), is_mask);
-               SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+               SW_FLOW_KEY_PUT(match, tp.dst,
                                htons(icmpv6_key->icmpv6_code), is_mask);
                attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
        }
@@ -800,7 +731,6 @@ static void sw_flow_mask_set(struct sw_flow_mask *mask,
  * attribute specifies the mask field of the wildcarded flow.
  */
 int ovs_nla_get_match(struct sw_flow_match *match,
-                     bool *exact_5tuple,
                      const struct nlattr *key,
                      const struct nlattr *mask)
 {
@@ -848,13 +778,10 @@ int ovs_nla_get_match(struct sw_flow_match *match,
                }
        }
 
-       err = ovs_key_from_nlattrs(match, NULL, key_attrs, a, false);
+       err = ovs_key_from_nlattrs(match, key_attrs, a, false);
        if (err)
                return err;
 
-       if (exact_5tuple)
-               *exact_5tuple = true;
-
        if (mask) {
                err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
                if (err)
@@ -892,7 +819,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
                        }
                }
 
-               err = ovs_key_from_nlattrs(match, exact_5tuple, mask_attrs, a, true);
+               err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
                if (err)
                        return err;
        } else {
@@ -982,8 +909,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                goto nla_put_failure;
 
        eth_key = nla_data(nla);
-       memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
-       memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);
+       ether_addr_copy(eth_key->eth_src, output->eth.src);
+       ether_addr_copy(eth_key->eth_dst, output->eth.dst);
 
        if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
                __be16 eth_type;
@@ -1055,8 +982,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                arp_key->arp_sip = output->ipv4.addr.src;
                arp_key->arp_tip = output->ipv4.addr.dst;
                arp_key->arp_op = htons(output->ip.proto);
-               memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
-               memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
+               ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
+               ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
        }
 
        if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -1070,19 +997,11 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                        if (!nla)
                                goto nla_put_failure;
                        tcp_key = nla_data(nla);
-                       if (swkey->eth.type == htons(ETH_P_IP)) {
-                               tcp_key->tcp_src = output->ipv4.tp.src;
-                               tcp_key->tcp_dst = output->ipv4.tp.dst;
-                               if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
-                                                output->ipv4.tp.flags))
-                                       goto nla_put_failure;
-                       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
-                               tcp_key->tcp_src = output->ipv6.tp.src;
-                               tcp_key->tcp_dst = output->ipv6.tp.dst;
-                               if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
-                                                output->ipv6.tp.flags))
-                                       goto nla_put_failure;
-                       }
+                       tcp_key->tcp_src = output->tp.src;
+                       tcp_key->tcp_dst = output->tp.dst;
+                       if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
+                                        output->tp.flags))
+                               goto nla_put_failure;
                } else if (swkey->ip.proto == IPPROTO_UDP) {
                        struct ovs_key_udp *udp_key;
 
@@ -1090,13 +1009,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                        if (!nla)
                                goto nla_put_failure;
                        udp_key = nla_data(nla);
-                       if (swkey->eth.type == htons(ETH_P_IP)) {
-                               udp_key->udp_src = output->ipv4.tp.src;
-                               udp_key->udp_dst = output->ipv4.tp.dst;
-                       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
-                               udp_key->udp_src = output->ipv6.tp.src;
-                               udp_key->udp_dst = output->ipv6.tp.dst;
-                       }
+                       udp_key->udp_src = output->tp.src;
+                       udp_key->udp_dst = output->tp.dst;
                } else if (swkey->ip.proto == IPPROTO_SCTP) {
                        struct ovs_key_sctp *sctp_key;
 
@@ -1104,13 +1018,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                        if (!nla)
                                goto nla_put_failure;
                        sctp_key = nla_data(nla);
-                       if (swkey->eth.type == htons(ETH_P_IP)) {
-                               sctp_key->sctp_src = swkey->ipv4.tp.src;
-                               sctp_key->sctp_dst = swkey->ipv4.tp.dst;
-                       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
-                               sctp_key->sctp_src = swkey->ipv6.tp.src;
-                               sctp_key->sctp_dst = swkey->ipv6.tp.dst;
-                       }
+                       sctp_key->sctp_src = output->tp.src;
+                       sctp_key->sctp_dst = output->tp.dst;
                } else if (swkey->eth.type == htons(ETH_P_IP) &&
                           swkey->ip.proto == IPPROTO_ICMP) {
                        struct ovs_key_icmp *icmp_key;
@@ -1119,8 +1028,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                        if (!nla)
                                goto nla_put_failure;
                        icmp_key = nla_data(nla);
-                       icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
-                       icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
+                       icmp_key->icmp_type = ntohs(output->tp.src);
+                       icmp_key->icmp_code = ntohs(output->tp.dst);
                } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
                           swkey->ip.proto == IPPROTO_ICMPV6) {
                        struct ovs_key_icmpv6 *icmpv6_key;
@@ -1130,8 +1039,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                        if (!nla)
                                goto nla_put_failure;
                        icmpv6_key = nla_data(nla);
-                       icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
-                       icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);
+                       icmpv6_key->icmpv6_type = ntohs(output->tp.src);
+                       icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
 
                        if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
                            icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
@@ -1143,8 +1052,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                                nd_key = nla_data(nla);
                                memcpy(nd_key->nd_target, &output->ipv6.nd.target,
                                                        sizeof(nd_key->nd_target));
-                               memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
-                               memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
+                               ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
+                               ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
                        }
                }
        }
@@ -1309,13 +1218,10 @@ static int validate_and_copy_sample(const struct nlattr *attr,
 
 static int validate_tp_port(const struct sw_flow_key *flow_key)
 {
-       if (flow_key->eth.type == htons(ETH_P_IP)) {
-               if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
-                       return 0;
-       } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
-               if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
-                       return 0;
-       }
+       if ((flow_key->eth.type == htons(ETH_P_IP) ||
+            flow_key->eth.type == htons(ETH_P_IPV6)) &&
+           (flow_key->tp.src || flow_key->tp.dst))
+               return 0;
 
        return -EINVAL;
 }
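The simplified validate_tp_port() above works because the patch moves the transport-port fields out of the per-family 'ipv4'/'ipv6' sections of the flow key and into a shared 'tp' member, so one code path serves both ethertypes. A small standalone sketch of that layout under those assumptions (demo_key is an illustrative reduction, not the real struct sw_flow_key):

#include <stdio.h>
#include <stdint.h>

struct demo_key {
	uint16_t eth_type;                      /* 0x0800 = IPv4, 0x86DD = IPv6 */
	union {
		struct { uint32_t src, dst; } ipv4;
		struct { uint8_t src[16], dst[16]; } ipv6;
	} addr;                                 /* family-specific addresses */
	struct {
		uint16_t src;                   /* TCP/UDP/SCTP source port */
		uint16_t dst;                   /* TCP/UDP/SCTP destination port */
	} tp;                                   /* shared by both families */
};

/* Mirrors the rewritten validate_tp_port(): one branch covers both families. */
static int demo_validate_tp_port(const struct demo_key *key)
{
	if ((key->eth_type == 0x0800 || key->eth_type == 0x86DD) &&
	    (key->tp.src || key->tp.dst))
		return 0;
	return -1;
}

int main(void)
{
	struct demo_key key = { .eth_type = 0x86DD, .tp = { .src = 22 } };

	printf("valid: %d\n", demo_validate_tp_port(&key) == 0);
	return 0;
}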
index b31fbe28bc7a81a0640546b6e55be8841c32f8a5..440151045d3946329bf01e4fd5a1c81f0fd4e906 100644 (file)
@@ -45,7 +45,6 @@ int ovs_nla_put_flow(const struct sw_flow_key *,
 int ovs_nla_get_flow_metadata(struct sw_flow *flow,
                              const struct nlattr *attr);
 int ovs_nla_get_match(struct sw_flow_match *match,
-                     bool *exact_5tuple,
                      const struct nlattr *,
                      const struct nlattr *);
 
index 3c268b3d71c34baa0a8c70888823b37da454fffd..574c3abc9b307ef6609f8f8dc09ade4b3253b814 100644 (file)
@@ -48,6 +48,7 @@
 #define REHASH_INTERVAL                (10 * 60 * HZ)
 
 static struct kmem_cache *flow_cache;
+struct kmem_cache *flow_stats_cache __read_mostly;
 
 static u16 range_n_bytes(const struct sw_flow_key_range *range)
 {
@@ -57,8 +58,10 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       const struct sw_flow_mask *mask)
 {
-       const long *m = (long *)((u8 *)&mask->key + mask->range.start);
-       const long *s = (long *)((u8 *)src + mask->range.start);
+       const long *m = (const long *)((const u8 *)&mask->key +
+                               mask->range.start);
+       const long *s = (const long *)((const u8 *)src +
+                               mask->range.start);
        long *d = (long *)((u8 *)dst + mask->range.start);
        int i;
 
@@ -70,10 +73,11 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                *d++ = *s++ & *m++;
 }
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats)
+struct sw_flow *ovs_flow_alloc(void)
 {
        struct sw_flow *flow;
-       int cpu;
+       struct flow_stats *stats;
+       int node;
 
        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
@@ -81,27 +85,22 @@ struct sw_flow *ovs_flow_alloc(bool percpu_stats)
 
        flow->sf_acts = NULL;
        flow->mask = NULL;
+       flow->stats_last_writer = NUMA_NO_NODE;
 
-       flow->stats.is_percpu = percpu_stats;
+       /* Initialize the default stat node. */
+       stats = kmem_cache_alloc_node(flow_stats_cache,
+                                     GFP_KERNEL | __GFP_ZERO, 0);
+       if (!stats)
+               goto err;
 
-       if (!percpu_stats) {
-               flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
-               if (!flow->stats.stat)
-                       goto err;
+       spin_lock_init(&stats->lock);
 
-               spin_lock_init(&flow->stats.stat->lock);
-       } else {
-               flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
-               if (!flow->stats.cpu_stats)
-                       goto err;
+       RCU_INIT_POINTER(flow->stats[0], stats);
 
-               for_each_possible_cpu(cpu) {
-                       struct flow_stats *cpu_stats;
+       for_each_node(node)
+               if (node != 0)
+                       RCU_INIT_POINTER(flow->stats[node], NULL);
 
-                       cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-                       spin_lock_init(&cpu_stats->lock);
-               }
-       }
        return flow;
 err:
        kmem_cache_free(flow_cache, flow);
@@ -138,11 +137,13 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 
 static void flow_free(struct sw_flow *flow)
 {
-       kfree((struct sf_flow_acts __force *)flow->sf_acts);
-       if (flow->stats.is_percpu)
-               free_percpu(flow->stats.cpu_stats);
-       else
-               kfree(flow->stats.stat);
+       int node;
+
+       kfree((struct sw_flow_actions __force *)flow->sf_acts);
+       for_each_node(node)
+               if (flow->stats[node])
+                       kmem_cache_free(flow_stats_cache,
+                                       (struct flow_stats __force *)flow->stats[node]);
        kmem_cache_free(flow_cache, flow);
 }
 
@@ -158,25 +159,6 @@ void ovs_flow_free(struct sw_flow *flow, bool deferred)
        if (!flow)
                return;
 
-       if (flow->mask) {
-               struct sw_flow_mask *mask = flow->mask;
-
-               /* ovs-lock is required to protect mask-refcount and
-                * mask list.
-                */
-               ASSERT_OVSL();
-               BUG_ON(!mask->ref_count);
-               mask->ref_count--;
-
-               if (!mask->ref_count) {
-                       list_del_rcu(&mask->list);
-                       if (deferred)
-                               kfree_rcu(mask, rcu);
-                       else
-                               kfree(mask);
-               }
-       }
-
        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
@@ -375,7 +357,7 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
 static u32 flow_hash(const struct sw_flow_key *key, int key_start,
                     int key_end)
 {
-       u32 *hash_key = (u32 *)((u8 *)key + key_start);
+       const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;
 
        /* Make sure number of hash bytes are multiple of u32. */
@@ -397,8 +379,8 @@ static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
 {
-       const long *cp1 = (long *)((u8 *)key1 + key_start);
-       const long *cp2 = (long *)((u8 *)key2 + key_start);
+       const long *cp1 = (const long *)((const u8 *)key1 + key_start);
+       const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;
 
@@ -490,6 +472,25 @@ static struct table_instance *table_instance_expand(struct table_instance *ti)
        return table_instance_rehash(ti, ti->n_buckets * 2);
 }
 
+/* Remove 'mask' from the mask list, if it is not needed any more. */
+static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
+{
+       if (mask) {
+               /* ovs-lock is required to protect mask-refcount and
+                * mask list.
+                */
+               ASSERT_OVSL();
+               BUG_ON(!mask->ref_count);
+               mask->ref_count--;
+
+               if (!mask->ref_count) {
+                       list_del_rcu(&mask->list);
+                       kfree_rcu(mask, rcu);
+               }
+       }
+}
+
+/* Must be called with OVS mutex held. */
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
        struct table_instance *ti = ovsl_dereference(table->ti);
@@ -497,6 +498,11 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->hash_node[ti->node_ver]);
        table->count--;
+
+       /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
+        * accessible as long as the RCU read lock is held.
+        */
+       flow_mask_remove(table, flow->mask);
 }
 
 static struct sw_flow_mask *mask_alloc(void)
@@ -513,8 +519,8 @@ static struct sw_flow_mask *mask_alloc(void)
 static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
 {
-       u8 *a_ = (u8 *)&a->key + a->range.start;
-       u8 *b_ = (u8 *)&b->key + b->range.start;
+       const u8 *a_ = (const u8 *)&a->key + a->range.start;
+       const u8 *b_ = (const u8 *)&b->key + b->range.start;
 
        return  (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
@@ -559,6 +565,7 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
        return 0;
 }
 
+/* Must be called with OVS mutex held. */
 int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        struct sw_flow_mask *mask)
 {
@@ -597,16 +604,28 @@ int ovs_flow_init(void)
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
 
-       flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
-                                       0, NULL);
+       flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+                                      + (num_possible_nodes()
+                                         * sizeof(struct flow_stats *)),
+                                      0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;
 
+       flow_stats_cache
+               = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+                                   0, SLAB_HWCACHE_ALIGN, NULL);
+       if (flow_stats_cache == NULL) {
+               kmem_cache_destroy(flow_cache);
+               flow_cache = NULL;
+               return -ENOMEM;
+       }
+
        return 0;
 }
 
 /* Uninitializes the flow module. */
 void ovs_flow_exit(void)
 {
+       kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
 }
index baaeb101924d81a4beb373be93e07287ed9be020..ca8a5820f6153f67fb9ad987c4c156be882c92ea 100644 (file)
@@ -52,10 +52,12 @@ struct flow_table {
        unsigned int count;
 };
 
+extern struct kmem_cache *flow_stats_cache;
+
 int ovs_flow_init(void);
 void ovs_flow_exit(void);
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats);
+struct sw_flow *ovs_flow_alloc(void);
 void ovs_flow_free(struct sw_flow *, bool deferred);
 
 int ovs_flow_tbl_init(struct flow_table *);
index ebb6e2442554c89fa2b02f1f8f06b0a3b8acf39e..35ec4fed09e228c7e6fe889d2701cb3a3de7748a 100644 (file)
@@ -172,7 +172,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
        df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
                htons(IP_DF) : 0;
 
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
                             OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
@@ -256,7 +256,7 @@ static void gre_tnl_destroy(struct vport *vport)
 
        ovs_net = net_generic(net, ovs_net_id);
 
-       rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL);
+       RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
        ovs_vport_deferred_free(vport);
        gre_exit();
 }
index 729c68763fe70d150793e1f01a023d4d8f1b78c0..789af9280e77264b4d7f65ddb6c333e96fa4147f 100644 (file)
@@ -130,7 +130,7 @@ static void do_setup(struct net_device *netdev)
        netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netdev->destructor = internal_dev_destructor;
-       SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
+       netdev->ethtool_ops = &internal_dev_ethtool_ops;
        netdev->tx_queue_len = 0;
 
        netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
index e797a50ac2beec3a019bacf15e2e7c593355a46f..a93efa3f64c3eee0c9e575e619e056b276ba5754 100644 (file)
@@ -170,7 +170,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
                htons(IP_DF) : 0;
 
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        inet_get_local_port_range(net, &port_min, &port_max);
        src_port = vxlan_src_port(port_min, port_max, skb);
@@ -180,7 +180,8 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
                             OVS_CB(skb)->tun_key->ipv4_tos,
                             OVS_CB(skb)->tun_key->ipv4_ttl, df,
                             src_port, dst_port,
-                            htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
+                            htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8),
+                            false);
        if (err < 0)
                ip_rt_put(rt);
 error:
index d7e50a17396c5563c778ca9e0e07f9c9a730b56a..8d721e62f388d9990b7850942196440550a2a6bb 100644 (file)
@@ -172,7 +172,7 @@ void ovs_vport_deferred_free(struct vport *vport);
  */
 static inline void *vport_priv(const struct vport *vport)
 {
-       return (u8 *)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
+       return (u8 *)(uintptr_t)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
 }
 
 /**
@@ -185,9 +185,9 @@ static inline void *vport_priv(const struct vport *vport)
  * the result of a hash table lookup.  @priv must point to the start of the
  * private data area.
  */
-static inline struct vport *vport_from_priv(const void *priv)
+static inline struct vport *vport_from_priv(void *priv)
 {
-       return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
+       return (struct vport *)((u8 *)priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
 }
 
 void ovs_vport_receive(struct vport *, struct sk_buff *,
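vport_priv() and vport_from_priv() above implement the usual "private data follows the public struct" pattern: the type-specific private area starts at an aligned offset right after the vport, and the two helpers convert between the outer and inner pointers. A standalone model of the same arithmetic, with invented demo types (the alignment value is a stand-in for VPORT_ALIGN):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define DEMO_ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))
#define PRIV_ALIGN        8             /* stand-in for VPORT_ALIGN */

struct demo_vport {
	int port_no;
	/* private data of the concrete vport type follows in memory */
};

struct demo_netdev_priv {
	char ifname[16];
};

static void *demo_vport_priv(const struct demo_vport *vport)
{
	/* same shape as vport_priv(): skip the aligned public struct */
	return (char *)(uintptr_t)vport +
	       DEMO_ALIGN(sizeof(struct demo_vport), PRIV_ALIGN);
}

static struct demo_vport *demo_vport_from_priv(void *priv)
{
	/* inverse helper, as in vport_from_priv() */
	return (struct demo_vport *)((char *)priv -
	       DEMO_ALIGN(sizeof(struct demo_vport), PRIV_ALIGN));
}

int main(void)
{
	struct demo_vport *vport;
	struct demo_netdev_priv *priv;

	vport = calloc(1, DEMO_ALIGN(sizeof(*vport), PRIV_ALIGN) +
			  sizeof(struct demo_netdev_priv));
	if (!vport)
		return 1;
	priv = demo_vport_priv(vport);
	printf("round trip ok: %d\n", demo_vport_from_priv(priv) == vport);
	free(vport);
	return 0;
}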
index 533ce4ff108ad94ff0a1e5205bc17f9c91c0b3ce..92f2c7107eec4f307cc50cdfedfb4ea2db0e59de 100644 (file)
@@ -128,6 +128,7 @@ static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        struct packet_diag_req *req,
+                       bool may_report_filterinfo,
                        struct user_namespace *user_ns,
                        u32 portid, u32 seq, u32 flags, int sk_ino)
 {
@@ -172,7 +173,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                goto out_nlmsg_trim;
 
        if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
-           sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER))
+           sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
+                                    PACKET_DIAG_FILTER))
                goto out_nlmsg_trim;
 
        return nlmsg_end(skb, nlh);
@@ -188,9 +190,11 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct packet_diag_req *req;
        struct net *net;
        struct sock *sk;
+       bool may_report_filterinfo;
 
        net = sock_net(skb->sk);
        req = nlmsg_data(cb->nlh);
+       may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
 
        mutex_lock(&net->packet.sklist_lock);
        sk_for_each(sk, &net->packet.sklist) {
@@ -200,6 +204,7 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        goto next;
 
                if (sk_diag_fill(sk, skb, req,
+                                may_report_filterinfo,
                                 sk_user_ns(NETLINK_CB(cb->skb).sk),
                                 NETLINK_CB(cb->skb).portid,
                                 cb->nlh->nlmsg_seq, NLM_F_MULTI,
index dc15f430080831e74fade00799a661ab10cc6f84..b64151ade6b33a9cbacb0980d3ddbe03d8f7b4c8 100644 (file)
@@ -70,10 +70,10 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
        int err;
        u8 pnaddr;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
-       if (!capable(CAP_SYS_ADMIN))
+       if (!netlink_capable(skb, CAP_SYS_ADMIN))
                return -EPERM;
 
        ASSERT_RTNL();
@@ -233,10 +233,10 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
        int err;
        u8 dst;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
-       if (!capable(CAP_SYS_ADMIN))
+       if (!netlink_capable(skb, CAP_SYS_ADMIN))
                return -EPERM;
 
        ASSERT_RTNL();
index 37be6e226d1b46fefe8e0cf554580b1faf19cfb8..1dde91e3dc7033c575dcfc041a23402f98e52239 100644 (file)
@@ -298,7 +298,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
                rds_ib_stats_inc(s_ib_tx_cq_event);
 
                if (wc.wr_id == RDS_IB_ACK_WR_ID) {
-                       if (ic->i_ack_queued + HZ/2 < jiffies)
+                       if (time_after(jiffies, ic->i_ack_queued + HZ/2))
                                rds_ib_stats_inc(s_ib_tx_stalled);
                        rds_ib_ack_send_complete(ic);
                        continue;
@@ -315,7 +315,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
                        rm = rds_ib_send_unmap_op(ic, send, wc.status);
 
-                       if (send->s_queued + HZ/2 < jiffies)
+                       if (time_after(jiffies, send->s_queued + HZ/2))
                                rds_ib_stats_inc(s_ib_tx_stalled);
 
                        if (send->s_op) {
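The time_after() conversions above matter because jiffies is an unsigned counter that wraps: a direct comparison such as 'queued + HZ/2 < jiffies' misfires near the wrap point, while time_after() compares through a signed difference and stays correct. A standalone demonstration; TIME_AFTER copies the kernel's underlying trick and the tick values are made up:

#include <stdio.h>

/* Same trick as the kernel's time_after(): signed difference survives wrap. */
#define TIME_AFTER(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long hz = 100;                     /* pretend HZ */
	unsigned long queued = (unsigned long)-61;  /* just before the counter wraps */
	unsigned long now = 20;                     /* 81 ticks later, after the wrap */

	/* Naive comparison: the wrapped deadline looks huge, so the stall is missed. */
	printf("naive stalled?      %d\n", queued + hz / 2 < now);
	/* Signed-difference form still sees that the half-second deadline passed. */
	printf("time_after stalled? %d\n", TIME_AFTER(now, queued + hz / 2));
	return 0;
}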
index e40c3c5db2c41e543abed12149c791fa9edd2ec6..9105ea03aec5dc05bad0221eb00c4794eb6e463a 100644 (file)
@@ -232,7 +232,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
                }
 
                if (wc.wr_id == RDS_IW_ACK_WR_ID) {
-                       if (ic->i_ack_queued + HZ/2 < jiffies)
+                       if (time_after(jiffies, ic->i_ack_queued + HZ/2))
                                rds_iw_stats_inc(s_iw_tx_stalled);
                        rds_iw_ack_send_complete(ic);
                        continue;
@@ -267,7 +267,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
                        send->s_wr.opcode = 0xdead;
                        send->s_wr.num_sge = 1;
-                       if (send->s_queued + HZ/2 < jiffies)
+                       if (time_after(jiffies, send->s_queued + HZ/2))
                                rds_iw_stats_inc(s_iw_tx_stalled);
 
                        /* If a RDMA operation produced an error, signal this right
index 89c91515ed0c605b6ee63996d0c0a9692ea8fe91..139239d2cb228438e29f347b33035da15d5396c0 100644 (file)
@@ -111,8 +111,7 @@ static struct ctl_table rds_iw_sysctl_table[] = {
 
 void rds_iw_sysctl_exit(void)
 {
-       if (rds_iw_sysctl_hdr)
-               unregister_net_sysctl_table(rds_iw_sysctl_hdr);
+       unregister_net_sysctl_table(rds_iw_sysctl_hdr);
 }
 
 int rds_iw_sysctl_init(void)
index c2be901d19ee133b60c1b77006f02c88d8b1ec42..6cd9d1deafc395d6573b7e3b801e666106d1f257 100644 (file)
@@ -168,7 +168,7 @@ static int rds_rdma_listen_init(void)
                return ret;
        }
 
-       sin.sin_family = AF_INET,
+       sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
        sin.sin_port = (__force u16)htons(RDS_PORT);
 
index b5cb2aa08f33aa62ac5bff73684fc228f74dce7e..c3b0cd43eb56689e395581c4757402bad531e271 100644 (file)
@@ -94,8 +94,7 @@ static struct ctl_table rds_sysctl_rds_table[] = {
 
 void rds_sysctl_exit(void)
 {
-       if (rds_sysctl_reg_table)
-               unregister_net_sysctl_table(rds_sysctl_reg_table);
+       unregister_net_sysctl_table(rds_sysctl_reg_table);
 }
 
 int rds_sysctl_init(void)
index 4e638f85118595d59c888917a9c3f072050c5780..23ab4dcd1d9f03942aa4d70bc7f6d9aa401f7707 100644 (file)
@@ -153,7 +153,7 @@ int rds_tcp_listen_init(void)
        sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
        write_unlock_bh(&sock->sk->sk_callback_lock);
 
-       sin.sin_family = PF_INET,
+       sin.sin_family = PF_INET;
        sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
        sin.sin_port = (__force u16)htons(RDS_TCP_PORT);
 
index bd2a5b90400cdf2688f249c1bcaaf382a4e918fd..14c98e48f261ee49656eabb198d62648890b63b3 100644 (file)
@@ -36,8 +36,6 @@ struct rfkill_gpio_data {
        struct gpio_desc        *shutdown_gpio;
 
        struct rfkill           *rfkill_dev;
-       char                    *reset_name;
-       char                    *shutdown_name;
        struct clk              *clk;
 
        bool                    clk_enabled;
@@ -47,17 +45,14 @@ static int rfkill_gpio_set_power(void *data, bool blocked)
 {
        struct rfkill_gpio_data *rfkill = data;
 
-       if (blocked) {
-               gpiod_set_value(rfkill->shutdown_gpio, 0);
-               gpiod_set_value(rfkill->reset_gpio, 0);
-               if (!IS_ERR(rfkill->clk) && rfkill->clk_enabled)
-                       clk_disable(rfkill->clk);
-       } else {
-               if (!IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
-                       clk_enable(rfkill->clk);
-               gpiod_set_value(rfkill->reset_gpio, 1);
-               gpiod_set_value(rfkill->shutdown_gpio, 1);
-       }
+       if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
+               clk_enable(rfkill->clk);
+
+       gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked);
+       gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked);
+
+       if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled)
+               clk_disable(rfkill->clk);
 
        rfkill->clk_enabled = blocked;
 
@@ -87,10 +82,8 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
 {
        struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
        struct rfkill_gpio_data *rfkill;
-       const char *clk_name = NULL;
        struct gpio_desc *gpio;
        int ret;
-       int len;
 
        rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
        if (!rfkill)
@@ -101,28 +94,15 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
                if (ret)
                        return ret;
        } else if (pdata) {
-               clk_name = pdata->power_clk_name;
                rfkill->name = pdata->name;
                rfkill->type = pdata->type;
        } else {
                return -ENODEV;
        }
 
-       len = strlen(rfkill->name);
-       rfkill->reset_name = devm_kzalloc(&pdev->dev, len + 7, GFP_KERNEL);
-       if (!rfkill->reset_name)
-               return -ENOMEM;
-
-       rfkill->shutdown_name = devm_kzalloc(&pdev->dev, len + 10, GFP_KERNEL);
-       if (!rfkill->shutdown_name)
-               return -ENOMEM;
+       rfkill->clk = devm_clk_get(&pdev->dev, NULL);
 
-       snprintf(rfkill->reset_name, len + 6 , "%s_reset", rfkill->name);
-       snprintf(rfkill->shutdown_name, len + 9, "%s_shutdown", rfkill->name);
-
-       rfkill->clk = devm_clk_get(&pdev->dev, clk_name);
-
-       gpio = devm_gpiod_get_index(&pdev->dev, rfkill->reset_name, 0);
+       gpio = devm_gpiod_get_index(&pdev->dev, "reset", 0);
        if (!IS_ERR(gpio)) {
                ret = gpiod_direction_output(gpio, 0);
                if (ret)
@@ -130,7 +110,7 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
                rfkill->reset_gpio = gpio;
        }
 
-       gpio = devm_gpiod_get_index(&pdev->dev, rfkill->shutdown_name, 1);
+       gpio = devm_gpiod_get_index(&pdev->dev, "shutdown", 1);
        if (!IS_ERR(gpio)) {
                ret = gpiod_direction_output(gpio, 0);
                if (ret)
@@ -146,14 +126,6 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       if (pdata && pdata->gpio_runtime_setup) {
-               ret = pdata->gpio_runtime_setup(pdev);
-               if (ret) {
-                       dev_err(&pdev->dev, "can't set up gpio\n");
-                       return ret;
-               }
-       }
-
        rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
                                          rfkill->type, &rfkill_gpio_ops,
                                          rfkill);
@@ -174,20 +146,23 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
 static int rfkill_gpio_remove(struct platform_device *pdev)
 {
        struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
-       struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
 
-       if (pdata && pdata->gpio_runtime_close)
-               pdata->gpio_runtime_close(pdev);
        rfkill_unregister(rfkill->rfkill_dev);
        rfkill_destroy(rfkill->rfkill_dev);
 
        return 0;
 }
 
+#ifdef CONFIG_ACPI
 static const struct acpi_device_id rfkill_acpi_match[] = {
+       { "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
+       { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
+       { "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
        { "BCM4752", RFKILL_TYPE_GPS },
+       { "LNV4752", RFKILL_TYPE_GPS },
        { },
 };
+#endif
 
 static struct platform_driver rfkill_gpio_driver = {
        .probe = rfkill_gpio_probe,
index 7633a752c65e99189c3e7603e2ebdd6e7438b356..0ad080790a32a341a1ddc57d632302563964a247 100644 (file)
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
        _debug("tktlen: %x", tktlen);
        if (tktlen > AFSTOKEN_RK_TIX_MAX)
                return -EKEYREJECTED;
-       if (8 * 4 + tktlen != toklen)
+       if (toklen < 8 * 4 + tktlen)
                return -EKEYREJECTED;
 
        plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
index 8a5ba5add4bcd60e59a9b2468df88812212012f4..648778aef1a254b9739443e93012798a134d0d86 100644 (file)
@@ -948,7 +948,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
        u32 portid = skb ? NETLINK_CB(skb).portid : 0;
        int ret = 0, ovr = 0;
 
-       if ((n->nlmsg_type != RTM_GETACTION) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
index 29a30a14c31596cc51028be8db46eb4df9756f6e..45527e6b52dbf396cbb7415bb0613152a8320096 100644 (file)
@@ -134,7 +134,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
        int err;
        int tp_created = 0;
 
-       if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETTFILTER) &&
+           !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
 replay:
@@ -317,7 +318,8 @@ replay:
                }
        }
 
-       err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh);
+       err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
+                             n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
        if (err == 0) {
                if (tp_created) {
                        spin_lock_bh(root_lock);
@@ -504,7 +506,7 @@ void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
 EXPORT_SYMBOL(tcf_exts_destroy);
 
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
-                 struct nlattr *rate_tlv, struct tcf_exts *exts)
+                 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
 {
 #ifdef CONFIG_NET_CLS_ACT
        {
@@ -513,7 +515,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                INIT_LIST_HEAD(&exts->actions);
                if (exts->police && tb[exts->police]) {
                        act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
-                                               "police", TCA_ACT_NOREPLACE,
+                                               "police", ovr,
                                                TCA_ACT_BIND);
                        if (IS_ERR(act))
                                return PTR_ERR(act);
@@ -523,7 +525,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                } else if (exts->action && tb[exts->action]) {
                        int err;
                        err = tcf_action_init(net, tb[exts->action], rate_tlv,
-                                             NULL, TCA_ACT_NOREPLACE,
+                                             NULL, ovr,
                                              TCA_ACT_BIND, &exts->actions);
                        if (err)
                                return err;
@@ -543,14 +545,12 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
                     struct tcf_exts *src)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       if (!list_empty(&src->actions)) {
-               LIST_HEAD(tmp);
-               tcf_tree_lock(tp);
-               list_splice_init(&dst->actions, &tmp);
-               list_splice(&src->actions, &dst->actions);
-               tcf_tree_unlock(tp);
-               tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
-       }
+       LIST_HEAD(tmp);
+       tcf_tree_lock(tp);
+       list_splice_init(&dst->actions, &tmp);
+       list_splice(&src->actions, &dst->actions);
+       tcf_tree_unlock(tp);
+       tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
 #endif
 }
 EXPORT_SYMBOL(tcf_exts_change);
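The new 'ovr' argument threaded through tp->ops->change() and tcf_exts_validate() above carries whether an existing filter's actions may be overwritten: tc_ctl_tfilter() derives it from the netlink request, mapping NLM_F_CREATE to "no replace" and its absence to "replace". A tiny standalone sketch of that mapping; the two action-init modes are illustrative enum stand-ins, not the kernel's actual TCA_ACT_* values:

#include <stdio.h>

#define NLM_F_CREATE 0x400                      /* value from <linux/netlink.h> */

enum demo_act_mode {                            /* stand-ins for NOREPLACE/REPLACE */
	DEMO_ACT_NOREPLACE,
	DEMO_ACT_REPLACE,
};

/* Mirrors the mapping added to tc_ctl_tfilter(): a request that asks to
 * create a filter must not silently overwrite existing actions. */
static enum demo_act_mode demo_ovr_mode(unsigned int nlmsg_flags)
{
	return (nlmsg_flags & NLM_F_CREATE) ? DEMO_ACT_NOREPLACE
					    : DEMO_ACT_REPLACE;
}

int main(void)
{
	printf("create: %d, change: %d\n",
	       demo_ovr_mode(NLM_F_CREATE), demo_ovr_mode(0));
	return 0;
}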
index e98ca99c202bb5af6db77260f0996e6cacb098cf..0ae1813e3e90d55a1bf5993364502cebd58c1b22 100644 (file)
@@ -130,14 +130,14 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
 static int basic_set_parms(struct net *net, struct tcf_proto *tp,
                           struct basic_filter *f, unsigned long base,
                           struct nlattr **tb,
-                          struct nlattr *est)
+                          struct nlattr *est, bool ovr)
 {
        int err;
        struct tcf_exts e;
        struct tcf_ematch_tree t;
 
        tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE);
-       err = tcf_exts_validate(net, tp, tb, est, &e);
+       err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;
 
@@ -161,7 +161,7 @@ errout:
 
 static int basic_change(struct net *net, struct sk_buff *in_skb,
                        struct tcf_proto *tp, unsigned long base, u32 handle,
-                       struct nlattr **tca, unsigned long *arg)
+                       struct nlattr **tca, unsigned long *arg, bool ovr)
 {
        int err;
        struct basic_head *head = tp->root;
@@ -179,7 +179,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
        if (f != NULL) {
                if (handle && f->handle != handle)
                        return -EINVAL;
-               return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
+               return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
        }
 
        err = -ENOBUFS;
@@ -206,7 +206,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
                f->handle = head->hgenerator;
        }
 
-       err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
+       err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
        if (err < 0)
                goto errout;
 
index 8e3cf49118e3a2297214de12313bb499cbfe9054..13f64df2c710663f2753df92632c5575bc2fe317 100644 (file)
@@ -156,11 +156,11 @@ static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
 static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   struct cls_bpf_prog *prog,
                                   unsigned long base, struct nlattr **tb,
-                                  struct nlattr *est)
+                                  struct nlattr *est, bool ovr)
 {
        struct sock_filter *bpf_ops, *bpf_old;
        struct tcf_exts exts;
-       struct sock_fprog tmp;
+       struct sock_fprog_kern tmp;
        struct sk_filter *fp, *fp_old;
        u16 bpf_size, bpf_len;
        u32 classid;
@@ -170,7 +170,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                return -EINVAL;
 
        tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
-       ret = tcf_exts_validate(net, tp, tb, est, &exts);
+       ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
        if (ret < 0)
                return ret;
 
@@ -191,7 +191,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
 
        tmp.len = bpf_len;
-       tmp.filter = (struct sock_filter __user *) bpf_ops;
+       tmp.filter = bpf_ops;
 
        ret = sk_unattached_filter_create(&fp, &tmp);
        if (ret)
@@ -242,7 +242,7 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
-                         unsigned long *arg)
+                         unsigned long *arg, bool ovr)
 {
        struct cls_bpf_head *head = tp->root;
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
@@ -260,7 +260,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                if (handle && prog->handle != handle)
                        return -EINVAL;
                return cls_bpf_modify_existing(net, tp, prog, base, tb,
-                                              tca[TCA_RATE]);
+                                              tca[TCA_RATE], ovr);
        }
 
        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
@@ -277,7 +277,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                goto errout;
        }
 
-       ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE]);
+       ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
        if (ret < 0)
                goto errout;
 
index 8e2158ab551c0c9d7258ffae3e474788bfc64565..cacf01bd04f0a96660050c0e71da0840c8af0f95 100644 (file)
@@ -83,7 +83,7 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
 static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
                             struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
-                            unsigned long *arg)
+                            unsigned long *arg, bool ovr)
 {
        struct nlattr *tb[TCA_CGROUP_MAX + 1];
        struct cls_cgroup_head *head = tp->root;
@@ -119,7 +119,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
                return err;
 
        tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
-       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
        if (err < 0)
                return err;
 
index 257029c5433298f62101533ada3a81c246eb0d7a..35be16f7c192dc8d5d2ebdd38c2b27a144c41076 100644 (file)
@@ -349,7 +349,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
 static int flow_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base,
                       u32 handle, struct nlattr **tca,
-                      unsigned long *arg)
+                      unsigned long *arg, bool ovr)
 {
        struct flow_head *head = tp->root;
        struct flow_filter *f;
@@ -393,7 +393,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
        }
 
        tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
        if (err < 0)
                return err;
 
index 63a3ce75c02ee959fe67d6b203e01420d86b03ad..861b03ccfed0a55007ceb001a297b05906b36ed3 100644 (file)
@@ -169,7 +169,7 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
 
 static int
 fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
-       struct nlattr **tb, struct nlattr **tca, unsigned long base)
+       struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr)
 {
        struct fw_head *head = tp->root;
        struct tcf_exts e;
@@ -177,7 +177,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
        int err;
 
        tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE);
-       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
        if (err < 0)
                return err;
 
@@ -218,7 +218,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
                     struct tcf_proto *tp, unsigned long base,
                     u32 handle,
                     struct nlattr **tca,
-                    unsigned long *arg)
+                    unsigned long *arg, bool ovr)
 {
        struct fw_head *head = tp->root;
        struct fw_filter *f = (struct fw_filter *) *arg;
@@ -236,7 +236,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
        if (f != NULL) {
                if (f->id != handle && handle)
                        return -EINVAL;
-               return fw_change_attrs(net, tp, f, tb, tca, base);
+               return fw_change_attrs(net, tp, f, tb, tca, base, ovr);
        }
 
        if (!handle)
@@ -264,7 +264,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
        tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
        f->id = handle;
 
-       err = fw_change_attrs(net, tp, f, tb, tca, base);
+       err = fw_change_attrs(net, tp, f, tb, tca, base, ovr);
        if (err < 0)
                goto errout;
 
index 1ad3068f2ce16e2c6ba15985c40cf899a7030ba7..dd9fc2523c76a2b0b9fe4c5225328cc29c1649f6 100644 (file)
@@ -333,7 +333,8 @@ static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
 static int route4_set_parms(struct net *net, struct tcf_proto *tp,
                            unsigned long base, struct route4_filter *f,
                            u32 handle, struct route4_head *head,
-                           struct nlattr **tb, struct nlattr *est, int new)
+                           struct nlattr **tb, struct nlattr *est, int new,
+                           bool ovr)
 {
        int err;
        u32 id = 0, to = 0, nhandle = 0x8000;
@@ -343,7 +344,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
        struct tcf_exts e;
 
        tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
-       err = tcf_exts_validate(net, tp, tb, est, &e);
+       err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;
 
@@ -428,7 +429,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
-                      unsigned long *arg)
+                      unsigned long *arg, bool ovr)
 {
        struct route4_head *head = tp->root;
        struct route4_filter *f, *f1, **fp;
@@ -455,7 +456,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
                        old_handle = f->handle;
 
                err = route4_set_parms(net, tp, base, f, handle, head, tb,
-                       tca[TCA_RATE], 0);
+                       tca[TCA_RATE], 0, ovr);
                if (err < 0)
                        return err;
 
@@ -479,7 +480,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
 
        tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
        err = route4_set_parms(net, tp, base, f, handle, head, tb,
-               tca[TCA_RATE], 1);
+               tca[TCA_RATE], 1, ovr);
        if (err < 0)
                goto errout;
 
index 19f8e5dfa8bdaebcd9ab903050047e7e49690530..1020e233a5d6c74092fb153133b1bfed7f4177a9 100644 (file)
@@ -415,7 +415,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
-                      unsigned long *arg)
+                      unsigned long *arg, bool ovr)
 {
        struct rsvp_head *data = tp->root;
        struct rsvp_filter *f, **fp;
@@ -436,7 +436,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
                return err;
 
        tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
-       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
        if (err < 0)
                return err;
 
index eed8404443d8f0145942c3e459b79934ada9c48d..c721cd4a469fe39f42186d82ec7ab04f17bcba26 100644 (file)
@@ -188,11 +188,17 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
        [TCA_TCINDEX_CLASSID]           = { .type = NLA_U32 },
 };
 
+static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+{
+       memset(r, 0, sizeof(*r));
+       tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+}
+
 static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                  u32 handle, struct tcindex_data *p,
                  struct tcindex_filter_result *r, struct nlattr **tb,
-                struct nlattr *est)
+                 struct nlattr *est, bool ovr)
 {
        int err, balloc = 0;
        struct tcindex_filter_result new_filter_result, *old_r = r;
@@ -202,20 +208,16 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        struct tcf_exts e;
 
        tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-       err = tcf_exts_validate(net, tp, tb, est, &e);
+       err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;
 
        memcpy(&cp, p, sizeof(cp));
-       memset(&new_filter_result, 0, sizeof(new_filter_result));
-       tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+       tcindex_filter_result_init(&new_filter_result);
 
+       tcindex_filter_result_init(&cr);
        if (old_r)
-               memcpy(&cr, r, sizeof(cr));
-       else {
-               memset(&cr, 0, sizeof(cr));
-               tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-       }
+               cr.res = r->res;
 
        if (tb[TCA_TCINDEX_HASH])
                cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        err = -ENOMEM;
        if (!cp.perfect && !cp.h) {
                if (valid_perfect_hash(&cp)) {
+                       int i;
+
                        cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
                        if (!cp.perfect)
                                goto errout;
+                       for (i = 0; i < cp.hash; i++)
+                               tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+                                             TCA_TCINDEX_POLICE);
                        balloc = 1;
                } else {
                        cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                tcf_bind_filter(tp, &cr.res, base);
        }
 
-       tcf_exts_change(tp, &cr.exts, &e);
+       if (old_r)
+               tcf_exts_change(tp, &r->exts, &e);
+       else
+               tcf_exts_change(tp, &cr.exts, &e);
 
        tcf_tree_lock(tp);
        if (old_r && old_r != r)
-               memset(old_r, 0, sizeof(*old_r));
+               tcindex_filter_result_init(old_r);
 
        memcpy(p, &cp, sizeof(cp));
-       memcpy(r, &cr, sizeof(cr));
+       r->res = cr.res;
 
        if (r == &new_filter_result) {
                struct tcindex_filter **fp;
@@ -331,7 +341,7 @@ errout:
 static int
 tcindex_change(struct net *net, struct sk_buff *in_skb,
               struct tcf_proto *tp, unsigned long base, u32 handle,
-              struct nlattr **tca, unsigned long *arg)
+              struct nlattr **tca, unsigned long *arg, bool ovr)
 {
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_TCINDEX_MAX + 1];
@@ -351,7 +361,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
                return err;
 
        return tcindex_set_parms(net, tp, base, handle, p, r, tb,
-                                tca[TCA_RATE]);
+                                tca[TCA_RATE], ovr);
 }
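The tcindex hunks above fold the repeated memset()+tcf_exts_init() pairs into a single tcindex_filter_result_init() helper and copy only the classification result (r->res / cr.res) rather than memcpy()ing the whole filter result, which would also have copied the embedded action list. A standalone sketch of that shape, with illustrative types:

/* Userspace sketch of the tcindex refactor: a small init helper replaces
 * repeated memset()+init pairs, and only the classification result is
 * copied, not the whole struct (which also embeds the action list).
 */
#include <stdio.h>
#include <string.h>

struct exts   { int action_cnt; };
struct result { int classid; struct exts exts; };

static void exts_init(struct exts *e)   { e->action_cnt = 0; }

static void result_init(struct result *r)
{
        memset(r, 0, sizeof(*r));
        exts_init(&r->exts);
}

int main(void)
{
        struct result old = { .classid = 7, .exts = { .action_cnt = 3 } };
        struct result new;

        result_init(&new);
        new.classid = old.classid;  /* copy only the result ... */
        /* ... and leave old.exts alone; actions are swapped separately */
        printf("new: classid=%d actions=%d\n", new.classid, new.exts.action_cnt);
        return 0;
}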
 
 
index 84c28daff8484f643e5bed4176f6b225eec34e66..c39b583ace3229d4bae6a7b3774593e5eebd7141 100644 (file)
@@ -486,13 +486,13 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
 static int u32_set_parms(struct net *net, struct tcf_proto *tp,
                         unsigned long base, struct tc_u_hnode *ht,
                         struct tc_u_knode *n, struct nlattr **tb,
-                        struct nlattr *est)
+                        struct nlattr *est, bool ovr)
 {
        int err;
        struct tcf_exts e;
 
        tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
-       err = tcf_exts_validate(net, tp, tb, est, &e);
+       err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;
 
@@ -545,7 +545,7 @@ errout:
 static int u32_change(struct net *net, struct sk_buff *in_skb,
                      struct tcf_proto *tp, unsigned long base, u32 handle,
                      struct nlattr **tca,
-                     unsigned long *arg)
+                     unsigned long *arg, bool ovr)
 {
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
@@ -569,7 +569,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                        return -EINVAL;
 
                return u32_set_parms(net, tp, base, n->ht_up, n, tb,
-                                    tca[TCA_RATE]);
+                                    tca[TCA_RATE], ovr);
        }
 
        if (tb[TCA_U32_DIVISOR]) {
@@ -656,7 +656,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        }
 #endif
 
-       err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE]);
+       err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
        if (err == 0) {
                struct tc_u_knode **ins;
                for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
index a0b84e0e22deb4c9e998499b1e202ebaafda95f8..fd14df56e5ffdc2d96d61abbe55b2607c95179c5 100644 (file)
@@ -1084,7 +1084,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
        struct Qdisc *p = NULL;
        int err;
 
-       if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETQDISC) &&
+           !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1151,7 +1152,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
        struct Qdisc *q, *p;
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
 replay:
@@ -1490,7 +1491,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
        u32 qid;
        int err;
 
-       if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETTCLASS) &&
+           !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
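The three sch_api.c hunks above swap the global capable(CAP_NET_ADMIN) test for netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN), i.e. permission is now judged from the credentials of the socket that sent the netlink request, relative to the user namespace owning the network namespace. The sketch below is only a loose userspace analogy of "decide from the sender's recorded credentials and the object's namespace"; it is not the netlink API.

/* Illustration only: the decision takes the sender's recorded capability
 * and the namespace that owns the object into account, rather than only a
 * global capability of the current task.
 */
#include <stdbool.h>
#include <stdio.h>

struct msg { int sender_ns; bool sender_has_cap; };

static bool ns_capable_for(const struct msg *m, int object_ns)
{
        return m->sender_has_cap && m->sender_ns == object_ns;
}

int main(void)
{
        struct msg m = { .sender_ns = 1, .sender_has_cap = true };

        printf("same ns:  %s\n", ns_capable_for(&m, 1) ? "allowed" : "denied");
        printf("other ns: %s\n", ns_capable_for(&m, 2) ? "allowed" : "denied");
        return 0;
}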
index edee03d922e28678cc4f4ba843f600236cf06f08..6aab8619bbb002570d206d9ff6bb9c90eb82261a 100644 (file)
@@ -414,7 +414,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                }
                bucket->deficit = weight * q->quantum;
        }
-       if (++sch->q.qlen < sch->limit)
+       if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;
 
        q->drop_overlimit++;
@@ -553,11 +553,6 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
        if (err < 0)
                return err;
 
-       sch_tree_lock(sch);
-
-       if (tb[TCA_HHF_BACKLOG_LIMIT])
-               sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
-
        if (tb[TCA_HHF_QUANTUM])
                new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);
 
@@ -567,6 +562,12 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
        non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
        if (non_hh_quantum > INT_MAX)
                return -EINVAL;
+
+       sch_tree_lock(sch);
+
+       if (tb[TCA_HHF_BACKLOG_LIMIT])
+               sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
+
        q->quantum = new_quantum;
        q->hhf_non_hh_weight = new_hhf_non_hh_weight;
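The hhf_change() hunks move sch_tree_lock() to after the parameter validation, so an oversized quantum/weight product is rejected before the qdisc is locked or sch->limit modified, and the function no longer returns -EINVAL with the tree lock held; the enqueue hunk also changes the comparison to <= so exactly "limit" packets are accepted. A standalone sketch of the validate-first, apply-under-lock ordering, with a pthread mutex standing in for sch_tree_lock():

/* Sketch of the reordering: validate every new parameter first, then take
 * the lock and apply all of them, so an invalid request can no longer leave
 * the object half-updated or bail out while the lock is held.
 */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int limit = 1000, quantum = 1514, weight = 2;

static int change(unsigned int new_limit, unsigned int new_quantum,
                  unsigned int new_weight)
{
        unsigned long long non_hh = (unsigned long long)new_quantum * new_weight;

        if (non_hh > INT_MAX)           /* validate before locking */
                return -1;

        pthread_mutex_lock(&tree_lock); /* apply everything atomically */
        limit = new_limit;
        quantum = new_quantum;
        weight = new_weight;
        pthread_mutex_unlock(&tree_lock);
        return 0;
}

int main(void)
{
        printf("ok:  %d\n", change(2000, 1514, 2));
        printf("bad: %d\n", change(2000, 1u << 31, 4));  /* rejected, state intact */
        printf("limit=%u quantum=%u weight=%u\n", limit, quantum, weight);
        return 0;
}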
 
index 2b1738ef9394537589b403f7d299181e18fb2315..1999592ba88c9f3d574428e1ce076c10f724c13a 100644 (file)
@@ -216,7 +216,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
        IP6_ECN_flow_xmit(sk, fl6->flowlabel);
 
        if (!(transport->param_flags & SPP_PMTUD_ENABLE))
-               skb->local_df = 1;
+               skb->ignore_df = 1;
 
        SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
@@ -943,7 +943,6 @@ static struct inet_protosw sctpv6_seqpacket_protosw = {
        .protocol      = IPPROTO_SCTP,
        .prot          = &sctpv6_prot,
        .ops           = &inet6_seqpacket_ops,
-       .no_check      = 0,
        .flags         = SCTP_PROTOSW_FLAG
 };
 static struct inet_protosw sctpv6_stream_protosw = {
@@ -951,7 +950,6 @@ static struct inet_protosw sctpv6_stream_protosw = {
        .protocol      = IPPROTO_SCTP,
        .prot          = &sctpv6_prot,
        .ops           = &inet6_seqpacket_ops,
-       .no_check      = 0,
        .flags         = SCTP_PROTOSW_FLAG,
 };
 
index 0f4d15fc2627bcccb546ee2da883f812daa4e4a1..01ab8e0723f04ea845ba81045228786ef07c97ad 100644 (file)
@@ -591,7 +591,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 
        pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len);
 
-       nskb->local_df = packet->ipfragok;
+       nskb->ignore_df = packet->ipfragok;
        tp->af_specific->sctp_xmit(nskb, tp);
 
 out:
index 0947f1e15eb88a0381434a05da082bf3d6fd9797..34229ee7f379902b16a7659f82545f10e5908b17 100644 (file)
@@ -78,7 +78,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 
        for (i = 0; sctp_snmp_list[i].name != NULL; i++)
                seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
-                          snmp_fold_field((void __percpu **)net->sctp.sctp_statistics,
+                          snmp_fold_field(net->sctp.sctp_statistics,
                                      sctp_snmp_list[i].entry));
 
        return 0;
index c09757fbf8039e76c935c1dbf33ab2b19be228b1..6789d785e698325afc7c8cfd65d1b69c2e50047a 100644 (file)
@@ -491,8 +491,13 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                        continue;
                if ((laddr->state == SCTP_ADDR_SRC) &&
                    (AF_INET == laddr->a.sa.sa_family)) {
-                       fl4->saddr = laddr->a.v4.sin_addr.s_addr;
                        fl4->fl4_sport = laddr->a.v4.sin_port;
+                       flowi4_update_output(fl4,
+                                            asoc->base.sk->sk_bound_dev_if,
+                                            RT_CONN_FLAGS(asoc->base.sk),
+                                            daddr->v4.sin_addr.s_addr,
+                                            laddr->a.v4.sin_addr.s_addr);
+
                        rt = ip_route_output_key(sock_net(sk), fl4);
                        if (!IS_ERR(rt)) {
                                dst = &rt->dst;
@@ -1012,7 +1017,6 @@ static struct inet_protosw sctp_seqpacket_protosw = {
        .protocol   = IPPROTO_SCTP,
        .prot       = &sctp_prot,
        .ops        = &inet_seqpacket_ops,
-       .no_check   = 0,
        .flags      = SCTP_PROTOSW_FLAG
 };
 static struct inet_protosw sctp_stream_protosw = {
@@ -1020,7 +1024,6 @@ static struct inet_protosw sctp_stream_protosw = {
        .protocol   = IPPROTO_SCTP,
        .prot       = &sctp_prot,
        .ops        = &inet_seqpacket_ops,
-       .no_check   = 0,
        .flags      = SCTP_PROTOSW_FLAG
 };
 
@@ -1100,14 +1103,15 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
 
 static inline int init_sctp_mibs(struct net *net)
 {
-       return snmp_mib_init((void __percpu **)net->sctp.sctp_statistics,
-                            sizeof(struct sctp_mib),
-                            __alignof__(struct sctp_mib));
+       net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib);
+       if (!net->sctp.sctp_statistics)
+               return -ENOMEM;
+       return 0;
 }
 
 static inline void cleanup_sctp_mibs(struct net *net)
 {
-       snmp_mib_free((void __percpu **)net->sctp.sctp_statistics);
+       free_percpu(net->sctp.sctp_statistics);
 }
 
 static void sctp_v4_pf_init(void)
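In the protocol.c hunks above, the per-net SCTP MIB becomes a plain alloc_percpu(struct sctp_mib)/free_percpu() pair, and snmp_fold_field() (see the proc.c hunk) takes the percpu pointer directly instead of the old (void __percpu **) array. The standalone sketch below imitates that "one counter block per CPU, summed on read" idea with an ordinary array standing in for the percpu allocator; everything in it is illustrative.

/* Illustration of per-CPU counters folded on read; calloc() stands in for
 * alloc_percpu() and a plain array index for "this CPU".
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4
#define NR_MIBS 3

struct mib { unsigned long mibs[NR_MIBS]; };

static struct mib *alloc_stats(void)
{
        return calloc(NR_CPUS, sizeof(struct mib));   /* one block per CPU */
}

static unsigned long fold_field(const struct mib *stats, int field)
{
        unsigned long sum = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                sum += stats[cpu].mibs[field];
        return sum;
}

int main(void)
{
        struct mib *stats = alloc_stats();

        if (!stats)
                return 1;
        stats[0].mibs[1] += 2;                 /* "this CPU" bumps a counter */
        stats[3].mibs[1] += 5;
        printf("field 1 total: %lu\n", fold_field(stats, 1));
        free(stats);                           /* free_percpu() analogue */
        return 0;
}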
index 5d6883ff00c3b7056639f06254caffcb14349e27..fef2acdf4a2e675c55dc9fbf2124d132499b89e3 100644 (file)
@@ -496,11 +496,10 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
 
        /* If the transport error count is greater than the pf_retrans
         * threshold, and less than pathmaxrtx, and if the current state
-        * is not SCTP_UNCONFIRMED, then mark this transport as Partially
-        * Failed, see SCTP Quick Failover Draft, section 5.1
+        * is SCTP_ACTIVE, then mark this transport as Partially Failed,
+        * see SCTP Quick Failover Draft, section 5.1
         */
-       if ((transport->state != SCTP_PF) &&
-          (transport->state != SCTP_UNCONFIRMED) &&
+       if ((transport->state == SCTP_ACTIVE) &&
           (asoc->pf_retrans < transport->pathmaxrxt) &&
           (transport->error_count > asoc->pf_retrans)) {
 
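The sm_sideeffect.c hunk above tightens the quick-failover test: a transport is marked partially failed only when its current state is SCTP_ACTIVE (rather than merely "not PF and not UNCONFIRMED"), its error count has passed pf_retrans, and pf_retrans is below pathmaxrxt. A tiny standalone restatement of that predicate, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

enum state { SCTP_ACTIVE, SCTP_PF, SCTP_UNCONFIRMED, SCTP_INACTIVE };

static bool should_mark_pf(enum state s, int error_count,
                           int pf_retrans, int pathmaxrxt)
{
        return s == SCTP_ACTIVE &&
               pf_retrans < pathmaxrxt &&
               error_count > pf_retrans;
}

int main(void)
{
        printf("%d\n", should_mark_pf(SCTP_ACTIVE, 3, 2, 5));      /* 1 */
        printf("%d\n", should_mark_pf(SCTP_UNCONFIRMED, 3, 2, 5)); /* 0 */
        return 0;
}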
index e37b2cbbf177da9739d54a2b101d3a4e74299745..429899689408caec64cf5c36b4eb0b00369f8e48 100644 (file)
@@ -5946,8 +5946,9 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                /* Search for an available port. */
                int low, high, remaining, index;
                unsigned int rover;
+               struct net *net = sock_net(sk);
 
-               inet_get_local_port_range(sock_net(sk), &low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
                rover = prandom_u32() % remaining + low;
 
@@ -5955,7 +5956,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                        rover++;
                        if ((rover < low) || (rover > high))
                                rover = low;
-                       if (inet_is_reserved_local_port(rover))
+                       if (inet_is_local_reserved_port(net, rover))
                                continue;
                        index = sctp_phashfn(sock_net(sk), rover);
                        head = &sctp_port_hashtable[index];
@@ -6945,7 +6946,8 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
        newsk->sk_type = sk->sk_type;
        newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
        newsk->sk_flags = sk->sk_flags;
-       newsk->sk_no_check = sk->sk_no_check;
+       newsk->sk_no_check_tx = sk->sk_no_check_tx;
+       newsk->sk_no_check_rx = sk->sk_no_check_rx;
        newsk->sk_reuse = sk->sk_reuse;
 
        newsk->sk_shutdown = sk->sk_shutdown;
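The socket.c hunk above reflects the tree-wide split of sk_no_check into separate transmit and receive flags, which sctp_copy_sock() now copies individually. A minimal sketch of the split, with an illustrative struct rather than the kernel's struct sock:

#include <stdbool.h>
#include <stdio.h>

struct sock_lite {
        bool no_check_tx;   /* don't generate checksums on output */
        bool no_check_rx;   /* accept packets with zero checksum */
};

static void copy_sock(struct sock_lite *newsk, const struct sock_lite *sk)
{
        newsk->no_check_tx = sk->no_check_tx;   /* copied separately now */
        newsk->no_check_rx = sk->no_check_rx;
}

int main(void)
{
        struct sock_lite parent = { .no_check_tx = true, .no_check_rx = false };
        struct sock_lite child;

        copy_sock(&child, &parent);
        printf("tx=%d rx=%d\n", child.no_check_tx, child.no_check_rx);
        return 0;
}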
index c82fdc1eab7c359dbee3812db3f3707fa5c65560..7e5eb75549902eeeb2c0c0889daca8ee53317ccf 100644 (file)
@@ -436,20 +436,21 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 
 int sctp_sysctl_net_register(struct net *net)
 {
-       struct ctl_table *table = sctp_net_table;
-
-       if (!net_eq(net, &init_net)) {
-               int i;
+       struct ctl_table *table;
+       int i;
 
-               table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
-               if (!table)
-                       return -ENOMEM;
+       table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
 
-               for (i = 0; table[i].data; i++)
-                       table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
-       }
+       for (i = 0; table[i].data; i++)
+               table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
 
        net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
+       if (net->sctp.sysctl_header == NULL) {
+               kfree(table);
+               return -ENOMEM;
+       }
        return 0;
 }
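The sysctl.c hunk above makes every network namespace (including init_net) get its own kmemdup()ed copy of sctp_net_table, rebases each entry's .data pointer by the offset between the namespace's sctp state and init_net's, and frees the copy if register_net_sysctl() fails. A standalone sketch of that duplicate-and-rebase pattern, with illustrative structs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sctp_cfg  { int rto_min; int rto_max; };
struct ctl_entry { const char *name; void *data; };

static struct sctp_cfg init_cfg = { .rto_min = 1, .rto_max = 60 };

static struct ctl_entry template[] = {
        { "rto_min", &init_cfg.rto_min },
        { "rto_max", &init_cfg.rto_max },
        { NULL, NULL },
};

static struct ctl_entry *register_for(struct sctp_cfg *cfg)
{
        struct ctl_entry *table = malloc(sizeof(template));

        if (!table)
                return NULL;
        memcpy(table, template, sizeof(template));
        for (int i = 0; table[i].data; i++)           /* rebase .data */
                table[i].data = (char *)table[i].data +
                                ((char *)cfg - (char *)&init_cfg);
        return table;
}

int main(void)
{
        struct sctp_cfg netns_cfg = { .rto_min = 5, .rto_max = 120 };
        struct ctl_entry *t = register_for(&netns_cfg);

        if (!t)
                return 1;
        printf("%s = %d\n", t[0].name, *(int *)t[0].data);  /* prints 5 */
        free(t);
        return 0;
}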
 
index 25a3dcf15cae97ff54f315412848c269acef3b01..1dec6043e4de7f208f2d1df23ae658868c004f5a 100644 (file)
@@ -866,8 +866,6 @@ static void xs_reset_transport(struct sock_xprt *transport)
        xs_restore_old_callbacks(transport, sk);
        write_unlock_bh(&sk->sk_callback_lock);
 
-       sk->sk_no_check = 0;
-
        trace_rpc_socket_close(&transport->xprt, sock);
        sock_release(sock);
 }
@@ -2046,7 +2044,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                sk->sk_user_data = xprt;
                sk->sk_data_ready = xs_udp_data_ready;
                sk->sk_write_space = xs_udp_write_space;
-               sk->sk_no_check = UDP_CSUM_NORCV;
                sk->sk_allocation = GFP_ATOMIC;
 
                xprt_set_connected(xprt);
index b282f7130d2bb51f0dee12e4800a9a0ad54a33a7..a080c66d819a032233a963512d849f757cc979e2 100644 (file)
@@ -5,7 +5,7 @@
 obj-$(CONFIG_TIPC) := tipc.o
 
 tipc-y += addr.o bcast.o bearer.o config.o \
-          core.o handler.o link.o discover.o msg.o  \
+          core.o link.o discover.o msg.o  \
           name_distr.o  subscr.o name_table.o net.o  \
           netlink.o node.o node_subscr.o port.o ref.o  \
           socket.o log.o eth_media.o server.o
index 119a59b4bec6cc5c5fecbf568f199895608426bc..671f9817b4f4c3a6e8c0a0a9eec9e96e1cb9b359 100644 (file)
@@ -71,7 +71,7 @@ struct tipc_bcbearer_pair {
  * Note: The fields labelled "temporary" are incorporated into the bearer
  * to avoid consuming potentially limited stack space through the use of
  * large local variables within multicast routines.  Concurrent access is
- * prevented through use of the spinlock "bc_lock".
+ * prevented through use of the spinlock "bclink_lock".
  */
 struct tipc_bcbearer {
        struct tipc_bearer bearer;
@@ -84,28 +84,27 @@ struct tipc_bcbearer {
 
 /**
  * struct tipc_bclink - link used for broadcast messages
+ * @lock: spinlock governing access to structure
  * @link: (non-standard) broadcast link structure
  * @node: (non-standard) node structure representing b'cast link's peer node
+ * @flags: represent bclink states
  * @bcast_nodes: map of broadcast-capable nodes
  * @retransmit_to: node that most recently requested a retransmit
  *
  * Handles sequence numbering, fragmentation, bundling, etc.
  */
 struct tipc_bclink {
+       spinlock_t lock;
        struct tipc_link link;
        struct tipc_node node;
+       unsigned int flags;
        struct tipc_node_map bcast_nodes;
        struct tipc_node *retransmit_to;
 };
 
-static struct tipc_bcbearer bcast_bearer;
-static struct tipc_bclink bcast_link;
-
-static struct tipc_bcbearer *bcbearer = &bcast_bearer;
-static struct tipc_bclink *bclink = &bcast_link;
-static struct tipc_link *bcl = &bcast_link.link;
-
-static DEFINE_SPINLOCK(bc_lock);
+static struct tipc_bcbearer *bcbearer;
+static struct tipc_bclink *bclink;
+static struct tipc_link *bcl;
 
 const char tipc_bclink_name[] = "broadcast-link";
 
@@ -115,6 +114,35 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
 static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
 static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
 
+static void tipc_bclink_lock(void)
+{
+       spin_lock_bh(&bclink->lock);
+}
+
+static void tipc_bclink_unlock(void)
+{
+       struct tipc_node *node = NULL;
+
+       if (likely(!bclink->flags)) {
+               spin_unlock_bh(&bclink->lock);
+               return;
+       }
+
+       if (bclink->flags & TIPC_BCLINK_RESET) {
+               bclink->flags &= ~TIPC_BCLINK_RESET;
+               node = tipc_bclink_retransmit_to();
+       }
+       spin_unlock_bh(&bclink->lock);
+
+       if (node)
+               tipc_link_reset_all(node);
+}
+
+void tipc_bclink_set_flags(unsigned int flags)
+{
+       bclink->flags |= flags;
+}
+
 static u32 bcbuf_acks(struct sk_buff *buf)
 {
        return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
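tipc_bclink_unlock(), added above, lets code running under the broadcast-link lock merely flag a needed reset (TIPC_BCLINK_RESET); the unlock wrapper samples the flag, drops the lock, and only then calls tipc_link_reset_all() on the recorded node. A standalone sketch of that defer-heavy-work-to-unlock pattern, with a pthread mutex and a stub reset function as stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool reset_pending;

static void do_reset(void)           /* heavy work, must run unlocked */
{
        printf("resetting link outside the lock\n");
}

static void bclink_lock(void)   { pthread_mutex_lock(&lock); }

static void bclink_unlock(void)
{
        bool need_reset = reset_pending;     /* sample flag while locked */

        reset_pending = false;
        pthread_mutex_unlock(&lock);
        if (need_reset)
                do_reset();                  /* performed after unlock */
}

int main(void)
{
        bclink_lock();
        reset_pending = true;                /* e.g. tipc_bclink_set_flags() */
        bclink_unlock();
        return 0;
}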
@@ -132,16 +160,16 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
 
 void tipc_bclink_add_node(u32 addr)
 {
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        tipc_nmap_add(&bclink->bcast_nodes, addr);
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 }
 
 void tipc_bclink_remove_node(u32 addr)
 {
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        tipc_nmap_remove(&bclink->bcast_nodes, addr);
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 }
 
 static void bclink_set_last_sent(void)
@@ -167,7 +195,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
 /**
  * tipc_bclink_retransmit_to - get most recent node to request retransmission
  *
- * Called with bc_lock locked
+ * Called with bclink_lock locked
  */
 struct tipc_node *tipc_bclink_retransmit_to(void)
 {
@@ -179,7 +207,7 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
  * @after: sequence number of last packet to *not* retransmit
  * @to: sequence number of last packet to retransmit
  *
- * Called with bc_lock locked
+ * Called with bclink_lock locked
  */
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
@@ -196,7 +224,7 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
  * @n_ptr: node that sent acknowledgement info
  * @acked: broadcast sequence # that has been acknowledged
  *
- * Node is locked, bc_lock unlocked.
+ * Node is locked, bclink_lock unlocked.
  */
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 {
@@ -204,8 +232,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        struct sk_buff *next;
        unsigned int released = 0;
 
-       spin_lock_bh(&bc_lock);
-
+       tipc_bclink_lock();
        /* Bail out if tx queue is empty (no clean up is required) */
        crs = bcl->first_out;
        if (!crs)
@@ -269,7 +296,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                tipc_link_wakeup_ports(bcl, 0);
 exit:
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 }
 
 /**
@@ -322,10 +349,10 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
                                 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
                                 : n_ptr->bclink.last_sent);
 
-               spin_lock_bh(&bc_lock);
+               tipc_bclink_lock();
                tipc_bearer_send(MAX_BEARERS, buf, NULL);
                bcl->stats.sent_nacks++;
-               spin_unlock_bh(&bc_lock);
+               tipc_bclink_unlock();
                kfree_skb(buf);
 
                n_ptr->bclink.oos_state++;
@@ -362,7 +389,7 @@ int tipc_bclink_xmit(struct sk_buff *buf)
 {
        int res;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
 
        if (!bclink->bcast_nodes.count) {
                res = msg_data_sz(buf_msg(buf));
@@ -377,14 +404,14 @@ int tipc_bclink_xmit(struct sk_buff *buf)
                bcl->stats.accu_queue_sz += bcl->out_queue_size;
        }
 exit:
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
        return res;
 }
 
 /**
  * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
  *
- * Called with both sending node's lock and bc_lock taken.
+ * Called with both sending node's lock and bclink_lock taken.
  */
 static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 {
@@ -439,12 +466,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
                if (msg_destnode(msg) == tipc_own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bcl->stats.recv_nacks++;
                        bclink->retransmit_to = node;
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                } else {
                        tipc_node_unlock(node);
                        bclink_peek_nack(msg);
@@ -462,51 +489,47 @@ receive:
                /* Deliver message to destination */
 
                if (likely(msg_isdata(msg))) {
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
                                tipc_port_mcast_rcv(buf, NULL);
                        else
                                kfree_skb(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        tipc_link_bundle_rcv(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
-                       int ret;
-                       ret = tipc_link_frag_rcv(&node->bclink.reasm_head,
-                                                &node->bclink.reasm_tail,
-                                                &buf);
-                       if (ret == LINK_REASM_ERROR)
+                       tipc_buf_append(&node->bclink.reasm_buf, &buf);
+                       if (unlikely(!buf && !node->bclink.reasm_buf))
                                goto unlock;
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_fragments++;
-                       if (ret == LINK_REASM_COMPLETE) {
+                       if (buf) {
                                bcl->stats.recv_fragmented++;
-                               /* Point msg to inner header */
                                msg = buf_msg(buf);
-                               spin_unlock_bh(&bc_lock);
+                               tipc_bclink_unlock();
                                goto receive;
                        }
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        tipc_named_rcv(buf);
                } else {
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        kfree_skb(buf);
                }
@@ -552,14 +575,14 @@ receive:
        } else
                deferred = 0;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
 
        if (deferred)
                bcl->stats.deferred_recv++;
        else
                bcl->stats.duplicates++;
 
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 
 unlock:
        tipc_node_unlock(node);
@@ -663,7 +686,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
        int b_index;
        int pri;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
 
        if (action)
                tipc_nmap_add(nm_ptr, node);
@@ -710,7 +733,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
                bp_curr++;
        }
 
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 }
 
 
@@ -722,7 +745,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
        if (!bcl)
                return 0;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
 
        s = &bcl->stats;
 
@@ -751,7 +774,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
                             s->queue_sz_counts ?
                             (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
        return ret;
 }
 
@@ -760,9 +783,9 @@ int tipc_bclink_reset_stats(void)
        if (!bcl)
                return -ENOPROTOOPT;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        memset(&bcl->stats, 0, sizeof(bcl->stats));
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
        return 0;
 }
 
@@ -773,18 +796,30 @@ int tipc_bclink_set_queue_limits(u32 limit)
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        tipc_link_set_queue_limits(bcl, limit);
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
        return 0;
 }
 
-void tipc_bclink_init(void)
+int tipc_bclink_init(void)
 {
+       bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
+       if (!bcbearer)
+               return -ENOMEM;
+
+       bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
+       if (!bclink) {
+               kfree(bcbearer);
+               return -ENOMEM;
+       }
+
+       bcl = &bclink->link;
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-broadcast");
 
+       spin_lock_init(&bclink->lock);
        INIT_LIST_HEAD(&bcl->waiting_ports);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
@@ -795,17 +830,19 @@ void tipc_bclink_init(void)
        rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
+       return 0;
 }
 
 void tipc_bclink_stop(void)
 {
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        tipc_link_purge_queues(bcl);
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 
        RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
-       memset(bclink, 0, sizeof(*bclink));
-       memset(bcbearer, 0, sizeof(*bcbearer));
+       synchronize_net();
+       kfree(bcbearer);
+       kfree(bclink);
 }
 
 /**
index 7c1ef1b3d7b3836a20e5071fea66b03a21aacd00..00330c45df3e04d03626a31d5f2ee6ba66569298 100644 (file)
@@ -39,6 +39,7 @@
 
 #define MAX_NODES 4096
 #define WSIZE 32
+#define TIPC_BCLINK_RESET 1
 
 /**
  * struct tipc_node_map - set of node identifiers
@@ -81,8 +82,9 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
 void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
 void tipc_port_list_free(struct tipc_port_list *pl_ptr);
 
-void tipc_bclink_init(void);
+int tipc_bclink_init(void);
 void tipc_bclink_stop(void);
+void tipc_bclink_set_flags(unsigned int flags);
 void tipc_bclink_add_node(u32 addr);
 void tipc_bclink_remove_node(u32 addr);
 struct tipc_node *tipc_bclink_retransmit_to(void);
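tipc_bclink_init() now returns int because, as the bcast.c hunks above show, the broadcast bearer and link objects are kzalloc()ed at init time (and kfree()d in tipc_bclink_stop() after synchronize_net()) instead of living in static storage. A standalone sketch of that allocate-or-fail-and-roll-back shape, with illustrative types:

#include <stdio.h>
#include <stdlib.h>

struct bcbearer { int dummy; };
struct bclink   { int dummy; };

static struct bcbearer *bcbearer;
static struct bclink *bclink;

static int bclink_init(void)
{
        bcbearer = calloc(1, sizeof(*bcbearer));
        if (!bcbearer)
                return -1;
        bclink = calloc(1, sizeof(*bclink));
        if (!bclink) {
                free(bcbearer);          /* undo the first allocation */
                bcbearer = NULL;
                return -1;
        }
        return 0;
}

static void bclink_stop(void)
{
        free(bcbearer);                  /* was a memset() of static objects */
        free(bclink);
        bcbearer = NULL;
        bclink = NULL;
}

int main(void)
{
        if (bclink_init())
                return 1;
        printf("broadcast link initialised\n");
        bclink_stop();
        return 0;
}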
index f3259d4133b62f16702fb945e468f1553bbdd859..264474394f9f75994205b751e734b632d21fbc02 100644 (file)
@@ -411,28 +411,6 @@ int tipc_disable_bearer(const char *name)
        return res;
 }
 
-
-/* tipc_l2_media_addr_set - initialize Ethernet media address structure
- *
- * Media-dependent "value" field stores MAC address in first 6 bytes
- * and zeroes out the remaining bytes.
- */
-void tipc_l2_media_addr_set(const struct tipc_bearer *b,
-                           struct tipc_media_addr *a, char *mac)
-{
-       int len = b->media->hwaddr_len;
-
-       if (unlikely(sizeof(a->value) < len)) {
-               WARN_ONCE(1, "Media length invalid\n");
-               return;
-       }
-
-       memcpy(a->value, mac, len);
-       memset(a->value + len, 0, sizeof(a->value) - len);
-       a->media_id = b->media->type_id;
-       a->broadcast = !memcmp(mac, b->bcast_addr.value, len);
-}
-
 int tipc_enable_l2_media(struct tipc_bearer *b)
 {
        struct net_device *dev;
@@ -443,21 +421,21 @@ int tipc_enable_l2_media(struct tipc_bearer *b)
        if (!dev)
                return -ENODEV;
 
-       /* Associate TIPC bearer with Ethernet bearer */
+       /* Associate TIPC bearer with L2 bearer */
        rcu_assign_pointer(b->media_ptr, dev);
-       memset(b->bcast_addr.value, 0, sizeof(b->bcast_addr.value));
+       memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
        memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
        b->bcast_addr.media_id = b->media->type_id;
        b->bcast_addr.broadcast = 1;
        b->mtu = dev->mtu;
-       tipc_l2_media_addr_set(b, &b->addr, (char *)dev->dev_addr);
+       b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
        rcu_assign_pointer(dev->tipc_ptr, b);
        return 0;
 }
 
-/* tipc_disable_l2_media - detach TIPC bearer from an Ethernet interface
+/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
  *
- * Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
+ * Mark L2 bearer as inactive so that incoming buffers are thrown away,
  * then get worker thread to complete bearer cleanup.  (Can't do cleanup
  * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
@@ -473,7 +451,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
 }
 
 /**
- * tipc_l2_send_msg - send a TIPC packet out over an Ethernet interface
+ * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
  * @buf: the packet to be sent
  * @b_ptr: the bearer through which the packet is to be sent
  * @dest: peer destination address
@@ -597,7 +575,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
                tipc_reset_bearer(b_ptr);
                break;
        case NETDEV_CHANGEADDR:
-               tipc_l2_media_addr_set(b_ptr, &b_ptr->addr,
+               b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
                                       (char *)dev->dev_addr);
                tipc_reset_bearer(b_ptr);
                break;
index a983b3005e715b577d78060854edc373324514bd..78fccc49de23c1e6dc20c1799d629c4083f6f80c 100644 (file)
 #define MAX_BEARERS    2
 #define MAX_MEDIA      2
 
-/*
- * Identifiers associated with TIPC message header media address info
- *
- * - address info field is 20 bytes long
- * - media type identifier located at offset 3
- * - remaining bytes vary according to media type
+/* Identifiers associated with TIPC message header media address info
+ * - address info field is 32 bytes long
+ * - the field's actual content and length is defined per media
+ * - remaining unused bytes in the field are set to zero
  */
-#define TIPC_MEDIA_ADDR_SIZE   20
+#define TIPC_MEDIA_ADDR_SIZE   32
 #define TIPC_MEDIA_TYPE_OFFSET 3
 
 /*
@@ -77,9 +75,10 @@ struct tipc_bearer;
  * @send_msg: routine which handles buffer transmission
  * @enable_media: routine which enables a media
  * @disable_media: routine which disables a media
- * @addr2str: routine which converts media address to string
- * @addr2msg: routine which converts media address to protocol message area
- * @msg2addr: routine which converts media address from protocol message area
+ * @addr2str: convert media address format to string
+ * @addr2msg: convert from media addr format to discovery msg addr format
+ * @msg2addr: convert from discovery msg addr format to media addr format
+ * @raw2addr: convert from raw addr format to media addr format
  * @priority: default link (and bearer) priority
  * @tolerance: default time (in ms) before declaring link failure
  * @window: default window (in packets) before declaring link congestion
@@ -93,10 +92,16 @@ struct tipc_media {
                        struct tipc_media_addr *dest);
        int (*enable_media)(struct tipc_bearer *b_ptr);
        void (*disable_media)(struct tipc_bearer *b_ptr);
-       int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size);
-       int (*addr2msg)(struct tipc_media_addr *a, char *msg_area);
-       int (*msg2addr)(const struct tipc_bearer *b_ptr,
-                       struct tipc_media_addr *a, char *msg_area);
+       int (*addr2str)(struct tipc_media_addr *addr,
+                       char *strbuf,
+                       int bufsz);
+       int (*addr2msg)(char *msg, struct tipc_media_addr *addr);
+       int (*msg2addr)(struct tipc_bearer *b,
+                       struct tipc_media_addr *addr,
+                       char *msg);
+       int (*raw2addr)(struct tipc_bearer *b,
+                       struct tipc_media_addr *addr,
+                       char *raw);
        u32 priority;
        u32 tolerance;
        u32 window;
@@ -175,8 +180,6 @@ int tipc_media_set_priority(const char *name, u32 new_value);
 int tipc_media_set_window(const char *name, u32 new_value);
 void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
-void tipc_l2_media_addr_set(const struct tipc_bearer *b,
-                           struct tipc_media_addr *a, char *mac);
 int tipc_enable_l2_media(struct tipc_bearer *b);
 void tipc_disable_l2_media(struct tipc_bearer *b);
 int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
index 251f5a2028e4b16fb85266d268336640d5da0399..2b42403ad33a690221456ff25fb4be50a2235255 100644 (file)
@@ -177,8 +177,10 @@ static struct sk_buff *cfg_set_own_addr(void)
        if (tipc_own_addr)
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (cannot change node address once assigned)");
-       tipc_net_start(addr);
-       return tipc_cfg_reply_none();
+       if (!tipc_net_start(addr))
+               return tipc_cfg_reply_none();
+
+       return tipc_cfg_reply_error_string("cannot change to network mode");
 }
 
 static struct sk_buff *cfg_set_max_ports(void)
index 50d57429ebcaf82b8d36bcf49f6fa1585664180a..676d18015dd82efa0346f6bed2bf0d7f5489f1f6 100644 (file)
@@ -80,7 +80,6 @@ struct sk_buff *tipc_buf_acquire(u32 size)
  */
 static void tipc_core_stop(void)
 {
-       tipc_handler_stop();
        tipc_net_stop();
        tipc_bearer_cleanup();
        tipc_netlink_stop();
@@ -100,10 +99,6 @@ static int tipc_core_start(void)
 
        get_random_bytes(&tipc_random, sizeof(tipc_random));
 
-       err = tipc_handler_start();
-       if (err)
-               goto out_handler;
-
        err = tipc_ref_table_init(tipc_max_ports, tipc_random);
        if (err)
                goto out_reftbl;
@@ -146,8 +141,6 @@ out_netlink:
 out_nametbl:
        tipc_ref_table_stop();
 out_reftbl:
-       tipc_handler_stop();
-out_handler:
        return err;
 }
 
@@ -161,10 +154,11 @@ static int __init tipc_init(void)
        tipc_max_ports = CONFIG_TIPC_PORTS;
        tipc_net_id = 4711;
 
-       sysctl_tipc_rmem[0] = CONN_OVERLOAD_LIMIT >> 4 << TIPC_LOW_IMPORTANCE;
-       sysctl_tipc_rmem[1] = CONN_OVERLOAD_LIMIT >> 4 <<
+       sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+                             TIPC_LOW_IMPORTANCE;
+       sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
                              TIPC_CRITICAL_IMPORTANCE;
-       sysctl_tipc_rmem[2] = CONN_OVERLOAD_LIMIT;
+       sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
 
        res = tipc_core_start();
        if (res)
index 36cbf158845f7fd1421dd6812ffecb992d2b86bb..bb26ed1ee966c84c66fc7322877d80f763047e5a 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
 
 #define TIPC_MOD_VER "2.0.0"
 
@@ -89,8 +90,6 @@ extern int tipc_random __read_mostly;
 /*
  * Routines available to privileged subsystems
  */
-int tipc_handler_start(void);
-void tipc_handler_stop(void);
 int tipc_netlink_start(void);
 void tipc_netlink_stop(void);
 int tipc_socket_init(void);
@@ -109,12 +108,10 @@ void tipc_unregister_sysctl(void);
 #endif
 
 /*
- * TIPC timer and signal code
+ * TIPC timer code
  */
 typedef void (*Handler) (unsigned long);
 
-u32 tipc_k_signal(Handler routine, unsigned long argument);
-
 /**
  * k_init_timer - initialize a timer
  * @timer: pointer to timer structure
@@ -191,6 +188,7 @@ static inline void k_term_timer(struct timer_list *timer)
 struct tipc_skb_cb {
        void *handle;
        bool deferred;
+       struct sk_buff *tail;
 };
 
 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
index ada42e436f5e78ee29b506639d77cc4116e61cbb..aa722a42ef8b03b4d840e31bbf7582d51948fbae 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/discover.c
  *
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2006, 2014, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -83,7 +83,7 @@ static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
        msg_set_node_sig(msg, tipc_random);
        msg_set_dest_domain(msg, dest_domain);
        msg_set_bc_netid(msg, tipc_net_id);
-       b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
+       b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
 }
 
 /**
@@ -106,147 +106,150 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
 }
 
 /**
- * tipc_disc_rcv - handle incoming link setup message (request or response)
+ * tipc_disc_rcv - handle incoming discovery message (request or response)
  * @buf: buffer containing message
- * @b_ptr: bearer that message arrived on
+ * @bearer: bearer that message arrived on
  */
-void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr)
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
 {
-       struct tipc_node *n_ptr;
+       struct tipc_node *node;
        struct tipc_link *link;
-       struct tipc_media_addr media_addr;
+       struct tipc_media_addr maddr;
        struct sk_buff *rbuf;
        struct tipc_msg *msg = buf_msg(buf);
-       u32 dest = msg_dest_domain(msg);
-       u32 orig = msg_prevnode(msg);
+       u32 ddom = msg_dest_domain(msg);
+       u32 onode = msg_prevnode(msg);
        u32 net_id = msg_bc_netid(msg);
-       u32 type = msg_type(msg);
+       u32 mtyp = msg_type(msg);
        u32 signature = msg_node_sig(msg);
-       int addr_mismatch;
-       int link_fully_up;
-
-       media_addr.broadcast = 1;
-       b_ptr->media->msg2addr(b_ptr, &media_addr, msg_media_addr(msg));
+       bool addr_match = false;
+       bool sign_match = false;
+       bool link_up = false;
+       bool accept_addr = false;
+       bool accept_sign = false;
+       bool respond = false;
+
+       bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
        kfree_skb(buf);
 
        /* Ensure message from node is valid and communication is permitted */
        if (net_id != tipc_net_id)
                return;
-       if (media_addr.broadcast)
+       if (maddr.broadcast)
                return;
-       if (!tipc_addr_domain_valid(dest))
+       if (!tipc_addr_domain_valid(ddom))
                return;
-       if (!tipc_addr_node_valid(orig))
+       if (!tipc_addr_node_valid(onode))
                return;
-       if (orig == tipc_own_addr) {
-               if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr)))
-                       disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
+
+       if (in_own_node(onode)) {
+               if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
+                       disc_dupl_alert(bearer, tipc_own_addr, &maddr);
                return;
        }
-       if (!tipc_in_scope(dest, tipc_own_addr))
+       if (!tipc_in_scope(ddom, tipc_own_addr))
                return;
-       if (!tipc_in_scope(b_ptr->domain, orig))
+       if (!tipc_in_scope(bearer->domain, onode))
                return;
 
-       /* Locate structure corresponding to requesting node */
-       n_ptr = tipc_node_find(orig);
-       if (!n_ptr) {
-               n_ptr = tipc_node_create(orig);
-               if (!n_ptr)
-                       return;
-       }
-       tipc_node_lock(n_ptr);
+       /* Locate, or if necessary, create, node: */
+       node = tipc_node_find(onode);
+       if (!node)
+               node = tipc_node_create(onode);
+       if (!node)
+               return;
 
-       /* Prepare to validate requesting node's signature and media address */
-       link = n_ptr->links[b_ptr->identity];
-       addr_mismatch = (link != NULL) &&
-               memcmp(&link->media_addr, &media_addr, sizeof(media_addr));
+       tipc_node_lock(node);
+       link = node->links[bearer->identity];
 
-       /*
-        * Ensure discovery message's signature is correct
-        *
-        * If signature is incorrect and there is no working link to the node,
-        * accept the new signature but invalidate all existing links to the
-        * node so they won't re-activate without a new discovery message.
-        *
-        * If signature is incorrect and the requested link to the node is
-        * working, accept the new signature. (This is an instance of delayed
-        * rediscovery, where a link endpoint was able to re-establish contact
-        * with its peer endpoint on a node that rebooted before receiving a
-        * discovery message from that node.)
-        *
-        * If signature is incorrect and there is a working link to the node
-        * that is not the requested link, reject the request (must be from
-        * a duplicate node).
-        */
-       if (signature != n_ptr->signature) {
-               if (n_ptr->working_links == 0) {
-                       struct tipc_link *curr_link;
-                       int i;
-
-                       for (i = 0; i < MAX_BEARERS; i++) {
-                               curr_link = n_ptr->links[i];
-                               if (curr_link) {
-                                       memset(&curr_link->media_addr, 0,
-                                              sizeof(media_addr));
-                                       tipc_link_reset(curr_link);
-                               }
-                       }
-                       addr_mismatch = (link != NULL);
-               } else if (tipc_link_is_up(link) && !addr_mismatch) {
-                       /* delayed rediscovery */
-               } else {
-                       disc_dupl_alert(b_ptr, orig, &media_addr);
-                       tipc_node_unlock(n_ptr);
-                       return;
-               }
-               n_ptr->signature = signature;
+       /* Prepare to validate requesting node's signature and media address */
+       sign_match = (signature == node->signature);
+       addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
+       link_up = link && tipc_link_is_up(link);
+
+
+       /* These three flags give us eight permutations: */
+
+       if (sign_match && addr_match && link_up) {
+               /* All is fine. Do nothing. */
+       } else if (sign_match && addr_match && !link_up) {
+               /* Respond. The link will come up in due time */
+               respond = true;
+       } else if (sign_match && !addr_match && link_up) {
+               /* Peer has changed i/f address without rebooting.
+                * If so, the link will reset soon, and the next
+                * discovery will be accepted. So we can ignore it.
+                * It may also be a cloned or malicious peer having
+                * chosen the same node address and signature as an
+                * existing one.
+                * Ignore requests until the link goes down, if ever.
+                */
+               disc_dupl_alert(bearer, onode, &maddr);
+       } else if (sign_match && !addr_match && !link_up) {
+               /* Peer link has changed i/f address without rebooting.
+                * It may also be a cloned or malicious peer; we can't
+                * distinguish between the two.
+                * The signature is correct, so we must accept.
+                */
+               accept_addr = true;
+               respond = true;
+       } else if (!sign_match && addr_match && link_up) {
+               /* Peer node rebooted. Two possibilities:
+                *  - Delayed re-discovery; this link endpoint has already
+                *    reset and re-established contact with the peer, before
+                *    receiving a discovery message from that node.
+                *    (The peer happened to receive one from this node first).
+                *  - The peer came back so fast that our side has not
+                *    discovered it yet. Probing from this side will soon
+                *    reset the link, since there can be no working link
+                *    endpoint at the peer end, and the link will re-establish.
+                *  Accept the signature, since it comes from a known peer.
+                */
+               accept_sign = true;
+       } else if (!sign_match && addr_match && !link_up) {
+               /*  The peer node has rebooted.
+                *  Accept signature, since it is a known peer.
+                */
+               accept_sign = true;
+               respond = true;
+       } else if (!sign_match && !addr_match && link_up) {
+               /* Peer rebooted with new address, or a new/duplicate peer.
+                * Ignore until the link goes down, if ever.
+                */
+               disc_dupl_alert(bearer, onode, &maddr);
+       } else if (!sign_match && !addr_match && !link_up) {
+               /* Peer rebooted with new address, or it is a new peer.
+                * Accept signature and address.
+               */
+               accept_sign = true;
+               accept_addr = true;
+               respond = true;
        }
 
-       /*
-        * Ensure requesting node's media address is correct
-        *
-        * If media address doesn't match and the link is working, reject the
-        * request (must be from a duplicate node).
-        *
-        * If media address doesn't match and the link is not working, accept
-        * the new media address and reset the link to ensure it starts up
-        * cleanly.
-        */
-       if (addr_mismatch) {
-               if (tipc_link_is_up(link)) {
-                       disc_dupl_alert(b_ptr, orig, &media_addr);
-                       tipc_node_unlock(n_ptr);
-                       return;
-               } else {
-                       memcpy(&link->media_addr, &media_addr,
-                              sizeof(media_addr));
-                       tipc_link_reset(link);
-               }
-       }
+       if (accept_sign)
+               node->signature = signature;
 
-       /* Create a link endpoint for this bearer, if necessary */
-       if (!link) {
-               link = tipc_link_create(n_ptr, b_ptr, &media_addr);
-               if (!link) {
-                       tipc_node_unlock(n_ptr);
-                       return;
+       if (accept_addr) {
+               if (!link)
+                       link = tipc_link_create(node, bearer, &maddr);
+               if (link) {
+                       memcpy(&link->media_addr, &maddr, sizeof(maddr));
+                       tipc_link_reset(link);
+               } else {
+                       respond = false;
                }
        }
 
-       /* Accept discovery message & send response, if necessary */
-       link_fully_up = link_working_working(link);
-
-       if ((type == DSC_REQ_MSG) && !link_fully_up) {
+       /* Send response, if necessary */
+       if (respond && (mtyp == DSC_REQ_MSG)) {
                rbuf = tipc_buf_acquire(INT_H_SIZE);
                if (rbuf) {
-                       tipc_disc_init_msg(rbuf, DSC_RESP_MSG, b_ptr);
-                       tipc_bearer_send(b_ptr->identity, rbuf, &media_addr);
+                       tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer);
+                       tipc_bearer_send(bearer->identity, rbuf, &maddr);
                        kfree_skb(rbuf);
                }
        }
-
-       tipc_node_unlock(n_ptr);
+       tipc_node_unlock(node);
 }
 
 /**
@@ -348,8 +351,10 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
                return -ENOMEM;
 
        req->buf = tipc_buf_acquire(INT_H_SIZE);
-       if (!req->buf)
+       if (!req->buf) {
+               kfree(req);
                return -ENOMEM;
+       }
 
        tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
        memcpy(&req->dest, dest, sizeof(*dest));
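The rewritten tipc_disc_rcv() above reduces the old nested signature/address checks to three booleans (signature match, address match, link up) and walks the eight resulting combinations in one flat if/else chain. The standalone sketch below enumerates the same decision table so the mapping from combination to action (accept signature, accept address, respond, duplicate alert) can be read at a glance; struct and function names are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct verdict { bool accept_sign, accept_addr, respond, dupl_alert; };

static struct verdict classify(bool sign_match, bool addr_match, bool link_up)
{
        struct verdict v = { false, false, false, false };

        if (sign_match && addr_match && link_up) {
                /* all fine, do nothing */
        } else if (sign_match && addr_match && !link_up) {
                v.respond = true;                  /* link will come up */
        } else if (sign_match && !addr_match && link_up) {
                v.dupl_alert = true;               /* possible duplicate peer */
        } else if (sign_match && !addr_match && !link_up) {
                v.accept_addr = v.respond = true;  /* peer changed i/f addr */
        } else if (!sign_match && addr_match && link_up) {
                v.accept_sign = true;              /* peer rebooted */
        } else if (!sign_match && addr_match && !link_up) {
                v.accept_sign = v.respond = true;  /* peer rebooted */
        } else if (!sign_match && !addr_match && link_up) {
                v.dupl_alert = true;               /* new addr or duplicate */
        } else {
                v.accept_sign = v.accept_addr = v.respond = true;  /* new peer */
        }
        return v;
}

int main(void)
{
        for (int i = 0; i < 8; i++) {
                bool s = i & 4, a = i & 2, u = i & 1;
                struct verdict v = classify(s, a, u);

                printf("sign=%d addr=%d up=%d -> sign+%d addr+%d resp=%d alert=%d\n",
                       s, a, u, v.accept_sign, v.accept_addr, v.respond, v.dupl_alert);
        }
        return 0;
}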
index 67cf3f935dba0a9e4d0141fc0406a93b1aeb60d6..5e1426f1751f146cf3350983e9d0c04d218da850 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/eth_media.c: Ethernet bearer support for TIPC
  *
- * Copyright (c) 2001-2007, 2013, Ericsson AB
+ * Copyright (c) 2001-2007, 2013-2014, Ericsson AB
  * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
  * All rights reserved.
  *
 #include "core.h"
 #include "bearer.h"
 
-#define ETH_ADDR_OFFSET        4       /* message header offset of MAC address */
+#define ETH_ADDR_OFFSET  4  /* MAC addr position inside address field */
 
-/* convert Ethernet address to string */
-static int tipc_eth_addr2str(struct tipc_media_addr *a, char *str_buf,
-                            int str_size)
+/* Convert Ethernet address (media address format) to string */
+static int tipc_eth_addr2str(struct tipc_media_addr *addr,
+                            char *strbuf, int bufsz)
 {
-       if (str_size < 18)      /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
+       if (bufsz < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
                return 1;
 
-       sprintf(str_buf, "%pM", a->value);
+       sprintf(strbuf, "%pM", addr->value);
        return 0;
 }
 
-/* convert Ethernet address format to message header format */
-static int tipc_eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
+/* Convert from media address format to discovery message addr format */
+static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
 {
-       memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
-       msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
-       memcpy(msg_area + ETH_ADDR_OFFSET, a->value, ETH_ALEN);
+       memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+       msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
+       memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN);
        return 0;
 }
 
-/* convert message header address format to Ethernet format */
-static int tipc_eth_msg2addr(const struct tipc_bearer *tb_ptr,
-                            struct tipc_media_addr *a, char *msg_area)
+/* Convert raw mac address format to media addr format */
+static int tipc_eth_raw2addr(struct tipc_bearer *b,
+                            struct tipc_media_addr *addr,
+                            char *msg)
 {
-       if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
-               return 1;
+       char bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
-       tipc_l2_media_addr_set(tb_ptr, a, msg_area + ETH_ADDR_OFFSET);
+       memset(addr, 0, sizeof(*addr));
+       ether_addr_copy(addr->value, msg);
+       addr->media_id = TIPC_MEDIA_TYPE_ETH;
+       addr->broadcast = !memcmp(addr->value, bcast_mac, ETH_ALEN);
        return 0;
 }
 
+/* Convert discovery msg addr format to Ethernet media addr format */
+static int tipc_eth_msg2addr(struct tipc_bearer *b,
+                            struct tipc_media_addr *addr,
+                            char *msg)
+{
+       /* Skip past preamble: */
+       msg += ETH_ADDR_OFFSET;
+       return tipc_eth_raw2addr(b, addr, msg);
+}
+
 /* Ethernet media registration info */
 struct tipc_media eth_media_info = {
        .send_msg       = tipc_l2_send_msg,
@@ -78,6 +91,7 @@ struct tipc_media eth_media_info = {
        .addr2str       = tipc_eth_addr2str,
        .addr2msg       = tipc_eth_addr2msg,
        .msg2addr       = tipc_eth_msg2addr,
+       .raw2addr       = tipc_eth_raw2addr,
        .priority       = TIPC_DEF_LINK_PRI,
        .tolerance      = TIPC_DEF_LINK_TOL,
        .window         = TIPC_DEF_LINK_WIN,
@@ -85,4 +99,3 @@ struct tipc_media eth_media_info = {
        .hwaddr_len     = ETH_ALEN,
        .name           = "eth"
 };
-
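For readers unfamiliar with the media address layout, the following userspace sketch mirrors the logic of the new tipc_eth_raw2addr() above: copy the raw MAC into the value field, tag the media id, and mark the all-ones address as broadcast. The struct and the MEDIA_TYPE_ETH value are simplified stand-ins, not the kernel definitions.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define ETH_ALEN	6
#define MEDIA_TYPE_ETH	1	/* stand-in value, not the kernel constant */

/* Simplified stand-in for struct tipc_media_addr */
struct media_addr_example {
	unsigned char value[32];
	unsigned char media_id;
	bool broadcast;
};

/* Mirrors the logic of tipc_eth_raw2addr(): raw MAC -> media address */
static void raw2addr_example(struct media_addr_example *addr,
			     const unsigned char *mac)
{
	static const unsigned char bcast[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	memset(addr, 0, sizeof(*addr));
	memcpy(addr->value, mac, ETH_ALEN);
	addr->media_id = MEDIA_TYPE_ETH;
	addr->broadcast = !memcmp(addr->value, bcast, ETH_ALEN);
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
	struct media_addr_example a;

	raw2addr_example(&a, mac);
	printf("media_id=%d broadcast=%s\n", a.media_id,
	       a.broadcast ? "yes" : "no");
	return 0;
}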
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
deleted file mode 100644 (file)
index 1fabf16..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * net/tipc/handler.c: TIPC signal handling
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-
-struct queue_item {
-       struct list_head next_signal;
-       void (*handler) (unsigned long);
-       unsigned long data;
-};
-
-static struct kmem_cache *tipc_queue_item_cache;
-static struct list_head signal_queue_head;
-static DEFINE_SPINLOCK(qitem_lock);
-static int handler_enabled __read_mostly;
-
-static void process_signal_queue(unsigned long dummy);
-
-static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
-
-
-unsigned int tipc_k_signal(Handler routine, unsigned long argument)
-{
-       struct queue_item *item;
-
-       spin_lock_bh(&qitem_lock);
-       if (!handler_enabled) {
-               spin_unlock_bh(&qitem_lock);
-               return -ENOPROTOOPT;
-       }
-
-       item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
-       if (!item) {
-               pr_err("Signal queue out of memory\n");
-               spin_unlock_bh(&qitem_lock);
-               return -ENOMEM;
-       }
-       item->handler = routine;
-       item->data = argument;
-       list_add_tail(&item->next_signal, &signal_queue_head);
-       spin_unlock_bh(&qitem_lock);
-       tasklet_schedule(&tipc_tasklet);
-       return 0;
-}
-
-static void process_signal_queue(unsigned long dummy)
-{
-       struct queue_item *__volatile__ item;
-       struct list_head *l, *n;
-
-       spin_lock_bh(&qitem_lock);
-       list_for_each_safe(l, n, &signal_queue_head) {
-               item = list_entry(l, struct queue_item, next_signal);
-               list_del(&item->next_signal);
-               spin_unlock_bh(&qitem_lock);
-               item->handler(item->data);
-               spin_lock_bh(&qitem_lock);
-               kmem_cache_free(tipc_queue_item_cache, item);
-       }
-       spin_unlock_bh(&qitem_lock);
-}
-
-int tipc_handler_start(void)
-{
-       tipc_queue_item_cache =
-               kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
-                                 0, SLAB_HWCACHE_ALIGN, NULL);
-       if (!tipc_queue_item_cache)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&signal_queue_head);
-       tasklet_enable(&tipc_tasklet);
-       handler_enabled = 1;
-       return 0;
-}
-
-void tipc_handler_stop(void)
-{
-       struct list_head *l, *n;
-       struct queue_item *item;
-
-       spin_lock_bh(&qitem_lock);
-       if (!handler_enabled) {
-               spin_unlock_bh(&qitem_lock);
-               return;
-       }
-       handler_enabled = 0;
-       spin_unlock_bh(&qitem_lock);
-
-       tasklet_kill(&tipc_tasklet);
-
-       spin_lock_bh(&qitem_lock);
-       list_for_each_safe(l, n, &signal_queue_head) {
-               item = list_entry(l, struct queue_item, next_signal);
-               list_del(&item->next_signal);
-               kmem_cache_free(tipc_queue_item_cache, item);
-       }
-       spin_unlock_bh(&qitem_lock);
-
-       kmem_cache_destroy(tipc_queue_item_cache);
-}
index 844a77e2582856ae8cff4618c5e1de23f4220e0b..8522eef9c136bc25d39e166b32dfc459881d77c9 100644 (file)
@@ -42,7 +42,7 @@
 #include "core.h"
 #include "bearer.h"
 
-/* convert InfiniBand address to string */
+/* convert InfiniBand address (media address format) to string */
 static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
                            int str_size)
 {
@@ -54,23 +54,35 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
        return 0;
 }
 
-/* convert InfiniBand address format to message header format */
-static int tipc_ib_addr2msg(struct tipc_media_addr *a, char *msg_area)
+/* Convert from media address format to discovery message addr format */
+static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
 {
-       memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
-       msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_IB;
-       memcpy(msg_area, a->value, INFINIBAND_ALEN);
+       memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+       memcpy(msg, addr->value, INFINIBAND_ALEN);
        return 0;
 }
 
-/* convert message header address format to InfiniBand format */
-static int tipc_ib_msg2addr(const struct tipc_bearer *tb_ptr,
-                           struct tipc_media_addr *a, char *msg_area)
+/* Convert raw InfiniBand address format to media addr format */
+static int tipc_ib_raw2addr(struct tipc_bearer *b,
+                           struct tipc_media_addr *addr,
+                           char *msg)
 {
-       tipc_l2_media_addr_set(tb_ptr, a, msg_area);
+       memset(addr, 0, sizeof(*addr));
+       memcpy(addr->value, msg, INFINIBAND_ALEN);
+       addr->media_id = TIPC_MEDIA_TYPE_IB;
+       addr->broadcast = !memcmp(msg, b->bcast_addr.value,
+                                 INFINIBAND_ALEN);
        return 0;
 }
 
+/* Convert discovery msg addr format to InfiniBand media addr format */
+static int tipc_ib_msg2addr(struct tipc_bearer *b,
+                           struct tipc_media_addr *addr,
+                           char *msg)
+{
+       return tipc_ib_raw2addr(b, addr, msg);
+}
+
 /* InfiniBand media registration info */
 struct tipc_media ib_media_info = {
        .send_msg       = tipc_l2_send_msg,
@@ -79,6 +91,7 @@ struct tipc_media ib_media_info = {
        .addr2str       = tipc_ib_addr2str,
        .addr2msg       = tipc_ib_addr2msg,
        .msg2addr       = tipc_ib_msg2addr,
+       .raw2addr       = tipc_ib_raw2addr,
        .priority       = TIPC_DEF_LINK_PRI,
        .tolerance      = TIPC_DEF_LINK_TOL,
        .window         = TIPC_DEF_LINK_WIN,
@@ -86,4 +99,3 @@ struct tipc_media ib_media_info = {
        .hwaddr_len     = INFINIBAND_ALEN,
        .name           = "ib"
 };
-
index c723ee90219da1fd6073b79741893c21fea2f6e3..ad2c57f5868dafe28fbf4204f9fc2189c1156d25 100644 (file)
@@ -37,6 +37,7 @@
 #include "core.h"
 #include "link.h"
 #include "port.h"
+#include "socket.h"
 #include "name_distr.h"
 #include "discover.h"
 #include "config.h"
@@ -297,14 +298,14 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
 
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               spin_lock_bh(&n_ptr->lock);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[bearer_id];
                if (l_ptr) {
                        tipc_link_reset(l_ptr);
                        if (shutting_down || !tipc_node_is_up(n_ptr)) {
                                tipc_node_detach_link(l_ptr->owner, l_ptr);
                                tipc_link_reset_fragments(l_ptr);
-                               spin_unlock_bh(&n_ptr->lock);
+                               tipc_node_unlock(n_ptr);
 
                                /* Nobody else can access this link now: */
                                del_timer_sync(&l_ptr->timer);
@@ -312,12 +313,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
                        } else {
                                /* Detach/delete when failover is finished: */
                                l_ptr->flags |= LINK_STOPPED;
-                               spin_unlock_bh(&n_ptr->lock);
+                               tipc_node_unlock(n_ptr);
                                del_timer_sync(&l_ptr->timer);
                        }
                        continue;
                }
-               spin_unlock_bh(&n_ptr->lock);
+               tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();
 }
@@ -398,9 +399,8 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
  */
 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 {
-       kfree_skb(l_ptr->reasm_head);
-       l_ptr->reasm_head = NULL;
-       l_ptr->reasm_tail = NULL;
+       kfree_skb(l_ptr->reasm_buf);
+       l_ptr->reasm_buf = NULL;
 }
 
 /**
@@ -474,11 +474,11 @@ void tipc_link_reset_list(unsigned int bearer_id)
 
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               spin_lock_bh(&n_ptr->lock);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[bearer_id];
                if (l_ptr)
                        tipc_link_reset(l_ptr);
-               spin_unlock_bh(&n_ptr->lock);
+               tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();
 }
@@ -1259,29 +1259,24 @@ void tipc_link_push_queue(struct tipc_link *l_ptr)
        } while (!res);
 }
 
-static void link_reset_all(unsigned long addr)
+void tipc_link_reset_all(struct tipc_node *node)
 {
-       struct tipc_node *n_ptr;
        char addr_string[16];
        u32 i;
 
-       n_ptr = tipc_node_find((u32)addr);
-       if (!n_ptr)
-               return; /* node no longer exists */
-
-       tipc_node_lock(n_ptr);
+       tipc_node_lock(node);
 
        pr_warn("Resetting all links to %s\n",
-               tipc_addr_string_fill(addr_string, n_ptr->addr));
+               tipc_addr_string_fill(addr_string, node->addr));
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               if (n_ptr->links[i]) {
-                       link_print(n_ptr->links[i], "Resetting link\n");
-                       tipc_link_reset(n_ptr->links[i]);
+               if (node->links[i]) {
+                       link_print(node->links[i], "Resetting link\n");
+                       tipc_link_reset(node->links[i]);
                }
        }
 
-       tipc_node_unlock(n_ptr);
+       tipc_node_unlock(node);
 }
 
 static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -1318,10 +1313,9 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
                        n_ptr->bclink.oos_state,
                        n_ptr->bclink.last_sent);
 
-               tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
-
                tipc_node_unlock(n_ptr);
 
+               tipc_bclink_set_flags(TIPC_BCLINK_RESET);
                l_ptr->stale_count = 0;
        }
 }
@@ -1495,14 +1489,14 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                        goto unlock_discard;
 
                /* Verify that communication with node is currently allowed */
-               if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
-                       msg_user(msg) == LINK_PROTOCOL &&
-                       (msg_type(msg) == RESET_MSG ||
-                        msg_type(msg) == ACTIVATE_MSG) &&
-                       !msg_redundant_link(msg))
-                       n_ptr->block_setup &= ~WAIT_PEER_DOWN;
-
-               if (n_ptr->block_setup)
+               if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
+                   msg_user(msg) == LINK_PROTOCOL &&
+                   (msg_type(msg) == RESET_MSG ||
+                   msg_type(msg) == ACTIVATE_MSG) &&
+                   !msg_redundant_link(msg))
+                       n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
+
+               if (tipc_node_blocked(n_ptr))
                        goto unlock_discard;
 
                /* Validate message sequence number info */
@@ -1579,17 +1573,12 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                        }
                        msg = buf_msg(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
-                       int rc;
-
                        l_ptr->stats.recv_fragments++;
-                       rc = tipc_link_frag_rcv(&l_ptr->reasm_head,
-                                               &l_ptr->reasm_tail,
-                                               &buf);
-                       if (rc == LINK_REASM_COMPLETE) {
+                       if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
                                l_ptr->stats.recv_fragmented++;
                                msg = buf_msg(buf);
                        } else {
-                               if (rc == LINK_REASM_ERROR)
+                               if (!l_ptr->reasm_buf)
                                        tipc_link_reset(l_ptr);
                                tipc_node_unlock(n_ptr);
                                continue;
@@ -1602,7 +1591,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                case TIPC_HIGH_IMPORTANCE:
                case TIPC_CRITICAL_IMPORTANCE:
                        tipc_node_unlock(n_ptr);
-                       tipc_port_rcv(buf);
+                       tipc_sk_rcv(buf);
                        continue;
                case MSG_BUNDLER:
                        l_ptr->stats.recv_bundles++;
@@ -1744,7 +1733,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
                return;
 
        /* Abort non-RESET send if communication with node is prohibited */
-       if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
+       if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
                return;
 
        /* Create protocol message with "out-of-sequence" sequence number */
@@ -1837,9 +1826,6 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
        if (l_ptr->exp_msg_count)
                goto exit;
 
-       /* record unnumbered packet arrival (force mismatch on next timeout) */
-       l_ptr->checkpoint--;
-
        if (l_ptr->net_plane != msg_net_plane(msg))
                if (tipc_own_addr > msg_prevnode(msg))
                        l_ptr->net_plane = msg_net_plane(msg);
@@ -1859,7 +1845,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
                         * peer has lost contact -- don't allow peer's links
                         * to reactivate before we recognize loss & clean up
                         */
-                       l_ptr->owner->block_setup = WAIT_NODE_DOWN;
+                       l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
                }
 
                link_state_event(l_ptr, RESET_MSG);
@@ -1915,6 +1901,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
                        tipc_link_reset(l_ptr); /* Enforce change to take effect */
                        break;
                }
+
+               /* Record reception; force mismatch at next timeout: */
+               l_ptr->checkpoint--;
+
                link_state_event(l_ptr, TRAFFIC_MSG_EVT);
                l_ptr->stats.recv_states++;
                if (link_reset_unknown(l_ptr))
@@ -2174,9 +2164,7 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
                }
                if (msg_user(msg) == MSG_FRAGMENTER) {
                        l_ptr->stats.recv_fragments++;
-                       tipc_link_frag_rcv(&l_ptr->reasm_head,
-                                          &l_ptr->reasm_tail,
-                                          &buf);
+                       tipc_buf_append(&l_ptr->reasm_buf, &buf);
                }
        }
 exit:
@@ -2314,53 +2302,6 @@ static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
        return dsz;
 }
 
-/* tipc_link_frag_rcv(): Called with node lock on. Returns
- * the reassembled buffer if message is complete.
- */
-int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
-                      struct sk_buff **fbuf)
-{
-       struct sk_buff *frag = *fbuf;
-       struct tipc_msg *msg = buf_msg(frag);
-       u32 fragid = msg_type(msg);
-       bool headstolen;
-       int delta;
-
-       skb_pull(frag, msg_hdr_sz(msg));
-       if (fragid == FIRST_FRAGMENT) {
-               if (*head || skb_unclone(frag, GFP_ATOMIC))
-                       goto out_free;
-               *head = frag;
-               skb_frag_list_init(*head);
-               *fbuf = NULL;
-               return 0;
-       } else if (*head &&
-                  skb_try_coalesce(*head, frag, &headstolen, &delta)) {
-               kfree_skb_partial(frag, headstolen);
-       } else {
-               if (!*head)
-                       goto out_free;
-               if (!skb_has_frag_list(*head))
-                       skb_shinfo(*head)->frag_list = frag;
-               else
-                       (*tail)->next = frag;
-               *tail = frag;
-               (*head)->truesize += frag->truesize;
-       }
-       if (fragid == LAST_FRAGMENT) {
-               *fbuf = *head;
-               *tail = *head = NULL;
-               return LINK_REASM_COMPLETE;
-       }
-       *fbuf = NULL;
-       return 0;
-out_free:
-       pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
-       kfree_skb(*fbuf);
-       *fbuf = NULL;
-       return LINK_REASM_ERROR;
-}
-
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
 {
        if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
index 4b556c181baeb6a580552a956328b3dad276ae1c..200d518b218ede4e0ad7c210c863c546f2362947 100644 (file)
 #include "msg.h"
 #include "node.h"
 
-/* Link reassembly status codes
- */
-#define LINK_REASM_ERROR       -1
-#define LINK_REASM_COMPLETE    1
-
 /* Out-of-range value for link sequence numbers
  */
 #define INVALID_LINK_SEQ 0x10000
@@ -140,8 +135,7 @@ struct tipc_stats {
  * @next_out: ptr to first unsent outbound message in queue
  * @waiting_ports: linked list of ports waiting for link congestion to abate
  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
- * @reasm_head: list head of partially reassembled inbound message fragments
- * @reasm_tail: last fragment received
+ * @reasm_buf: head of partially reassembled inbound message fragments
  * @stats: collects statistics regarding link activity
  */
 struct tipc_link {
@@ -204,8 +198,7 @@ struct tipc_link {
 
        /* Fragmentation/reassembly */
        u32 long_msg_seq_no;
-       struct sk_buff *reasm_head;
-       struct sk_buff *reasm_tail;
+       struct sk_buff *reasm_buf;
 
        /* Statistics */
        struct tipc_stats stats;
@@ -230,6 +223,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
                                         int req_tlv_space);
 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
                                          int req_tlv_space);
+void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
 void tipc_link_reset_list(unsigned int bearer_id);
 int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
@@ -241,9 +235,6 @@ int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
                              struct iovec const *msg_sect,
                              unsigned int len, u32 destnode);
 void tipc_link_bundle_rcv(struct sk_buff *buf);
-int tipc_link_frag_rcv(struct sk_buff **reasm_head,
-                      struct sk_buff **reasm_tail,
-                      struct sk_buff **fbuf);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
                          u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
 void tipc_link_push_queue(struct tipc_link *l_ptr);
index e525f8ce1dee09ce0d9baf214bd3dd8e13daa9d8..8be6e94a1ca9790dbbde757b6bd70fe9c5abb428 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/msg.c: TIPC message header routines
  *
- * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2000-2006, 2014, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -99,3 +99,56 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
        }
        return dsz;
 }
+
+/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
+ * Let first buffer become head buffer
+ * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
+ * Leaves headbuf pointer at NULL on failure
+ */
+int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+{
+       struct sk_buff *head = *headbuf;
+       struct sk_buff *frag = *buf;
+       struct sk_buff *tail;
+       struct tipc_msg *msg = buf_msg(frag);
+       u32 fragid = msg_type(msg);
+       bool headstolen;
+       int delta;
+
+       skb_pull(frag, msg_hdr_sz(msg));
+
+       if (fragid == FIRST_FRAGMENT) {
+               if (head || skb_unclone(frag, GFP_ATOMIC))
+                       goto out_free;
+               head = *headbuf = frag;
+               skb_frag_list_init(head);
+               return 0;
+       }
+       if (!head)
+               goto out_free;
+       tail = TIPC_SKB_CB(head)->tail;
+       if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
+               kfree_skb_partial(frag, headstolen);
+       } else {
+               if (!skb_has_frag_list(head))
+                       skb_shinfo(head)->frag_list = frag;
+               else
+                       tail->next = frag;
+               head->truesize += frag->truesize;
+               head->data_len += frag->len;
+               head->len += frag->len;
+               TIPC_SKB_CB(head)->tail = frag;
+       }
+       if (fragid == LAST_FRAGMENT) {
+               *buf = head;
+               TIPC_SKB_CB(head)->tail = NULL;
+               *headbuf = NULL;
+               return 1;
+       }
+       *buf = NULL;
+       return 0;
+out_free:
+       pr_warn_ratelimited("Unable to build fragment list\n");
+       kfree_skb(*buf);
+       return 0;
+}
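A sketch of how a receive path is expected to drive the new tipc_buf_append(), modelled on the MSG_FRAGMENTER branch of tipc_rcv() changed above: the caller keeps a single reassembly pointer, feeds each fragment in, and treats a NULL head after a failed append as a reassembly error. handle_complete_msg() is a placeholder; the snippet is kernel-context only, not standalone compilable.

/* Kernel-context sketch: reassemble MSG_FRAGMENTER buffers with
 * tipc_buf_append(). handle_complete_msg() is a placeholder name.
 */
static void frag_rcv_example(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	l_ptr->stats.recv_fragments++;

	if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
		/* Chain complete: buf now points at the head buffer */
		l_ptr->stats.recv_fragmented++;
		handle_complete_msg(buf);
	} else if (!l_ptr->reasm_buf) {
		/* Append failed; the partial chain has been dropped */
		tipc_link_reset(l_ptr);
	}
	/* Otherwise the fragment was absorbed; wait for more */
}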
index 76d1269b944361076855876219d6e59b16c3b23e..503511903d1d25c9c4106f669da0a7def7c9dd46 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/msg.h: Include file for TIPC message header routines
  *
- * Copyright (c) 2000-2007, Ericsson AB
+ * Copyright (c) 2000-2007, 2014, Ericsson AB
  * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -711,4 +711,7 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
                   u32 destnode);
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
                   unsigned int len, int max_size, struct sk_buff **buf);
+
+int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
+
 #endif
index 36a72822601c91f2c2d64aadc92fb6ad79976d3a..8ce730984aa1f0d429b2325d58862e1ef7799d4d 100644 (file)
 #include "link.h"
 #include "name_distr.h"
 
-#define ITEM_SIZE sizeof(struct distr_item)
-
-/**
- * struct distr_item - publication info distributed to other nodes
- * @type: name sequence type
- * @lower: name sequence lower bound
- * @upper: name sequence upper bound
- * @ref: publishing port reference
- * @key: publication key
- *
- * ===> All fields are stored in network byte order. <===
- *
- * First 3 fields identify (name or) name sequence being published.
- * Reference field uniquely identifies port that published name sequence.
- * Key field uniquely identifies publication, in the event a port has
- * multiple publications of the same name sequence.
- *
- * Note: There is no field that identifies the publishing node because it is
- * the same for all items contained within a publication message.
- */
-struct distr_item {
-       __be32 type;
-       __be32 lower;
-       __be32 upper;
-       __be32 ref;
-       __be32 key;
-};
-
 /**
  * struct publ_list - list of publications made by this node
  * @list: circular list of publications
@@ -127,7 +99,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
        return buf;
 }
 
-static void named_cluster_distribute(struct sk_buff *buf)
+void named_cluster_distribute(struct sk_buff *buf)
 {
        struct sk_buff *buf_copy;
        struct tipc_node *n_ptr;
@@ -135,18 +107,18 @@ static void named_cluster_distribute(struct sk_buff *buf)
 
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               spin_lock_bh(&n_ptr->lock);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->active_links[n_ptr->addr & 1];
                if (l_ptr) {
                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                        if (!buf_copy) {
-                               spin_unlock_bh(&n_ptr->lock);
+                               tipc_node_unlock(n_ptr);
                                break;
                        }
                        msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
                        __tipc_link_xmit(l_ptr, buf_copy);
                }
-               spin_unlock_bh(&n_ptr->lock);
+               tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();
 
@@ -156,7 +128,7 @@ static void named_cluster_distribute(struct sk_buff *buf)
 /**
  * tipc_named_publish - tell other nodes about a new publication by this node
  */
-void tipc_named_publish(struct publication *publ)
+struct sk_buff *tipc_named_publish(struct publication *publ)
 {
        struct sk_buff *buf;
        struct distr_item *item;
@@ -165,23 +137,23 @@ void tipc_named_publish(struct publication *publ)
        publ_lists[publ->scope]->size++;
 
        if (publ->scope == TIPC_NODE_SCOPE)
-               return;
+               return NULL;
 
        buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
        if (!buf) {
                pr_warn("Publication distribution failure\n");
-               return;
+               return NULL;
        }
 
        item = (struct distr_item *)msg_data(buf_msg(buf));
        publ_to_item(item, publ);
-       named_cluster_distribute(buf);
+       return buf;
 }
 
 /**
  * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
  */
-void tipc_named_withdraw(struct publication *publ)
+struct sk_buff *tipc_named_withdraw(struct publication *publ)
 {
        struct sk_buff *buf;
        struct distr_item *item;
@@ -190,17 +162,17 @@ void tipc_named_withdraw(struct publication *publ)
        publ_lists[publ->scope]->size--;
 
        if (publ->scope == TIPC_NODE_SCOPE)
-               return;
+               return NULL;
 
        buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
        if (!buf) {
                pr_warn("Withdrawal distribution failure\n");
-               return;
+               return NULL;
        }
 
        item = (struct distr_item *)msg_data(buf_msg(buf));
        publ_to_item(item, publ);
-       named_cluster_distribute(buf);
+       return buf;
 }
 
 /*
@@ -239,29 +211,9 @@ static void named_distribute(struct list_head *message_list, u32 node,
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
-void tipc_named_node_up(unsigned long nodearg)
+void tipc_named_node_up(u32 max_item_buf, u32 node)
 {
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
-       struct list_head message_list;
-       u32 node = (u32)nodearg;
-       u32 max_item_buf = 0;
-
-       /* compute maximum amount of publication data to send per message */
-       n_ptr = tipc_node_find(node);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[0];
-               if (l_ptr)
-                       max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) /
-                               ITEM_SIZE) * ITEM_SIZE;
-               tipc_node_unlock(n_ptr);
-       }
-       if (!max_item_buf)
-               return;
-
-       /* create list of publication messages, then send them as a unit */
-       INIT_LIST_HEAD(&message_list);
+       LIST_HEAD(message_list);
 
        read_lock_bh(&tipc_nametbl_lock);
        named_distribute(&message_list, node, &publ_cluster, max_item_buf);
index 9b312ccfd43e7da41bcab4ca33d5f0f4d5be86cf..b2eed4ec1526a34efd8b191e4b952bf9718f7a2e 100644 (file)
 
 #include "name_table.h"
 
-void tipc_named_publish(struct publication *publ);
-void tipc_named_withdraw(struct publication *publ);
-void tipc_named_node_up(unsigned long node);
+#define ITEM_SIZE sizeof(struct distr_item)
+
+/**
+ * struct distr_item - publication info distributed to other nodes
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @ref: publishing port reference
+ * @key: publication key
+ *
+ * ===> All fields are stored in network byte order. <===
+ *
+ * First 3 fields identify (name or) name sequence being published.
+ * Reference field uniquely identifies port that published name sequence.
+ * Key field uniquely identifies publication, in the event a port has
+ * multiple publications of the same name sequence.
+ *
+ * Note: There is no field that identifies the publishing node because it is
+ * the same for all items contained within a publication message.
+ */
+struct distr_item {
+       __be32 type;
+       __be32 lower;
+       __be32 upper;
+       __be32 ref;
+       __be32 key;
+};
+
+struct sk_buff *tipc_named_publish(struct publication *publ);
+struct sk_buff *tipc_named_withdraw(struct publication *publ);
+void named_cluster_distribute(struct sk_buff *buf);
+void tipc_named_node_up(u32 max_item_buf, u32 node);
 void tipc_named_rcv(struct sk_buff *buf);
 void tipc_named_reinit(void);
 
index 042e8e3cabc09f84aa5dce626c57a30faf3ca32d..9d7d37d95187c77d9d7490ce7aec4de147a9f2fd 100644 (file)
@@ -664,6 +664,7 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
                                         u32 scope, u32 port_ref, u32 key)
 {
        struct publication *publ;
+       struct sk_buff *buf = NULL;
 
        if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
                pr_warn("Publication failed, local publication limit reached (%u)\n",
@@ -676,9 +677,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
                                   tipc_own_addr, port_ref, key);
        if (likely(publ)) {
                table.local_publ_count++;
-               tipc_named_publish(publ);
+               buf = tipc_named_publish(publ);
        }
        write_unlock_bh(&tipc_nametbl_lock);
+
+       if (buf)
+               named_cluster_distribute(buf);
        return publ;
 }
 
@@ -688,15 +692,19 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
 int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
 {
        struct publication *publ;
+       struct sk_buff *buf;
 
        write_lock_bh(&tipc_nametbl_lock);
        publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
        if (likely(publ)) {
                table.local_publ_count--;
-               tipc_named_withdraw(publ);
+               buf = tipc_named_withdraw(publ);
                write_unlock_bh(&tipc_nametbl_lock);
                list_del_init(&publ->pport_list);
                kfree(publ);
+
+               if (buf)
+                       named_cluster_distribute(buf);
                return 1;
        }
        write_unlock_bh(&tipc_nametbl_lock);
@@ -961,6 +969,7 @@ static void tipc_purge_publications(struct name_seq *seq)
        list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
                tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
                                         publ->ref, publ->key);
+               kfree(publ);
        }
 }
 
@@ -982,7 +991,6 @@ void tipc_nametbl_stop(void)
                hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
                        tipc_purge_publications(seq);
                }
-               continue;
        }
        kfree(table.types);
        table.types = NULL;
index 75bb39025d53eeffecf2c63827954e544eedb9db..f64375e7f99fa4081ce2a30629071ca3e546aa05 100644 (file)
@@ -39,6 +39,7 @@
 #include "name_distr.h"
 #include "subscr.h"
 #include "port.h"
+#include "socket.h"
 #include "node.h"
 #include "config.h"
 
@@ -141,7 +142,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
                        if (msg_mcast(msg))
                                tipc_port_mcast_rcv(buf, NULL);
                        else if (msg_destport(msg))
-                               tipc_port_rcv(buf);
+                               tipc_sk_rcv(buf);
                        else
                                net_route_named_msg(buf);
                        return;
@@ -164,20 +165,25 @@ void tipc_net_route_msg(struct sk_buff *buf)
        tipc_link_xmit(buf, dnode, msg_link_selector(msg));
 }
 
-void tipc_net_start(u32 addr)
+int tipc_net_start(u32 addr)
 {
        char addr_string[16];
+       int res;
 
        tipc_own_addr = addr;
        tipc_named_reinit();
        tipc_port_reinit();
-       tipc_bclink_init();
+       res = tipc_bclink_init();
+       if (res)
+               return res;
+
        tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
                             TIPC_ZONE_SCOPE, 0, tipc_own_addr);
 
        pr_info("Started in network mode\n");
        pr_info("Own node address %s, network identity %u\n",
                tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+       return 0;
 }
 
 void tipc_net_stop(void)
index f781cae8df4bd239b5886fa130bc44be92c25fe3..c6c2b46f7c283095c4e29c7e0b11c5cdea2bcc01 100644 (file)
@@ -39,7 +39,7 @@
 
 void tipc_net_route_msg(struct sk_buff *buf);
 
-void tipc_net_start(u32 addr);
+int tipc_net_start(u32 addr);
 void tipc_net_stop(void);
 
 #endif
index 3aaf73de9e2d017e96b3cc1124d5c9420d827abd..ad844d3653409a6f5ad2ceac53e1b52dc48eacaa 100644 (file)
@@ -47,7 +47,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
        int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
        u16 cmd;
 
-       if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
+       if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
                cmd = TIPC_CMD_NOT_NET_ADMIN;
        else
                cmd = req_userhdr->cmd;
index be90115cda1ac9ab158a29b3d861da3fc5d63a3b..5b44c3041be431955094de87f1815122deeea369 100644 (file)
@@ -108,7 +108,7 @@ struct tipc_node *tipc_node_create(u32 addr)
                        break;
        }
        list_add_tail_rcu(&n_ptr->list, &temp_node->list);
-       n_ptr->block_setup = WAIT_PEER_DOWN;
+       n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
        n_ptr->signature = INVALID_NODE_SIG;
 
        tipc_num_nodes++;
@@ -144,9 +144,11 @@ void tipc_node_stop(void)
 void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
        struct tipc_link **active = &n_ptr->active_links[0];
+       u32 addr = n_ptr->addr;
 
        n_ptr->working_links++;
-
+       tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE,
+                            l_ptr->bearer_id, addr);
        pr_info("Established link <%s> on network plane %c\n",
                l_ptr->name, l_ptr->net_plane);
 
@@ -203,8 +205,10 @@ static void node_select_active_links(struct tipc_node *n_ptr)
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
        struct tipc_link **active;
+       u32 addr = n_ptr->addr;
 
        n_ptr->working_links--;
+       tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr);
 
        if (!tipc_link_is_active(l_ptr)) {
                pr_info("Lost standby link <%s> on network plane %c\n",
@@ -263,24 +267,12 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 
 static void node_established_contact(struct tipc_node *n_ptr)
 {
-       tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
+       n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
        n_ptr->bclink.oos_state = 0;
        n_ptr->bclink.acked = tipc_bclink_get_last_sent();
        tipc_bclink_add_node(n_ptr->addr);
 }
 
-static void node_name_purge_complete(unsigned long node_addr)
-{
-       struct tipc_node *n_ptr;
-
-       n_ptr = tipc_node_find(node_addr);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               n_ptr->block_setup &= ~WAIT_NAMES_GONE;
-               tipc_node_unlock(n_ptr);
-       }
-}
-
 static void node_lost_contact(struct tipc_node *n_ptr)
 {
        char addr_string[16];
@@ -294,10 +286,9 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                kfree_skb_list(n_ptr->bclink.deferred_head);
                n_ptr->bclink.deferred_size = 0;
 
-               if (n_ptr->bclink.reasm_head) {
-                       kfree_skb(n_ptr->bclink.reasm_head);
-                       n_ptr->bclink.reasm_head = NULL;
-                       n_ptr->bclink.reasm_tail = NULL;
+               if (n_ptr->bclink.reasm_buf) {
+                       kfree_skb(n_ptr->bclink.reasm_buf);
+                       n_ptr->bclink.reasm_buf = NULL;
                }
 
                tipc_bclink_remove_node(n_ptr->addr);
@@ -316,12 +307,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                tipc_link_reset_fragments(l_ptr);
        }
 
-       /* Notify subscribers */
-       tipc_nodesub_notify(n_ptr);
+       n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
 
-       /* Prevent re-contact with node until cleanup is done */
-       n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
-       tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
+       /* Notify subscribers and prevent re-contact with node until
+        * cleanup is done.
+        */
+       n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
+                              TIPC_NOTIFY_NODE_DOWN;
 }
 
 struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
@@ -434,3 +426,63 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
        rcu_read_unlock();
        return buf;
 }
+
+/**
+ * tipc_node_get_linkname - get the name of a link
+ *
+ * @bearer_id: id of the bearer
+ * @addr: peer node address
+ * @linkname: link name output buffer
+ * @len: size of the link name output buffer
+ *
+ * Returns 0 on success
+ */
+int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
+{
+       struct tipc_link *link;
+       struct tipc_node *node = tipc_node_find(addr);
+
+       if ((bearer_id >= MAX_BEARERS) || !node)
+               return -EINVAL;
+       tipc_node_lock(node);
+       link = node->links[bearer_id];
+       if (link) {
+               strncpy(linkname, link->name, len);
+               tipc_node_unlock(node);
+               return 0;
+       }
+       tipc_node_unlock(node);
+       return -EINVAL;
+}
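A minimal sketch of how a caller, for example a netlink or config handler, might use the new tipc_node_get_linkname(); kernel-context only, and LINKNAME_LEN_EXAMPLE is an assumed buffer size rather than a TIPC constant.

/* Kernel-context sketch: fetch the name of the link towards a peer. */
#define LINKNAME_LEN_EXAMPLE 60

static void print_link_name_example(u32 bearer_id, u32 peer_addr)
{
	char linkname[LINKNAME_LEN_EXAMPLE];

	if (!tipc_node_get_linkname(bearer_id, peer_addr, linkname,
				    sizeof(linkname)))
		pr_info("link towards %x: %s\n", peer_addr, linkname);
	else
		pr_info("no link on bearer %u towards %x\n",
			bearer_id, peer_addr);
}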
+
+void tipc_node_unlock(struct tipc_node *node)
+{
+       LIST_HEAD(nsub_list);
+       struct tipc_link *link;
+       int pkt_sz = 0;
+       u32 addr = 0;
+
+       if (likely(!node->action_flags)) {
+               spin_unlock_bh(&node->lock);
+               return;
+       }
+
+       if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
+               list_replace_init(&node->nsub, &nsub_list);
+               node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
+       }
+       if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
+               link = node->active_links[0];
+               node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
+               if (link) {
+                       pkt_sz = ((link->max_pkt - INT_H_SIZE) / ITEM_SIZE) *
+                                 ITEM_SIZE;
+                       addr = node->addr;
+               }
+       }
+       spin_unlock_bh(&node->lock);
+
+       if (!list_empty(&nsub_list))
+               tipc_nodesub_notify(&nsub_list);
+       if (pkt_sz)
+               tipc_named_node_up(pkt_sz, addr);
+}
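The new tipc_node_unlock() replaces tasklet signalling with a "record under lock, act after unlock" pattern: pending actions are noted and cleared while the spinlock is held, and the notifications themselves run only once the lock is released. The runnable userspace example below distils that pattern with generic names; nothing in it is a TIPC symbol.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NODE_DOWN_PENDING 0x1

struct node_example {
	pthread_mutex_t lock;
	unsigned int flags;
};

static void notify_subscribers(struct node_example *n)
{
	(void)n;
	printf("notifying subscribers with the lock released\n");
}

/* Caller must hold n->lock; returns with it released */
static void node_unlock_example(struct node_example *n)
{
	bool notify_down = false;

	if (n->flags & NODE_DOWN_PENDING) {
		n->flags &= ~NODE_DOWN_PENDING;
		notify_down = true;	/* remember the work, don't do it yet */
	}
	pthread_mutex_unlock(&n->lock);

	if (notify_down)
		notify_subscribers(n);	/* may sleep or take other locks */
}

int main(void)
{
	struct node_example n = { PTHREAD_MUTEX_INITIALIZER, NODE_DOWN_PENDING };

	pthread_mutex_lock(&n.lock);
	node_unlock_example(&n);
	return 0;
}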
index 7cbb8cec1a932f881cd636a71edb0342070530ae..9087063793f26eb352f9b848a14b27545c8b02be 100644 (file)
  */
 #define INVALID_NODE_SIG 0x10000
 
-/* Flags used to block (re)establishment of contact with a neighboring node */
-#define WAIT_PEER_DOWN 0x0001  /* wait to see that peer's links are down */
-#define WAIT_NAMES_GONE        0x0002  /* wait for peer's publications to be purged */
-#define WAIT_NODE_DOWN 0x0004  /* wait until peer node is declared down */
+/* Flags used to take different actions according to flag type
+ * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
+ * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
+ * TIPC_NOTIFY_NODE_DOWN: notify node is down
+ * TIPC_NOTIFY_NODE_UP: notify node is up
+ */
+enum {
+       TIPC_WAIT_PEER_LINKS_DOWN       = (1 << 1),
+       TIPC_WAIT_OWN_LINKS_DOWN        = (1 << 2),
+       TIPC_NOTIFY_NODE_DOWN           = (1 << 3),
+       TIPC_NOTIFY_NODE_UP             = (1 << 4)
+};
+
+/**
+ * struct tipc_node_bclink - TIPC node bclink structure
+ * @acked: sequence # of last outbound b'cast message acknowledged by node
+ * @last_in: sequence # of last in-sequence b'cast message received from node
+ * @last_sent: sequence # of last b'cast message sent by node
+ * @oos_state: state tracker for handling OOS b'cast messages
+ * @deferred_size: number of OOS b'cast messages in deferred queue
+ * @deferred_head: oldest OOS b'cast message received from node
+ * @deferred_tail: newest OOS b'cast message received from node
+ * @reasm_buf: broadcast reassembly queue head from node
+ * @recv_permitted: true if node is allowed to receive b'cast messages
+ */
+struct tipc_node_bclink {
+       u32 acked;
+       u32 last_in;
+       u32 last_sent;
+       u32 oos_state;
+       u32 deferred_size;
+       struct sk_buff *deferred_head;
+       struct sk_buff *deferred_tail;
+       struct sk_buff *reasm_buf;
+       bool recv_permitted;
+};
 
 /**
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
  * @lock: spinlock governing access to structure
  * @hash: links to adjacent nodes in unsorted hash chain
- * @list: links to adjacent nodes in sorted list of cluster's nodes
- * @nsub: list of "node down" subscriptions monitoring node
  * @active_links: pointers to active links to node
  * @links: pointers to all links to node
+ * @action_flags: bit mask of different types of node actions
+ * @bclink: broadcast-related info
+ * @list: links to adjacent nodes in sorted list of cluster's nodes
  * @working_links: number of working links to node (both active and standby)
- * @block_setup: bit mask of conditions preventing link establishment to node
  * @link_cnt: number of links to node
  * @signature: node instance identifier
- * @bclink: broadcast-related info
+ * @nsub: list of "node down" subscriptions monitoring node
  * @rcu: rcu struct for tipc_node
- *    @acked: sequence # of last outbound b'cast message acknowledged by node
- *    @last_in: sequence # of last in-sequence b'cast message received from node
- *    @last_sent: sequence # of last b'cast message sent by node
- *    @oos_state: state tracker for handling OOS b'cast messages
- *    @deferred_size: number of OOS b'cast messages in deferred queue
- *    @deferred_head: oldest OOS b'cast message received from node
- *    @deferred_tail: newest OOS b'cast message received from node
- *    @reasm_head: broadcast reassembly queue head from node
- *    @reasm_tail: last broadcast fragment received from node
- *    @recv_permitted: true if node is allowed to receive b'cast messages
  */
 struct tipc_node {
        u32 addr;
        spinlock_t lock;
        struct hlist_node hash;
-       struct list_head list;
-       struct list_head nsub;
        struct tipc_link *active_links[2];
        struct tipc_link *links[MAX_BEARERS];
+       unsigned int action_flags;
+       struct tipc_node_bclink bclink;
+       struct list_head list;
        int link_cnt;
        int working_links;
-       int block_setup;
        u32 signature;
+       struct list_head nsub;
        struct rcu_head rcu;
-       struct {
-               u32 acked;
-               u32 last_in;
-               u32 last_sent;
-               u32 oos_state;
-               u32 deferred_size;
-               struct sk_buff *deferred_head;
-               struct sk_buff *deferred_tail;
-               struct sk_buff *reasm_head;
-               struct sk_buff *reasm_tail;
-               bool recv_permitted;
-       } bclink;
 };
 
 extern struct list_head tipc_node_list;
@@ -118,15 +129,18 @@ int tipc_node_active_links(struct tipc_node *n_ptr);
 int tipc_node_is_up(struct tipc_node *n_ptr);
 struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
 struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
+int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
+void tipc_node_unlock(struct tipc_node *node);
 
-static inline void tipc_node_lock(struct tipc_node *n_ptr)
+static inline void tipc_node_lock(struct tipc_node *node)
 {
-       spin_lock_bh(&n_ptr->lock);
+       spin_lock_bh(&node->lock);
 }
 
-static inline void tipc_node_unlock(struct tipc_node *n_ptr)
+static inline bool tipc_node_blocked(struct tipc_node *node)
 {
-       spin_unlock_bh(&n_ptr->lock);
+       return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
+               TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
 }
 
 #endif
index 8a7384c04add4bdc6db6ae4451ebb2232e4b338b..7c59ab1d6ecb3dc26c4efb77cd243a00341c7b5b 100644 (file)
@@ -81,14 +81,13 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
  *
  * Note: node is locked by caller
  */
-void tipc_nodesub_notify(struct tipc_node *node)
+void tipc_nodesub_notify(struct list_head *nsub_list)
 {
-       struct tipc_node_subscr *ns;
+       struct tipc_node_subscr *ns, *safe;
 
-       list_for_each_entry(ns, &node->nsub, nodesub_list) {
+       list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
                if (ns->handle_node_down) {
-                       tipc_k_signal((Handler)ns->handle_node_down,
-                                     (unsigned long)ns->usr_handle);
+                       ns->handle_node_down(ns->usr_handle);
                        ns->handle_node_down = NULL;
                }
        }
index c95d20727ded3ea70cf9532a82cd8ff20fde415d..d91b8cc81e3d786948bd4ed9ac622689c4218ea1 100644 (file)
@@ -58,6 +58,6 @@ struct tipc_node_subscr {
 void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
                            void *usr_handle, net_ev_handler handle_down);
 void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
-void tipc_nodesub_notify(struct tipc_node *node);
+void tipc_nodesub_notify(struct list_head *nsub_list);
 
 #endif
index 5c14c7801ee65095d809d502cab9f78d33d51e5e..5fd7acce01ea339b7ffe2873956e9513eb40bb49 100644 (file)
@@ -165,7 +165,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
                msg_set_destnode(msg, tipc_own_addr);
                if (dp->count == 1) {
                        msg_set_destport(msg, dp->ports[0]);
-                       tipc_port_rcv(buf);
+                       tipc_sk_rcv(buf);
                        tipc_port_list_free(dp);
                        return;
                }
@@ -180,7 +180,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
                        if ((index == 0) && (cnt != 0))
                                item = item->next;
                        msg_set_destport(buf_msg(b), item->ports[index]);
-                       tipc_port_rcv(b);
+                       tipc_sk_rcv(b);
                }
        }
 exit:
@@ -343,7 +343,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
        /* send returned message & dispose of rejected message */
        src_node = msg_prevnode(msg);
        if (in_own_node(src_node))
-               tipc_port_rcv(rbuf);
+               tipc_sk_rcv(rbuf);
        else
                tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
 exit:
@@ -754,37 +754,6 @@ int tipc_port_shutdown(u32 ref)
        return tipc_port_disconnect(ref);
 }
 
-/**
- * tipc_port_rcv - receive message from lower layer and deliver to port user
- */
-int tipc_port_rcv(struct sk_buff *buf)
-{
-       struct tipc_port *p_ptr;
-       struct tipc_msg *msg = buf_msg(buf);
-       u32 destport = msg_destport(msg);
-       u32 dsz = msg_data_sz(msg);
-       u32 err;
-
-       /* forward unresolved named message */
-       if (unlikely(!destport)) {
-               tipc_net_route_msg(buf);
-               return dsz;
-       }
-
-       /* validate destination & pass to port, otherwise reject message */
-       p_ptr = tipc_port_lock(destport);
-       if (likely(p_ptr)) {
-               err = tipc_sk_rcv(&tipc_port_to_sock(p_ptr)->sk, buf);
-               tipc_port_unlock(p_ptr);
-               if (likely(!err))
-                       return dsz;
-       } else {
-               err = TIPC_ERR_NO_PORT;
-       }
-
-       return tipc_reject_msg(buf, err);
-}
-
 /*
  *  tipc_port_iovec_rcv: Concatenate and deliver sectioned
  *                       message for this node.
@@ -798,7 +767,7 @@ static int tipc_port_iovec_rcv(struct tipc_port *sender,
 
        res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
        if (likely(buf))
-               tipc_port_rcv(buf);
+               tipc_sk_rcv(buf);
        return res;
 }
 
index a00397393bd1d9179bd779339500e34bd710aa5b..cf4ca5b1d9a48ae7752f9f476cad079e3f115da8 100644 (file)
 #include "msg.h"
 #include "node_subscr.h"
 
-#define TIPC_FLOW_CONTROL_WIN 512
-#define CONN_OVERLOAD_LIMIT    ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
-                               SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
+#define TIPC_CONNACK_INTV         256
+#define TIPC_FLOWCTRL_WIN        (TIPC_CONNACK_INTV * 2)
+#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
+                                 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 
 /**
  * struct tipc_port - TIPC port structure
@@ -134,7 +135,6 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
 /*
  * TIPC messaging routines
  */
-int tipc_port_rcv(struct sk_buff *buf);
 
 int tipc_send(struct tipc_port *port,
              struct iovec const *msg_sect,
@@ -187,7 +187,7 @@ static inline void tipc_port_unlock(struct tipc_port *p_ptr)
 
 static inline int tipc_port_congested(struct tipc_port *p_ptr)
 {
-       return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
+       return ((p_ptr->sent - p_ptr->acked) >= TIPC_FLOWCTRL_WIN);
 }
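The renamed constants tie together as follows: the receiver acknowledges every TIPC_CONNACK_INTV (256) consumed messages, the sender treats the connection as congested once sent minus acked reaches TIPC_FLOWCTRL_WIN (2 * 256 = 512), and the overload ceiling is (2 * 512 + 1) maximum-size messages scaled by skb truesize. A small self-checking restatement of that arithmetic in plain integers; MAX_USER_MSG_TRUESIZE is an assumed stand-in for SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE).

#include <assert.h>
#include <stdio.h>

#define CONNACK_INTV		256
#define FLOWCTRL_WIN		(CONNACK_INTV * 2)
#define MAX_USER_MSG_TRUESIZE	70000L	/* assumption, not the kernel value */
#define CONN_OVERLOAD_LIMIT	((FLOWCTRL_WIN * 2 + 1) * MAX_USER_MSG_TRUESIZE)

static int port_congested(unsigned int sent, unsigned int acked)
{
	return (sent - acked) >= FLOWCTRL_WIN;
}

int main(void)
{
	assert(FLOWCTRL_WIN == 512);
	assert(!port_congested(511, 0));	/* one below the window */
	assert(port_congested(512, 0));		/* window reached */
	assert(port_congested(768, 256));	/* acks slide the window */
	printf("overload limit = %ld bytes\n", (long)CONN_OVERLOAD_LIMIT);
	return 0;
}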
 
 
index 3c0256962f7dafa4ee3b11d69aed963822c888c2..08d87fc80b10de342d752b13814262105471b2cb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * net/tipc/socket.c: TIPC socket API
+* net/tipc/socket.c: TIPC socket API
  *
  * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
@@ -36,6 +36,7 @@
 
 #include "core.h"
 #include "port.h"
+#include "node.h"
 
 #include <linux/export.h>
 
@@ -44,7 +45,7 @@
 
 #define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
 
-static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
+static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
 static int tipc_release(struct socket *sock);
@@ -195,11 +196,12 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        sock->state = state;
 
        sock_init_data(sock, sk);
-       sk->sk_backlog_rcv = backlog_rcv;
+       sk->sk_backlog_rcv = tipc_backlog_rcv;
        sk->sk_rcvbuf = sysctl_tipc_rmem[1];
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
-       tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
+       tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+       atomic_set(&tsk->dupl_rcvcnt, 0);
        tipc_port_unlock(port);
 
        if (sock->state == SS_READY) {
@@ -983,10 +985,11 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
        return 0;
 }
 
-static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
+static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
 {
        struct sock *sk = sock->sk;
        DEFINE_WAIT(wait);
+       long timeo = *timeop;
        int err;
 
        for (;;) {
@@ -1011,6 +1014,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
                        break;
        }
        finish_wait(sk_sleep(sk), &wait);
+       *timeop = timeo;
        return err;
 }
 
@@ -1054,7 +1058,7 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
 restart:
 
        /* Look for a message in receive queue; wait if necessary */
-       res = tipc_wait_for_rcvmsg(sock, timeo);
+       res = tipc_wait_for_rcvmsg(sock, &timeo);
        if (res)
                goto exit;
 
@@ -1100,7 +1104,7 @@ restart:
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
                if ((sock->state != SS_READY) &&
-                   (++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+                   (++port->conn_unacked >= TIPC_CONNACK_INTV))
                        tipc_acknowledge(port->ref, port->conn_unacked);
                advance_rx_queue(sk);
        }
@@ -1152,7 +1156,7 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
 
 restart:
        /* Look for a message in receive queue; wait if necessary */
-       res = tipc_wait_for_rcvmsg(sock, timeo);
+       res = tipc_wait_for_rcvmsg(sock, &timeo);
        if (res)
                goto exit;
 
@@ -1209,7 +1213,7 @@ restart:
 
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
-               if (unlikely(++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+               if (unlikely(++port->conn_unacked >= TIPC_CONNACK_INTV))
                        tipc_acknowledge(port->ref, port->conn_unacked);
                advance_rx_queue(sk);
        }
@@ -1415,7 +1419,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 }
 
 /**
- * backlog_rcv - handle incoming message from backlog queue
+ * tipc_backlog_rcv - handle incoming message from backlog queue
  * @sk: socket
  * @buf: message
  *
@@ -1423,47 +1427,73 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
  *
  * Returns 0
  */
-static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
+static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
 {
        u32 res;
+       struct tipc_sock *tsk = tipc_sk(sk);
 
        res = filter_rcv(sk, buf);
-       if (res)
+       if (unlikely(res))
                tipc_reject_msg(buf, res);
+
+       if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
+               atomic_add(buf->truesize, &tsk->dupl_rcvcnt);
+
        return 0;
 }
 
 /**
  * tipc_sk_rcv - handle incoming message
- * @sk:  socket receiving message
- * @buf: message
- *
- * Called with port lock already taken.
- *
- * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
+ * @buf: buffer containing arriving message
+ * Consumes buffer
+ * Returns 0 on success, otherwise -EHOSTUNREACH
  */
-u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf)
+int tipc_sk_rcv(struct sk_buff *buf)
 {
-       u32 res;
+       struct tipc_sock *tsk;
+       struct tipc_port *port;
+       struct sock *sk;
+       u32 dport = msg_destport(buf_msg(buf));
+       int err = TIPC_OK;
+       uint limit;
 
-       /*
-        * Process message if socket is unlocked; otherwise add to backlog queue
-        *
-        * This code is based on sk_receive_skb(), but must be distinct from it
-        * since a TIPC-specific filter/reject mechanism is utilized
-        */
+       /* Forward unresolved named message */
+       if (unlikely(!dport)) {
+               tipc_net_route_msg(buf);
+               return 0;
+       }
+
+       /* Validate destination */
+       port = tipc_port_lock(dport);
+       if (unlikely(!port)) {
+               err = TIPC_ERR_NO_PORT;
+               goto exit;
+       }
+
+       tsk = tipc_port_to_sock(port);
+       sk = &tsk->sk;
+
+       /* Queue message */
        bh_lock_sock(sk);
+
        if (!sock_owned_by_user(sk)) {
-               res = filter_rcv(sk, buf);
+               err = filter_rcv(sk, buf);
        } else {
-               if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
-                       res = TIPC_ERR_OVERLOAD;
-               else
-                       res = TIPC_OK;
+               if (sk->sk_backlog.len == 0)
+                       atomic_set(&tsk->dupl_rcvcnt, 0);
+               limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
+               if (sk_add_backlog(sk, buf, limit))
+                       err = TIPC_ERR_OVERLOAD;
        }
+
        bh_unlock_sock(sk);
+       tipc_port_unlock(port);
 
-       return res;
+       if (likely(!err))
+               return 0;
+exit:
+       tipc_reject_msg(buf, err);
+       return -EHOSTUNREACH;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -1905,6 +1935,28 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
        return put_user(sizeof(value), ol);
 }
 
+int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
+{
+       struct tipc_sioc_ln_req lnr;
+       void __user *argp = (void __user *)arg;
+
+       switch (cmd) {
+       case SIOCGETLINKNAME:
+               if (copy_from_user(&lnr, argp, sizeof(lnr)))
+                       return -EFAULT;
+               if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer,
+                                           lnr.linkname, TIPC_MAX_LINK_NAME)) {
+                       if (copy_to_user(argp, &lnr, sizeof(lnr)))
+                               return -EFAULT;
+                       return 0;
+               }
+               return -EADDRNOTAVAIL;
+               break;
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+
 /* Protocol switches for the various types of TIPC sockets */
 
 static const struct proto_ops msg_ops = {
@@ -1917,7 +1969,7 @@ static const struct proto_ops msg_ops = {
        .accept         = sock_no_accept,
        .getname        = tipc_getname,
        .poll           = tipc_poll,
-       .ioctl          = sock_no_ioctl,
+       .ioctl          = tipc_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = tipc_shutdown,
        .setsockopt     = tipc_setsockopt,
@@ -1938,7 +1990,7 @@ static const struct proto_ops packet_ops = {
        .accept         = tipc_accept,
        .getname        = tipc_getname,
        .poll           = tipc_poll,
-       .ioctl          = sock_no_ioctl,
+       .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
        .setsockopt     = tipc_setsockopt,
@@ -1959,7 +2011,7 @@ static const struct proto_ops stream_ops = {
        .accept         = tipc_accept,
        .getname        = tipc_getname,
        .poll           = tipc_poll,
-       .ioctl          = sock_no_ioctl,
+       .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
        .setsockopt     = tipc_setsockopt,
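
The new tipc_ioctl() handler above exposes link names to userspace through SIOCGETLINKNAME and is wired into all three proto_ops tables. A hedged userspace sketch (not part of this diff) of how a caller might use it follows; the struct tipc_sioc_ln_req layout is inferred from the fields the handler touches (peer, bearer_id, linkname) and is assumed to be exported through <linux/tipc.h>:

/*
 * Hedged userspace sketch: query a TIPC link name via the new ioctl.
 * Field names follow the kernel handler above; the exact uapi header
 * layout is an assumption, not shown in this diff.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int print_link_name(int sk, __u32 peer, __u32 bearer_id)
{
        struct tipc_sioc_ln_req lnr;

        memset(&lnr, 0, sizeof(lnr));
        lnr.peer = peer;           /* TIPC address of the peer node */
        lnr.bearer_id = bearer_id; /* bearer the link runs over */

        if (ioctl(sk, SIOCGETLINKNAME, &lnr) < 0)
                return -1;         /* e.g. EADDRNOTAVAIL: no such link */

        printf("link: %.*s\n", TIPC_MAX_LINK_NAME, lnr.linkname);
        return 0;
}

A socket created with socket(AF_TIPC, SOCK_RDM, 0) would typically be passed as sk; as the handler shows, the result is copied back to userspace only when tipc_node_get_linkname() succeeds, otherwise the caller gets -EADDRNOTAVAIL.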
index 74e5c7f195a660d6a1e88d1b506cf4e7371566f1..3afcd2a70b313c21d67752606b839e613d3cf9df 100644 (file)
  * @port: port - interacts with 'sk' and with the rest of the TIPC stack
  * @peer_name: the peer of the connection, if any
  * @conn_timeout: the time we can wait for an unresponded setup request
+ * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
  */
 
 struct tipc_sock {
        struct sock sk;
        struct tipc_port port;
        unsigned int conn_timeout;
+       atomic_t dupl_rcvcnt;
 };
 
 static inline struct tipc_sock *tipc_sk(const struct sock *sk)
@@ -67,6 +69,6 @@ static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
        tsk->sk.sk_write_space(&tsk->sk);
 }
 
-u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf);
+int tipc_sk_rcv(struct sk_buff *buf);
 
 #endif
index bb7e8ba821f44014d65f3669e017b8c9cca54f53..7b9114e0a5b14949e11f312e34a8b5b26c5a5a39 100644 (file)
@@ -1492,10 +1492,14 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (len > sk->sk_sndbuf - 32)
                goto out;
 
-       if (len > SKB_MAX_ALLOC)
+       if (len > SKB_MAX_ALLOC) {
                data_len = min_t(size_t,
                                 len - SKB_MAX_ALLOC,
                                 MAX_SKB_FRAGS * PAGE_SIZE);
+               data_len = PAGE_ALIGN(data_len);
+
+               BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
+       }
 
        skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
                                   msg->msg_flags & MSG_DONTWAIT, &err,
@@ -1670,6 +1674,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
                data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
 
+               data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
+
                skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
                                           msg->msg_flags & MSG_DONTWAIT, &err,
                                           get_order(UNIX_SKB_FRAGS_SZ));
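
To make the effect of the new PAGE_ALIGN() rounding concrete, a hedged worked example (4 KiB pages assumed; the exact value of SKB_MAX_ALLOC is architecture dependent): for a datagram of len = SKB_MAX_ALLOC + 5000 bytes, data_len starts as min(5000, MAX_SKB_FRAGS * PAGE_SIZE) = 5000 (assuming that product exceeds 5000) and is rounded up to PAGE_ALIGN(5000) = 8192, so sock_alloc_send_pskb() is asked for an 8192-byte paged area and a linear head of len - 8192. The BUILD_BUG_ON() records the assumption that makes this safe: since SKB_MAX_ALLOC >= PAGE_SIZE, rounding data_len up by at most PAGE_SIZE - 1 bytes can never push it past len, so the linear part stays positive. In the stream path, the added min_t(size_t, size, PAGE_ALIGN(data_len)) serves the same purpose by capping the rounded value at the requested size.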
index 5adfd94c5b85d3d48a6d48d3a4c7c2fa98526d8b..85d232bed87d21f3c23cd695b83defef5a6f22c1 100644 (file)
@@ -1925,9 +1925,23 @@ static struct miscdevice vsock_device = {
        .fops           = &vsock_device_ops,
 };
 
-static int __vsock_core_init(void)
+int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
 {
-       int err;
+       int err = mutex_lock_interruptible(&vsock_register_mutex);
+
+       if (err)
+               return err;
+
+       if (transport) {
+               err = -EBUSY;
+               goto err_busy;
+       }
+
+       /* Transport must be the owner of the protocol so that it can't
+        * unload while there are open sockets.
+        */
+       vsock_proto.owner = owner;
+       transport = t;
 
        vsock_init_tables();
 
@@ -1951,36 +1965,19 @@ static int __vsock_core_init(void)
                goto err_unregister_proto;
        }
 
+       mutex_unlock(&vsock_register_mutex);
        return 0;
 
 err_unregister_proto:
        proto_unregister(&vsock_proto);
 err_misc_deregister:
        misc_deregister(&vsock_device);
-       return err;
-}
-
-int vsock_core_init(const struct vsock_transport *t)
-{
-       int retval = mutex_lock_interruptible(&vsock_register_mutex);
-       if (retval)
-               return retval;
-
-       if (transport) {
-               retval = -EBUSY;
-               goto out;
-       }
-
-       transport = t;
-       retval = __vsock_core_init();
-       if (retval)
-               transport = NULL;
-
-out:
+       transport = NULL;
+err_busy:
        mutex_unlock(&vsock_register_mutex);
-       return retval;
+       return err;
 }
-EXPORT_SYMBOL_GPL(vsock_core_init);
+EXPORT_SYMBOL_GPL(__vsock_core_init);
 
 void vsock_core_exit(void)
 {
@@ -2000,5 +1997,5 @@ EXPORT_SYMBOL_GPL(vsock_core_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Socket Family");
-MODULE_VERSION("1.0.0.0-k");
+MODULE_VERSION("1.0.1.0-k");
 MODULE_LICENSE("GPL v2");
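
The __vsock_core_init() change above makes the transport the owner of the protocol: the caller now passes its module, which the core stores in vsock_proto.owner so the transport cannot be unloaded while sockets are open, and the transport pointer is reset to NULL if registration fails partway through. A hedged sketch of the caller side (not part of this diff; the vsock_core_init() wrapper passing THIS_MODULE is assumed to live in <net/af_vsock.h>):

/*
 * Hedged sketch of a transport module registering with the vsock core.
 * my_transport and the exact callback set are placeholders; only
 * __vsock_core_init(t, owner) appears in the diff above.
 */
#include <linux/module.h>
#include <net/af_vsock.h>

static const struct vsock_transport my_transport = {
        /* transport callbacks (connect, shutdown, ...) would go here */
};

static int __init my_transport_init(void)
{
        /* equivalent to the assumed vsock_core_init(&my_transport) wrapper */
        return __vsock_core_init(&my_transport, THIS_MODULE);
}

static void __exit my_transport_exit(void)
{
        vsock_core_exit();
}

module_init(my_transport_init);
module_exit(my_transport_exit);
MODULE_LICENSE("GPL v2");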
index 16d08b39921071479456e2033c5e371d381175b7..405f3c4cf70ca3617a4101e1bad278b93d7ae1b7 100644 (file)
@@ -95,6 +95,43 @@ config CFG80211_CERTIFICATION_ONUS
          you are a wireless researcher and are working in a controlled
          and approved environment by your local regulatory agency.
 
+config CFG80211_REG_CELLULAR_HINTS
+       bool "cfg80211 regulatory support for cellular base station hints"
+       depends on CFG80211_CERTIFICATION_ONUS
+       ---help---
+         This option enables support for parsing regulatory hints
+         from cellular base stations. If enabled, and at least one driver
+         claims support for parsing cellular base station hints, the
+         regulatory core will allow and parse these regulatory hints.
+         The regulatory core will only apply these regulatory hints to
+         drivers that support this feature. You should only enable this
+         feature if you have tested and validated it on your systems.
+
+config CFG80211_REG_RELAX_NO_IR
+       bool "cfg80211 support for NO_IR relaxation"
+       depends on CFG80211_CERTIFICATION_ONUS
+       ---help---
+        This option enables support for relaxation of the NO_IR flag in
+        situations for which certain regulatory bodies have clarified how
+        relaxation can occur. This feature has an inherent dependency on
+        userspace features which must have been properly tested and, as
+        such, is not enabled by default.
+
+        One example of such a relaxation is allowing a P2P group owner (GO)
+        to operate on channels marked with NO_IR if there is an additional
+        BSS interface which is associated to an AP that userspace assumes
+        or confirms to be an authorized master, i.e., one with radar
+        detection support and DFS capabilities. However, note that in order
+        not to create daisy-chain scenarios, this relaxation is not allowed
+        in cases where the BSS client is associated to a P2P GO; in
+        addition, a P2P GO instantiated on a channel due to this relaxation
+        should not allow connections from non-P2P clients.
+
+        The regulatory core will apply these relaxations only for drivers
+        that declare support for this feature via the appropriate channel
+        flags and capabilities in their registration flow.
+
 config CFG80211_DEFAULT_PS
        bool "enable powersave by default"
        depends on CFG80211
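
Both new symbols above sit behind CFG80211_CERTIFICATION_ONUS, per their depends lines. A minimal .config sketch enabling them (assuming the prerequisites of CFG80211_CERTIFICATION_ONUS, which are not shown in this excerpt, are already satisfied):

CONFIG_CFG80211=y
CONFIG_CFG80211_CERTIFICATION_ONUS=y
CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_REG_RELAX_NO_IR=y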
index 3e02ade508d8804d4158a03ad5de474d64bf7594..bdad1f951561b3d8b7c75d8992e1c3c1a623f146 100644 (file)
@@ -6,8 +6,8 @@
 #include "rdev-ops.h"
 
 
-static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
-                             struct net_device *dev, bool notify)
+int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+                      struct net_device *dev, bool notify)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
index 9c9501a35fb5c6a43a142132306998405e086774..992b34070bcb16f9fe4323bae293996524f1373c 100644 (file)
@@ -326,28 +326,57 @@ static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
 
 
 int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
-                                 const struct cfg80211_chan_def *chandef)
+                                 const struct cfg80211_chan_def *chandef,
+                                 enum nl80211_iftype iftype)
 {
        int width;
-       int r;
+       int ret;
 
        if (WARN_ON(!cfg80211_chandef_valid(chandef)))
                return -EINVAL;
 
-       width = cfg80211_chandef_get_width(chandef);
-       if (width < 0)
-               return -EINVAL;
+       switch (iftype) {
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_MESH_POINT:
+               width = cfg80211_chandef_get_width(chandef);
+               if (width < 0)
+                       return -EINVAL;
 
-       r = cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq1,
-                                           width);
-       if (r)
-               return r;
+               ret = cfg80211_get_chans_dfs_required(wiphy,
+                                                     chandef->center_freq1,
+                                                     width);
+               if (ret < 0)
+                       return ret;
+               else if (ret > 0)
+                       return BIT(chandef->width);
 
-       if (!chandef->center_freq2)
-               return 0;
+               if (!chandef->center_freq2)
+                       return 0;
+
+               ret = cfg80211_get_chans_dfs_required(wiphy,
+                                                     chandef->center_freq2,
+                                                     width);
+               if (ret < 0)
+                       return ret;
+               else if (ret > 0)
+                       return BIT(chandef->width);
 
-       return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2,
-                                              width);
+               break;
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_P2P_DEVICE:
+               break;
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NUM_NL80211_IFTYPES:
+               WARN_ON(1);
+       }
+
+       return 0;
 }
 EXPORT_SYMBOL(cfg80211_chandef_dfs_required);
 
@@ -587,12 +616,14 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                width = 5;
                break;
        case NL80211_CHAN_WIDTH_10:
+               prohibited_flags |= IEEE80211_CHAN_NO_10MHZ;
                width = 10;
                break;
        case NL80211_CHAN_WIDTH_20:
                if (!ht_cap->ht_supported)
                        return false;
        case NL80211_CHAN_WIDTH_20_NOHT:
+               prohibited_flags |= IEEE80211_CHAN_NO_20MHZ;
                width = 20;
                break;
        case NL80211_CHAN_WIDTH_40:
@@ -661,17 +692,111 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_chandef_usable);
 
+/*
+ * For GO only, check if the channel can be used under permissive conditions
+ * mandated by some regulatory bodies, i.e., the channel is marked with
+ * IEEE80211_CHAN_GO_CONCURRENT and there is an additional station interface
+ * associated to an AP on the same channel or on the same UNII band
+ * (assuming that the AP is an authorized master).
+ * In addition allow the GO to operate on a channel on which indoor operation is
+ * allowed, iff we are currently operating in an indoor environment.
+ */
+static bool cfg80211_go_permissive_chan(struct cfg80211_registered_device *rdev,
+                                       struct ieee80211_channel *chan)
+{
+       struct wireless_dev *wdev_iter;
+       struct wiphy *wiphy = wiphy_idx_to_wiphy(rdev->wiphy_idx);
+
+       ASSERT_RTNL();
+
+       if (!config_enabled(CONFIG_CFG80211_REG_RELAX_NO_IR) ||
+           !(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR))
+               return false;
+
+       if (regulatory_indoor_allowed() &&
+           (chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
+               return true;
+
+       if (!(chan->flags & IEEE80211_CHAN_GO_CONCURRENT))
+               return false;
+
+       /*
+        * Generally, it is possible to rely on another device/driver to allow
+        * the GO concurrent relaxation, however, since the device can further
+        * enforce the relaxation (by doing a similar verification to this),
+        * and thus fail the GO instantiation, consider only the interfaces of
+        * the current registered device.
+        */
+       list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
+               struct ieee80211_channel *other_chan = NULL;
+               int r1, r2;
+
+               if (wdev_iter->iftype != NL80211_IFTYPE_STATION ||
+                   !netif_running(wdev_iter->netdev))
+                       continue;
+
+               wdev_lock(wdev_iter);
+               if (wdev_iter->current_bss)
+                       other_chan = wdev_iter->current_bss->pub.channel;
+               wdev_unlock(wdev_iter);
+
+               if (!other_chan)
+                       continue;
+
+               if (chan == other_chan)
+                       return true;
+
+               if (chan->band != IEEE80211_BAND_5GHZ)
+                       continue;
+
+               r1 = cfg80211_get_unii(chan->center_freq);
+               r2 = cfg80211_get_unii(other_chan->center_freq);
+
+               if (r1 != -EINVAL && r1 == r2) {
+                       /*
+                        * At some locations channels 149-165 are considered a
+                        * bundle, but at other locations, e.g., Indonesia,
+                        * channels 149-161 are considered a bundle while
+                        * channel 165 is left out and considered to be in a
+                        * different bundle. Thus, in case there is a
+                        * station interface connected to an AP on channel 165,
+                        * it is assumed that channels 149-161 are allowed for
+                        * GO operations. However, having a station interface
+                        * connected to an AP on channels 149-161 does not
+                        * allow GO operation on channel 165.
+                        */
+                       if (chan->center_freq == 5825 &&
+                           other_chan->center_freq != 5825)
+                               continue;
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
-                            struct cfg80211_chan_def *chandef)
+                            struct cfg80211_chan_def *chandef,
+                            enum nl80211_iftype iftype)
 {
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        bool res;
        u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
-                              IEEE80211_CHAN_NO_IR |
                               IEEE80211_CHAN_RADAR;
 
-       trace_cfg80211_reg_can_beacon(wiphy, chandef);
+       trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
 
-       if (cfg80211_chandef_dfs_required(wiphy, chandef) > 0 &&
+       /*
+        * Under certain conditions suggested by some regulatory bodies,
+        * a GO can operate on channels marked with IEEE80211_CHAN_NO_IR,
+        * so set this flag only if such relaxations are not enabled and
+        * the conditions are not met.
+        */
+       if (iftype != NL80211_IFTYPE_P2P_GO ||
+           !cfg80211_go_permissive_chan(rdev, chandef->chan))
+               prohibited_flags |= IEEE80211_CHAN_NO_IR;
+
+       if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
            cfg80211_chandef_dfs_available(wiphy, chandef)) {
                /* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */
                prohibited_flags = IEEE80211_CHAN_DISABLED;
@@ -701,6 +826,8 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
                        enum cfg80211_chan_mode *chanmode,
                        u8 *radar_detect)
 {
+       int ret;
+
        *chan = NULL;
        *chanmode = CHAN_MODE_UNDEFINED;
 
@@ -743,8 +870,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
                        *chan = wdev->chandef.chan;
                        *chanmode = CHAN_MODE_SHARED;
 
-                       if (cfg80211_chandef_dfs_required(wdev->wiphy,
-                                                         &wdev->chandef))
+                       ret = cfg80211_chandef_dfs_required(wdev->wiphy,
+                                                           &wdev->chandef,
+                                                           wdev->iftype);
+                       WARN_ON(ret < 0);
+                       if (ret > 0)
                                *radar_detect |= BIT(wdev->chandef.width);
                }
                return;
@@ -753,8 +883,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
                        *chan = wdev->chandef.chan;
                        *chanmode = CHAN_MODE_SHARED;
 
-                       if (cfg80211_chandef_dfs_required(wdev->wiphy,
-                                                         &wdev->chandef))
+                       ret = cfg80211_chandef_dfs_required(wdev->wiphy,
+                                                           &wdev->chandef,
+                                                           wdev->iftype);
+                       WARN_ON(ret < 0);
+                       if (ret > 0)
                                *radar_detect |= BIT(wdev->chandef.width);
                }
                return;
index 086cddd03ba6edd79d1609ecf713146bc756c1ff..d03d8bdb29cafc4097458433c0d9b3ed447f765c 100644 (file)
@@ -69,7 +69,7 @@ struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
 
 int get_wiphy_idx(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        return rdev->wiphy_idx;
 }
@@ -210,15 +210,12 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
        }
 }
 
-static int cfg80211_rfkill_set_block(void *data, bool blocked)
+void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = data;
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct wireless_dev *wdev;
 
-       if (!blocked)
-               return 0;
-
-       rtnl_lock();
+       ASSERT_RTNL();
 
        list_for_each_entry(wdev, &rdev->wdev_list, list) {
                if (wdev->netdev) {
@@ -234,7 +231,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
                        break;
                }
        }
+}
+EXPORT_SYMBOL_GPL(cfg80211_shutdown_all_interfaces);
+
+static int cfg80211_rfkill_set_block(void *data, bool blocked)
+{
+       struct cfg80211_registered_device *rdev = data;
+
+       if (!blocked)
+               return 0;
 
+       rtnl_lock();
+       cfg80211_shutdown_all_interfaces(&rdev->wiphy);
        rtnl_unlock();
 
        return 0;
@@ -260,6 +268,45 @@ static void cfg80211_event_work(struct work_struct *work)
        rtnl_unlock();
 }
 
+void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
+{
+       struct cfg80211_iface_destroy *item;
+
+       ASSERT_RTNL();
+
+       spin_lock_irq(&rdev->destroy_list_lock);
+       while ((item = list_first_entry_or_null(&rdev->destroy_list,
+                                               struct cfg80211_iface_destroy,
+                                               list))) {
+               struct wireless_dev *wdev, *tmp;
+               u32 nlportid = item->nlportid;
+
+               list_del(&item->list);
+               kfree(item);
+               spin_unlock_irq(&rdev->destroy_list_lock);
+
+               list_for_each_entry_safe(wdev, tmp, &rdev->wdev_list, list) {
+                       if (nlportid == wdev->owner_nlportid)
+                               rdev_del_virtual_intf(rdev, wdev);
+               }
+
+               spin_lock_irq(&rdev->destroy_list_lock);
+       }
+       spin_unlock_irq(&rdev->destroy_list_lock);
+}
+
+static void cfg80211_destroy_iface_wk(struct work_struct *work)
+{
+       struct cfg80211_registered_device *rdev;
+
+       rdev = container_of(work, struct cfg80211_registered_device,
+                           destroy_work);
+
+       rtnl_lock();
+       cfg80211_destroy_ifaces(rdev);
+       rtnl_unlock();
+}
+
 /* exported functions */
 
 struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
@@ -318,6 +365,10 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
        rdev->wiphy.dev.class = &ieee80211_class;
        rdev->wiphy.dev.platform_data = rdev;
 
+       INIT_LIST_HEAD(&rdev->destroy_list);
+       spin_lock_init(&rdev->destroy_list_lock);
+       INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk);
+
 #ifdef CONFIG_CFG80211_DEFAULT_PS
        rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
 #endif
@@ -351,6 +402,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
        rdev->wiphy.rts_threshold = (u32) -1;
        rdev->wiphy.coverage_class = 0;
 
+       rdev->wiphy.max_num_csa_counters = 1;
+
        return &rdev->wiphy;
 }
 EXPORT_SYMBOL(wiphy_new);
@@ -396,10 +449,7 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
                for (j = 0; j < c->n_limits; j++) {
                        u16 types = c->limits[j].types;
 
-                       /*
-                        * interface types shouldn't overlap, this is
-                        * used in cfg80211_can_change_interface()
-                        */
+                       /* interface types shouldn't overlap */
                        if (WARN_ON(types & all_iftypes))
                                return -EINVAL;
                        all_iftypes |= types;
@@ -435,7 +485,7 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
 
 int wiphy_register(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        int res;
        enum ieee80211_band band;
        struct ieee80211_supported_band *sband;
@@ -616,7 +666,7 @@ EXPORT_SYMBOL(wiphy_register);
 
 void wiphy_rfkill_start_polling(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        if (!rdev->ops->rfkill_poll)
                return;
@@ -627,7 +677,7 @@ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
 
 void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        rfkill_pause_polling(rdev->rfkill);
 }
@@ -635,7 +685,7 @@ EXPORT_SYMBOL(wiphy_rfkill_stop_polling);
 
 void wiphy_unregister(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        wait_event(rdev->dev_wait, ({
                int __count;
@@ -650,7 +700,7 @@ void wiphy_unregister(struct wiphy *wiphy)
        rtnl_lock();
        rdev->wiphy.registered = false;
 
-       BUG_ON(!list_empty(&rdev->wdev_list));
+       WARN_ON(!list_empty(&rdev->wdev_list));
 
        /*
         * First remove the hardware from everywhere, this makes
@@ -675,6 +725,7 @@ void wiphy_unregister(struct wiphy *wiphy)
        cancel_work_sync(&rdev->conn_work);
        flush_work(&rdev->event_work);
        cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
+       flush_work(&rdev->destroy_work);
 
 #ifdef CONFIG_PM
        if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -707,7 +758,7 @@ EXPORT_SYMBOL(wiphy_free);
 
 void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        if (rfkill_set_hw_state(rdev->rfkill, blocked))
                schedule_work(&rdev->rfkill_sync);
@@ -716,7 +767,7 @@ EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
 
 void cfg80211_unregister_wdev(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        ASSERT_RTNL();
 
@@ -751,23 +802,23 @@ void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
                rdev->num_running_monitor_ifaces += num;
 }
 
-void cfg80211_leave(struct cfg80211_registered_device *rdev,
-                   struct wireless_dev *wdev)
+void __cfg80211_leave(struct cfg80211_registered_device *rdev,
+                     struct wireless_dev *wdev)
 {
        struct net_device *dev = wdev->netdev;
 
        ASSERT_RTNL();
+       ASSERT_WDEV_LOCK(wdev);
 
        switch (wdev->iftype) {
        case NL80211_IFTYPE_ADHOC:
-               cfg80211_leave_ibss(rdev, dev, true);
+               __cfg80211_leave_ibss(rdev, dev, true);
                break;
        case NL80211_IFTYPE_P2P_CLIENT:
        case NL80211_IFTYPE_STATION:
                if (rdev->sched_scan_req && dev == rdev->sched_scan_req->dev)
                        __cfg80211_stop_sched_scan(rdev, false);
 
-               wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT
                kfree(wdev->wext.ie);
                wdev->wext.ie = NULL;
@@ -776,32 +827,60 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
 #endif
                cfg80211_disconnect(rdev, dev,
                                    WLAN_REASON_DEAUTH_LEAVING, true);
-               wdev_unlock(wdev);
                break;
        case NL80211_IFTYPE_MESH_POINT:
-               cfg80211_leave_mesh(rdev, dev);
+               __cfg80211_leave_mesh(rdev, dev);
                break;
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
-               cfg80211_stop_ap(rdev, dev, true);
+               __cfg80211_stop_ap(rdev, dev, true);
                break;
        default:
                break;
        }
 }
 
+void cfg80211_leave(struct cfg80211_registered_device *rdev,
+                   struct wireless_dev *wdev)
+{
+       wdev_lock(wdev);
+       __cfg80211_leave(rdev, wdev);
+       wdev_unlock(wdev);
+}
+
+void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
+                        gfp_t gfp)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+       struct cfg80211_event *ev;
+       unsigned long flags;
+
+       trace_cfg80211_stop_iface(wiphy, wdev);
+
+       ev = kzalloc(sizeof(*ev), gfp);
+       if (!ev)
+               return;
+
+       ev->type = EVENT_STOPPED;
+
+       spin_lock_irqsave(&wdev->event_lock, flags);
+       list_add_tail(&ev->list, &wdev->event_list);
+       spin_unlock_irqrestore(&wdev->event_lock, flags);
+       queue_work(cfg80211_wq, &rdev->event_work);
+}
+EXPORT_SYMBOL(cfg80211_stop_iface);
+
 static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                                         unsigned long state, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev;
-       int ret;
 
        if (!wdev)
                return NOTIFY_DONE;
 
-       rdev = wiphy_to_dev(wdev->wiphy);
+       rdev = wiphy_to_rdev(wdev->wiphy);
 
        WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED);
 
@@ -959,13 +1038,14 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
        case NETDEV_PRE_UP:
                if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
                        return notifier_from_errno(-EOPNOTSUPP);
-               ret = cfg80211_can_add_interface(rdev, wdev->iftype);
-               if (ret)
-                       return notifier_from_errno(ret);
+               if (rfkill_blocked(rdev->rfkill))
+                       return notifier_from_errno(-ERFKILL);
                break;
+       default:
+               return NOTIFY_DONE;
        }
 
-       return NOTIFY_DONE;
+       return NOTIFY_OK;
 }
 
 static struct notifier_block cfg80211_netdev_notifier = {
index 5b1fdcadd46985548f4a04f4f64ddaccbc935661..e9afbf10e756bd3a1ec81a6b6b7aa229d0688be7 100644 (file)
@@ -80,13 +80,17 @@ struct cfg80211_registered_device {
 
        struct cfg80211_coalesce *coalesce;
 
+       spinlock_t destroy_list_lock;
+       struct list_head destroy_list;
+       struct work_struct destroy_work;
+
        /* must be last because of the way we do wiphy_priv(),
         * and it should at least be aligned to NETDEV_ALIGN */
        struct wiphy wiphy __aligned(NETDEV_ALIGN);
 };
 
 static inline
-struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy)
+struct cfg80211_registered_device *wiphy_to_rdev(struct wiphy *wiphy)
 {
        BUG_ON(!wiphy);
        return container_of(wiphy, struct cfg80211_registered_device, wiphy);
@@ -181,6 +185,7 @@ enum cfg80211_event_type {
        EVENT_ROAMED,
        EVENT_DISCONNECTED,
        EVENT_IBSS_JOINED,
+       EVENT_STOPPED,
 };
 
 struct cfg80211_event {
@@ -232,6 +237,13 @@ struct cfg80211_beacon_registration {
        u32 nlportid;
 };
 
+struct cfg80211_iface_destroy {
+       struct list_head list;
+       u32 nlportid;
+};
+
+void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
+
 /* free object */
 void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
 
@@ -240,8 +252,8 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
 
 void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
 
-void cfg80211_bss_expire(struct cfg80211_registered_device *dev);
-void cfg80211_bss_age(struct cfg80211_registered_device *dev,
+void cfg80211_bss_expire(struct cfg80211_registered_device *rdev);
+void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
                       unsigned long age_secs);
 
 /* IBSS */
@@ -270,6 +282,8 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
                       struct net_device *dev,
                       struct mesh_setup *setup,
                       const struct mesh_config *conf);
+int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
+                         struct net_device *dev);
 int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
                        struct net_device *dev);
 int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
@@ -277,6 +291,8 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
                              struct cfg80211_chan_def *chandef);
 
 /* AP */
+int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+                      struct net_device *dev, bool notify);
 int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
                     struct net_device *dev, bool notify);
 
@@ -401,35 +417,6 @@ unsigned int
 cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
                              const struct cfg80211_chan_def *chandef);
 
-static inline int
-cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
-                             struct wireless_dev *wdev,
-                             enum nl80211_iftype iftype)
-{
-       return cfg80211_can_use_iftype_chan(rdev, wdev, iftype, NULL,
-                                           CHAN_MODE_UNDEFINED, 0);
-}
-
-static inline int
-cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
-                          enum nl80211_iftype iftype)
-{
-       if (rfkill_blocked(rdev->rfkill))
-               return -ERFKILL;
-
-       return cfg80211_can_change_interface(rdev, NULL, iftype);
-}
-
-static inline int
-cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
-                     struct wireless_dev *wdev,
-                     struct ieee80211_channel *chan,
-                     enum cfg80211_chan_mode chanmode)
-{
-       return cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                           chan, chanmode, 0);
-}
-
 static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
 {
        unsigned long end = jiffies;
@@ -459,6 +446,8 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
                               enum nl80211_iftype iftype, int num);
 
+void __cfg80211_leave(struct cfg80211_registered_device *rdev,
+                     struct wireless_dev *wdev);
 void cfg80211_leave(struct cfg80211_registered_device *rdev,
                    struct wireless_dev *wdev);
 
index e37862f1b1270d8e2056fb6289f01bf69b583720..d4860bfc020e5a1c43758e8cca6e9508908344be 100644 (file)
@@ -43,7 +43,7 @@ static void cfg80211_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *rp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        memset(rp, 0, sizeof(*rp));
 
@@ -56,7 +56,7 @@ static int cfg80211_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *rp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
                return -EINVAL;
@@ -70,7 +70,7 @@ static int cfg80211_set_ringparam(struct net_device *dev,
 static int cfg80211_get_sset_count(struct net_device *dev, int sset)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        if (rdev->ops->get_et_sset_count)
                return rdev_get_et_sset_count(rdev, dev, sset);
        return -EOPNOTSUPP;
@@ -80,7 +80,7 @@ static void cfg80211_get_stats(struct net_device *dev,
                               struct ethtool_stats *stats, u64 *data)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        if (rdev->ops->get_et_stats)
                rdev_get_et_stats(rdev, dev, stats, data);
 }
@@ -88,7 +88,7 @@ static void cfg80211_get_stats(struct net_device *dev,
 static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        if (rdev->ops->get_et_strings)
                rdev_get_et_strings(rdev, dev, sset, data);
 }
index a6b5bdad039c7450f276d1e56994661e2e41952c..8f345da3ea5f4e0767ae6b1de539f18954707148 100644 (file)
@@ -45,7 +45,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
 
        cfg80211_upload_connect_keys(wdev);
 
-       nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
+       nl80211_send_ibss_bssid(wiphy_to_rdev(wdev->wiphy), dev, bssid,
                                GFP_KERNEL);
 #ifdef CONFIG_CFG80211_WEXT
        memset(&wrqu, 0, sizeof(wrqu));
@@ -58,7 +58,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
                          struct ieee80211_channel *channel, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_event *ev;
        unsigned long flags;
 
@@ -88,8 +88,6 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
                                struct cfg80211_cached_keys *connkeys)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct ieee80211_channel *check_chan;
-       u8 radar_detect_width = 0;
        int err;
 
        ASSERT_WDEV_LOCK(wdev);
@@ -126,28 +124,6 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 #ifdef CONFIG_CFG80211_WEXT
        wdev->wext.ibss.chandef = params->chandef;
 #endif
-       check_chan = params->chandef.chan;
-       if (params->userspace_handles_dfs) {
-               /* Check for radar even if the current channel is not
-                * a radar channel - it might decide to change to DFS
-                * channel later.
-                */
-               radar_detect_width = BIT(params->chandef.width);
-       }
-
-       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                          check_chan,
-                                          (params->channel_fixed &&
-                                           !radar_detect_width)
-                                          ? CHAN_MODE_SHARED
-                                          : CHAN_MODE_EXCLUSIVE,
-                                          radar_detect_width);
-
-       if (err) {
-               wdev->connect_keys = NULL;
-               return err;
-       }
-
        err = rdev_join_ibss(rdev, dev, params);
        if (err) {
                wdev->connect_keys = NULL;
@@ -180,7 +156,7 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int i;
 
        ASSERT_WDEV_LOCK(wdev);
@@ -335,7 +311,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
                               struct iw_freq *wextfreq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct ieee80211_channel *chan = NULL;
        int err, freq;
 
@@ -346,7 +322,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
        if (!rdev->ops->join_ibss)
                return -EOPNOTSUPP;
 
-       freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+       freq = cfg80211_wext_freq(wextfreq);
        if (freq < 0)
                return freq;
 
@@ -420,7 +396,7 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
                                struct iw_point *data, char *ssid)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        size_t len = data->length;
        int err;
 
@@ -444,8 +420,8 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
        if (len > 0 && ssid[len - 1] == '\0')
                len--;
 
+       memcpy(wdev->ssid, ssid, len);
        wdev->wext.ibss.ssid = wdev->ssid;
-       memcpy(wdev->wext.ibss.ssid, ssid, len);
        wdev->wext.ibss.ssid_len = len;
 
        wdev_lock(wdev);
@@ -487,7 +463,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
                             struct sockaddr *ap_addr, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u8 *bssid = ap_addr->sa_data;
        int err;
 
@@ -505,6 +481,9 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
        if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
                bssid = NULL;
 
+       if (bssid && !is_valid_ether_addr(bssid))
+               return -EINVAL;
+
        /* both automatic */
        if (!bssid && !wdev->wext.ibss.bssid)
                return 0;
index 5af5cc6b2c4c2406475a3063a69eef80cc14691f..092300b30c372ddc03115638a83bc65ce7a49997 100644 (file)
@@ -99,7 +99,6 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
                         const struct mesh_config *conf)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       u8 radar_detect_width = 0;
        int err;
 
        BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN);
@@ -175,22 +174,10 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
                                                               scan_width);
        }
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef))
+       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef,
+                                    NL80211_IFTYPE_MESH_POINT))
                return -EINVAL;
 
-       err = cfg80211_chandef_dfs_required(wdev->wiphy, &setup->chandef);
-       if (err < 0)
-               return err;
-       if (err)
-               radar_detect_width = BIT(setup->chandef.width);
-
-       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                          setup->chandef.chan,
-                                          CHAN_MODE_SHARED,
-                                          radar_detect_width);
-       if (err)
-               return err;
-
        err = rdev_join_mesh(rdev, dev, conf, setup);
        if (!err) {
                memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
@@ -236,17 +223,6 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
                if (!netif_running(wdev->netdev))
                        return -ENETDOWN;
 
-               /* cfg80211_can_use_chan() calls
-                * cfg80211_can_use_iftype_chan() with no radar
-                * detection, so if we're trying to use a radar
-                * channel here, something is wrong.
-                */
-               WARN_ON_ONCE(chandef->chan->flags & IEEE80211_CHAN_RADAR);
-               err = cfg80211_can_use_chan(rdev, wdev, chandef->chan,
-                                           CHAN_MODE_SHARED);
-               if (err)
-                       return err;
-
                err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
                                                     chandef->chan);
                if (!err)
@@ -262,8 +238,8 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
        return 0;
 }
 
-static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
-                                struct net_device *dev)
+int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
+                         struct net_device *dev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
index c52ff59a3e96d7cabb892bff220b86c580069a43..266766b8d80b61455565cc43779a6e229ed7710d 100644 (file)
@@ -23,7 +23,7 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
        u8 *ie = mgmt->u.assoc_resp.variable;
        int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
 static void cfg80211_process_auth(struct wireless_dev *wdev,
                                  const u8 *buf, size_t len)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL);
        cfg80211_sme_rx_auth(wdev, buf, len);
@@ -63,7 +63,7 @@ static void cfg80211_process_auth(struct wireless_dev *wdev,
 static void cfg80211_process_deauth(struct wireless_dev *wdev,
                                    const u8 *buf, size_t len)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
        const u8 *bssid = mgmt->bssid;
        u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
@@ -82,7 +82,7 @@ static void cfg80211_process_deauth(struct wireless_dev *wdev,
 static void cfg80211_process_disassoc(struct wireless_dev *wdev,
                                      const u8 *buf, size_t len)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
        const u8 *bssid = mgmt->bssid;
        u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -123,7 +123,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_send_auth_timeout(dev, addr);
 
@@ -136,7 +136,7 @@ void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_send_assoc_timeout(dev, bss->bssid);
 
@@ -172,7 +172,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
                                  const u8 *tsc, gfp_t gfp)
 {
        struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 #ifdef CONFIG_CFG80211_WEXT
        union iwreq_data wrqu;
        char *buf = kmalloc(128, gfp);
@@ -233,14 +233,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
        if (!req.bss)
                return -ENOENT;
 
-       err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
-                                   CHAN_MODE_SHARED);
-       if (err)
-               goto out;
-
        err = rdev_auth(rdev, dev, &req);
 
-out:
        cfg80211_put_bss(&rdev->wiphy, req.bss);
        return err;
 }
@@ -306,16 +300,10 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
        if (!req->bss)
                return -ENOENT;
 
-       err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED);
-       if (err)
-               goto out;
-
        err = rdev_assoc(rdev, dev, req);
        if (!err)
                cfg80211_hold_bss(bss_from_pub(req->bss));
-
-out:
-       if (err)
+       else
                cfg80211_put_bss(&rdev->wiphy, req->bss);
 
        return err;
@@ -414,7 +402,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
                                int match_len)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_mgmt_registration *reg, *nreg;
        int err = 0;
        u16 mgmt_type;
@@ -473,7 +461,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
 void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_mgmt_registration *reg, *tmp;
 
        spin_lock_bh(&wdev->mgmt_registrations_lock);
@@ -620,7 +608,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
                      const u8 *buf, size_t len, u32 flags, gfp_t gfp)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_mgmt_registration *reg;
        const struct ieee80211_txrx_stypes *stypes =
                &wiphy->mgmt_stypes[wdev->iftype];
@@ -739,7 +727,7 @@ void cfg80211_radar_event(struct wiphy *wiphy,
                          struct cfg80211_chan_def *chandef,
                          gfp_t gfp)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        unsigned long timeout;
 
        trace_cfg80211_radar_event(wiphy, chandef);
@@ -764,7 +752,7 @@ void cfg80211_cac_event(struct net_device *netdev,
 {
        struct wireless_dev *wdev = netdev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        unsigned long timeout;
 
        trace_cfg80211_cac_event(netdev, event);
index 052c1bf8ffaceb92d3f117231a46fca78ed30216..62bdb1adaa4d70363c6b082cb2a3ad5b273d9179 100644 (file)
@@ -168,8 +168,8 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
                netdev = __dev_get_by_index(netns, ifindex);
                if (netdev) {
                        if (netdev->ieee80211_ptr)
-                               tmp = wiphy_to_dev(
-                                               netdev->ieee80211_ptr->wiphy);
+                               tmp = wiphy_to_rdev(
+                                       netdev->ieee80211_ptr->wiphy);
                        else
                                tmp = NULL;
 
@@ -371,8 +371,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
        [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
        [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
-       [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_U16 },
-       [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_U16 },
+       [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_BINARY },
+       [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_BINARY },
        [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY },
        [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY },
        [NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG },
@@ -385,6 +385,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
        [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
        [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
+       [NL80211_ATTR_IFACE_SOCKET_OWNER] = { .type = NLA_FLAG },
+       [NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY },
 };
 
 /* policy for the key attributes */
@@ -484,7 +486,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                        err = PTR_ERR(*wdev);
                        goto out_unlock;
                }
-               *rdev = wiphy_to_dev((*wdev)->wiphy);
+               *rdev = wiphy_to_rdev((*wdev)->wiphy);
                /* 0 is the first index - add 1 to parse only once */
                cb->args[0] = (*rdev)->wiphy_idx + 1;
                cb->args[1] = (*wdev)->identifier;
@@ -497,7 +499,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                        err = -ENODEV;
                        goto out_unlock;
                }
-               *rdev = wiphy_to_dev(wiphy);
+               *rdev = wiphy_to_rdev(wiphy);
                *wdev = NULL;
 
                list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
@@ -566,6 +568,13 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
                                   struct ieee80211_channel *chan,
                                   bool large)
 {
+       /* Some channels must be completely excluded from the
+        * list to protect old user-space tools from breaking
+        */
+       if (!large && chan->flags &
+           (IEEE80211_CHAN_NO_10MHZ | IEEE80211_CHAN_NO_20MHZ))
+               return 0;
+
        if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
                        chan->center_freq))
                goto nla_put_failure;
@@ -613,6 +622,18 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
                if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
                    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
                        goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY))
+                       goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_GO_CONCURRENT) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_GO_CONCURRENT))
+                       goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_NO_20MHZ) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ))
+                       goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_NO_10MHZ) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ))
+                       goto nla_put_failure;
        }
 
        if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -950,8 +971,10 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
                                c->max_interfaces))
                        goto nla_put_failure;
                if (large &&
-                   nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
-                               c->radar_detect_widths))
+                   (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
+                               c->radar_detect_widths) ||
+                    nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
+                               c->radar_detect_regions)))
                        goto nla_put_failure;
 
                nla_nest_end(msg, nl_combi);
@@ -1006,42 +1029,42 @@ static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
 }
 
 static int nl80211_send_wowlan(struct sk_buff *msg,
-                              struct cfg80211_registered_device *dev,
+                              struct cfg80211_registered_device *rdev,
                               bool large)
 {
        struct nlattr *nl_wowlan;
 
-       if (!dev->wiphy.wowlan)
+       if (!rdev->wiphy.wowlan)
                return 0;
 
        nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
        if (!nl_wowlan)
                return -ENOBUFS;
 
-       if (((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) &&
+       if (((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
                return -ENOBUFS;
 
-       if (dev->wiphy.wowlan->n_patterns) {
+       if (rdev->wiphy.wowlan->n_patterns) {
                struct nl80211_pattern_support pat = {
-                       .max_patterns = dev->wiphy.wowlan->n_patterns,
-                       .min_pattern_len = dev->wiphy.wowlan->pattern_min_len,
-                       .max_pattern_len = dev->wiphy.wowlan->pattern_max_len,
-                       .max_pkt_offset = dev->wiphy.wowlan->max_pkt_offset,
+                       .max_patterns = rdev->wiphy.wowlan->n_patterns,
+                       .min_pattern_len = rdev->wiphy.wowlan->pattern_min_len,
+                       .max_pattern_len = rdev->wiphy.wowlan->pattern_max_len,
+                       .max_pkt_offset = rdev->wiphy.wowlan->max_pkt_offset,
                };
 
                if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
@@ -1049,7 +1072,7 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
                        return -ENOBUFS;
        }
 
-       if (large && nl80211_send_wowlan_tcp_caps(dev, msg))
+       if (large && nl80211_send_wowlan_tcp_caps(rdev, msg))
                return -ENOBUFS;
 
        nla_nest_end(msg, nl_wowlan);
@@ -1059,19 +1082,19 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
 #endif
 
 static int nl80211_send_coalesce(struct sk_buff *msg,
-                                struct cfg80211_registered_device *dev)
+                                struct cfg80211_registered_device *rdev)
 {
        struct nl80211_coalesce_rule_support rule;
 
-       if (!dev->wiphy.coalesce)
+       if (!rdev->wiphy.coalesce)
                return 0;
 
-       rule.max_rules = dev->wiphy.coalesce->n_rules;
-       rule.max_delay = dev->wiphy.coalesce->max_delay;
-       rule.pat.max_patterns = dev->wiphy.coalesce->n_patterns;
-       rule.pat.min_pattern_len = dev->wiphy.coalesce->pattern_min_len;
-       rule.pat.max_pattern_len = dev->wiphy.coalesce->pattern_max_len;
-       rule.pat.max_pkt_offset = dev->wiphy.coalesce->max_pkt_offset;
+       rule.max_rules = rdev->wiphy.coalesce->n_rules;
+       rule.max_delay = rdev->wiphy.coalesce->max_delay;
+       rule.pat.max_patterns = rdev->wiphy.coalesce->n_patterns;
+       rule.pat.min_pattern_len = rdev->wiphy.coalesce->pattern_min_len;
+       rule.pat.max_pattern_len = rdev->wiphy.coalesce->pattern_max_len;
+       rule.pat.max_pkt_offset = rdev->wiphy.coalesce->max_pkt_offset;
 
        if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule))
                return -ENOBUFS;
@@ -1202,7 +1225,7 @@ struct nl80211_dump_wiphy_state {
        bool split;
 };
 
-static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
+static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                              struct sk_buff *msg, u32 portid, u32 seq,
                              int flags, struct nl80211_dump_wiphy_state *state)
 {
@@ -1214,7 +1237,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
        struct ieee80211_channel *chan;
        int i;
        const struct ieee80211_txrx_stypes *mgmt_stypes =
-                               dev->wiphy.mgmt_stypes;
+                               rdev->wiphy.mgmt_stypes;
        u32 features;
 
        hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY);
@@ -1224,9 +1247,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
        if (WARN_ON(!state))
                return -EINVAL;
 
-       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_string(msg, NL80211_ATTR_WIPHY_NAME,
-                          wiphy_name(&dev->wiphy)) ||
+                          wiphy_name(&rdev->wiphy)) ||
            nla_put_u32(msg, NL80211_ATTR_GENERATION,
                        cfg80211_rdev_list_generation))
                goto nla_put_failure;
@@ -1234,43 +1257,43 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
        switch (state->split_start) {
        case 0:
                if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
-                              dev->wiphy.retry_short) ||
+                              rdev->wiphy.retry_short) ||
                    nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
-                              dev->wiphy.retry_long) ||
+                              rdev->wiphy.retry_long) ||
                    nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
-                               dev->wiphy.frag_threshold) ||
+                               rdev->wiphy.frag_threshold) ||
                    nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
-                               dev->wiphy.rts_threshold) ||
+                               rdev->wiphy.rts_threshold) ||
                    nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
-                              dev->wiphy.coverage_class) ||
+                              rdev->wiphy.coverage_class) ||
                    nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
-                              dev->wiphy.max_scan_ssids) ||
+                              rdev->wiphy.max_scan_ssids) ||
                    nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
-                              dev->wiphy.max_sched_scan_ssids) ||
+                              rdev->wiphy.max_sched_scan_ssids) ||
                    nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
-                               dev->wiphy.max_scan_ie_len) ||
+                               rdev->wiphy.max_scan_ie_len) ||
                    nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
-                               dev->wiphy.max_sched_scan_ie_len) ||
+                               rdev->wiphy.max_sched_scan_ie_len) ||
                    nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
-                              dev->wiphy.max_match_sets))
+                              rdev->wiphy.max_match_sets))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
                    nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
                    nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
                    nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
                    nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
                    nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
                    nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
                        goto nla_put_failure;
                state->split_start++;
@@ -1278,35 +1301,35 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        break;
        case 1:
                if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
-                           sizeof(u32) * dev->wiphy.n_cipher_suites,
-                           dev->wiphy.cipher_suites))
+                           sizeof(u32) * rdev->wiphy.n_cipher_suites,
+                           rdev->wiphy.cipher_suites))
                        goto nla_put_failure;
 
                if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
-                              dev->wiphy.max_num_pmkids))
+                              rdev->wiphy.max_num_pmkids))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
                    nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
                        goto nla_put_failure;
 
                if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
-                               dev->wiphy.available_antennas_tx) ||
+                               rdev->wiphy.available_antennas_tx) ||
                    nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
-                               dev->wiphy.available_antennas_rx))
+                               rdev->wiphy.available_antennas_rx))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
                    nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
-                               dev->wiphy.probe_resp_offload))
+                               rdev->wiphy.probe_resp_offload))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.available_antennas_tx ||
-                    dev->wiphy.available_antennas_rx) &&
-                   dev->ops->get_antenna) {
+               if ((rdev->wiphy.available_antennas_tx ||
+                    rdev->wiphy.available_antennas_rx) &&
+                   rdev->ops->get_antenna) {
                        u32 tx_ant = 0, rx_ant = 0;
                        int res;
-                       res = rdev_get_antenna(dev, &tx_ant, &rx_ant);
+                       res = rdev_get_antenna(rdev, &tx_ant, &rx_ant);
                        if (!res) {
                                if (nla_put_u32(msg,
                                                NL80211_ATTR_WIPHY_ANTENNA_TX,
@@ -1323,7 +1346,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        break;
        case 2:
                if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
-                                       dev->wiphy.interface_modes))
+                                       rdev->wiphy.interface_modes))
                                goto nla_put_failure;
                state->split_start++;
                if (state->split)
@@ -1337,7 +1360,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                     band < IEEE80211_NUM_BANDS; band++) {
                        struct ieee80211_supported_band *sband;
 
-                       sband = dev->wiphy.bands[band];
+                       sband = rdev->wiphy.bands[band];
 
                        if (!sband)
                                continue;
@@ -1414,7 +1437,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                i = 0;
 #define CMD(op, n)                                                     \
                 do {                                                   \
-                       if (dev->ops->op) {                             \
+                       if (rdev->ops->op) {                            \
                                i++;                                    \
                                if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
                                        goto nla_put_failure;           \
@@ -1438,32 +1461,32 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                CMD(set_pmksa, SET_PMKSA);
                CMD(del_pmksa, DEL_PMKSA);
                CMD(flush_pmksa, FLUSH_PMKSA);
-               if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
+               if (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
                        CMD(remain_on_channel, REMAIN_ON_CHANNEL);
                CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
                CMD(mgmt_tx, FRAME);
                CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
-               if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
+               if (rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
                                goto nla_put_failure;
                }
-               if (dev->ops->set_monitor_channel || dev->ops->start_ap ||
-                   dev->ops->join_mesh) {
+               if (rdev->ops->set_monitor_channel || rdev->ops->start_ap ||
+                   rdev->ops->join_mesh) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
                                goto nla_put_failure;
                }
                CMD(set_wds_peer, SET_WDS_PEER);
-               if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+               if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
                        CMD(tdls_mgmt, TDLS_MGMT);
                        CMD(tdls_oper, TDLS_OPER);
                }
-               if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+               if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
                        CMD(sched_scan_start, START_SCHED_SCAN);
                CMD(probe_client, PROBE_CLIENT);
                CMD(set_noack_map, SET_NOACK_MAP);
-               if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
+               if (rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
                                goto nla_put_failure;
@@ -1473,7 +1496,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if (state->split) {
                        CMD(crit_proto_start, CRIT_PROTOCOL_START);
                        CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
-                       if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
+                       if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
                                CMD(channel_switch, CHANNEL_SWITCH);
                }
                CMD(set_qos_map, SET_QOS_MAP);
@@ -1484,13 +1507,13 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 
 #undef CMD
 
-               if (dev->ops->connect || dev->ops->auth) {
+               if (rdev->ops->connect || rdev->ops->auth) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
                                goto nla_put_failure;
                }
 
-               if (dev->ops->disconnect || dev->ops->deauth) {
+               if (rdev->ops->disconnect || rdev->ops->deauth) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
                                goto nla_put_failure;
@@ -1501,14 +1524,14 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if (state->split)
                        break;
        case 5:
-               if (dev->ops->remain_on_channel &&
-                   (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
+               if (rdev->ops->remain_on_channel &&
+                   (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
                    nla_put_u32(msg,
                                NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
-                               dev->wiphy.max_remain_on_channel_duration))
+                               rdev->wiphy.max_remain_on_channel_duration))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
                    nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
                        goto nla_put_failure;
 
@@ -1519,7 +1542,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        break;
        case 6:
 #ifdef CONFIG_PM
-               if (nl80211_send_wowlan(msg, dev, state->split))
+               if (nl80211_send_wowlan(msg, rdev, state->split))
                        goto nla_put_failure;
                state->split_start++;
                if (state->split)
@@ -1529,10 +1552,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 #endif
        case 7:
                if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
-                                       dev->wiphy.software_iftypes))
+                                       rdev->wiphy.software_iftypes))
                        goto nla_put_failure;
 
-               if (nl80211_put_iface_combinations(&dev->wiphy, msg,
+               if (nl80211_put_iface_combinations(&rdev->wiphy, msg,
                                                   state->split))
                        goto nla_put_failure;
 
@@ -1540,12 +1563,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if (state->split)
                        break;
        case 8:
-               if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
                    nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
-                               dev->wiphy.ap_sme_capa))
+                               rdev->wiphy.ap_sme_capa))
                        goto nla_put_failure;
 
-               features = dev->wiphy.features;
+               features = rdev->wiphy.features;
                /*
                 * We can only add the per-channel limit information if the
                 * dump is split, otherwise it makes it too big. Therefore
@@ -1556,16 +1579,16 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features))
                        goto nla_put_failure;
 
-               if (dev->wiphy.ht_capa_mod_mask &&
+               if (rdev->wiphy.ht_capa_mod_mask &&
                    nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
-                           sizeof(*dev->wiphy.ht_capa_mod_mask),
-                           dev->wiphy.ht_capa_mod_mask))
+                           sizeof(*rdev->wiphy.ht_capa_mod_mask),
+                           rdev->wiphy.ht_capa_mod_mask))
                        goto nla_put_failure;
 
-               if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
-                   dev->wiphy.max_acl_mac_addrs &&
+               if (rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
+                   rdev->wiphy.max_acl_mac_addrs &&
                    nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX,
-                               dev->wiphy.max_acl_mac_addrs))
+                               rdev->wiphy.max_acl_mac_addrs))
                        goto nla_put_failure;
 
                /*
@@ -1581,41 +1604,41 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                state->split_start++;
                break;
        case 9:
-               if (dev->wiphy.extended_capabilities &&
+               if (rdev->wiphy.extended_capabilities &&
                    (nla_put(msg, NL80211_ATTR_EXT_CAPA,
-                            dev->wiphy.extended_capabilities_len,
-                            dev->wiphy.extended_capabilities) ||
+                            rdev->wiphy.extended_capabilities_len,
+                            rdev->wiphy.extended_capabilities) ||
                     nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
-                            dev->wiphy.extended_capabilities_len,
-                            dev->wiphy.extended_capabilities_mask)))
+                            rdev->wiphy.extended_capabilities_len,
+                            rdev->wiphy.extended_capabilities_mask)))
                        goto nla_put_failure;
 
-               if (dev->wiphy.vht_capa_mod_mask &&
+               if (rdev->wiphy.vht_capa_mod_mask &&
                    nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK,
-                           sizeof(*dev->wiphy.vht_capa_mod_mask),
-                           dev->wiphy.vht_capa_mod_mask))
+                           sizeof(*rdev->wiphy.vht_capa_mod_mask),
+                           rdev->wiphy.vht_capa_mod_mask))
                        goto nla_put_failure;
 
                state->split_start++;
                break;
        case 10:
-               if (nl80211_send_coalesce(msg, dev))
+               if (nl80211_send_coalesce(msg, rdev))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) &&
                    (nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) ||
                     nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ)))
                        goto nla_put_failure;
 
-               if (dev->wiphy.max_ap_assoc_sta &&
+               if (rdev->wiphy.max_ap_assoc_sta &&
                    nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA,
-                               dev->wiphy.max_ap_assoc_sta))
+                               rdev->wiphy.max_ap_assoc_sta))
                        goto nla_put_failure;
 
                state->split_start++;
                break;
        case 11:
-               if (dev->wiphy.n_vendor_commands) {
+               if (rdev->wiphy.n_vendor_commands) {
                        const struct nl80211_vendor_cmd_info *info;
                        struct nlattr *nested;
 
@@ -1623,15 +1646,15 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        if (!nested)
                                goto nla_put_failure;
 
-                       for (i = 0; i < dev->wiphy.n_vendor_commands; i++) {
-                               info = &dev->wiphy.vendor_commands[i].info;
+                       for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) {
+                               info = &rdev->wiphy.vendor_commands[i].info;
                                if (nla_put(msg, i + 1, sizeof(*info), info))
                                        goto nla_put_failure;
                        }
                        nla_nest_end(msg, nested);
                }
 
-               if (dev->wiphy.n_vendor_events) {
+               if (rdev->wiphy.n_vendor_events) {
                        const struct nl80211_vendor_cmd_info *info;
                        struct nlattr *nested;
 
@@ -1640,13 +1663,20 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        if (!nested)
                                goto nla_put_failure;
 
-                       for (i = 0; i < dev->wiphy.n_vendor_events; i++) {
-                               info = &dev->wiphy.vendor_events[i];
+                       for (i = 0; i < rdev->wiphy.n_vendor_events; i++) {
+                               info = &rdev->wiphy.vendor_events[i];
                                if (nla_put(msg, i + 1, sizeof(*info), info))
                                        goto nla_put_failure;
                        }
                        nla_nest_end(msg, nested);
                }
+               state->split_start++;
+               break;
+       case 12:
+               if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH &&
+                   nla_put_u8(msg, NL80211_ATTR_MAX_CSA_COUNTERS,
+                              rdev->wiphy.max_num_csa_counters))
+                       goto nla_put_failure;
 
                /* done */
                state->split_start = 0;
@@ -1684,7 +1714,7 @@ static int nl80211_dump_wiphy_parse(struct sk_buff *skb,
                if (!netdev)
                        return -ENODEV;
                if (netdev->ieee80211_ptr) {
-                       rdev = wiphy_to_dev(
+                       rdev = wiphy_to_rdev(
                                netdev->ieee80211_ptr->wiphy);
                        state->filter_wiphy = rdev->wiphy_idx;
                }
@@ -1697,7 +1727,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int idx = 0, ret;
        struct nl80211_dump_wiphy_state *state = (void *)cb->args[0];
-       struct cfg80211_registered_device *dev;
+       struct cfg80211_registered_device *rdev;
 
        rtnl_lock();
        if (!state) {
@@ -1716,17 +1746,17 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
                cb->args[0] = (long)state;
        }
 
-       list_for_each_entry(dev, &cfg80211_rdev_list, list) {
-               if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
+       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+               if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
                        continue;
                if (++idx <= state->start)
                        continue;
                if (state->filter_wiphy != -1 &&
-                   state->filter_wiphy != dev->wiphy_idx)
+                   state->filter_wiphy != rdev->wiphy_idx)
                        continue;
                /* attempt to fit multiple wiphy data chunks into the skb */
                do {
-                       ret = nl80211_send_wiphy(dev, skb,
+                       ret = nl80211_send_wiphy(rdev, skb,
                                                 NETLINK_CB(cb->skb).portid,
                                                 cb->nlh->nlmsg_seq,
                                                 NLM_F_MULTI, state);
@@ -1774,14 +1804,14 @@ static int nl80211_dump_wiphy_done(struct netlink_callback *cb)
 static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
 {
        struct sk_buff *msg;
-       struct cfg80211_registered_device *dev = info->user_ptr[0];
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct nl80211_dump_wiphy_state state = {};
 
        msg = nlmsg_new(4096, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_wiphy(dev, msg, info->snd_portid, info->snd_seq, 0,
+       if (nl80211_send_wiphy(rdev, msg, info->snd_portid, info->snd_seq, 0,
                               &state) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -1908,18 +1938,20 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 }
 
 static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
-                                struct wireless_dev *wdev,
+                                struct net_device *dev,
                                 struct genl_info *info)
 {
        struct cfg80211_chan_def chandef;
        int result;
        enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
+       struct wireless_dev *wdev = NULL;
 
-       if (wdev)
-               iftype = wdev->iftype;
-
+       if (dev)
+               wdev = dev->ieee80211_ptr;
        if (!nl80211_can_set_dev_channel(wdev))
                return -EOPNOTSUPP;
+       if (wdev)
+               iftype = wdev->iftype;
 
        result = nl80211_parse_chandef(rdev, info, &chandef);
        if (result)
@@ -1928,14 +1960,27 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
        switch (iftype) {
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
-               if (wdev->beacon_interval) {
-                       result = -EBUSY;
-                       break;
-               }
-               if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef)) {
+               if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) {
                        result = -EINVAL;
                        break;
                }
+               if (wdev->beacon_interval) {
+                       if (!dev || !rdev->ops->set_ap_chanwidth ||
+                           !(rdev->wiphy.features &
+                             NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE)) {
+                               result = -EBUSY;
+                               break;
+                       }
+
+                       /* Only allow dynamic channel width changes */
+                       if (chandef.chan != wdev->preset_chandef.chan) {
+                               result = -EBUSY;
+                               break;
+                       }
+                       result = rdev_set_ap_chanwidth(rdev, dev, &chandef);
+                       if (result)
+                               break;
+               }
                wdev->preset_chandef = chandef;
                result = 0;
                break;
@@ -1957,7 +2002,7 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *netdev = info->user_ptr[1];
 
-       return __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info);
+       return __nl80211_set_channel(rdev, netdev, info);
 }
 
 static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info)
@@ -2013,7 +2058,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 
                netdev = __dev_get_by_index(genl_info_net(info), ifindex);
                if (netdev && netdev->ieee80211_ptr)
-                       rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy);
+                       rdev = wiphy_to_rdev(netdev->ieee80211_ptr->wiphy);
                else
                        netdev = NULL;
        }
@@ -2079,9 +2124,10 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
-               result = __nl80211_set_channel(rdev,
-                               nl80211_can_set_dev_channel(wdev) ? wdev : NULL,
-                               info);
+               result = __nl80211_set_channel(
+                       rdev,
+                       nl80211_can_set_dev_channel(wdev) ? netdev : NULL,
+                       info);
                if (result)
                        return result;
        }
@@ -2229,7 +2275,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 static inline u64 wdev_id(struct wireless_dev *wdev)
 {
        return (u64)wdev->identifier |
-              ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
+              ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32);
 }
 
 static int nl80211_send_chandef(struct sk_buff *msg,
@@ -2355,7 +2401,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
 static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
 {
        struct sk_buff *msg;
-       struct cfg80211_registered_device *dev = info->user_ptr[0];
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct wireless_dev *wdev = info->user_ptr[1];
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -2363,7 +2409,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
                return -ENOMEM;
 
        if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
-                              dev, wdev) < 0) {
+                              rdev, wdev) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
        }
@@ -2514,6 +2560,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
        enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
        u32 flags;
 
+       /* to avoid failing a new interface creation due to pending removal */
+       cfg80211_destroy_ifaces(rdev);
+
        memset(&params, 0, sizeof(params));
 
        if (!info->attrs[NL80211_ATTR_IFNAME])
@@ -2563,6 +2612,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                return PTR_ERR(wdev);
        }
 
+       if (info->attrs[NL80211_ATTR_IFACE_SOCKET_OWNER])
+               wdev->owner_nlportid = info->snd_portid;
+
        switch (type) {
        case NL80211_IFTYPE_MESH_POINT:
                if (!info->attrs[NL80211_ATTR_MESH_ID])
@@ -3142,7 +3194,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_ap_settings params;
        int err;
-       u8 radar_detect_width = 0;
 
        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
            dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
@@ -3258,24 +3309,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
        } else if (!nl80211_get_ap_channel(rdev, &params))
                return -EINVAL;
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
+                                    wdev->iftype))
                return -EINVAL;
 
-       err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
-       if (err < 0)
-               return err;
-       if (err) {
-               radar_detect_width = BIT(params.chandef.width);
-               params.radar_required = true;
-       }
-
-       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                          params.chandef.chan,
-                                          CHAN_MODE_SHARED,
-                                          radar_detect_width);
-       if (err)
-               return err;
-
        if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
                params.acl = parse_acl_data(&rdev->wiphy, info);
                if (IS_ERR(params.acl))
@@ -3613,6 +3650,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
            nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
                        sinfo->tx_failed))
                goto nla_put_failure;
+       if ((sinfo->filled & STATION_INFO_EXPECTED_THROUGHPUT) &&
+           nla_put_u32(msg, NL80211_STA_INFO_EXPECTED_THROUGHPUT,
+                       sinfo->expected_throughput))
+               goto nla_put_failure;
        if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
            nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
                        sinfo->beacon_loss_count))
@@ -3675,13 +3716,13 @@ static int nl80211_dump_station(struct sk_buff *skb,
                                struct netlink_callback *cb)
 {
        struct station_info sinfo;
-       struct cfg80211_registered_device *dev;
+       struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
        u8 mac_addr[ETH_ALEN];
        int sta_idx = cb->args[2];
        int err;
 
-       err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+       err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
                return err;
 
@@ -3690,14 +3731,14 @@ static int nl80211_dump_station(struct sk_buff *skb,
                goto out_err;
        }
 
-       if (!dev->ops->dump_station) {
+       if (!rdev->ops->dump_station) {
                err = -EOPNOTSUPP;
                goto out_err;
        }
 
        while (1) {
                memset(&sinfo, 0, sizeof(sinfo));
-               err = rdev_dump_station(dev, wdev->netdev, sta_idx,
+               err = rdev_dump_station(rdev, wdev->netdev, sta_idx,
                                        mac_addr, &sinfo);
                if (err == -ENOENT)
                        break;
@@ -3707,7 +3748,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
                if (nl80211_send_station(skb,
                                NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                               dev, wdev->netdev, mac_addr,
+                               rdev, wdev->netdev, mac_addr,
                                &sinfo) < 0)
                        goto out;
 
@@ -3719,7 +3760,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
        cb->args[2] = sta_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(dev);
+       nl80211_finish_wdev_dump(rdev);
 
        return err;
 }
@@ -4380,18 +4421,18 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
                              struct netlink_callback *cb)
 {
        struct mpath_info pinfo;
-       struct cfg80211_registered_device *dev;
+       struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
        u8 dst[ETH_ALEN];
        u8 next_hop[ETH_ALEN];
        int path_idx = cb->args[2];
        int err;
 
-       err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+       err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
                return err;
 
-       if (!dev->ops->dump_mpath) {
+       if (!rdev->ops->dump_mpath) {
                err = -EOPNOTSUPP;
                goto out_err;
        }
@@ -4402,7 +4443,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
        }
 
        while (1) {
-               err = rdev_dump_mpath(dev, wdev->netdev, path_idx, dst,
+               err = rdev_dump_mpath(rdev, wdev->netdev, path_idx, dst,
                                      next_hop, &pinfo);
                if (err == -ENOENT)
                        break;
@@ -4423,7 +4464,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
        cb->args[2] = path_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(dev);
+       nl80211_finish_wdev_dump(rdev);
        return err;
 }
 
@@ -4663,7 +4704,6 @@ static int parse_reg_rule(struct nlattr *tb[],
 
 static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
 {
-       int r;
        char *data = NULL;
        enum nl80211_user_reg_hint_type user_reg_hint_type;
 
@@ -4676,11 +4716,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
        if (unlikely(!rcu_access_pointer(cfg80211_regdomain)))
                return -EINPROGRESS;
 
-       if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
-               return -EINVAL;
-
-       data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
-
        if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE])
                user_reg_hint_type =
                  nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]);
@@ -4690,14 +4725,16 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
        switch (user_reg_hint_type) {
        case NL80211_USER_REG_HINT_USER:
        case NL80211_USER_REG_HINT_CELL_BASE:
-               break;
+               if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
+                       return -EINVAL;
+
+               data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
+               return regulatory_hint_user(data, user_reg_hint_type);
+       case NL80211_USER_REG_HINT_INDOOR:
+               return regulatory_hint_indoor_user();
        default:
                return -EINVAL;
        }
-
-       r = regulatory_hint_user(data, user_reg_hint_type);
-
-       return r;
 }
 
 static int nl80211_get_mesh_config(struct sk_buff *skb,
@@ -5796,7 +5833,8 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
        if (wdev->cac_started)
                return -EBUSY;
 
-       err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef);
+       err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef,
+                                           wdev->iftype);
        if (err < 0)
                return err;
 
@@ -5809,12 +5847,6 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
        if (!rdev->ops->start_radar_detection)
                return -EOPNOTSUPP;
 
-       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                          chandef.chan, CHAN_MODE_SHARED,
-                                          BIT(chandef.width));
-       if (err)
-               return err;
-
        cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef);
        if (WARN_ON(!cac_time_ms))
                cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
@@ -5843,6 +5875,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        u8 radar_detect_width = 0;
        int err;
        bool need_new_beacon = false;
+       int len, i;
 
        if (!rdev->ops->channel_switch ||
            !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
@@ -5901,26 +5934,55 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON])
                return -EINVAL;
 
-       params.counter_offset_beacon =
-               nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
-       if (params.counter_offset_beacon >= params.beacon_csa.tail_len)
+       len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+       if (!len || (len % sizeof(u16)))
                return -EINVAL;
 
-       /* sanity check - counters should be the same */
-       if (params.beacon_csa.tail[params.counter_offset_beacon] !=
-           params.count)
+       params.n_counter_offsets_beacon = len / sizeof(u16);
+       if (rdev->wiphy.max_num_csa_counters &&
+           (params.n_counter_offsets_beacon >
+            rdev->wiphy.max_num_csa_counters))
                return -EINVAL;
 
+       params.counter_offsets_beacon =
+               nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+
+       /* sanity checks - counters should fit and be the same */
+       for (i = 0; i < params.n_counter_offsets_beacon; i++) {
+               u16 offset = params.counter_offsets_beacon[i];
+
+               if (offset >= params.beacon_csa.tail_len)
+                       return -EINVAL;
+
+               if (params.beacon_csa.tail[offset] != params.count)
+                       return -EINVAL;
+       }
+
        if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) {
-               params.counter_offset_presp =
-                       nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
-               if (params.counter_offset_presp >=
-                   params.beacon_csa.probe_resp_len)
+               len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+               if (!len || (len % sizeof(u16)))
                        return -EINVAL;
 
-               if (params.beacon_csa.probe_resp[params.counter_offset_presp] !=
-                   params.count)
+               params.n_counter_offsets_presp = len / sizeof(u16);
+               if (rdev->wiphy.max_num_csa_counters &&
+                   (params.n_counter_offsets_presp >
+                    rdev->wiphy.max_num_csa_counters))
                        return -EINVAL;
+
+               params.counter_offsets_presp =
+                       nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+
+               /* sanity checks - counters should fit and be the same */
+               for (i = 0; i < params.n_counter_offsets_presp; i++) {
+                       u16 offset = params.counter_offsets_presp[i];
+
+                       if (offset >= params.beacon_csa.probe_resp_len)
+                               return -EINVAL;
+
+                       if (params.beacon_csa.probe_resp[offset] !=
+                           params.count)
+                               return -EINVAL;
+               }
        }
 
 skip_beacons:
@@ -5928,27 +5990,25 @@ skip_beacons:
        if (err)
                return err;
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
+                                    wdev->iftype))
                return -EINVAL;
 
-       switch (dev->ieee80211_ptr->iftype) {
-       case NL80211_IFTYPE_AP:
-       case NL80211_IFTYPE_P2P_GO:
-       case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_MESH_POINT:
-               err = cfg80211_chandef_dfs_required(wdev->wiphy,
-                                                   &params.chandef);
-               if (err < 0)
-                       return err;
-               if (err) {
-                       radar_detect_width = BIT(params.chandef.width);
-                       params.radar_required = true;
-               }
-               break;
-       default:
-               break;
+       err = cfg80211_chandef_dfs_required(wdev->wiphy,
+                                           &params.chandef,
+                                           wdev->iftype);
+       if (err < 0)
+               return err;
+
+       if (err > 0) {
+               radar_detect_width = BIT(params.chandef.width);
+               params.radar_required = true;
        }
 
+       /* TODO: I left this here for now.  With channel switch, the
+        * verification is a bit more complicated, because we only do
+        * it later when the channel switch really happens.
+        */
        err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
                                           params.chandef.chan,
                                           CHAN_MODE_SHARED,
@@ -6175,12 +6235,12 @@ static int nl80211_dump_survey(struct sk_buff *skb,
                        struct netlink_callback *cb)
 {
        struct survey_info survey;
-       struct cfg80211_registered_device *dev;
+       struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
        int survey_idx = cb->args[2];
        int res;
 
-       res = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+       res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (res)
                return res;
 
@@ -6189,7 +6249,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
                goto out_err;
        }
 
-       if (!dev->ops->dump_survey) {
+       if (!rdev->ops->dump_survey) {
                res = -EOPNOTSUPP;
                goto out_err;
        }
@@ -6197,7 +6257,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
        while (1) {
                struct ieee80211_channel *chan;
 
-               res = rdev_dump_survey(dev, wdev->netdev, survey_idx, &survey);
+               res = rdev_dump_survey(rdev, wdev->netdev, survey_idx, &survey);
                if (res == -ENOENT)
                        break;
                if (res)
@@ -6209,7 +6269,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
                        goto out;
                }
 
-               chan = ieee80211_get_channel(&dev->wiphy,
+               chan = ieee80211_get_channel(&rdev->wiphy,
                                             survey.channel->center_freq);
                if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
                        survey_idx++;
@@ -6228,7 +6288,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
        cb->args[2] = survey_idx;
        res = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(dev);
+       nl80211_finish_wdev_dump(rdev);
        return res;
 }
 
@@ -6704,7 +6764,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
        if (err)
                return err;
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef))
+       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef,
+                                    NL80211_IFTYPE_ADHOC))
                return -EINVAL;
 
        switch (ibss.chandef.width) {
@@ -6879,7 +6940,7 @@ struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
                                           int vendor_event_idx,
                                           int approxlen, gfp_t gfp)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        const struct nl80211_vendor_cmd_info *info;
 
        switch (cmd) {
@@ -7767,6 +7828,27 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
        if (!chandef.chan && params.offchan)
                return -EINVAL;
 
+       params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
+       params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
+
+       if (info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]) {
+               int len = nla_len(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
+               int i;
+
+               if (len % sizeof(u16))
+                       return -EINVAL;
+
+               params.n_csa_offsets = len / sizeof(u16);
+               params.csa_offsets =
+                       nla_data(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
+
+               /* check that all the offsets fit the frame */
+               for (i = 0; i < params.n_csa_offsets; i++) {
+                       if (params.csa_offsets[i] >= params.len)
+                               return -EINVAL;
+               }
+       }
+
        if (!params.dont_wait_for_ack) {
                msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
                if (!msg)
@@ -7780,8 +7862,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
-       params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
-       params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
        params.chan = chandef.chan;
        err = cfg80211_mlme_mgmt_tx(rdev, wdev, &params, &cookie);
        if (err)
@@ -8478,6 +8558,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 
                nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
                                    rem) {
+                       u8 *mask_pat;
+
                        nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
                                  nla_len(pat), NULL);
                        err = -EINVAL;
@@ -8501,19 +8583,18 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                                goto error;
                        new_triggers.patterns[i].pkt_offset = pkt_offset;
 
-                       new_triggers.patterns[i].mask =
-                               kmalloc(mask_len + pat_len, GFP_KERNEL);
-                       if (!new_triggers.patterns[i].mask) {
+                       mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL);
+                       if (!mask_pat) {
                                err = -ENOMEM;
                                goto error;
                        }
-                       new_triggers.patterns[i].pattern =
-                               new_triggers.patterns[i].mask + mask_len;
-                       memcpy(new_triggers.patterns[i].mask,
-                              nla_data(pat_tb[NL80211_PKTPAT_MASK]),
+                       new_triggers.patterns[i].mask = mask_pat;
+                       memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]),
                               mask_len);
+                       mask_pat += mask_len;
+                       new_triggers.patterns[i].pattern = mask_pat;
                        new_triggers.patterns[i].pattern_len = pat_len;
-                       memcpy(new_triggers.patterns[i].pattern,
+                       memcpy(mask_pat,
                               nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
                               pat_len);
                        i++;
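
The WoWLAN pattern hunk keeps the existing single-allocation layout (mask bytes followed immediately by pattern bytes) but walks the buffer through a local mask_pat cursor instead of re-deriving the pattern pointer from the stored struct. A userspace-flavoured sketch of the same layout, with stand-in names:

    #include <stdlib.h>
    #include <string.h>

    struct pkt_pattern {            /* stand-in for the cfg80211 pattern struct */
            const unsigned char *mask;
            const unsigned char *pattern;
            size_t pattern_len;
    };

    /* One allocation holds both parts: the mask occupies the first
     * mask_len bytes and the pattern starts right after it. */
    static int fill_pattern(struct pkt_pattern *p,
                            const unsigned char *mask, size_t mask_len,
                            const unsigned char *pat, size_t pat_len)
    {
            unsigned char *buf = malloc(mask_len + pat_len);

            if (!buf)
                    return -1;

            memcpy(buf, mask, mask_len);
            p->mask = buf;

            buf += mask_len;
            memcpy(buf, pat, pat_len);
            p->pattern = buf;
            p->pattern_len = pat_len;

            return 0;
    }
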
@@ -8705,6 +8786,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
 
        nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
                            rem) {
+               u8 *mask_pat;
+
                nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
                          nla_len(pat), NULL);
                if (!pat_tb[NL80211_PKTPAT_MASK] ||
@@ -8726,17 +8809,19 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
                        return -EINVAL;
                new_rule->patterns[i].pkt_offset = pkt_offset;
 
-               new_rule->patterns[i].mask =
-                       kmalloc(mask_len + pat_len, GFP_KERNEL);
-               if (!new_rule->patterns[i].mask)
+               mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL);
+               if (!mask_pat)
                        return -ENOMEM;
-               new_rule->patterns[i].pattern =
-                       new_rule->patterns[i].mask + mask_len;
-               memcpy(new_rule->patterns[i].mask,
-                      nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len);
+
+               new_rule->patterns[i].mask = mask_pat;
+               memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]),
+                      mask_len);
+
+               mask_pat += mask_len;
+               new_rule->patterns[i].pattern = mask_pat;
                new_rule->patterns[i].pattern_len = pat_len;
-               memcpy(new_rule->patterns[i].pattern,
-                      nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len);
+               memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
+                      pat_len);
                i++;
        }
 
@@ -8981,9 +9066,8 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
        if (wdev->p2p_started)
                return 0;
 
-       err = cfg80211_can_add_interface(rdev, wdev->iftype);
-       if (err)
-               return err;
+       if (rfkill_blocked(rdev->rfkill))
+               return -ERFKILL;
 
        err = rdev_start_p2p_device(rdev, wdev);
        if (err)
@@ -9192,7 +9276,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
                                           enum nl80211_attrs attr,
                                           int approxlen)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        if (WARN_ON(!rdev->cur_cmd_info))
                return NULL;
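
The wiphy_to_dev() to wiphy_to_rdev() conversions that repeat through the rest of this diff are a pure rename of the accessor that maps a public struct wiphy back to its enclosing cfg80211_registered_device. The definition itself is not part of these hunks, but it presumably remains a container_of() wrapper along these lines:

    static inline struct cfg80211_registered_device *
    wiphy_to_rdev(struct wiphy *wiphy)
    {
            return container_of(wiphy, struct cfg80211_registered_device, wiphy);
    }
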
@@ -9316,7 +9400,7 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
                }
 
                dev = wdev->netdev;
-               rdev = wiphy_to_dev(wdev->wiphy);
+               rdev = wiphy_to_rdev(wdev->wiphy);
 
                if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
                        if (!dev) {
@@ -10345,7 +10429,7 @@ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        const struct ieee80211_mgmt *mgmt = (void *)buf;
        u32 cmd;
 
@@ -10567,7 +10651,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
                                        const u8* ie, u8 ie_len, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -10747,7 +10831,7 @@ void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
                               unsigned int duration, gfp_t gfp)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
        nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
@@ -10761,7 +10845,7 @@ void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
                                        gfp_t gfp)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
        nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
@@ -10773,7 +10857,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
                      struct station_info *sinfo, gfp_t gfp)
 {
        struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
 
        trace_cfg80211_new_sta(dev, mac_addr, sinfo);
@@ -10796,7 +10880,7 @@ EXPORT_SYMBOL(cfg80211_new_sta);
 void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
 {
        struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -10833,7 +10917,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
                          gfp_t gfp)
 {
        struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -10868,7 +10952,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
                                       const u8 *addr, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
        u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
@@ -10988,7 +11072,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
                             const u8 *buf, size_t len, bool ack, gfp_t gfp)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct net_device *netdev = wdev->netdev;
        struct sk_buff *msg;
        void *hdr;
@@ -11032,7 +11116,7 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        struct nlattr *pinfoattr;
        void *hdr;
@@ -11124,7 +11208,7 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_gtk_rekey_notify(dev, bssid);
        nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
@@ -11182,7 +11266,7 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
        nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
@@ -11229,7 +11313,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        ASSERT_WDEV_LOCK(wdev);
 
@@ -11253,7 +11337,7 @@ void cfg80211_cqm_txe_notify(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        struct nlattr *pinfoattr;
        void *hdr;
@@ -11353,7 +11437,7 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        struct nlattr *pinfoattr;
        void *hdr;
@@ -11400,7 +11484,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
                           u64 cookie, bool acked, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -11440,7 +11524,7 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
                                 const u8 *frame, size_t len,
                                 int freq, int sig_dbm)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
        struct cfg80211_beacon_registration *reg;
@@ -11487,7 +11571,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
                                   struct cfg80211_wowlan_wakeup *wakeup,
                                   gfp_t gfp)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
        int size = 200;
@@ -11597,7 +11681,7 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
                                u16 reason_code, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -11649,9 +11733,15 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
        rcu_read_lock();
 
        list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
-               list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
+               bool schedule_destroy_work = false;
+
+               list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
                        cfg80211_mlme_unregister_socket(wdev, notify->portid);
 
+                       if (wdev->owner_nlportid == notify->portid)
+                               schedule_destroy_work = true;
+               }
+
                spin_lock_bh(&rdev->beacon_registrations_lock);
                list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations,
                                         list) {
@@ -11662,11 +11752,24 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
                        }
                }
                spin_unlock_bh(&rdev->beacon_registrations_lock);
+
+               if (schedule_destroy_work) {
+                       struct cfg80211_iface_destroy *destroy;
+
+                       destroy = kzalloc(sizeof(*destroy), GFP_ATOMIC);
+                       if (destroy) {
+                               destroy->nlportid = notify->portid;
+                               spin_lock(&rdev->destroy_list_lock);
+                               list_add(&destroy->list, &rdev->destroy_list);
+                               spin_unlock(&rdev->destroy_list_lock);
+                               schedule_work(&rdev->destroy_work);
+                       }
+               }
        }
 
        rcu_read_unlock();
 
-       return NOTIFY_DONE;
+       return NOTIFY_OK;
 }
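
The netlink-notifier hunk cannot tear interfaces down directly: it runs under rcu_read_lock(), so it only records the dying portid on rdev->destroy_list and schedules rdev->destroy_work. A hedged sketch of the consumer such a producer implies (the real worker lives elsewhere in cfg80211 and may differ in detail):

    /* Drain the destroy_list under its spinlock, then do the heavy
     * interface teardown outside atomic context.  Field names follow the
     * hunk above; the teardown step is only indicated by a comment. */
    static void destroy_work_sketch(struct cfg80211_registered_device *rdev)
    {
            struct cfg80211_iface_destroy *item, *tmp;
            LIST_HEAD(todo);

            spin_lock(&rdev->destroy_list_lock);
            list_splice_init(&rdev->destroy_list, &todo);
            spin_unlock(&rdev->destroy_list_lock);

            list_for_each_entry_safe(item, tmp, &todo, list) {
                    /* ... remove every wdev whose owner_nlportid equals
                     * item->nlportid ... */
                    list_del(&item->list);
                    kfree(item);
            }
    }
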
 
 static struct notifier_block nl80211_netlink_notifier = {
@@ -11677,7 +11780,7 @@ void cfg80211_ft_event(struct net_device *netdev,
                       struct cfg80211_ft_event_params *ft_event)
 {
        struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -11724,7 +11827,7 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
        void *hdr;
        u32 nlportid;
 
-       rdev = wiphy_to_dev(wdev->wiphy);
+       rdev = wiphy_to_rdev(wdev->wiphy);
        if (!rdev->crit_proto_nlportid)
                return;
 
@@ -11759,7 +11862,7 @@ EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
 void nl80211_send_ap_stopped(struct wireless_dev *wdev)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
 
index 74d97d33c938e8250ef300c2c3fb82d2a39b63b6..d95bbe34813833d04ed618fcd6ca85700718216b 100644 (file)
@@ -199,7 +199,7 @@ static inline int rdev_change_station(struct cfg80211_registered_device *rdev,
 }
 
 static inline int rdev_get_station(struct cfg80211_registered_device *rdev,
-                                  struct net_device *dev, u8 *mac,
+                                  struct net_device *dev, const u8 *mac,
                                   struct station_info *sinfo)
 {
        int ret;
@@ -950,4 +950,17 @@ static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev,
        return ret;
 }
 
+static inline int
+rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev,
+                     struct net_device *dev, struct cfg80211_chan_def *chandef)
+{
+       int ret;
+
+       trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, chandef);
+       ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, chandef);
+       trace_rdev_return_int(&rdev->wiphy, ret);
+
+       return ret;
+}
+
 #endif /* __CFG80211_RDEV_OPS */
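
rdev_set_ap_chanwidth() follows the usual rdev-ops convention of tracing the call and its return value, and like the other wrappers it assumes the op is actually implemented, so a caller is expected to test rdev->ops->set_ap_chanwidth first. A hedged caller-side sketch (not the actual nl80211 code):

    static int set_ap_chanwidth_checked(struct cfg80211_registered_device *rdev,
                                        struct net_device *dev,
                                        struct cfg80211_chan_def *chandef)
    {
            /* refuse the request when the driver does not provide the op */
            if (!rdev->ops->set_ap_chanwidth)
                    return -EOPNOTSUPP;

            return rdev_set_ap_chanwidth(rdev, dev, chandef);
    }
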
index f59aaac586f8cf10905135324c3913646910a662..558b0e3a02d8284c49de58d14833c13b444db5a2 100644 (file)
 #define REG_DBG_PRINT(args...)
 #endif
 
+/**
+ * enum reg_request_treatment - regulatory request treatment
+ *
+ * @REG_REQ_OK: continue processing the regulatory request
+ * @REG_REQ_IGNORE: ignore the regulatory request
+ * @REG_REQ_INTERSECT: the regulatory domain resulting from this request should
+ *     be intersected with the current one.
+ * @REG_REQ_ALREADY_SET: the regulatory request will not change the current
+ *     regulatory settings, and no further processing is required.
+ * @REG_REQ_USER_HINT_HANDLED: a non-alpha2 user hint was handled and no
+ *     further processing is required, i.e., no need to update last_request
+ *     etc. This should be used for user hints that do not provide an alpha2
+ *     but some other type of regulatory hint, e.g., indoor operation.
+ */
 enum reg_request_treatment {
        REG_REQ_OK,
        REG_REQ_IGNORE,
        REG_REQ_INTERSECT,
        REG_REQ_ALREADY_SET,
+       REG_REQ_USER_HINT_HANDLED,
 };
 
 static struct regulatory_request core_request_world = {
@@ -106,6 +121,14 @@ const struct ieee80211_regdomain __rcu *cfg80211_regdomain;
  */
 static int reg_num_devs_support_basehint;
 
+/*
+ * State variable indicating whether the platform to which the devices
+ * are attached is operating in an indoor environment. The state is
+ * relevant for all registered devices.
+ * (protected by RTNL)
+ */
+static bool reg_is_indoor;
+
 static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 {
        return rtnl_dereference(cfg80211_regdomain);
@@ -240,8 +263,16 @@ static char user_alpha2[2];
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-static void reg_free_request(struct regulatory_request *lr)
+static void reg_free_request(struct regulatory_request *request)
 {
+       if (request != get_last_request())
+               kfree(request);
+}
+
+static void reg_free_last_request(void)
+{
+       struct regulatory_request *lr = get_last_request();
+
        if (lr != &core_request_world && lr)
                kfree_rcu(lr, rcu_head);
 }
@@ -254,7 +285,7 @@ static void reg_update_last_request(struct regulatory_request *request)
        if (lr == request)
                return;
 
-       reg_free_request(lr);
+       reg_free_last_request();
        rcu_assign_pointer(last_request, request);
 }
 
@@ -873,6 +904,8 @@ static u32 map_regdom_flags(u32 rd_flags)
                channel_flags |= IEEE80211_CHAN_RADAR;
        if (rd_flags & NL80211_RRF_NO_OFDM)
                channel_flags |= IEEE80211_CHAN_NO_OFDM;
+       if (rd_flags & NL80211_RRF_NO_OUTDOOR)
+               channel_flags |= IEEE80211_CHAN_INDOOR_ONLY;
        return channel_flags;
 }
 
@@ -902,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
                if (!band_rule_found)
                        band_rule_found = freq_in_rule_band(fr, center_freq);
 
-               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
+               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5));
 
                if (band_rule_found && bw_fits)
                        return rr;
@@ -986,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
 }
 #endif
 
-/*
- * Note that right now we assume the desired channel bandwidth
- * is always 20 MHz for each individual channel (HT40 uses 20 MHz
- * per channel, the primary and the extension channel).
+/* Find an ieee80211_reg_rule such that a 5 MHz channel with frequency
+ * chan->center_freq fits there.
+ * If there is no such reg_rule, disable the channel; otherwise set the
+ * flags corresponding to the bandwidths allowed in the particular reg_rule.
  */
 static void handle_channel(struct wiphy *wiphy,
                           enum nl80211_reg_initiator initiator,
@@ -1050,8 +1083,12 @@ static void handle_channel(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+               bw_flags = IEEE80211_CHAN_NO_10MHZ;
+       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags = IEEE80211_CHAN_NO_HT40;
+               bw_flags |= IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1071,6 +1108,13 @@ static void handle_channel(struct wiphy *wiphy,
                        (int) MBI_TO_DBI(power_rule->max_antenna_gain);
                chan->max_reg_power = chan->max_power = chan->orig_mpwr =
                        (int) MBM_TO_DBM(power_rule->max_eirp);
+
+               if (chan->flags & IEEE80211_CHAN_RADAR) {
+                       chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+                       if (reg_rule->dfs_cac_ms)
+                               chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
+               }
+
                return;
        }
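
handle_channel() now cascades the bandwidth restrictions instead of starting at HT40: a rule limited to 10 MHz yields NO_20MHZ | NO_HT40 | NO_80MHZ | NO_160MHZ, a 20 MHz rule yields NO_HT40 | NO_80MHZ | NO_160MHZ, and so on. The same mapping, restated as a hypothetical helper:

    /* Translate a regulatory rule's maximum bandwidth (in kHz) into the
     * corresponding IEEE80211_CHAN_NO_* flags, mirroring the hunk above. */
    static u32 bw_to_chan_flags(u32 max_bandwidth_khz)
    {
            u32 bw_flags = 0;

            if (max_bandwidth_khz < MHZ_TO_KHZ(10))
                    bw_flags |= IEEE80211_CHAN_NO_10MHZ;
            if (max_bandwidth_khz < MHZ_TO_KHZ(20))
                    bw_flags |= IEEE80211_CHAN_NO_20MHZ;
            if (max_bandwidth_khz < MHZ_TO_KHZ(40))
                    bw_flags |= IEEE80211_CHAN_NO_HT40;
            if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                    bw_flags |= IEEE80211_CHAN_NO_80MHZ;
            if (max_bandwidth_khz < MHZ_TO_KHZ(160))
                    bw_flags |= IEEE80211_CHAN_NO_160MHZ;

            return bw_flags;
    }
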
 
@@ -1126,12 +1170,19 @@ static bool reg_request_cell_base(struct regulatory_request *request)
        return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE;
 }
 
+static bool reg_request_indoor(struct regulatory_request *request)
+{
+       if (request->initiator != NL80211_REGDOM_SET_BY_USER)
+               return false;
+       return request->user_reg_hint_type == NL80211_USER_REG_HINT_INDOOR;
+}
+
 bool reg_last_request_cell_base(void)
 {
        return reg_request_cell_base(get_last_request());
 }
 
-#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
+#ifdef CONFIG_CFG80211_REG_CELLULAR_HINTS
 /* Core specific check */
 static enum reg_request_treatment
 reg_ignore_cell_hint(struct regulatory_request *pending_request)
@@ -1471,8 +1522,12 @@ static void handle_channel_custom(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+               bw_flags = IEEE80211_CHAN_NO_10MHZ;
+       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags = IEEE80211_CHAN_NO_HT40;
+               bw_flags |= IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1568,6 +1623,11 @@ __reg_process_hint_user(struct regulatory_request *user_request)
 {
        struct regulatory_request *lr = get_last_request();
 
+       if (reg_request_indoor(user_request)) {
+               reg_is_indoor = true;
+               return REG_REQ_USER_HINT_HANDLED;
+       }
+
        if (reg_request_cell_base(user_request))
                return reg_ignore_cell_hint(user_request);
 
@@ -1615,8 +1675,9 @@ reg_process_hint_user(struct regulatory_request *user_request)
 
        treatment = __reg_process_hint_user(user_request);
        if (treatment == REG_REQ_IGNORE ||
-           treatment == REG_REQ_ALREADY_SET) {
-               kfree(user_request);
+           treatment == REG_REQ_ALREADY_SET ||
+           treatment == REG_REQ_USER_HINT_HANDLED) {
+               reg_free_request(user_request);
                return treatment;
        }
 
@@ -1676,14 +1737,15 @@ reg_process_hint_driver(struct wiphy *wiphy,
        case REG_REQ_OK:
                break;
        case REG_REQ_IGNORE:
-               kfree(driver_request);
+       case REG_REQ_USER_HINT_HANDLED:
+               reg_free_request(driver_request);
                return treatment;
        case REG_REQ_INTERSECT:
                /* fall through */
        case REG_REQ_ALREADY_SET:
                regd = reg_copy_regd(get_cfg80211_regdom());
                if (IS_ERR(regd)) {
-                       kfree(driver_request);
+                       reg_free_request(driver_request);
                        return REG_REQ_IGNORE;
                }
                rcu_assign_pointer(wiphy->regd, regd);
@@ -1775,12 +1837,13 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
        case REG_REQ_OK:
                break;
        case REG_REQ_IGNORE:
+       case REG_REQ_USER_HINT_HANDLED:
                /* fall through */
        case REG_REQ_ALREADY_SET:
-               kfree(country_ie_request);
+               reg_free_request(country_ie_request);
                return treatment;
        case REG_REQ_INTERSECT:
-               kfree(country_ie_request);
+               reg_free_request(country_ie_request);
                /*
                 * This doesn't happen yet, not sure we
                 * ever want to support it for this case.
@@ -1813,7 +1876,8 @@ static void reg_process_hint(struct regulatory_request *reg_request)
        case NL80211_REGDOM_SET_BY_USER:
                treatment = reg_process_hint_user(reg_request);
                if (treatment == REG_REQ_IGNORE ||
-                   treatment == REG_REQ_ALREADY_SET)
+                   treatment == REG_REQ_ALREADY_SET ||
+                   treatment == REG_REQ_USER_HINT_HANDLED)
                        return;
                queue_delayed_work(system_power_efficient_wq,
                                   &reg_timeout, msecs_to_jiffies(3142));
@@ -1841,7 +1905,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
        return;
 
 out_free:
-       kfree(reg_request);
+       reg_free_request(reg_request);
 }
 
 /*
@@ -1857,7 +1921,7 @@ static void reg_process_pending_hints(void)
 
        /* When last_request->processed becomes true this will be rescheduled */
        if (lr && !lr->processed) {
-               REG_DBG_PRINT("Pending regulatory request, waiting for it to be processed...\n");
+               reg_process_hint(lr);
                return;
        }
 
@@ -1967,6 +2031,22 @@ int regulatory_hint_user(const char *alpha2,
        return 0;
 }
 
+int regulatory_hint_indoor_user(void)
+{
+       struct regulatory_request *request;
+
+       request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
+       if (!request)
+               return -ENOMEM;
+
+       request->wiphy_idx = WIPHY_IDX_INVALID;
+       request->initiator = NL80211_REGDOM_SET_BY_USER;
+       request->user_reg_hint_type = NL80211_USER_REG_HINT_INDOOR;
+       queue_regulatory_request(request);
+
+       return 0;
+}
+
 /* Driver hints */
 int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
 {
@@ -2134,6 +2214,8 @@ static void restore_regulatory_settings(bool reset_user)
 
        ASSERT_RTNL();
 
+       reg_is_indoor = false;
+
        reset_regdomains(true, &world_regdom);
        restore_alpha2(alpha2, reset_user);
 
@@ -2594,7 +2676,7 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
                reg_num_devs_support_basehint--;
 
        rcu_free_regdom(get_wiphy_regdom(wiphy));
-       rcu_assign_pointer(wiphy->regd, NULL);
+       RCU_INIT_POINTER(wiphy->regd, NULL);
 
        if (lr)
                request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
@@ -2614,6 +2696,40 @@ static void reg_timeout_work(struct work_struct *work)
        rtnl_unlock();
 }
 
+/*
+ * See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii for
+ * the U-NII band definitions.
+ */
+int cfg80211_get_unii(int freq)
+{
+       /* UNII-1 */
+       if (freq >= 5150 && freq <= 5250)
+               return 0;
+
+       /* UNII-2A */
+       if (freq > 5250 && freq <= 5350)
+               return 1;
+
+       /* UNII-2B */
+       if (freq > 5350 && freq <= 5470)
+               return 2;
+
+       /* UNII-2C */
+       if (freq > 5470 && freq <= 5725)
+               return 3;
+
+       /* UNII-3 */
+       if (freq > 5725 && freq <= 5825)
+               return 4;
+
+       return -EINVAL;
+}
+
+bool regulatory_indoor_allowed(void)
+{
+       return reg_is_indoor;
+}
+
 int __init regulatory_init(void)
 {
        int err = 0;
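
cfg80211_get_unii() maps a 5 GHz frequency in MHz to a U-NII sub-band index; for example 5200 falls in U-NII-1 (return value 0) and 5500 in U-NII-2C (return value 3), while anything outside the listed ranges yields -EINVAL. A hedged usage sketch built on that return convention:

    /* Accept only frequencies in U-NII-1 or U-NII-2A (indices 0 and 1). */
    static bool freq_in_unii_1_or_2a(int freq)
    {
            int unii = cfg80211_get_unii(freq);

            return unii == 0 || unii == 1;
    }
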
index 37c180df34b72a1195aacb6d72b7b07ddc44a9ef..5e48031ccb9afc33a41c36221e3bef924625ebe8 100644 (file)
@@ -25,6 +25,7 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy);
 
 int regulatory_hint_user(const char *alpha2,
                         enum nl80211_user_reg_hint_type user_reg_hint_type);
+int regulatory_hint_indoor_user(void);
 
 void wiphy_regulatory_register(struct wiphy *wiphy);
 void wiphy_regulatory_deregister(struct wiphy *wiphy);
@@ -104,4 +105,21 @@ void regulatory_hint_country_ie(struct wiphy *wiphy,
  */
 void regulatory_hint_disconnect(void);
 
+/**
+ * cfg80211_get_unii - get the U-NII band for the frequency
+ * @freq: the frequency for which we want to get the U-NII band.
+ *
+ * Get a value specifying the U-NII band the frequency belongs to.
+ * U-NII bands are defined by the FCC in C.F.R. 47 part 15.
+ *
+ * Returns -EINVAL if freq is invalid, 0 for UNII-1, 1 for UNII-2A,
+ * 2 for UNII-2B, 3 for UNII-2C and 4 for UNII-3.
+ */
+int cfg80211_get_unii(int freq);
+
+/**
+ * regulatory_indoor_allowed - is indoor operation allowed
+ */
+bool regulatory_indoor_allowed(void);
+
 #endif  /* __NET_WIRELESS_REG_H */
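
The NL80211_RRF_NO_OUTDOOR to IEEE80211_CHAN_INDOOR_ONLY mapping earlier in this series, together with regulatory_indoor_allowed(), lets callers gate indoor-only channels on the user's indoor hint. A hedged sketch of how the two pieces might be combined (hypothetical helper, not part of this patch):

    static bool chan_usable_wrt_indoor(const struct ieee80211_channel *chan)
    {
            /* channels without the restriction are always fine */
            if (!(chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
                    return true;

            /* indoor-only channels require the user's indoor hint */
            return regulatory_indoor_allowed();
    }
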
index 7d09a712cb1f1353f13310f5c68b38e750d199a6..0798c62e60858cb81d1df3d4829414665808268b 100644 (file)
@@ -81,10 +81,10 @@ static void bss_free(struct cfg80211_internal_bss *bss)
        kfree(bss);
 }
 
-static inline void bss_ref_get(struct cfg80211_registered_device *dev,
+static inline void bss_ref_get(struct cfg80211_registered_device *rdev,
                               struct cfg80211_internal_bss *bss)
 {
-       lockdep_assert_held(&dev->bss_lock);
+       lockdep_assert_held(&rdev->bss_lock);
 
        bss->refcount++;
        if (bss->pub.hidden_beacon_bss) {
@@ -95,10 +95,10 @@ static inline void bss_ref_get(struct cfg80211_registered_device *dev,
        }
 }
 
-static inline void bss_ref_put(struct cfg80211_registered_device *dev,
+static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
                               struct cfg80211_internal_bss *bss)
 {
-       lockdep_assert_held(&dev->bss_lock);
+       lockdep_assert_held(&rdev->bss_lock);
 
        if (bss->pub.hidden_beacon_bss) {
                struct cfg80211_internal_bss *hbss;
@@ -114,10 +114,10 @@ static inline void bss_ref_put(struct cfg80211_registered_device *dev,
                bss_free(bss);
 }
 
-static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
+static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
                                  struct cfg80211_internal_bss *bss)
 {
-       lockdep_assert_held(&dev->bss_lock);
+       lockdep_assert_held(&rdev->bss_lock);
 
        if (!list_empty(&bss->hidden_list)) {
                /*
@@ -134,31 +134,31 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
        }
 
        list_del_init(&bss->list);
-       rb_erase(&bss->rbn, &dev->bss_tree);
-       bss_ref_put(dev, bss);
+       rb_erase(&bss->rbn, &rdev->bss_tree);
+       bss_ref_put(rdev, bss);
        return true;
 }
 
-static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
+static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
                                  unsigned long expire_time)
 {
        struct cfg80211_internal_bss *bss, *tmp;
        bool expired = false;
 
-       lockdep_assert_held(&dev->bss_lock);
+       lockdep_assert_held(&rdev->bss_lock);
 
-       list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
+       list_for_each_entry_safe(bss, tmp, &rdev->bss_list, list) {
                if (atomic_read(&bss->hold))
                        continue;
                if (!time_after(expire_time, bss->ts))
                        continue;
 
-               if (__cfg80211_unlink_bss(dev, bss))
+               if (__cfg80211_unlink_bss(rdev, bss))
                        expired = true;
        }
 
        if (expired)
-               dev->bss_generation++;
+               rdev->bss_generation++;
 }
 
 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
@@ -238,11 +238,11 @@ void __cfg80211_scan_done(struct work_struct *wk)
 void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
 {
        trace_cfg80211_scan_done(request, aborted);
-       WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
+       WARN_ON(request != wiphy_to_rdev(request->wiphy)->scan_req);
 
        request->aborted = aborted;
        request->notified = true;
-       queue_work(cfg80211_wq, &wiphy_to_dev(request->wiphy)->scan_done_wk);
+       queue_work(cfg80211_wq, &wiphy_to_rdev(request->wiphy)->scan_done_wk);
 }
 EXPORT_SYMBOL(cfg80211_scan_done);
 
@@ -278,20 +278,28 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
 {
        trace_cfg80211_sched_scan_results(wiphy);
        /* ignore if we're not scanning */
-       if (wiphy_to_dev(wiphy)->sched_scan_req)
+       if (wiphy_to_rdev(wiphy)->sched_scan_req)
                queue_work(cfg80211_wq,
-                          &wiphy_to_dev(wiphy)->sched_scan_results_wk);
+                          &wiphy_to_rdev(wiphy)->sched_scan_results_wk);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+       ASSERT_RTNL();
 
        trace_cfg80211_sched_scan_stopped(wiphy);
 
-       rtnl_lock();
        __cfg80211_stop_sched_scan(rdev, true);
+}
+EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
+
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+{
+       rtnl_lock();
+       cfg80211_sched_scan_stopped_rtnl(wiphy);
        rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
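
Splitting out cfg80211_sched_scan_stopped_rtnl() gives callers that already hold the RTNL a variant that only asserts the lock, while cfg80211_sched_scan_stopped() keeps the old take-the-lock behaviour. A hedged usage sketch:

    /* Pick the variant that matches the locking context the caller is in. */
    static void report_sched_scan_stopped(struct wiphy *wiphy, bool rtnl_held)
    {
            if (rtnl_held)
                    cfg80211_sched_scan_stopped_rtnl(wiphy);
            else
                    cfg80211_sched_scan_stopped(wiphy);
    }
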
@@ -322,21 +330,21 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
        return 0;
 }
 
-void cfg80211_bss_age(struct cfg80211_registered_device *dev,
+void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
                       unsigned long age_secs)
 {
        struct cfg80211_internal_bss *bss;
        unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
 
-       spin_lock_bh(&dev->bss_lock);
-       list_for_each_entry(bss, &dev->bss_list, list)
+       spin_lock_bh(&rdev->bss_lock);
+       list_for_each_entry(bss, &rdev->bss_list, list)
                bss->ts -= age_jiffies;
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
 }
 
-void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
+void cfg80211_bss_expire(struct cfg80211_registered_device *rdev)
 {
-       __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
+       __cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
 }
 
 const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
@@ -526,32 +534,34 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
                                      const u8 *ssid, size_t ssid_len,
                                      u16 capa_mask, u16 capa_val)
 {
-       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_internal_bss *bss, *res = NULL;
        unsigned long now = jiffies;
 
        trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask,
                               capa_val);
 
-       spin_lock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
 
-       list_for_each_entry(bss, &dev->bss_list, list) {
+       list_for_each_entry(bss, &rdev->bss_list, list) {
                if ((bss->pub.capability & capa_mask) != capa_val)
                        continue;
                if (channel && bss->pub.channel != channel)
                        continue;
+               if (!is_valid_ether_addr(bss->pub.bssid))
+                       continue;
                /* Don't get expired BSS structs */
                if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) &&
                    !atomic_read(&bss->hold))
                        continue;
                if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
                        res = bss;
-                       bss_ref_get(dev, res);
+                       bss_ref_get(rdev, res);
                        break;
                }
        }
 
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
        if (!res)
                return NULL;
        trace_cfg80211_return_bss(&res->pub);
@@ -559,10 +569,10 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_get_bss);
 
-static void rb_insert_bss(struct cfg80211_registered_device *dev,
+static void rb_insert_bss(struct cfg80211_registered_device *rdev,
                          struct cfg80211_internal_bss *bss)
 {
-       struct rb_node **p = &dev->bss_tree.rb_node;
+       struct rb_node **p = &rdev->bss_tree.rb_node;
        struct rb_node *parent = NULL;
        struct cfg80211_internal_bss *tbss;
        int cmp;
@@ -585,15 +595,15 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
        }
 
        rb_link_node(&bss->rbn, parent, p);
-       rb_insert_color(&bss->rbn, &dev->bss_tree);
+       rb_insert_color(&bss->rbn, &rdev->bss_tree);
 }
 
 static struct cfg80211_internal_bss *
-rb_find_bss(struct cfg80211_registered_device *dev,
+rb_find_bss(struct cfg80211_registered_device *rdev,
            struct cfg80211_internal_bss *res,
            enum bss_compare_mode mode)
 {
-       struct rb_node *n = dev->bss_tree.rb_node;
+       struct rb_node *n = rdev->bss_tree.rb_node;
        struct cfg80211_internal_bss *bss;
        int r;
 
@@ -612,7 +622,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
        return NULL;
 }
 
-static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
+static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
                                   struct cfg80211_internal_bss *new)
 {
        const struct cfg80211_bss_ies *ies;
@@ -642,7 +652,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
 
        /* This is the bad part ... */
 
-       list_for_each_entry(bss, &dev->bss_list, list) {
+       list_for_each_entry(bss, &rdev->bss_list, list) {
                if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
                        continue;
                if (bss->pub.channel != new->pub.channel)
@@ -676,7 +686,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_internal_bss *
-cfg80211_bss_update(struct cfg80211_registered_device *dev,
+cfg80211_bss_update(struct cfg80211_registered_device *rdev,
                    struct cfg80211_internal_bss *tmp,
                    bool signal_valid)
 {
@@ -687,14 +697,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
        tmp->ts = jiffies;
 
-       spin_lock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
 
        if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) {
-               spin_unlock_bh(&dev->bss_lock);
+               spin_unlock_bh(&rdev->bss_lock);
                return NULL;
        }
 
-       found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
+       found = rb_find_bss(rdev, tmp, BSS_CMP_REGULAR);
 
        if (found) {
                /* Update IEs */
@@ -781,7 +791,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                 * is allocated on the stack since it's not needed in the
                 * more common case of an update
                 */
-               new = kzalloc(sizeof(*new) + dev->wiphy.bss_priv_size,
+               new = kzalloc(sizeof(*new) + rdev->wiphy.bss_priv_size,
                              GFP_ATOMIC);
                if (!new) {
                        ies = (void *)rcu_dereference(tmp->pub.beacon_ies);
@@ -797,9 +807,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                INIT_LIST_HEAD(&new->hidden_list);
 
                if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
-                       hidden = rb_find_bss(dev, tmp, BSS_CMP_HIDE_ZLEN);
+                       hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN);
                        if (!hidden)
-                               hidden = rb_find_bss(dev, tmp,
+                               hidden = rb_find_bss(rdev, tmp,
                                                     BSS_CMP_HIDE_NUL);
                        if (hidden) {
                                new->pub.hidden_beacon_bss = &hidden->pub;
@@ -816,24 +826,24 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                         * expensive search for any probe responses that should
                         * be grouped with this beacon for updates ...
                         */
-                       if (!cfg80211_combine_bsses(dev, new)) {
+                       if (!cfg80211_combine_bsses(rdev, new)) {
                                kfree(new);
                                goto drop;
                        }
                }
 
-               list_add_tail(&new->list, &dev->bss_list);
-               rb_insert_bss(dev, new);
+               list_add_tail(&new->list, &rdev->bss_list);
+               rb_insert_bss(rdev, new);
                found = new;
        }
 
-       dev->bss_generation++;
-       bss_ref_get(dev, found);
-       spin_unlock_bh(&dev->bss_lock);
+       rdev->bss_generation++;
+       bss_ref_get(rdev, found);
+       spin_unlock_bh(&rdev->bss_lock);
 
        return found;
  drop:
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
        return NULL;
 }
 
@@ -881,6 +891,7 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
        struct cfg80211_bss_ies *ies;
        struct ieee80211_channel *channel;
        struct cfg80211_internal_bss tmp = {}, *res;
+       bool signal_valid;
 
        if (WARN_ON(!wiphy))
                return NULL;
@@ -917,8 +928,9 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
        rcu_assign_pointer(tmp.pub.beacon_ies, ies);
        rcu_assign_pointer(tmp.pub.ies, ies);
 
-       res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp,
-                                 rx_channel == channel);
+       signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+               wiphy->max_adj_channel_rssi_comp;
+       res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
        if (!res)
                return NULL;
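
cfg80211_bss_update() now takes an explicit signal_valid flag instead of comparing the channels for identity: the reported signal is trusted whenever the receive channel's centre frequency is within wiphy->max_adj_channel_rssi_comp of the advertised channel's, so a driver that leaves that field at zero effectively keeps the old exact-match behaviour. Restated as a hedged helper:

    static bool signal_is_valid(const struct wiphy *wiphy,
                                const struct ieee80211_channel *rx_channel,
                                const struct ieee80211_channel *channel)
    {
            /* trust the signal only when received on (or close enough to)
             * the channel the BSS advertises */
            return abs(rx_channel->center_freq - channel->center_freq) <=
                   wiphy->max_adj_channel_rssi_comp;
    }
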
 
@@ -942,6 +954,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
        struct cfg80211_internal_bss tmp = {}, *res;
        struct cfg80211_bss_ies *ies;
        struct ieee80211_channel *channel;
+       bool signal_valid;
        size_t ielen = len - offsetof(struct ieee80211_mgmt,
                                      u.probe_resp.variable);
 
@@ -989,8 +1002,9 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
        tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
        tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
 
-       res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp,
-                                 rx_channel == channel);
+       signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+               wiphy->max_adj_channel_rssi_comp;
+       res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
        if (!res)
                return NULL;
 
@@ -1005,7 +1019,7 @@ EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
 
 void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
-       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_internal_bss *bss;
 
        if (!pub)
@@ -1013,15 +1027,15 @@ void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 
        bss = container_of(pub, struct cfg80211_internal_bss, pub);
 
-       spin_lock_bh(&dev->bss_lock);
-       bss_ref_get(dev, bss);
-       spin_unlock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
+       bss_ref_get(rdev, bss);
+       spin_unlock_bh(&rdev->bss_lock);
 }
 EXPORT_SYMBOL(cfg80211_ref_bss);
 
 void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
-       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_internal_bss *bss;
 
        if (!pub)
@@ -1029,15 +1043,15 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 
        bss = container_of(pub, struct cfg80211_internal_bss, pub);
 
-       spin_lock_bh(&dev->bss_lock);
-       bss_ref_put(dev, bss);
-       spin_unlock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
+       bss_ref_put(rdev, bss);
+       spin_unlock_bh(&rdev->bss_lock);
 }
 EXPORT_SYMBOL(cfg80211_put_bss);
 
 void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
-       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_internal_bss *bss;
 
        if (WARN_ON(!pub))
@@ -1045,12 +1059,12 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 
        bss = container_of(pub, struct cfg80211_internal_bss, pub);
 
-       spin_lock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
        if (!list_empty(&bss->list)) {
-               if (__cfg80211_unlink_bss(dev, bss))
-                       dev->bss_generation++;
+               if (__cfg80211_unlink_bss(rdev, bss))
+                       rdev->bss_generation++;
        }
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
 }
 EXPORT_SYMBOL(cfg80211_unlink_bss);
 
@@ -1067,7 +1081,7 @@ cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
        if (!dev)
                return ERR_PTR(-ENODEV);
        if (dev->ieee80211_ptr)
-               rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
+               rdev = wiphy_to_rdev(dev->ieee80211_ptr->wiphy);
        else
                rdev = ERR_PTR(-ENODEV);
        dev_put(dev);
@@ -1147,7 +1161,11 @@ int cfg80211_wext_siwscan(struct net_device *dev,
                                int k;
                                int wiphy_freq = wiphy->bands[band]->channels[j].center_freq;
                                for (k = 0; k < wreq->num_channels; k++) {
-                                       int wext_freq = cfg80211_wext_freq(wiphy, &wreq->channel_list[k]);
+                                       struct iw_freq *freq =
+                                               &wreq->channel_list[k];
+                                       int wext_freq =
+                                               cfg80211_wext_freq(freq);
+
                                        if (wext_freq == wiphy_freq)
                                                goto wext_freq_found;
                                }
@@ -1459,7 +1477,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
 }
 
 
-static int ieee80211_scan_results(struct cfg80211_registered_device *dev,
+static int ieee80211_scan_results(struct cfg80211_registered_device *rdev,
                                  struct iw_request_info *info,
                                  char *buf, size_t len)
 {
@@ -1467,18 +1485,18 @@ static int ieee80211_scan_results(struct cfg80211_registered_device *dev,
        char *end_buf = buf + len;
        struct cfg80211_internal_bss *bss;
 
-       spin_lock_bh(&dev->bss_lock);
-       cfg80211_bss_expire(dev);
+       spin_lock_bh(&rdev->bss_lock);
+       cfg80211_bss_expire(rdev);
 
-       list_for_each_entry(bss, &dev->bss_list, list) {
+       list_for_each_entry(bss, &rdev->bss_list, list) {
                if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
-                       spin_unlock_bh(&dev->bss_lock);
+                       spin_unlock_bh(&rdev->bss_lock);
                        return -E2BIG;
                }
-               current_ev = ieee80211_bss(&dev->wiphy, info, bss,
+               current_ev = ieee80211_bss(&rdev->wiphy, info, bss,
                                           current_ev, end_buf);
        }
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
        return current_ev - buf;
 }
 
index acdcb4a81817b7c78e8e721ff632284b9b806fa9..8bbeeb302216223a260085634bf6b5627c209783 100644 (file)
@@ -59,7 +59,7 @@ static void cfg80211_sme_free(struct wireless_dev *wdev)
 
 static int cfg80211_conn_scan(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_scan_request *request;
        int n_channels, err;
 
@@ -130,7 +130,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
 
 static int cfg80211_conn_do_work(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_connect_params *params;
        struct cfg80211_assoc_request req = {};
        int err;
@@ -149,7 +149,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
        case CFG80211_CONN_SCAN_AGAIN:
                return cfg80211_conn_scan(wdev);
        case CFG80211_CONN_AUTHENTICATE_NEXT:
-               BUG_ON(!rdev->ops->auth);
+               if (WARN_ON(!rdev->ops->auth))
+                       return -EOPNOTSUPP;
                wdev->conn->state = CFG80211_CONN_AUTHENTICATING;
                return cfg80211_mlme_auth(rdev, wdev->netdev,
                                          params->channel, params->auth_type,
@@ -161,7 +162,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
        case CFG80211_CONN_AUTH_FAILED:
                return -ENOTCONN;
        case CFG80211_CONN_ASSOCIATE_NEXT:
-               BUG_ON(!rdev->ops->assoc);
+               if (WARN_ON(!rdev->ops->assoc))
+                       return -EOPNOTSUPP;
                wdev->conn->state = CFG80211_CONN_ASSOCIATING;
                if (wdev->conn->prev_bssid_valid)
                        req.prev_bssid = wdev->conn->prev_bssid;
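
Replacing BUG_ON() with WARN_ON() in the SME state machine turns a missing auth/assoc driver callback from a hard crash into a warned-about, recoverable failure of the connection attempt. The same guard shape, as a hedged generic sketch:

    /* Emit a warning and fail the operation instead of crashing when a
     * required driver op is missing. */
    static int require_driver_op(const void *op)
    {
            if (WARN_ON(!op))
                    return -EOPNOTSUPP;

            return 0;
    }
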
@@ -234,7 +236,6 @@ void cfg80211_conn_work(struct work_struct *work)
                                        NULL, 0, NULL, 0,
                                        WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        false, NULL);
-                       cfg80211_sme_free(wdev);
                }
                wdev_unlock(wdev);
        }
@@ -245,7 +246,7 @@ void cfg80211_conn_work(struct work_struct *work)
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_bss *bss;
        u16 capa = WLAN_CAPABILITY_ESS;
 
@@ -275,7 +276,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
 static void __cfg80211_sme_scan_done(struct net_device *dev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_bss *bss;
 
        ASSERT_WDEV_LOCK(wdev);
@@ -306,7 +307,7 @@ void cfg80211_sme_scan_done(struct net_device *dev)
 void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
        u16 status_code = le16_to_cpu(mgmt->u.auth.status_code);
 
@@ -352,7 +353,7 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
 
 bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (!wdev->conn)
                return false;
@@ -386,7 +387,7 @@ void cfg80211_sme_deauth(struct wireless_dev *wdev)
 
 void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (!wdev->conn)
                return;
@@ -397,7 +398,7 @@ void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
 
 void cfg80211_sme_disassoc(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (!wdev->conn)
                return;
@@ -408,7 +409,7 @@ void cfg80211_sme_disassoc(struct wireless_dev *wdev)
 
 void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (!wdev->conn)
                return;
@@ -421,7 +422,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
                                struct cfg80211_connect_params *connect,
                                const u8 *prev_bssid)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_bss *bss;
        int err;
 
@@ -468,7 +469,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
        }
 
        wdev->conn->params.ssid = wdev->ssid;
-       wdev->conn->params.ssid_len = connect->ssid_len;
+       wdev->conn->params.ssid_len = wdev->ssid_len;
 
        /* see if we have the bss already */
        bss = cfg80211_get_conn_bss(wdev);
@@ -480,7 +481,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
 
        /* we're good if we have a matching bss struct */
        if (bss) {
-               wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
                err = cfg80211_conn_do_work(wdev);
                cfg80211_put_bss(wdev->wiphy, bss);
        } else {
@@ -506,7 +506,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
 
 static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int err;
 
        if (!wdev->conn)
@@ -594,7 +594,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                return;
        }
 
-       nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev,
+       nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
                                    bssid, req_ie, req_ie_len,
                                    resp_ie, resp_ie_len,
                                    status, GFP_KERNEL);
@@ -625,7 +625,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 #endif
 
        if (!bss && (status == WLAN_STATUS_SUCCESS)) {
-               WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
+               WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
                bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
                                       wdev->ssid, wdev->ssid_len,
                                       WLAN_CAPABILITY_ESS,
@@ -648,6 +648,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                        cfg80211_unhold_bss(bss_from_pub(bss));
                        cfg80211_put_bss(wdev->wiphy, bss);
                }
+               cfg80211_sme_free(wdev);
                return;
        }
 
@@ -687,7 +688,7 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                             u16 status, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_event *ev;
        unsigned long flags;
 
@@ -742,7 +743,8 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
        cfg80211_hold_bss(bss_from_pub(bss));
        wdev->current_bss = bss_from_pub(bss);
 
-       nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid,
+       nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy),
+                           wdev->netdev, bss->bssid,
                            req_ie, req_ie_len, resp_ie, resp_ie_len,
                            GFP_KERNEL);
 
@@ -801,7 +803,7 @@ void cfg80211_roamed_bss(struct net_device *dev,
                         size_t resp_ie_len, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_event *ev;
        unsigned long flags;
 
@@ -834,7 +836,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
                             size_t ie_len, u16 reason, bool from_ap)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int i;
 #ifdef CONFIG_CFG80211_WEXT
        union iwreq_data wrqu;
@@ -877,10 +879,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 }
 
 void cfg80211_disconnected(struct net_device *dev, u16 reason,
-                          u8 *ie, size_t ie_len, gfp_t gfp)
+                          const u8 *ie, size_t ie_len, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_event *ev;
        unsigned long flags;
 
index aabccf13e07b6860ef92ddc637a7879a8f961aab..560ed77084e92b52cae0f299ca383eef240a42e6 100644 (file)
@@ -1876,29 +1876,33 @@ TRACE_EVENT(rdev_channel_switch,
                WIPHY_ENTRY
                NETDEV_ENTRY
                CHAN_DEF_ENTRY
-               __field(u16, counter_offset_beacon)
-               __field(u16, counter_offset_presp)
                __field(bool, radar_required)
                __field(bool, block_tx)
                __field(u8, count)
+               __dynamic_array(u16, bcn_ofs, params->n_counter_offsets_beacon)
+               __dynamic_array(u16, pres_ofs, params->n_counter_offsets_presp)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
                NETDEV_ASSIGN;
                CHAN_DEF_ASSIGN(&params->chandef);
-               __entry->counter_offset_beacon = params->counter_offset_beacon;
-               __entry->counter_offset_presp = params->counter_offset_presp;
                __entry->radar_required = params->radar_required;
                __entry->block_tx = params->block_tx;
                __entry->count = params->count;
+               memcpy(__get_dynamic_array(bcn_ofs),
+                      params->counter_offsets_beacon,
+                      params->n_counter_offsets_beacon * sizeof(u16));
+
+               /* probe response offsets are optional */
+               if (params->n_counter_offsets_presp)
+                       memcpy(__get_dynamic_array(pres_ofs),
+                              params->counter_offsets_presp,
+                              params->n_counter_offsets_presp * sizeof(u16));
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
-                 ", block_tx: %d, count: %u, radar_required: %d"
-                 ", counter offsets (beacon/presp): %u/%u",
+                 ", block_tx: %d, count: %u, radar_required: %d",
                  WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
-                 __entry->block_tx, __entry->count, __entry->radar_required,
-                 __entry->counter_offset_beacon,
-                 __entry->counter_offset_presp)
+                 __entry->block_tx, __entry->count, __entry->radar_required)
 );
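A rough sketch (not taken from this patch) of how a caller would populate the array-based counter offsets that the reworked tracepoint now records; the exact field types and the offset value 42 are assumptions for illustration only.

static void example_fill_csa_offsets(struct cfg80211_csa_settings *params)
{
	/* hypothetical: a single CSA counter at offset 42 in the beacon tail */
	static const u16 bcn_offsets[] = { 42 };

	params->counter_offsets_beacon = bcn_offsets;
	params->n_counter_offsets_beacon = ARRAY_SIZE(bcn_offsets);

	/* probe response offsets are optional and left empty here */
	params->counter_offsets_presp = NULL;
	params->n_counter_offsets_presp = 0;
}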
 
 TRACE_EVENT(rdev_set_qos_map,
@@ -1919,6 +1923,24 @@ TRACE_EVENT(rdev_set_qos_map,
                  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->num_des)
 );
 
+TRACE_EVENT(rdev_set_ap_chanwidth,
+       TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+                struct cfg80211_chan_def *chandef),
+       TP_ARGS(wiphy, netdev, chandef),
+       TP_STRUCT__entry(
+               WIPHY_ENTRY
+               NETDEV_ENTRY
+               CHAN_DEF_ENTRY
+       ),
+       TP_fast_assign(
+               WIPHY_ASSIGN;
+               NETDEV_ASSIGN;
+               CHAN_DEF_ASSIGN(chandef);
+       ),
+       TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
+                 WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
 /*************************************************************
  *          cfg80211 exported functions traces              *
  *************************************************************/
@@ -2193,18 +2215,21 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
 );
 
 TRACE_EVENT(cfg80211_reg_can_beacon,
-       TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
-       TP_ARGS(wiphy, chandef),
+       TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
+                enum nl80211_iftype iftype),
+       TP_ARGS(wiphy, chandef, iftype),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                CHAN_DEF_ENTRY
+               __field(enum nl80211_iftype, iftype)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
                CHAN_DEF_ASSIGN(chandef);
+               __entry->iftype = iftype;
        ),
-       TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
-                 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+       TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d",
+                 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype)
 );
 
 TRACE_EVENT(cfg80211_chandef_dfs_required,
@@ -2615,6 +2640,21 @@ TRACE_EVENT(cfg80211_ft_event,
                  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap))
 );
 
+TRACE_EVENT(cfg80211_stop_iface,
+       TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+       TP_ARGS(wiphy, wdev),
+       TP_STRUCT__entry(
+               WIPHY_ENTRY
+               WDEV_ENTRY
+       ),
+       TP_fast_assign(
+               WIPHY_ASSIGN;
+               WDEV_ASSIGN;
+       ),
+       TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT,
+                 WIPHY_PR_ARG, WDEV_PR_ARG)
+);
+
 #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
 
 #undef TRACE_INCLUDE_PATH
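For context, a minimal sketch (not part of this patch) of how the new rdev_set_ap_chanwidth tracepoint would typically be emitted from an rdev-ops style wrapper; the set_ap_chanwidth callback in struct cfg80211_ops is assumed here rather than shown in these hunks.

static inline int example_rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev,
						struct net_device *dev,
						struct cfg80211_chan_def *chandef)
{
	int ret;

	trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, chandef);
	ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, chandef);
	trace_rdev_return_int(&rdev->wiphy, ret);

	return ret;
}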
index e5872ff2c27ca8989ca6da7cfdf4d7041c29a72e..728f1c0dc70dbdb7c85558b84b83dc9b66e4083f 100644 (file)
@@ -476,7 +476,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
 EXPORT_SYMBOL(ieee80211_data_to_8023);
 
 int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
-                            enum nl80211_iftype iftype, u8 *bssid, bool qos)
+                            enum nl80211_iftype iftype,
+                            const u8 *bssid, bool qos)
 {
        struct ieee80211_hdr hdr;
        u16 hdrlen, ethertype;
@@ -770,7 +771,7 @@ EXPORT_SYMBOL(ieee80211_bss_get_ie);
 
 void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct net_device *dev = wdev->netdev;
        int i;
 
@@ -839,6 +840,9 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
                        __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
                                               ev->ij.channel);
                        break;
+               case EVENT_STOPPED:
+                       __cfg80211_leave(wiphy_to_rdev(wdev->wiphy), wdev);
+                       break;
                }
                wdev_unlock(wdev);
 
@@ -888,11 +892,6 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                return -EBUSY;
 
        if (ntype != otype && netif_running(dev)) {
-               err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
-                                                   ntype);
-               if (err)
-                       return err;
-
                dev->ieee80211_ptr->use_4addr = false;
                dev->ieee80211_ptr->mesh_id_up_len = 0;
                wdev_lock(dev->ieee80211_ptr);
@@ -1268,6 +1267,120 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
        return res;
 }
 
+int cfg80211_iter_combinations(struct wiphy *wiphy,
+                              const int num_different_channels,
+                              const u8 radar_detect,
+                              const int iftype_num[NUM_NL80211_IFTYPES],
+                              void (*iter)(const struct ieee80211_iface_combination *c,
+                                           void *data),
+                              void *data)
+{
+       const struct ieee80211_regdomain *regdom;
+       enum nl80211_dfs_regions region = 0;
+       int i, j, iftype;
+       int num_interfaces = 0;
+       u32 used_iftypes = 0;
+
+       if (radar_detect) {
+               rcu_read_lock();
+               regdom = rcu_dereference(cfg80211_regdomain);
+               if (regdom)
+                       region = regdom->dfs_region;
+               rcu_read_unlock();
+       }
+
+       for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+               num_interfaces += iftype_num[iftype];
+               if (iftype_num[iftype] > 0 &&
+                   !(wiphy->software_iftypes & BIT(iftype)))
+                       used_iftypes |= BIT(iftype);
+       }
+
+       for (i = 0; i < wiphy->n_iface_combinations; i++) {
+               const struct ieee80211_iface_combination *c;
+               struct ieee80211_iface_limit *limits;
+               u32 all_iftypes = 0;
+
+               c = &wiphy->iface_combinations[i];
+
+               if (num_interfaces > c->max_interfaces)
+                       continue;
+               if (num_different_channels > c->num_different_channels)
+                       continue;
+
+               limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
+                                GFP_KERNEL);
+               if (!limits)
+                       return -ENOMEM;
+
+               for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+                       if (wiphy->software_iftypes & BIT(iftype))
+                               continue;
+                       for (j = 0; j < c->n_limits; j++) {
+                               all_iftypes |= limits[j].types;
+                               if (!(limits[j].types & BIT(iftype)))
+                                       continue;
+                               if (limits[j].max < iftype_num[iftype])
+                                       goto cont;
+                               limits[j].max -= iftype_num[iftype];
+                       }
+               }
+
+               if (radar_detect != (c->radar_detect_widths & radar_detect))
+                       goto cont;
+
+               if (radar_detect && c->radar_detect_regions &&
+                   !(c->radar_detect_regions & BIT(region)))
+                       goto cont;
+
+               /* Finally check that all iftypes that we're currently
+                * using are actually part of this combination. If they
+                * aren't then we can't use this combination and have
+                * to continue to the next.
+                */
+               if ((all_iftypes & used_iftypes) != used_iftypes)
+                       goto cont;
+
+               /* This combination covered all interface types and
+                * supported the requested numbers, so we're good.
+                */
+
+               (*iter)(c, data);
+ cont:
+               kfree(limits);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(cfg80211_iter_combinations);
+
+static void
+cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c,
+                         void *data)
+{
+       int *num = data;
+       (*num)++;
+}
+
+int cfg80211_check_combinations(struct wiphy *wiphy,
+                               const int num_different_channels,
+                               const u8 radar_detect,
+                               const int iftype_num[NUM_NL80211_IFTYPES])
+{
+       int err, num = 0;
+
+       err = cfg80211_iter_combinations(wiphy, num_different_channels,
+                                        radar_detect, iftype_num,
+                                        cfg80211_iter_sum_ifcombs, &num);
+       if (err)
+               return err;
+       if (num == 0)
+               return -EBUSY;
+
+       return 0;
+}
+EXPORT_SYMBOL(cfg80211_check_combinations);
+
 int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
                                 struct wireless_dev *wdev,
                                 enum nl80211_iftype iftype,
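As a rough illustration (not from this patch), a driver could use the new helper to check whether one AP plus one station interface on a single channel is an allowed combination; the function name and scenario are made up for the example.

static int example_check_ap_plus_sta(struct wiphy *wiphy)
{
	int iftype_num[NUM_NL80211_IFTYPES] = {};

	iftype_num[NL80211_IFTYPE_AP] = 1;
	iftype_num[NL80211_IFTYPE_STATION] = 1;

	/* one channel in use, no radar detection requested */
	return cfg80211_check_combinations(wiphy, 1, 0, iftype_num);
}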
@@ -1276,7 +1389,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
                                 u8 radar_detect)
 {
        struct wireless_dev *wdev_iter;
-       u32 used_iftypes = BIT(iftype);
        int num[NUM_NL80211_IFTYPES];
        struct ieee80211_channel
                        *used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
@@ -1284,7 +1396,7 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
        enum cfg80211_chan_mode chmode;
        int num_different_channels = 0;
        int total = 1;
-       int i, j;
+       int i;
 
        ASSERT_RTNL();
 
@@ -1306,6 +1418,11 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 
        num[iftype] = 1;
 
+       /* TODO: We'll probably not need this anymore, since this
+        * should only be called with CHAN_MODE_UNDEFINED. There are
+        * still a couple of pending calls where other chanmodes are
+        * used, but we should get rid of them.
+        */
        switch (chanmode) {
        case CHAN_MODE_UNDEFINED:
                break;
@@ -1369,65 +1486,13 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 
                num[wdev_iter->iftype]++;
                total++;
-               used_iftypes |= BIT(wdev_iter->iftype);
        }
 
        if (total == 1 && !radar_detect)
                return 0;
 
-       for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
-               const struct ieee80211_iface_combination *c;
-               struct ieee80211_iface_limit *limits;
-               u32 all_iftypes = 0;
-
-               c = &rdev->wiphy.iface_combinations[i];
-
-               if (total > c->max_interfaces)
-                       continue;
-               if (num_different_channels > c->num_different_channels)
-                       continue;
-
-               limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
-                                GFP_KERNEL);
-               if (!limits)
-                       return -ENOMEM;
-
-               for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
-                       if (rdev->wiphy.software_iftypes & BIT(iftype))
-                               continue;
-                       for (j = 0; j < c->n_limits; j++) {
-                               all_iftypes |= limits[j].types;
-                               if (!(limits[j].types & BIT(iftype)))
-                                       continue;
-                               if (limits[j].max < num[iftype])
-                                       goto cont;
-                               limits[j].max -= num[iftype];
-                       }
-               }
-
-               if (radar_detect && !(c->radar_detect_widths & radar_detect))
-                       goto cont;
-
-               /*
-                * Finally check that all iftypes that we're currently
-                * using are actually part of this combination. If they
-                * aren't then we can't use this combination and have
-                * to continue to the next.
-                */
-               if ((all_iftypes & used_iftypes) != used_iftypes)
-                       goto cont;
-
-               /*
-                * This combination covered all interface types and
-                * supported the requested numbers, so we're good.
-                */
-               kfree(limits);
-               return 0;
- cont:
-               kfree(limits);
-       }
-
-       return -EBUSY;
+       return cfg80211_check_combinations(&rdev->wiphy, num_different_channels,
+                                          radar_detect, num);
 }
 
 int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
@@ -1481,6 +1546,24 @@ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(ieee80211_get_num_supported_channels);
 
+int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+                        struct station_info *sinfo)
+{
+       struct cfg80211_registered_device *rdev;
+       struct wireless_dev *wdev;
+
+       wdev = dev->ieee80211_ptr;
+       if (!wdev)
+               return -EOPNOTSUPP;
+
+       rdev = wiphy_to_rdev(wdev->wiphy);
+       if (!rdev->ops->get_station)
+               return -EOPNOTSUPP;
+
+       return rdev_get_station(rdev, dev, mac_addr, sinfo);
+}
+EXPORT_SYMBOL(cfg80211_get_station);
+
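A hedged sketch (not part of this patch) of how an in-kernel caller might use the newly exported helper; the function name and the reliance on sinfo.signal are illustrative assumptions.

static int example_peer_signal(struct net_device *dev, const u8 *peer)
{
	struct station_info sinfo = {};
	int ret;

	ret = cfg80211_get_station(dev, peer, &sinfo);
	if (ret)
		return ret;

	/* only meaningful if the driver actually filled in the signal field */
	return sinfo.signal;
}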
 /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
 /* Ethernet-II snap header (RFC1042 for most EtherTypes) */
 const unsigned char rfc1042_header[] __aligned(2) =
index 5661a54ac7ee4ed1c1865d855e1b2681c67d07cc..11120bb14162505043579628bed2ad131ba41f7d 100644 (file)
@@ -73,7 +73,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
        struct vif_params vifparams;
        enum nl80211_iftype type;
 
-       rdev = wiphy_to_dev(wdev->wiphy);
+       rdev = wiphy_to_rdev(wdev->wiphy);
 
        switch (*mode) {
        case IW_MODE_INFRA:
@@ -253,12 +253,12 @@ EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
 
 /**
  * cfg80211_wext_freq - get wext frequency for non-"auto"
- * @wiphy: the wiphy
+ * @dev: the net device
  * @freq: the wext freq encoding
  *
  * Returns a frequency, or a negative error code, or 0 for auto.
  */
-int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
+int cfg80211_wext_freq(struct iw_freq *freq)
 {
        /*
         * Parse frequency - return 0 for auto and
@@ -286,7 +286,7 @@ int cfg80211_wext_siwrts(struct net_device *dev,
                         struct iw_param *rts, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u32 orts = wdev->wiphy->rts_threshold;
        int err;
 
@@ -324,7 +324,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
                          struct iw_param *frag, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u32 ofrag = wdev->wiphy->frag_threshold;
        int err;
 
@@ -364,7 +364,7 @@ static int cfg80211_wext_siwretry(struct net_device *dev,
                                  struct iw_param *retry, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u32 changed = 0;
        u8 olong = wdev->wiphy->retry_long;
        u8 oshort = wdev->wiphy->retry_short;
@@ -587,7 +587,7 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
                                   struct iw_point *erq, char *keybuf)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int idx, err;
        bool remove = false;
        struct key_params params;
@@ -647,7 +647,7 @@ static int cfg80211_wext_siwencodeext(struct net_device *dev,
                                      struct iw_point *erq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
        const u8 *addr;
        int idx;
@@ -775,7 +775,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
                                 struct iw_freq *wextfreq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_chan_def chandef = {
                .width = NL80211_CHAN_WIDTH_20_NOHT,
        };
@@ -787,7 +787,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
        case NL80211_IFTYPE_ADHOC:
                return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
        case NL80211_IFTYPE_MONITOR:
-               freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+               freq = cfg80211_wext_freq(wextfreq);
                if (freq < 0)
                        return freq;
                if (freq == 0)
@@ -798,7 +798,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
                        return -EINVAL;
                return cfg80211_set_monitor_channel(rdev, &chandef);
        case NL80211_IFTYPE_MESH_POINT:
-               freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+               freq = cfg80211_wext_freq(wextfreq);
                if (freq < 0)
                        return freq;
                if (freq == 0)
@@ -818,7 +818,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
                                 struct iw_freq *freq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_chan_def chandef;
        int ret;
 
@@ -847,7 +847,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
                                    union iwreq_data *data, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        enum nl80211_tx_power_setting type;
        int dbm = 0;
 
@@ -899,7 +899,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
                                    union iwreq_data *data, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int err, val;
 
        if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
@@ -1119,7 +1119,7 @@ static int cfg80211_wext_siwpower(struct net_device *dev,
                                  struct iw_param *wrq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        bool ps = wdev->ps;
        int timeout = wdev->ps_timeout;
        int err;
@@ -1177,7 +1177,7 @@ static int cfg80211_wds_wext_siwap(struct net_device *dev,
                                   struct sockaddr *addr, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int err;
 
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
@@ -1221,7 +1221,7 @@ static int cfg80211_wext_siwrate(struct net_device *dev,
                                 struct iw_param *rate, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_bitrate_mask mask;
        u32 fixed, maxrate;
        struct ieee80211_supported_band *sband;
@@ -1272,7 +1272,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
                                 struct iw_param *rate, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        /* we are under RTNL - globally locked - so can use a static struct */
        static struct station_info sinfo;
        u8 addr[ETH_ALEN];
@@ -1310,7 +1310,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
 static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        /* we are under RTNL - globally locked - so can use static structs */
        static struct iw_statistics wstats;
        static struct station_info sinfo;
@@ -1449,7 +1449,7 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev,
                                  struct iw_point *data, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_pmksa cfg_pmksa;
        struct iw_pmksa *pmksa = (struct iw_pmksa *)extra;
 
index 5d766b0118e81969ff4f24c59b88bffdaa6496ff..ebcacca2f731941123efb22c8067a6c34aaa9ea1 100644 (file)
@@ -50,7 +50,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
                           struct iw_point *data, char *extra);
 
 
-int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq);
+int cfg80211_wext_freq(struct iw_freq *freq);
 
 
 extern const struct iw_handler_def cfg80211_wext_handler;
index 86c331a65664a77bfe6c083224b7eae2c91f5b9c..c7e5c8eb4f24708a27d90ae298a85c825ee81e38 100644 (file)
@@ -67,7 +67,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
                              struct iw_freq *wextfreq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct ieee80211_channel *chan = NULL;
        int err, freq;
 
@@ -75,7 +75,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
                return -EINVAL;
 
-       freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+       freq = cfg80211_wext_freq(wextfreq);
        if (freq < 0)
                return freq;
 
@@ -169,7 +169,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
                               struct iw_point *data, char *ssid)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        size_t len = data->length;
        int err;
 
@@ -260,7 +260,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
                            struct sockaddr *ap_addr, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u8 *bssid = ap_addr->sa_data;
        int err;
 
@@ -333,7 +333,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
                           struct iw_point *data, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u8 *ie = extra;
        int ie_len = data->length, err;
 
@@ -390,7 +390,7 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
        if (!wdev)
                return -EOPNOTSUPP;
 
-       rdev = wiphy_to_dev(wdev->wiphy);
+       rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (wdev->iftype != NL80211_IFTYPE_STATION)
                return -EINVAL;
index 3bb2cdc13b46e1468743e1130d6c4c25e08921e0..c51e8f7b8653cb167aba13f61c2d7520d615c31c 100644 (file)
@@ -199,6 +199,7 @@ int xfrm_output(struct sk_buff *skb)
 
        return xfrm_output2(skb);
 }
+EXPORT_SYMBOL_GPL(xfrm_output);
 
 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
@@ -213,6 +214,7 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
                return -EAFNOSUPPORT;
        return inner_mode->afinfo->extract_output(x, skb);
 }
+EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
 
 void xfrm_local_error(struct sk_buff *skb, int mtu)
 {
@@ -233,7 +235,4 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
        afinfo->local_error(skb, mtu);
        xfrm_state_put_afinfo(afinfo);
 }
-
-EXPORT_SYMBOL_GPL(xfrm_output);
-EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
 EXPORT_SYMBOL_GPL(xfrm_local_error);
index c08fbd11ceff52ee145316a33758feb30667f02d..a8ef5108e0d86cbc5c411f3db378fde5a0d54f18 100644 (file)
@@ -769,7 +769,7 @@ EXPORT_SYMBOL(xfrm_policy_byid);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 static inline int
-xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
+xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 {
        int dir, err = 0;
 
@@ -783,10 +783,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
                                continue;
                        err = security_xfrm_policy_delete(pol->security);
                        if (err) {
-                               xfrm_audit_policy_delete(pol, 0,
-                                                        audit_info->loginuid,
-                                                        audit_info->sessionid,
-                                                        audit_info->secid);
+                               xfrm_audit_policy_delete(pol, 0, task_valid);
                                return err;
                        }
                }
@@ -800,9 +797,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
                                                                pol->security);
                                if (err) {
                                        xfrm_audit_policy_delete(pol, 0,
-                                                       audit_info->loginuid,
-                                                       audit_info->sessionid,
-                                                       audit_info->secid);
+                                                                task_valid);
                                        return err;
                                }
                        }
@@ -812,19 +807,19 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
 }
 #else
 static inline int
-xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
+xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 {
        return 0;
 }
 #endif
 
-int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
+int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 {
        int dir, err = 0, cnt = 0;
 
        write_lock_bh(&net->xfrm.xfrm_policy_lock);
 
-       err = xfrm_policy_flush_secctx_check(net, type, audit_info);
+       err = xfrm_policy_flush_secctx_check(net, type, task_valid);
        if (err)
                goto out;
 
@@ -841,9 +836,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
                        write_unlock_bh(&net->xfrm.xfrm_policy_lock);
                        cnt++;
 
-                       xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
-                                                audit_info->sessionid,
-                                                audit_info->secid);
+                       xfrm_audit_policy_delete(pol, 1, task_valid);
 
                        xfrm_policy_kill(pol);
 
@@ -862,10 +855,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
                                write_unlock_bh(&net->xfrm.xfrm_policy_lock);
                                cnt++;
 
-                               xfrm_audit_policy_delete(pol, 1,
-                                                        audit_info->loginuid,
-                                                        audit_info->sessionid,
-                                                        audit_info->secid);
+                               xfrm_audit_policy_delete(pol, 1, task_valid);
                                xfrm_policy_kill(pol);
 
                                write_lock_bh(&net->xfrm.xfrm_policy_lock);
@@ -2783,21 +2773,19 @@ static struct notifier_block xfrm_dev_notifier = {
 static int __net_init xfrm_statistics_init(struct net *net)
 {
        int rv;
-
-       if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
-                         sizeof(struct linux_xfrm_mib),
-                         __alignof__(struct linux_xfrm_mib)) < 0)
+       net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
+       if (!net->mib.xfrm_statistics)
                return -ENOMEM;
        rv = xfrm_proc_init(net);
        if (rv < 0)
-               snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
+               free_percpu(net->mib.xfrm_statistics);
        return rv;
 }
 
 static void xfrm_statistics_fini(struct net *net)
 {
        xfrm_proc_fini(net);
-       snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
+       free_percpu(net->mib.xfrm_statistics);
 }
 #else
 static int __net_init xfrm_statistics_init(struct net *net)
@@ -2862,21 +2850,14 @@ out_byidx:
 
 static void xfrm_policy_fini(struct net *net)
 {
-       struct xfrm_audit audit_info;
        unsigned int sz;
        int dir;
 
        flush_work(&net->xfrm.policy_hash_work);
 #ifdef CONFIG_XFRM_SUB_POLICY
-       audit_info.loginuid = INVALID_UID;
-       audit_info.sessionid = (unsigned int)-1;
-       audit_info.secid = 0;
-       xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
+       xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
 #endif
-       audit_info.loginuid = INVALID_UID;
-       audit_info.sessionid = (unsigned int)-1;
-       audit_info.secid = 0;
-       xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
+       xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
 
        WARN_ON(!list_empty(&net->xfrm.policy_all));
 
@@ -2991,15 +2972,14 @@ static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
        }
 }
 
-void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                          kuid_t auid, unsigned int sessionid, u32 secid)
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
 {
        struct audit_buffer *audit_buf;
 
        audit_buf = xfrm_audit_start("SPD-add");
        if (audit_buf == NULL)
                return;
-       xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
+       xfrm_audit_helper_usrinfo(task_valid, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        xfrm_audit_common_policyinfo(xp, audit_buf);
        audit_log_end(audit_buf);
@@ -3007,14 +2987,14 @@ void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
 
 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                             kuid_t auid, unsigned int sessionid, u32 secid)
+                             bool task_valid)
 {
        struct audit_buffer *audit_buf;
 
        audit_buf = xfrm_audit_start("SPD-delete");
        if (audit_buf == NULL)
                return;
-       xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
+       xfrm_audit_helper_usrinfo(task_valid, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        xfrm_audit_common_policyinfo(xp, audit_buf);
        audit_log_end(audit_buf);
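For orientation, one plausible shape of the reworked usrinfo helper (not shown in these hunks, and deliberately named differently here because it is only a sketch): with the task_valid flag, the login uid and session id are derived from current instead of being passed in by every caller.

static inline void example_audit_usrinfo(bool task_valid,
					 struct audit_buffer *audit_buf)
{
	const unsigned int auid = task_valid ?
		from_kuid(&init_user_ns, audit_get_loginuid(current)) : -1;
	const unsigned int ses = task_valid ?
		audit_get_sessionid(current) : (unsigned int)-1;

	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
}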
index fc5abd0b456f3a3abf8163821f22577b8d700f6f..9c4fbd8935f48e28c3c86d9da443904f55f93ec3 100644 (file)
@@ -54,8 +54,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
        int i;
        for (i = 0; xfrm_mib_list[i].name; i++)
                seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
-                          snmp_fold_field((void __percpu **)
-                                          net->mib.xfrm_statistics,
+                          snmp_fold_field(net->mib.xfrm_statistics,
                                           xfrm_mib_list[i].entry));
        return 0;
 }
index 8e9c781a6bbaaba83e4af4a31ac7a07a70dd6c71..0ab54134bb40b84cf825df6035a9acf7cb3cf760 100644 (file)
@@ -463,9 +463,7 @@ expired:
        if (!err)
                km_state_expired(x, 1, 0);
 
-       xfrm_audit_state_delete(x, err ? 0 : 1,
-                               audit_get_loginuid(current),
-                               audit_get_sessionid(current), 0);
+       xfrm_audit_state_delete(x, err ? 0 : 1, true);
 
 out:
        spin_unlock(&x->lock);
@@ -562,7 +560,7 @@ EXPORT_SYMBOL(xfrm_state_delete);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 static inline int
-xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info)
+xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
 {
        int i, err = 0;
 
@@ -572,10 +570,7 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
                hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
                        if (xfrm_id_proto_match(x->id.proto, proto) &&
                           (err = security_xfrm_state_delete(x)) != 0) {
-                               xfrm_audit_state_delete(x, 0,
-                                                       audit_info->loginuid,
-                                                       audit_info->sessionid,
-                                                       audit_info->secid);
+                               xfrm_audit_state_delete(x, 0, task_valid);
                                return err;
                        }
                }
@@ -585,18 +580,18 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
 }
 #else
 static inline int
-xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info)
+xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
 {
        return 0;
 }
 #endif
 
-int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
 {
        int i, err = 0, cnt = 0;
 
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
-       err = xfrm_state_flush_secctx_check(net, proto, audit_info);
+       err = xfrm_state_flush_secctx_check(net, proto, task_valid);
        if (err)
                goto out;
 
@@ -612,9 +607,7 @@ restart:
 
                                err = xfrm_state_delete(x);
                                xfrm_audit_state_delete(x, err ? 0 : 1,
-                                                       audit_info->loginuid,
-                                                       audit_info->sessionid,
-                                                       audit_info->secid);
+                                                       task_valid);
                                xfrm_state_put(x);
                                if (!err)
                                        cnt++;
@@ -2128,14 +2121,10 @@ out_bydst:
 
 void xfrm_state_fini(struct net *net)
 {
-       struct xfrm_audit audit_info;
        unsigned int sz;
 
        flush_work(&net->xfrm.state_hash_work);
-       audit_info.loginuid = INVALID_UID;
-       audit_info.sessionid = (unsigned int)-1;
-       audit_info.secid = 0;
-       xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info);
+       xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
        flush_work(&net->xfrm.state_gc_work);
 
        WARN_ON(!list_empty(&net->xfrm.state_all));
@@ -2198,30 +2187,28 @@ static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
        }
 }
 
-void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                         kuid_t auid, unsigned int sessionid, u32 secid)
+void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
 {
        struct audit_buffer *audit_buf;
 
        audit_buf = xfrm_audit_start("SAD-add");
        if (audit_buf == NULL)
                return;
-       xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
+       xfrm_audit_helper_usrinfo(task_valid, audit_buf);
        xfrm_audit_helper_sainfo(x, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        audit_log_end(audit_buf);
 }
 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
 
-void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                            kuid_t auid, unsigned int sessionid, u32 secid)
+void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
 {
        struct audit_buffer *audit_buf;
 
        audit_buf = xfrm_audit_start("SAD-delete");
        if (audit_buf == NULL)
                return;
-       xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
+       xfrm_audit_helper_usrinfo(task_valid, audit_buf);
        xfrm_audit_helper_sainfo(x, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        audit_log_end(audit_buf);
index 8f131c10a6f3d6793c6d0a049108ab66ccaa8664..fd9a16a6d1de36faad11b3a8a4676cac61969f37 100644 (file)
@@ -597,9 +597,6 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_state *x;
        int err;
        struct km_event c;
-       kuid_t loginuid = audit_get_loginuid(current);
-       unsigned int sessionid = audit_get_sessionid(current);
-       u32 sid;
 
        err = verify_newsa_info(p, attrs);
        if (err)
@@ -615,8 +612,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        else
                err = xfrm_state_update(x);
 
-       security_task_getsecid(current, &sid);
-       xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
+       xfrm_audit_state_add(x, err ? 0 : 1, true);
 
        if (err < 0) {
                x->km.state = XFRM_STATE_DEAD;
@@ -676,9 +672,6 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        int err = -ESRCH;
        struct km_event c;
        struct xfrm_usersa_id *p = nlmsg_data(nlh);
-       kuid_t loginuid = audit_get_loginuid(current);
-       unsigned int sessionid = audit_get_sessionid(current);
-       u32 sid;
 
        x = xfrm_user_state_lookup(net, p, attrs, &err);
        if (x == NULL)
@@ -703,8 +696,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        km_state_notify(x, &c);
 
 out:
-       security_task_getsecid(current, &sid);
-       xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
+       xfrm_audit_state_delete(x, err ? 0 : 1, true);
        xfrm_state_put(x);
        return err;
 }
@@ -1414,9 +1406,6 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct km_event c;
        int err;
        int excl;
-       kuid_t loginuid = audit_get_loginuid(current);
-       unsigned int sessionid = audit_get_sessionid(current);
-       u32 sid;
 
        err = verify_newpolicy_info(p);
        if (err)
@@ -1435,8 +1424,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
         * a type XFRM_MSG_UPDPOLICY - JHS */
        excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
        err = xfrm_policy_insert(p->dir, xp, excl);
-       security_task_getsecid(current, &sid);
-       xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
+       xfrm_audit_policy_add(xp, err ? 0 : 1, true);
 
        if (err) {
                security_xfrm_policy_free(xp->security);
@@ -1673,13 +1661,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                                            NETLINK_CB(skb).portid);
                }
        } else {
-               kuid_t loginuid = audit_get_loginuid(current);
-               unsigned int sessionid = audit_get_sessionid(current);
-               u32 sid;
-
-               security_task_getsecid(current, &sid);
-               xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
-                                        sid);
+               xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
 
                if (err != 0)
                        goto out;
@@ -1704,13 +1686,9 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        struct km_event c;
        struct xfrm_usersa_flush *p = nlmsg_data(nlh);
-       struct xfrm_audit audit_info;
        int err;
 
-       audit_info.loginuid = audit_get_loginuid(current);
-       audit_info.sessionid = audit_get_sessionid(current);
-       security_task_getsecid(current, &audit_info.secid);
-       err = xfrm_state_flush(net, p->proto, &audit_info);
+       err = xfrm_state_flush(net, p->proto, true);
        if (err) {
                if (err == -ESRCH) /* empty table */
                        return 0;
@@ -1894,16 +1872,12 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct km_event c;
        u8 type = XFRM_POLICY_TYPE_MAIN;
        int err;
-       struct xfrm_audit audit_info;
 
        err = copy_from_user_policy_type(&type, attrs);
        if (err)
                return err;
 
-       audit_info.loginuid = audit_get_loginuid(current);
-       audit_info.sessionid = audit_get_sessionid(current);
-       security_task_getsecid(current, &audit_info.secid);
-       err = xfrm_policy_flush(net, type, &audit_info);
+       err = xfrm_policy_flush(net, type, true);
        if (err) {
                if (err == -ESRCH) /* empty table */
                        return 0;
@@ -1969,14 +1943,8 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = 0;
        if (up->hard) {
-               kuid_t loginuid = audit_get_loginuid(current);
-               unsigned int sessionid = audit_get_sessionid(current);
-               u32 sid;
-
-               security_task_getsecid(current, &sid);
                xfrm_policy_delete(xp, p->dir);
-               xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
-
+               xfrm_audit_policy_delete(xp, 1, true);
        } else {
                // reset the timers here?
                WARN(1, "Dont know what to do with soft policy expire\n");
@@ -2012,13 +1980,8 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        km_state_expired(x, ue->hard, nlh->nlmsg_pid);
 
        if (ue->hard) {
-               kuid_t loginuid = audit_get_loginuid(current);
-               unsigned int sessionid = audit_get_sessionid(current);
-               u32 sid;
-
-               security_task_getsecid(current, &sid);
                __xfrm_state_delete(x);
-               xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
+               xfrm_audit_state_delete(x, 1, true);
        }
        err = 0;
 out:
@@ -2377,7 +2340,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        link = &xfrm_dispatch[type];
 
        /* All operations require privileges, even GET */
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+       if (!netlink_net_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
index fd8fa9aa7c4edd698430a9cb0647a8d26095a9a2..5b3add31f9f1202610e67e4d4868598e5846e594 100755 (executable)
@@ -25,7 +25,7 @@ cat << EOF
 #define __IGNORE_rmdir         /* unlinkat */
 #define __IGNORE_lchown                /* fchownat */
 #define __IGNORE_access                /* faccessat */
-#define __IGNORE_rename                /* renameat */
+#define __IGNORE_rename                /* renameat2 */
 #define __IGNORE_readlink      /* readlinkat */
 #define __IGNORE_symlink       /* symlinkat */
 #define __IGNORE_utimes                /* futimesat */
@@ -37,6 +37,9 @@ cat << EOF
 #define __IGNORE_lstat64       /* fstatat64 */
 #endif
 
+/* Missing flags argument */
+#define __IGNORE_renameat      /* renameat2 */
+
 /* CLOEXEC flag */
 #define __IGNORE_pipe          /* pipe2 */
 #define __IGNORE_dup2          /* dup3 */
index cc49062acdeecf85259f646df09abe22f1019a5d..1052d4834a44f502bda4f4f1ebe1202ace1608f5 100644 (file)
 #define EM_ARCOMPACT   93
 #endif
 
+#ifndef EM_XTENSA
+#define EM_XTENSA      94
+#endif
+
 #ifndef EM_AARCH64
 #define EM_AARCH64     183
 #endif
@@ -281,6 +285,7 @@ do_file(char const *const fname)
        case EM_AARCH64:
        case EM_MICROBLAZE:
        case EM_MIPS:
+       case EM_XTENSA:
                break;
        }  /* end switch */
 
index 8fb1488a3cd4499ecf8e374dcd47f956945a0ef7..97130f88838bc2ad385b5ccd69bf0dfc51acae95 100644 (file)
@@ -66,7 +66,6 @@ extern int apparmor_initialized __initdata;
 char *aa_split_fqname(char *args, char **ns_name);
 void aa_info_message(const char *str);
 void *__aa_kvmalloc(size_t size, gfp_t flags);
-void kvfree(void *buffer);
 
 static inline void *kvmalloc(size_t size)
 {
index 69689922c491b8a4eeda5d96115df4a49e6a844f..c1827e068454cf992c510fd7f6bc9cbbda67a500 100644 (file)
@@ -104,17 +104,3 @@ void *__aa_kvmalloc(size_t size, gfp_t flags)
        }
        return buffer;
 }
-
-/**
- * kvfree - free an allocation do by kvmalloc
- * @buffer: buffer to free (MAYBE_NULL)
- *
- * Free a buffer allocated by kvmalloc
- */
-void kvfree(void *buffer)
-{
-       if (is_vmalloc_addr(buffer))
-               vfree(buffer);
-       else
-               kfree(buffer);
-}
index 8365909f5f8cfc6f404fe5ce21b936884298d13f..9134dbf70d3ee6898664f895905c8452e89a01c3 100644 (file)
@@ -306,57 +306,138 @@ static int devcgroup_seq_show(struct seq_file *m, void *v)
 }
 
 /**
- * may_access - verifies if a new exception is part of what is allowed
- *             by a dev cgroup based on the default policy +
- *             exceptions. This is used to make sure a child cgroup
- *             won't have more privileges than its parent or to
- *             verify if a certain access is allowed.
- * @dev_cgroup: dev cgroup to be tested against
- * @refex: new exception
- * @behavior: behavior of the exception
+ * match_exception     - iterates the exception list trying to find a complete match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a complete match if an exception is found that will
+ * contain the entire range of provided parameters.
+ *
+ * Return: true in case it matches an exception completely
  */
-static bool may_access(struct dev_cgroup *dev_cgroup,
-                      struct dev_exception_item *refex,
-                      enum devcg_behavior behavior)
+static bool match_exception(struct list_head *exceptions, short type,
+                           u32 major, u32 minor, short access)
 {
        struct dev_exception_item *ex;
-       bool match = false;
 
-       rcu_lockdep_assert(rcu_read_lock_held() ||
-                          lockdep_is_held(&devcgroup_mutex),
-                          "device_cgroup::may_access() called without proper synchronization");
+       list_for_each_entry_rcu(ex, exceptions, list) {
+               if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+                       continue;
+               if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+                       continue;
+               if (ex->major != ~0 && ex->major != major)
+                       continue;
+               if (ex->minor != ~0 && ex->minor != minor)
+                       continue;
+               /* provided access cannot have more than the exception rule */
+               if (access & (~ex->access))
+                       continue;
+               return true;
+       }
+       return false;
+}
+
+/**
+ * match_exception_partial - iterates the exception list trying to find a partial match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a partial match if an exception's range is found to
+ * contain *any* of the devices specified by the provided parameters. This is
+ * used to make sure no extra access is being granted that is forbidden by
+ * any exception in the list.
+ *
+ * Return: true in case the provided range partially matches an exception
+ */
+static bool match_exception_partial(struct list_head *exceptions, short type,
+                                   u32 major, u32 minor, short access)
+{
+       struct dev_exception_item *ex;
 
-       list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
-               if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+       list_for_each_entry_rcu(ex, exceptions, list) {
+               if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
                        continue;
-               if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+               if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
                        continue;
-               if (ex->major != ~0 && ex->major != refex->major)
+               /*
+                * We must be sure that both the exception and the provided
+                * range aren't masking all devices
+                */
+               if (ex->major != ~0 && major != ~0 && ex->major != major)
                        continue;
-               if (ex->minor != ~0 && ex->minor != refex->minor)
+               if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
                        continue;
-               if (refex->access & (~ex->access))
+               /*
+                * In order to make sure the provided range isn't matching
+                * an exception, all its access bits shouldn't match the
+                * exception's access bits
+                */
+               if (!(access & ex->access))
                        continue;
-               match = true;
-               break;
+               return true;
        }
+       return false;
+}
+
+/**
+ * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
+ * @dev_cgroup: dev cgroup to be tested against
+ * @refex: new exception
+ * @behavior: behavior of the exception's dev_cgroup
+ *
+ * This is used to make sure a child cgroup won't have more privileges
+ * than its parent
+ */
+static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
+                         struct dev_exception_item *refex,
+                         enum devcg_behavior behavior)
+{
+       bool match = false;
+
+       rcu_lockdep_assert(rcu_read_lock_held() ||
+                          lockdep_is_held(&devcgroup_mutex),
+                          "device_cgroup:verify_new_ex called without proper synchronization");
 
        if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                if (behavior == DEVCG_DEFAULT_ALLOW) {
-                       /* the exception will deny access to certain devices */
+                       /*
+                        * a new exception in the child doesn't matter, it
+                        * only adds extra restrictions
+                        */
                        return true;
                } else {
-                       /* the exception will allow access to certain devices */
+                       /*
+                        * new exception in the child will add more devices
+                        * that can be accessed, so it can't match any of
+                        * the parent's exceptions, even slightly
+                        */
+                       match = match_exception_partial(&dev_cgroup->exceptions,
+                                                       refex->type,
+                                                       refex->major,
+                                                       refex->minor,
+                                                       refex->access);
+
                        if (match)
-                               /*
-                                * a new exception allowing access shouldn't
-                                * match an parent's exception
-                                */
                                return false;
                        return true;
                }
        } else {
-               /* only behavior == DEVCG_DEFAULT_DENY allowed here */
+               /*
+                * Only behavior == DEVCG_DEFAULT_DENY is allowed here, therefore
+                * the new exception will add access to more devices and must
+                * be contained completely in a parent's exception to be
+                * allowed
+                */
+               match = match_exception(&dev_cgroup->exceptions, refex->type,
+                                       refex->major, refex->minor,
+                                       refex->access);
+
                if (match)
                        /* parent has an exception that matches the proposed */
                        return true;
@@ -378,7 +459,38 @@ static int parent_has_perm(struct dev_cgroup *childcg,
 
        if (!parent)
                return 1;
-       return may_access(parent, ex, childcg->behavior);
+       return verify_new_ex(parent, ex, childcg->behavior);
+}
+
+/**
+ * parent_allows_removal - verify if it's ok to remove an exception
+ * @childcg: child cgroup from where the exception will be removed
+ * @ex: exception being removed
+ *
+ * When removing an exception in a cgroup with default ALLOW policy, we must
+ * check whether removing it will give the child cgroup more access than its
+ * parent.
+ *
+ * Return: true if it's ok to remove exception, false otherwise
+ */
+static bool parent_allows_removal(struct dev_cgroup *childcg,
+                                 struct dev_exception_item *ex)
+{
+       struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
+
+       if (!parent)
+               return true;
+
+       /* It's always allowed to remove access to devices */
+       if (childcg->behavior == DEVCG_DEFAULT_DENY)
+               return true;
+
+       /*
+        * Make sure you're not removing part of an exception, or a whole
+        * exception, that exists in the parent cgroup
+        */
+       return !match_exception_partial(&parent->exceptions, ex->type,
+                                       ex->major, ex->minor, ex->access);
 }
 
 /**
@@ -616,17 +728,21 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 
        switch (filetype) {
        case DEVCG_ALLOW:
-               if (!parent_has_perm(devcgroup, &ex))
-                       return -EPERM;
                /*
                 * If the default policy is to allow by default, try to remove
                 * a matching exception instead. And be silent about it: we
                 * don't want to break compatibility
                 */
                if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+                       /* Check if the parent allows removing it first */
+                       if (!parent_allows_removal(devcgroup, &ex))
+                               return -EPERM;
                        dev_exception_rm(devcgroup, &ex);
-                       return 0;
+                       break;
                }
+
+               if (!parent_has_perm(devcgroup, &ex))
+                       return -EPERM;
                rc = dev_exception_add(devcgroup, &ex);
                break;
        case DEVCG_DENY:
@@ -704,18 +820,18 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
                                        short access)
 {
        struct dev_cgroup *dev_cgroup;
-       struct dev_exception_item ex;
-       int rc;
-
-       memset(&ex, 0, sizeof(ex));
-       ex.type = type;
-       ex.major = major;
-       ex.minor = minor;
-       ex.access = access;
+       bool rc;
 
        rcu_read_lock();
        dev_cgroup = task_devcgroup(current);
-       rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
+       if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
+               /* Can't match any of the exceptions, even partially */
+               rc = !match_exception_partial(&dev_cgroup->exceptions,
+                                             type, major, minor, access);
+       else
+               /* Need to match one exception completely to be allowed */
+               rc = match_exception(&dev_cgroup->exceptions, type, major,
+                                    minor, access);
        rcu_read_unlock();
 
        if (!rc)
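
For reference, a minimal userspace model of the two matching modes the device
cgroup hunks above rely on: any-overlap matching for the DEVCG_DEFAULT_ALLOW
case and full containment for the DEVCG_DEFAULT_DENY case. The struct, helper
names and the main() scenario below are illustrative stand-ins only (the type
field is omitted and the containment helper is inferred from how the hunks use
match_exception()); this is not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define WILDCARD  (~0u)
#define ACC_READ  0x1
#define ACC_WRITE 0x2
#define ACC_MKNOD 0x4

/* Illustrative stand-in for one exception entry (device type ignored). */
struct ex {
        unsigned int major, minor;
        short access;
};

/* Any overlap at all: the rule used when the default policy is ALLOW. */
static bool overlaps(const struct ex *e, unsigned int major,
                     unsigned int minor, short access)
{
        if (e->major != WILDCARD && major != WILDCARD && e->major != major)
                return false;
        if (e->minor != WILDCARD && minor != WILDCARD && e->minor != minor)
                return false;
        return (access & e->access) != 0;
}

/* Full containment: the rule used when the default policy is DENY. */
static bool contains(const struct ex *e, unsigned int major,
                     unsigned int minor, short access)
{
        if (e->major != WILDCARD && e->major != major)
                return false;
        if (e->minor != WILDCARD && e->minor != minor)
                return false;
        return (access & ~e->access) == 0;
}

int main(void)
{
        struct ex parent = { 8, WILDCARD, ACC_READ | ACC_WRITE }; /* "b 8:* rw" */

        /* Default DENY: "b 8:1 r" is fully contained, so it may be granted. */
        printf("deny-mode  b 8:1 r -> %d\n", contains(&parent, 8, 1, ACC_READ));
        /* Default ALLOW: "b 8:1 m" shares no access bit with the exception,
         * so it does not even partially match it. */
        printf("allow-mode b 8:1 m -> %d\n", overlaps(&parent, 8, 1, ACC_MKNOD));
        return 0;
}
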
index b4beb77967b17949daf46623a822cb7e960e8829..2c7341dbc5d68d1948ad0efa713ad3a85307608e 100644 (file)
@@ -3317,9 +3317,9 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
        case F_GETLK:
        case F_SETLK:
        case F_SETLKW:
-       case F_GETLKP:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_GETLK:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
 #if BITS_PER_LONG == 32
        case F_GETLK64:
        case F_SETLK64:
index 6496822c1808b53c172d56df4382fb88e9e20e95..1ff78ec9f0ac508734d536afbe187b9039127e14 100644 (file)
@@ -818,12 +818,14 @@ int snd_sbmixer_new(struct snd_sb *chip)
                        return err;
                break;
        case SB_HW_DT019X:
-               if ((err = snd_sbmixer_init(chip,
-                                           snd_dt019x_controls,
-                                           ARRAY_SIZE(snd_dt019x_controls),
-                                           snd_dt019x_init_values,
-                                           ARRAY_SIZE(snd_dt019x_init_values),
-                                           "DT019X")) < 0)
+               err = snd_sbmixer_init(chip,
+                                      snd_dt019x_controls,
+                                      ARRAY_SIZE(snd_dt019x_controls),
+                                      snd_dt019x_init_values,
+                                      ARRAY_SIZE(snd_dt019x_init_values),
+                                      "DT019X");
+               if (err < 0)
+                       return err;
                break;
        default:
                strcpy(card->mixername, "???");
index 248b90abb8825a62e9530a0629cbf432898898d3..480bbddbd801bf002e4cc43fb8c7c0f762ec40c8 100644 (file)
@@ -1059,24 +1059,26 @@ static void azx_init_cmd_io(struct azx *chip)
 
        /* reset the corb hw read pointer */
        azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
-       for (timeout = 1000; timeout > 0; timeout--) {
-               if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
-                       break;
-               udelay(1);
-       }
-       if (timeout <= 0)
-               dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
-                       azx_readw(chip, CORBRP));
+       if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
+               for (timeout = 1000; timeout > 0; timeout--) {
+                       if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
+                               break;
+                       udelay(1);
+               }
+               if (timeout <= 0)
+                       dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
+                               azx_readw(chip, CORBRP));
 
-       azx_writew(chip, CORBRP, 0);
-       for (timeout = 1000; timeout > 0; timeout--) {
-               if (azx_readw(chip, CORBRP) == 0)
-                       break;
-               udelay(1);
+               azx_writew(chip, CORBRP, 0);
+               for (timeout = 1000; timeout > 0; timeout--) {
+                       if (azx_readw(chip, CORBRP) == 0)
+                               break;
+                       udelay(1);
+               }
+               if (timeout <= 0)
+                       dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
+                               azx_readw(chip, CORBRP));
        }
-       if (timeout <= 0)
-               dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
-                       azx_readw(chip, CORBRP));
 
        /* enable corb dma */
        azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
index d6bca62ef387b92b499dcf5954d5c783543055d1..2c54629d62d10db0166d6feb32588f1f0d3f0543 100644 (file)
@@ -249,7 +249,8 @@ enum {
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
        (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
-        AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
+        AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT |\
+        AZX_DCAPS_CORBRP_SELF_CLEAR)
 
 #define AZX_DCAPS_PRESET_CTHDA \
        (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
@@ -1366,6 +1367,12 @@ static int azx_first_init(struct azx *chip)
        /* initialize streams */
        azx_init_stream(chip);
 
+       /* workaround for Broadwell HDMI: the first stream is broken,
+        * so mask it by keeping it as if opened
+        */
+       if (pci->vendor == 0x8086 && pci->device == 0x160c)
+               chip->azx_dev[0].opened = 1;
+
        /* initialize chip */
        azx_init_pci(chip);
        azx_init_chip(chip, (probe_only[dev] & 2) == 0);
index ba38b819f9847de7522de9171c1a7794aecc7a8f..4a7cb01fa91226b2cfd3a4a582d02d9899ffa6e0 100644 (file)
@@ -189,6 +189,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
+#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
 
 /* position fix mode */
 enum {
index 0cb5b89cd0c8b3e81dd57a0bcdaa471d67d018c1..b4218a19df22209e227538b0056a79cbe1c21279 100644 (file)
@@ -1127,8 +1127,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
                                            AMP_OUT_UNMUTE);
 
        eld = &per_pin->sink_eld;
-       if (!eld->monitor_present)
+       if (!eld->monitor_present) {
+               hdmi_set_channel_count(codec, per_pin->cvt_nid, channels);
                return;
+       }
 
        if (!non_pcm && per_pin->chmap_set)
                ca = hdmi_manual_channel_allocation(channels, per_pin->chmap);
@@ -3330,6 +3332,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0051, .name = "GPU 51 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0060, .name = "GPU 60 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3385,6 +3388,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
 MODULE_ALIAS("snd-hda-codec-id:10de0051");
 MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
index c643dfc0a82612c5a2672c5211e6e1d46c5102f3..49e884fb3e5db064426758b8c8612a42a1633830 100644 (file)
@@ -4616,12 +4616,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x065c, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x067e, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0680, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0684, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
index f500905e9373510d2bcfdb583b0569cec5994708..2acf82f4a08a8bed4db26c6c444a0eabec688412 100644 (file)
@@ -1018,13 +1018,13 @@ static int alc5623_i2c_probe(struct i2c_client *client,
                dev_err(&client->dev, "failed to read vendor ID1: %d\n", ret);
                return ret;
        }
-       vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8);
 
        ret = regmap_read(alc5623->regmap, ALC5623_VENDOR_ID2, &vid2);
        if (ret < 0) {
                dev_err(&client->dev, "failed to read vendor ID2: %d\n", ret);
                return ret;
        }
+       vid2 >>= 8;
 
        if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
                dev_err(&client->dev, "unknown or wrong codec\n");
index 460d35547a683d226521591333ce06fe1c5de634..2213a037c893107bcfa584701d58e0541bd59e13 100644 (file)
@@ -1229,8 +1229,10 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
        }
 
        if (cs42l52->pdata.reset_gpio) {
-               ret = gpio_request_one(cs42l52->pdata.reset_gpio,
-                                      GPIOF_OUT_INIT_HIGH, "CS42L52 /RST");
+               ret = devm_gpio_request_one(&i2c_client->dev,
+                                           cs42l52->pdata.reset_gpio,
+                                           GPIOF_OUT_INIT_HIGH,
+                                           "CS42L52 /RST");
                if (ret < 0) {
                        dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
                                cs42l52->pdata.reset_gpio, ret);
index 0ee60a19a26334dcae0484244fcf9d374965fc88..ae3717992d568fb2ba533634a25e306a9e8fd05b 100644 (file)
@@ -1443,8 +1443,10 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
        i2c_set_clientdata(i2c_client, cs42l73);
 
        if (cs42l73->pdata.reset_gpio) {
-               ret = gpio_request_one(cs42l73->pdata.reset_gpio,
-                                      GPIOF_OUT_INIT_HIGH, "CS42L73 /RST");
+               ret = devm_gpio_request_one(&i2c_client->dev,
+                                           cs42l73->pdata.reset_gpio,
+                                           GPIOF_OUT_INIT_HIGH,
+                                           "CS42L73 /RST");
                if (ret < 0) {
                        dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
                                cs42l73->pdata.reset_gpio, ret);
index fa158cfe9b32d396d09bdb266727c57203c7e62d..d1929de641e24eb4ec733820f8b7dc717d542d33 100644 (file)
@@ -376,7 +376,7 @@ static int aic31xx_dapm_power_event(struct snd_soc_dapm_widget *w,
                reg = AIC31XX_ADCFLAG;
                break;
        default:
-               dev_err(w->codec->dev, "Unknown widget '%s' calling %s/n",
+               dev_err(w->codec->dev, "Unknown widget '%s' calling %s\n",
                        w->name, __func__);
                return -EINVAL;
        }
index b1835103e9b4002ab44429d40bb16da8372f65aa..d7349bc89ad3085430b57eb0b67a14ea8b3886e6 100644 (file)
@@ -1399,7 +1399,6 @@ static int aic3x_probe(struct snd_soc_codec *codec)
        }
 
        aic3x_add_widgets(codec);
-       list_add(&aic3x->list, &reset_list);
 
        return 0;
 
@@ -1569,7 +1568,13 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
 
        ret = snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_aic3x, &aic3x_dai, 1);
-       return ret;
+
+       if (ret != 0)
+               goto err_gpio;
+
+       list_add(&aic3x->list, &reset_list);
+
+       return 0;
 
 err_gpio:
        if (gpio_is_valid(aic3x->gpio_reset) &&
index 5522d2566c6742d5ee19f91b4358b30f51276b62..ecd26dd2e442fb2e4019eca0238fdb9edba4a73e 100644 (file)
@@ -154,6 +154,7 @@ static struct reg_default wm8962_reg[] = {
        { 40, 0x0000 },   /* R40    - SPKOUTL volume */
        { 41, 0x0000 },   /* R41    - SPKOUTR volume */
 
+       { 49, 0x0010 },   /* R49    - Class D Control 1 */
        { 51, 0x0003 },   /* R51    - Class D Control 2 */
 
        { 56, 0x0506 },   /* R56    - Clocking 4 */
@@ -795,7 +796,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg)
        case WM8962_ALC2:
        case WM8962_THERMAL_SHUTDOWN_STATUS:
        case WM8962_ADDITIONAL_CONTROL_4:
-       case WM8962_CLASS_D_CONTROL_1:
        case WM8962_DC_SERVO_6:
        case WM8962_INTERRUPT_STATUS_1:
        case WM8962_INTERRUPT_STATUS_2:
@@ -2929,13 +2929,22 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
 static int wm8962_mute(struct snd_soc_dai *dai, int mute)
 {
        struct snd_soc_codec *codec = dai->codec;
-       int val;
+       int val, ret;
 
        if (mute)
-               val = WM8962_DAC_MUTE;
+               val = WM8962_DAC_MUTE | WM8962_DAC_MUTE_ALT;
        else
                val = 0;
 
+       /*
+        * The DAC mute bit is mirrored in two registers; update both to keep
+        * the register cache consistent.
+        */
+       ret = snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_1,
+                                 WM8962_DAC_MUTE_ALT, val);
+       if (ret < 0)
+               return ret;
+
        return snd_soc_update_bits(codec, WM8962_ADC_DAC_CONTROL_1,
                                   WM8962_DAC_MUTE, val);
 }
index a1a5d5294c19dea3d76ce03be2dfe490d925bd78..910aafd09d21e210d2b6e0b36c7e3243533d5e6b 100644 (file)
 #define WM8962_SPKOUTL_ENA_MASK                 0x0040  /* SPKOUTL_ENA */
 #define WM8962_SPKOUTL_ENA_SHIFT                     6  /* SPKOUTL_ENA */
 #define WM8962_SPKOUTL_ENA_WIDTH                     1  /* SPKOUTL_ENA */
+#define WM8962_DAC_MUTE_ALT                     0x0010  /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_MASK                0x0010  /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_SHIFT                    4  /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_WIDTH                    1  /* DAC_MUTE */
 #define WM8962_SPKOUTL_PGA_MUTE                 0x0002  /* SPKOUTL_PGA_MUTE */
 #define WM8962_SPKOUTL_PGA_MUTE_MASK            0x0002  /* SPKOUTL_PGA_MUTE */
 #define WM8962_SPKOUTL_PGA_MUTE_SHIFT                1  /* SPKOUTL_PGA_MUTE */
index c8e5db1414d7e75f4077728765a210877f4b3cd9..496ce2eb2f1f31f4c4f0b6776ffd4d9a60243992 100644 (file)
@@ -258,10 +258,16 @@ static int fsl_esai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
                return -EINVAL;
        }
 
-       if (ratio == 1) {
+       /* Only EXTAL source can be output directly without using PSR and PM */
+       if (ratio == 1 && clksrc == esai_priv->extalclk) {
                /* Bypass all the dividers if not being needed */
                ecr |= tx ? ESAI_ECR_ETO : ESAI_ECR_ERO;
                goto out;
+       } else if (ratio < 2) {
+               /* The ratio should be no less than 2 if using other sources */
+               dev_err(dai->dev, "failed to derive required HCK%c rate\n",
+                               tx ? 'T' : 'R');
+               return -EINVAL;
        }
 
        ret = fsl_esai_divisor_cal(dai, tx, ratio, false, 0);
@@ -307,7 +313,8 @@ static int fsl_esai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
                return -EINVAL;
        }
 
-       if (esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) {
+       /* The ratio should be covered by FP alone if bypassing PM and PSR */
+       if (!esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) {
                dev_err(dai->dev, "the ratio is out of range (1 ~ 16)\n");
                return -EINVAL;
        }
@@ -454,12 +461,6 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
        }
 
        if (!dai->active) {
-               /* Reset Port C */
-               regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC,
-                                  ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO));
-               regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC,
-                                  ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO));
-
                /* Set synchronous mode */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_SAICR,
                                   ESAI_SAICR_SYNC, esai_priv->synchronous ?
@@ -519,6 +520,11 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
 
        regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), mask, val);
 
+       /* Remove ESAI personal reset by configuring ESAI_PCRC and ESAI_PRRC */
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC,
+                          ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO));
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC,
+                          ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO));
        return 0;
 }
 
index b1266790d1174a74497e81d1848436300894b5ea..605a10b2112b3808e8d5a95e282e51daf69eec62 100644 (file)
@@ -144,8 +144,8 @@ enum spdif_gainsel {
 
 /* SPDIF Clock register */
 #define STC_SYSCLK_DIV_OFFSET          11
-#define STC_SYSCLK_DIV_MASK            (0x1ff << STC_TXCLK_SRC_OFFSET)
-#define STC_SYSCLK_DIV(x)              ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
+#define STC_SYSCLK_DIV_MASK            (0x1ff << STC_SYSCLK_DIV_OFFSET)
+#define STC_SYSCLK_DIV(x)              ((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
 #define STC_TXCLK_SRC_OFFSET           8
 #define STC_TXCLK_SRC_MASK             (0x7 << STC_TXCLK_SRC_OFFSET)
 #define STC_TXCLK_SRC_SET(x)           ((x << STC_TXCLK_SRC_OFFSET) & STC_TXCLK_SRC_MASK)
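
As a quick sanity check of the corrected macros (constants taken from the
header fragment above; the program itself is only an illustration, not driver
code), the divider field now lands in bits 19..11 as intended:

#include <stdio.h>

#define STC_SYSCLK_DIV_OFFSET  11
#define STC_SYSCLK_DIV_MASK    (0x1ff << STC_SYSCLK_DIV_OFFSET)
#define STC_SYSCLK_DIV(x)      ((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)

int main(void)
{
        printf("mask                = 0x%08x\n", STC_SYSCLK_DIV_MASK);  /* 0x000ff800 */
        printf("STC_SYSCLK_DIV(2)   = 0x%08x\n", STC_SYSCLK_DIV(2));    /* 0x00000800 */
        printf("STC_SYSCLK_DIV(512) = 0x%08x\n", STC_SYSCLK_DIV(512));  /* 0x000ff800 */
        return 0;
}
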
index ac869931d7f16c9c4049aefaffb4a7d416417e49..267717aa96c14e971329cfe2f728d1f141de72c3 100644 (file)
@@ -145,7 +145,7 @@ static const struct file_operations audmux_debugfs_fops = {
        .llseek = default_llseek,
 };
 
-static void __init audmux_debugfs_init(void)
+static void audmux_debugfs_init(void)
 {
        int i;
        char buf[20];
index 5d06eecb61986da272fcda4858db56c91d285153..18aee77f8d4a55276194542c41464feb363784d3 100644 (file)
@@ -138,6 +138,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
 
        sst_pdata = &sst_acpi->sst_pdata;
        sst_pdata->id = desc->sst_id;
+       sst_pdata->dma_dev = dev;
        sst_acpi->desc = desc;
        sst_acpi->mach = mach;
 
index a50bf7fc0e3abf8dc729e7ce77f75ef40848480e..adf0aca5aca60423d2643a81679ce291f44d1208 100644 (file)
@@ -324,7 +324,7 @@ static int sst_byt_init(struct sst_dsp *sst, struct sst_pdata *pdata)
        memcpy_toio(sst->addr.lpe + SST_BYT_MAILBOX_OFFSET,
               &pdata->fw_base, sizeof(u32));
 
-       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       ret = dma_coerce_mask_and_coherent(sst->dma_dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;
 
index d0eaeee21be4c634ae88e984cf964c5067b5cae6..0d31dbbf480652e773243dca639f042a4c531bc0 100644 (file)
@@ -542,16 +542,20 @@ struct sst_byt_stream *sst_byt_stream_new(struct sst_byt *byt, int id,
        void *data)
 {
        struct sst_byt_stream *stream;
+       struct sst_dsp *sst = byt->dsp;
+       unsigned long flags;
 
        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
        if (stream == NULL)
                return NULL;
 
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_add(&stream->node, &byt->stream_list);
        stream->notify_position = notify_position;
        stream->pdata = data;
        stream->byt = byt;
        stream->str_id = id;
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return stream;
 }
@@ -630,6 +634,8 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream)
 {
        u64 header;
        int ret = 0;
+       struct sst_dsp *sst = byt->dsp;
+       unsigned long flags;
 
        if (!stream->commited)
                goto out;
@@ -644,8 +650,10 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream)
 
        stream->commited = false;
 out:
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_del(&stream->node);
        kfree(stream);
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return ret;
 }
index fe8e81aad6461faf545b179f6d1a912369872e6a..401213455497258111dcf939c7d963fbbbeb3e37 100644 (file)
@@ -136,7 +136,7 @@ struct sst_module_data {
        enum sst_data_type data_type;   /* type of module data */
 
        u32 size;               /* size in bytes */
-       u32 offset;             /* offset in FW file */
+       int32_t offset;         /* offset in FW file */
        u32 data_offset;        /* offset in ADSP memory space */
        void *data;             /* module data */
 };
@@ -228,6 +228,7 @@ struct sst_dsp {
        spinlock_t spinlock;    /* IPC locking */
        struct mutex mutex;     /* DSP FW lock */
        struct device *dev;
+       struct device *dma_dev;
        void *thread_context;
        int irq;
        u32 id;
index 0c129fd85ecf8a37b3758dfc924a9e5091c9e01e..0b715b20a2d7d46b9f06189de1ee57d9aacc28ab 100644 (file)
@@ -337,6 +337,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
        spin_lock_init(&sst->spinlock);
        mutex_init(&sst->mutex);
        sst->dev = dev;
+       sst->dma_dev = pdata->dma_dev;
        sst->thread_context = sst_dev->thread_context;
        sst->sst_dev = sst_dev;
        sst->id = pdata->id;
index 74052b59485ca1ce942444d0a3871ed8a6051672..e44423be66c459ba721249d0efe21bccdaeea4b5 100644 (file)
@@ -169,6 +169,7 @@ struct sst_pdata {
        u32 dma_base;
        u32 dma_size;
        int dma_engine;
+       struct device *dma_dev;
 
        /* DSP */
        u32 id;
index f7687107cf7f51f19a95992b79819270e3dd4734..928f228c38e754db3f41551208493045ce9ee561 100644 (file)
@@ -57,14 +57,8 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
        sst_fw->private = private;
        sst_fw->size = fw->size;
 
-       err = dma_coerce_mask_and_coherent(dsp->dev, DMA_BIT_MASK(32));
-       if (err < 0) {
-               kfree(sst_fw);
-               return NULL;
-       }
-
        /* allocate DMA buffer to store FW data */
-       sst_fw->dma_buf = dma_alloc_coherent(dsp->dev, sst_fw->size,
+       sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
                                &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
        if (!sst_fw->dma_buf) {
                dev_err(dsp->dev, "error: DMA alloc failed\n");
@@ -106,7 +100,7 @@ void sst_fw_free(struct sst_fw *sst_fw)
        list_del(&sst_fw->list);
        mutex_unlock(&dsp->mutex);
 
-       dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
+       dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
                        sst_fw->dmable_fw_paddr);
        kfree(sst_fw);
 }
@@ -202,6 +196,9 @@ static int block_alloc_contiguous(struct sst_module *module,
                size -= block->size;
        }
 
+       list_for_each_entry(block, &tmp, list)
+               list_add(&block->module_list, &module->block_list);
+
        list_splice(&tmp, &dsp->used_block_list);
        return 0;
 }
@@ -247,8 +244,7 @@ static int block_alloc(struct sst_module *module,
                /* do we span > 1 blocks */
                if (data->size > block->size) {
                        ret = block_alloc_contiguous(module, data,
-                               block->offset + block->size,
-                               data->size - block->size);
+                               block->offset, data->size);
                        if (ret == 0)
                                return ret;
                }
@@ -344,7 +340,7 @@ static int block_alloc_fixed(struct sst_module *module,
 
                        err = block_alloc_contiguous(module, data,
                                block->offset + block->size,
-                               data->size - block->size + data->offset - block->offset);
+                               data->size - block->size);
                        if (err < 0)
                                return -ENOMEM;
 
@@ -371,15 +367,10 @@ static int block_alloc_fixed(struct sst_module *module,
                if (data->offset >= block->offset && data->offset < block_end) {
 
                        err = block_alloc_contiguous(module, data,
-                               block->offset + block->size,
-                               data->size - block->size);
+                               block->offset, data->size);
                        if (err < 0)
                                return -ENOMEM;
 
-                       /* add block */
-                       block->data_type = data->data_type;
-                       list_move(&block->list, &dsp->used_block_list);
-                       list_add(&block->module_list, &module->block_list);
                        return 0;
                }
 
index f5ebf36af8898d8a94d4a46886bfcfe0964d0567..535f517629fd608fb7c4bd3eb79a05a67d98a518 100644 (file)
@@ -433,7 +433,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
        int ret = -ENODEV, i, j, region_count;
        u32 offset, size;
 
-       dev = sst->dev;
+       dev = sst->dma_dev;
 
        switch (sst->id) {
        case SST_DEV_ID_LYNX_POINT:
@@ -466,7 +466,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
                return ret;
        }
 
-       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
        if (ret)
                return ret;
 
index f46bb4ddde6fc7550573e5fdd0235afda429044a..e7996b39a48480a8577ac26d6cad28e3392259aa 100644 (file)
@@ -617,7 +617,7 @@ static void hsw_notification_work(struct work_struct *work)
        case IPC_POSITION_CHANGED:
                trace_ipc_notification("DSP stream position changed for",
                        stream->reply.stream_hw_id);
-               sst_dsp_inbox_read(hsw->dsp, pos, sizeof(pos));
+               sst_dsp_inbox_read(hsw->dsp, pos, sizeof(*pos));
 
                if (stream->notify_position)
                        stream->notify_position(stream, stream->pdata);
@@ -991,7 +991,8 @@ int sst_hsw_stream_get_volume(struct sst_hsw *hsw, struct sst_hsw_stream *stream
                return -EINVAL;
 
        sst_dsp_read(hsw->dsp, volume,
-               stream->reply.volume_register_address[channel], sizeof(volume));
+               stream->reply.volume_register_address[channel],
+               sizeof(*volume));
 
        return 0;
 }
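
The sizeof(*pos), sizeof(*volume) and sizeof(*dx) changes in this file all fix
the same pitfall: for a pointer parameter, sizeof(ptr) is the width of the
pointer, not of the object it points to, so the old calls passed the wrong
byte count. A tiny standalone illustration (nothing here is driver code):

#include <stdio.h>
#include <stdint.h>

static void show(uint32_t *pos)
{
        /* sizeof(pos) is the pointer size (8 on typical 64-bit builds),
         * sizeof(*pos) is the size of the pointed-to u32 (always 4).
         * Reading sizeof(pos) bytes into *pos would overrun it. */
        printf("sizeof(pos)  = %zu\n", sizeof(pos));
        printf("sizeof(*pos) = %zu\n", sizeof(*pos));
}

int main(void)
{
        uint32_t pos = 0;
        show(&pos);
        return 0;
}
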
@@ -1158,11 +1159,14 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
        void *data)
 {
        struct sst_hsw_stream *stream;
+       struct sst_dsp *sst = hsw->dsp;
+       unsigned long flags;
 
        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
        if (stream == NULL)
                return NULL;
 
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_add(&stream->node, &hsw->stream_list);
        stream->notify_position = notify_position;
        stream->pdata = data;
@@ -1171,6 +1175,7 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
 
        /* work to process notification messages */
        INIT_WORK(&stream->notify_work, hsw_notification_work);
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return stream;
 }
@@ -1179,6 +1184,8 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 {
        u32 header;
        int ret = 0;
+       struct sst_dsp *sst = hsw->dsp;
+       unsigned long flags;
 
        /* dont free DSP streams that are not commited */
        if (!stream->commited)
@@ -1200,8 +1207,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
        trace_hsw_stream_free_req(stream, &stream->free_req);
 
 out:
+       cancel_work_sync(&stream->notify_work);
+       spin_lock_irqsave(&sst->spinlock, flags);
        list_del(&stream->node);
        kfree(stream);
+       spin_unlock_irqrestore(&sst->spinlock, flags);
 
        return ret;
 }
@@ -1537,10 +1547,28 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 }
 
 /* Stream pointer positions */
-int sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream)
 {
-       return stream->rpos.position;
+       u32 rpos;
+
+       sst_dsp_read(hsw->dsp, &rpos,
+               stream->reply.read_position_register_address, sizeof(rpos));
+
+       return rpos;
+}
+
+/* Stream presentation (monotonic) positions */
+u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
+       struct sst_hsw_stream *stream)
+{
+       u64 ppos;
+
+       sst_dsp_read(hsw->dsp, &ppos,
+               stream->reply.presentation_position_register_address,
+               sizeof(ppos));
+
+       return ppos;
 }
 
 int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
@@ -1609,7 +1637,7 @@ int sst_hsw_dx_set_state(struct sst_hsw *hsw,
        trace_ipc_request("PM enter Dx state", state);
 
        ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_),
-               dx, sizeof(dx));
+               dx, sizeof(*dx));
        if (ret < 0) {
                dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state);
                return ret;
index d517929ccc389e2aaca3321106b6088250929482..2ac194a6d04b226eb86cb42c2361c643f7400635 100644 (file)
@@ -464,7 +464,9 @@ int sst_hsw_stream_get_write_pos(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream, u32 *position);
 int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream, u32 stage_id, u32 position);
-int sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+       struct sst_hsw_stream *stream);
+u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
        struct sst_hsw_stream *stream);
 
 /* HW port config */
index 0a32dd13a23d28ab96282b75592c20d5b133ee9f..9d5f64a583a388bd501973c32ae3d5d086d6acc6 100644 (file)
@@ -99,6 +99,7 @@ struct hsw_pcm_data {
        struct snd_compr_stream *cstream;
        unsigned int wpos;
        struct mutex mutex;
+       bool allocated;
 };
 
 /* private data for the driver */
@@ -107,12 +108,14 @@ struct hsw_priv_data {
        struct sst_hsw *hsw;
 
        /* page tables */
-       unsigned char *pcm_pg[HSW_PCM_COUNT][2];
+       struct snd_dma_buffer dmab[HSW_PCM_COUNT][2];
 
        /* DAI data */
        struct hsw_pcm_data pcm[HSW_PCM_COUNT];
 };
 
+static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data);
+
 static inline u32 hsw_mixer_to_ipc(unsigned int value)
 {
        if (value >= ARRAY_SIZE(volume_map))
@@ -273,28 +276,26 @@ static const struct snd_kcontrol_new hsw_volume_controls[] = {
 };
 
 /* Create DMA buffer page table for DSP */
-static int create_adsp_page_table(struct hsw_priv_data *pdata,
-       struct snd_soc_pcm_runtime *rtd,
-       unsigned char *dma_area, size_t size, int pcm, int stream)
+static int create_adsp_page_table(struct snd_pcm_substream *substream,
+       struct hsw_priv_data *pdata, struct snd_soc_pcm_runtime *rtd,
+       unsigned char *dma_area, size_t size, int pcm)
 {
-       int i, pages;
+       struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
+       int i, pages, stream = substream->stream;
 
-       if (size % PAGE_SIZE)
-               pages = (size / PAGE_SIZE) + 1;
-       else
-               pages = size / PAGE_SIZE;
+       pages = snd_sgbuf_aligned_pages(size);
 
        dev_dbg(rtd->dev, "generating page table for %p size 0x%zu pages %d\n",
                dma_area, size, pages);
 
        for (i = 0; i < pages; i++) {
                u32 idx = (((i << 2) + i)) >> 1;
-               u32 pfn = (virt_to_phys(dma_area + i * PAGE_SIZE)) >> PAGE_SHIFT;
+               u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
                u32 *pg_table;
 
                dev_dbg(rtd->dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn);
 
-               pg_table = (u32*)(pdata->pcm_pg[pcm][stream] + idx);
+               pg_table = (u32 *)(pdata->dmab[pcm][stream].area + idx);
 
                if (i & 1)
                        *pg_table |= (pfn << 4);
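
The loop above packs one 20-bit PFN per page-table entry, 2.5 bytes apart
(idx = ((i << 2) + i) >> 1 is just i * 5 / 2), with odd entries shifted up a
nibble so two neighbouring entries share a byte. A small host-side model of
that packing, assuming a little-endian CPU and using memcpy for the unaligned
read-modify-write that the driver does through a u32 pointer:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Pack the 20-bit pfn of page i into the table, as the driver loop does. */
static void pack_pfn(uint8_t *table, int i, uint32_t pfn)
{
        uint32_t idx = ((i << 2) + i) >> 1;             /* i * 5 / 2 */
        uint32_t word;

        memcpy(&word, table + idx, sizeof(word));       /* unaligned-safe RMW */
        if (i & 1)
                word |= pfn << 4;
        else
                word |= pfn;
        memcpy(table + idx, &word, sizeof(word));
}

int main(void)
{
        uint8_t table[16] = { 0 };

        /* Three consecutive pages with easy-to-spot (made-up) PFNs. */
        pack_pfn(table, 0, 0x12345);
        pack_pfn(table, 1, 0x6789a);
        pack_pfn(table, 2, 0xbcdef);

        for (int i = 0; i < 8; i++)
                printf("%02x ", table[i]);
        printf("\n");   /* 45 23 a1 89 67 ef cd 0b on little-endian */
        return 0;
}
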
@@ -317,12 +318,36 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
        struct sst_hsw *hsw = pdata->hsw;
        struct sst_module *module_data;
        struct sst_dsp *dsp;
+       struct snd_dma_buffer *dmab;
        enum sst_hsw_stream_type stream_type;
        enum sst_hsw_stream_path_id path_id;
        u32 rate, bits, map, pages, module_id;
        u8 channels;
        int ret;
 
+       /* check if we are being called a subsequent time */
+       if (pcm_data->allocated) {
+               ret = sst_hsw_stream_reset(hsw, pcm_data->stream);
+               if (ret < 0)
+                       dev_dbg(rtd->dev, "error: reset stream failed %d\n",
+                               ret);
+
+               ret = sst_hsw_stream_free(hsw, pcm_data->stream);
+               if (ret < 0) {
+                       dev_dbg(rtd->dev, "error: free stream failed %d\n",
+                               ret);
+                       return ret;
+               }
+               pcm_data->allocated = false;
+
+               pcm_data->stream = sst_hsw_stream_new(hsw, rtd->cpu_dai->id,
+                       hsw_notify_pointer, pcm_data);
+               if (pcm_data->stream == NULL) {
+                       dev_err(rtd->dev, "error: failed to create stream\n");
+                       return -EINVAL;
+               }
+       }
+
        /* stream direction */
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                path_id = SST_HSW_STREAM_PATH_SSP0_OUT;
@@ -416,8 +441,10 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
                return ret;
        }
 
-       ret = create_adsp_page_table(pdata, rtd, runtime->dma_area,
-               runtime->dma_bytes, rtd->cpu_dai->id, substream->stream);
+       dmab = snd_pcm_get_dma_buf(substream);
+
+       ret = create_adsp_page_table(substream, pdata, rtd, runtime->dma_area,
+               runtime->dma_bytes, rtd->cpu_dai->id);
        if (ret < 0)
                return ret;
 
@@ -430,9 +457,9 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
                pages = runtime->dma_bytes / PAGE_SIZE;
 
        ret = sst_hsw_stream_buffer(hsw, pcm_data->stream,
-               virt_to_phys(pdata->pcm_pg[rtd->cpu_dai->id][substream->stream]),
+               pdata->dmab[rtd->cpu_dai->id][substream->stream].addr,
                pages, runtime->dma_bytes, 0,
-               (u32)(virt_to_phys(runtime->dma_area) >> PAGE_SHIFT));
+               snd_sgbuf_get_addr(dmab, 0) >> PAGE_SHIFT);
        if (ret < 0) {
                dev_err(rtd->dev, "error: failed to set DMA buffer %d\n", ret);
                return ret;
@@ -474,6 +501,7 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
                dev_err(rtd->dev, "error: failed to commit stream %d\n", ret);
                return ret;
        }
+       pcm_data->allocated = true;
 
        ret = sst_hsw_stream_pause(hsw, pcm_data->stream, 1);
        if (ret < 0)
@@ -541,12 +569,14 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
        struct hsw_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(rtd);
        struct sst_hsw *hsw = pdata->hsw;
        snd_pcm_uframes_t offset;
+       uint64_t ppos;
+       u32 position = sst_hsw_get_dsp_position(hsw, pcm_data->stream);
 
-       offset = bytes_to_frames(runtime,
-               sst_hsw_get_dsp_position(hsw, pcm_data->stream));
+       offset = bytes_to_frames(runtime, position);
+       ppos = sst_hsw_get_dsp_presentation_position(hsw, pcm_data->stream);
 
-       dev_dbg(rtd->dev, "PCM: DMA pointer %zu bytes\n",
-               frames_to_bytes(runtime, (u32)offset));
+       dev_dbg(rtd->dev, "PCM: DMA pointer %u bytes, pos %llu\n",
+               position, ppos);
        return offset;
 }
 
@@ -606,6 +636,7 @@ static int hsw_pcm_close(struct snd_pcm_substream *substream)
                dev_dbg(rtd->dev, "error: free stream failed %d\n", ret);
                goto out;
        }
+       pcm_data->allocated = 0;
        pcm_data->stream = NULL;
 
 out:
@@ -621,7 +652,7 @@ static struct snd_pcm_ops hsw_pcm_ops = {
        .hw_free        = hsw_pcm_hw_free,
        .trigger        = hsw_pcm_trigger,
        .pointer        = hsw_pcm_pointer,
-       .mmap           = snd_pcm_lib_default_mmap,
+       .page           = snd_pcm_sgbuf_ops_page,
 };
 
 static void hsw_pcm_free(struct snd_pcm *pcm)
@@ -632,17 +663,16 @@ static void hsw_pcm_free(struct snd_pcm *pcm)
 static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_pcm *pcm = rtd->pcm;
+       struct snd_soc_platform *platform = rtd->platform;
+       struct sst_pdata *pdata = dev_get_platdata(platform->dev);
+       struct device *dev = pdata->dma_dev;
        int ret = 0;
 
-       ret = dma_coerce_mask_and_coherent(rtd->card->dev, DMA_BIT_MASK(32));
-       if (ret)
-               return ret;
-
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
                        pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
                ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
-                       SNDRV_DMA_TYPE_DEV,
-                       rtd->card->dev,
+                       SNDRV_DMA_TYPE_DEV_SG,
+                       dev,
                        hsw_pcm_hardware.buffer_bytes_max,
                        hsw_pcm_hardware.buffer_bytes_max);
                if (ret) {
@@ -742,11 +772,14 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
 {
        struct sst_pdata *pdata = dev_get_platdata(platform->dev);
        struct hsw_priv_data *priv_data;
-       int i;
+       struct device *dma_dev;
+       int i, ret = 0;
 
        if (!pdata)
                return -ENODEV;
 
+       dma_dev = pdata->dma_dev;
+
        priv_data = devm_kzalloc(platform->dev, sizeof(*priv_data), GFP_KERNEL);
        priv_data->hsw = pdata->dsp;
        snd_soc_platform_set_drvdata(platform, priv_data);
@@ -758,15 +791,17 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
 
                /* playback */
                if (hsw_dais[i].playback.channels_min) {
-                       priv_data->pcm_pg[i][0] = kzalloc(PAGE_SIZE, GFP_DMA);
-                       if (priv_data->pcm_pg[i][0] == NULL)
+                       ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
+                               PAGE_SIZE, &priv_data->dmab[i][0]);
+                       if (ret < 0)
                                goto err;
                }
 
                /* capture */
                if (hsw_dais[i].capture.channels_min) {
-                       priv_data->pcm_pg[i][1] = kzalloc(PAGE_SIZE, GFP_DMA);
-                       if (priv_data->pcm_pg[i][1] == NULL)
+                       ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
+                               PAGE_SIZE, &priv_data->dmab[i][1]);
+                       if (ret < 0)
                                goto err;
                }
        }
@@ -776,11 +811,11 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
 err:
        for (;i >= 0; i--) {
                if (hsw_dais[i].playback.channels_min)
-                       kfree(priv_data->pcm_pg[i][0]);
+                       snd_dma_free_pages(&priv_data->dmab[i][0]);
                if (hsw_dais[i].capture.channels_min)
-                       kfree(priv_data->pcm_pg[i][1]);
+                       snd_dma_free_pages(&priv_data->dmab[i][1]);
        }
-       return -ENOMEM;
+       return ret;
 }
 
 static int hsw_pcm_remove(struct snd_soc_platform *platform)
@@ -791,9 +826,9 @@ static int hsw_pcm_remove(struct snd_soc_platform *platform)
 
        for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) {
                if (hsw_dais[i].playback.channels_min)
-                       kfree(priv_data->pcm_pg[i][0]);
+                       snd_dma_free_pages(&priv_data->dmab[i][0]);
                if (hsw_dais[i].capture.channels_min)
-                       kfree(priv_data->pcm_pg[i][1]);
+                       snd_dma_free_pages(&priv_data->dmab[i][1]);
        }
 
        return 0;
index be873c1b0c204f4f902bfc7e2edad8c10d623e15..d32c540555c41b6c3f9b8f5817a5baa09b5ca47b 100644 (file)
@@ -1,10 +1,8 @@
 #
 # Jz4740 Platform Support
 #
-snd-soc-jz4740-objs := jz4740-pcm.o
 snd-soc-jz4740-i2s-objs := jz4740-i2s.o
 
-obj-$(CONFIG_SND_JZ4740_SOC) += snd-soc-jz4740.o
 obj-$(CONFIG_SND_JZ4740_SOC_I2S) += snd-soc-jz4740-i2s.o
 
 # Jz4740 Machine Support
index 215b668166be6c50d01963cac2ef62598cf6f7b2..89424470a1f3860eab989eabeab579184e070359 100644 (file)
@@ -197,13 +197,12 @@ static void rsnd_dma_complete(void *data)
         * rsnd_dai_pointer_update() will be called twice,
         * ant it will breaks io->byte_pos
         */
-
-       rsnd_dai_pointer_update(io, io->byte_per_period);
-
        if (dma->submit_loop)
                rsnd_dma_continue(dma);
 
        rsnd_unlock(priv, flags);
+
+       rsnd_dai_pointer_update(io, io->byte_per_period);
 }
 
 static void __rsnd_dma_start(struct rsnd_dma *dma)
index 6232b7d307aab2c553bad3c7b6a19f7f69ac997e..4d0720ed5a906d86315971a869028011c3c3a7af 100644 (file)
@@ -258,7 +258,7 @@ static int rsnd_src_init(struct rsnd_mod *mod,
 {
        struct rsnd_src *src = rsnd_mod_to_src(mod);
 
-       clk_enable(src->clk);
+       clk_prepare_enable(src->clk);
 
        return 0;
 }
@@ -269,7 +269,7 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
 {
        struct rsnd_src *src = rsnd_mod_to_src(mod);
 
-       clk_disable(src->clk);
+       clk_disable_unprepare(src->clk);
 
        return 0;
 }
index 4b7e20603dd7be8032198291ee08ed9b95de88dd..1d8387c25bd85f5b312db49e42109fdfd064815f 100644 (file)
@@ -171,7 +171,7 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
        u32 cr;
 
        if (0 == ssi->usrcnt) {
-               clk_enable(ssi->clk);
+               clk_prepare_enable(ssi->clk);
 
                if (rsnd_dai_is_clk_master(rdai)) {
                        if (rsnd_ssi_clk_from_parent(ssi))
@@ -230,7 +230,7 @@ static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
                                rsnd_ssi_master_clk_stop(ssi);
                }
 
-               clk_disable(ssi->clk);
+               clk_disable_unprepare(ssi->clk);
        }
 
        dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod));
index c8a780d0d057f43b08e74fc7c6d57f414e8aecde..6d6ceee447d559e711ba8eccc2e734953585d9ad 100644 (file)
@@ -254,7 +254,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
 static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
 {
        struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
-       kfree(data->widget);
        kfree(data->wlist);
        kfree(data);
 }
@@ -1613,8 +1612,11 @@ static void dapm_pre_sequence_async(void *data, async_cookie_t cookie)
                                "ASoC: Failed to turn on bias: %d\n", ret);
        }
 
-       /* Prepare for a STADDBY->ON or ON->STANDBY transition */
-       if (d->bias_level != d->target_bias_level) {
+       /* Prepare for a transition to ON or away from ON */
+       if ((d->target_bias_level == SND_SOC_BIAS_ON &&
+            d->bias_level != SND_SOC_BIAS_ON) ||
+           (d->target_bias_level != SND_SOC_BIAS_ON &&
+            d->bias_level == SND_SOC_BIAS_ON)) {
                ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_PREPARE);
                if (ret != 0)
                        dev_err(d->dev,
@@ -3476,8 +3478,11 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
                cpu_dai = rtd->cpu_dai;
                codec_dai = rtd->codec_dai;
 
-               /* dynamic FE links have no fixed DAI mapping */
-               if (rtd->dai_link->dynamic)
+               /*
+                * dynamic FE links have no fixed DAI mapping.
+                * CODEC<->CODEC links have no direct connection.
+                */
+               if (rtd->dai_link->dynamic || rtd->dai_link->params)
                        continue;
 
                /* there is no point in connecting BE DAI links with dummies */
index 2cedf09f6d9613c7b34bb22888806fde598052c6..a391de05803765403fc94e989717ad7dae91db5c 100644 (file)
@@ -1675,7 +1675,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
                        be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
                        break;
                case SNDRV_PCM_TRIGGER_SUSPEND:
-                       if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)
+                       if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
                                continue;
 
                        if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
index 893d5a1afc3ce6bf854a61ef0c3f4f54cc552d0d..c3b5b7dca1c3a8fd4b771512c841bb32e27b5554 100644 (file)
@@ -651,7 +651,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
        int err = -ENODEV;
 
        down_read(&chip->shutdown_rwsem);
-       if (chip->probing)
+       if (chip->probing && chip->in_pm)
                err = 0;
        else if (!chip->shutdown)
                err = usb_autopm_get_interface(chip->pm_intf);
@@ -663,7 +663,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
 void snd_usb_autosuspend(struct snd_usb_audio *chip)
 {
        down_read(&chip->shutdown_rwsem);
-       if (!chip->shutdown && !chip->probing)
+       if (!chip->shutdown && !chip->probing && !chip->in_pm)
                usb_autopm_put_interface(chip->pm_intf);
        up_read(&chip->shutdown_rwsem);
 }
@@ -695,8 +695,9 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
                        chip->autosuspended = 1;
        }
 
-       list_for_each_entry(mixer, &chip->mixer_list, list)
-               snd_usb_mixer_suspend(mixer);
+       if (chip->num_suspended_intf == 1)
+               list_for_each_entry(mixer, &chip->mixer_list, list)
+                       snd_usb_mixer_suspend(mixer);
 
        return 0;
 }
@@ -711,6 +712,8 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
                return 0;
        if (--chip->num_suspended_intf)
                return 0;
+
+       chip->in_pm = 1;
        /*
         * ALSA leaves material resumption to user space
         * we just notify and restart the mixers
@@ -726,6 +729,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
        chip->autosuspended = 0;
 
 err_out:
+       chip->in_pm = 0;
        return err;
 }
 
index 9867ab866857260df9432b4378d5ba87c1d90834..97acb906acc27041cebb340abaa9d69c36c34634 100644 (file)
@@ -92,6 +92,7 @@ struct snd_usb_endpoint {
        unsigned int curframesize;      /* current packet size in frames (for capture) */
        unsigned int syncmaxsize;       /* sync endpoint packet size */
        unsigned int fill_max:1;        /* fill max packet size always */
+       unsigned int udh01_fb_quirk:1;  /* corrupted feedback data */
        unsigned int datainterval;      /* log_2 of data packet interval */
        unsigned int syncinterval;      /* P for adaptive mode, 0 otherwise */
        unsigned char silence_value;
index e70a87e0d9fe6402765afa0741b9f974b1259bec..289f582c91303cd6bd26d194124b36702f8e4915 100644 (file)
@@ -471,6 +471,10 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
                        ep->syncinterval = 3;
 
                ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
+
+               if (chip->usb_id == USB_ID(0x0644, 0x8038) /* TEAC UD-H01 */ &&
+                   ep->syncmaxsize == 4)
+                       ep->udh01_fb_quirk = 1;
        }
 
        list_add_tail(&ep->list, &chip->ep_list);
@@ -1105,7 +1109,16 @@ void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
        if (f == 0)
                return;
 
-       if (unlikely(ep->freqshift == INT_MIN)) {
+       if (unlikely(sender->udh01_fb_quirk)) {
+               /*
+                * The TEAC UD-H01 firmware sometimes changes the feedback value
+                * by +/- 0x1.0000.
+                */
+               if (f < ep->freqn - 0x8000)
+                       f += 0x10000;
+               else if (f > ep->freqn + 0x8000)
+                       f -= 0x10000;
+       } else if (unlikely(ep->freqshift == INT_MIN)) {
                /*
                 * The first time we see a feedback value, determine its format
                 * by shifting it left or right until it matches the nominal
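
A standalone illustration of the snap-back applied by the quirk above: when
the reported feedback value is off by a whole 0x1.0000 step (16.16 fixed
point), fold it back toward the nominal value. The nominal and sample values
below are made up; only the +/- 0x10000 correction mirrors the hunk.

#include <stdio.h>
#include <stdint.h>

/* Fold a feedback value that jumped by a whole 0x1.0000 back toward the
 * nominal value freqn (both in 16.16 fixed point). */
static uint32_t fold_feedback(uint32_t f, uint32_t freqn)
{
        if (f < freqn - 0x8000)
                f += 0x10000;
        else if (f > freqn + 0x8000)
                f -= 0x10000;
        return f;
}

int main(void)
{
        const uint32_t freqn = 0x2c1999;        /* made-up nominal value */
        const uint32_t samples[] = { 0x2c1a00, 0x3c19f0, 0x1c1a10 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("0x%06x -> 0x%06x\n", samples[i],
                       fold_feedback(samples[i], freqn));
        /* prints: 0x2c1a00 -> 0x2c1a00
         *         0x3c19f0 -> 0x2c19f0
         *         0x1c1a10 -> 0x2c1a10 */
        return 0;
}
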
index 131336d40492786b2283df01577a1957b0556a66..c62a1659106d2c3da63152d59e1302e2eee47d6d 100644 (file)
@@ -1501,9 +1501,8 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
         * The error should be lower than 2ms since the estimate relies
         * on two reads of a counter updated every ms.
         */
-       if (printk_ratelimit() &&
-           abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
-               dev_dbg(&subs->dev->dev,
+       if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+               dev_dbg_ratelimited(&subs->dev->dev,
                        "delay: estimated %d, actual %d\n",
                        est_delay, subs->last_delay);
 
index 25c4c7e217de603c1f02c5c714833e6f29d92c3b..91d0380431b4f79f1e209cd973b7f44152caa494 100644 (file)
@@ -40,6 +40,7 @@ struct snd_usb_audio {
        struct rw_semaphore shutdown_rwsem;
        unsigned int shutdown:1;
        unsigned int probing:1;
+       unsigned int in_pm:1;
        unsigned int autosuspended:1;   
        unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
        
index bcae806b0c398d5450975ab08001e7a808e376c4..9a617adc6675dc06552de428c93b3c611599900b 100644 (file)
@@ -44,6 +44,9 @@ cpupower: FORCE
 cgroup firewire hv guest usb virtio vm net: FORCE
        $(call descend,$@)
 
+liblockdep: FORCE
+       $(call descend,lib/lockdep)
+
 libapikfs: FORCE
        $(call descend,lib/api)
 
@@ -91,6 +94,9 @@ cpupower_clean:
 cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
        $(call descend,$(@:_clean=),clean)
 
+liblockdep_clean:
+       $(call descend,lib/lockdep,clean)
+
 libapikfs_clean:
        $(call descend,lib/api,clean)
 
index 7c43479623537af4f0d4179f4cce195cbea3e9b1..a74fba6d774353d33fac7f04b71abdd241e0218e 100644 (file)
@@ -12,8 +12,8 @@
 char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
 
 static const char * const debugfs_known_mountpoints[] = {
-       "/sys/kernel/debug/",
-       "/debug/",
+       "/sys/kernel/debug",
+       "/debug",
        0,
 };
 
index cb09d3ff8f5856dab489130133ec9c2e2ba73db8..bba2f5253b6e281ff85abdf65a76406d88d6503b 100644 (file)
@@ -1,8 +1,7 @@
 # file format version
 FILE_VERSION = 1
 
-MAKEFLAGS += --no-print-directory
-LIBLOCKDEP_VERSION=$(shell make -sC ../../.. kernelversion)
+LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
 
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
@@ -231,7 +230,7 @@ install_lib: all_cmd
 install: install_lib
 
 clean:
-       $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
+       $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
        $(RM) tags TAGS
 
 endif # skip-makefile
index baec7d887da4fafeeacbda82697ecff6200c95da..b83184f2d484f59f3a888648fd3f548c0dc37d12 100644 (file)
@@ -4344,6 +4344,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
                                              format, len_arg, arg);
                                trace_seq_terminate(&p);
                                trace_seq_puts(s, p.buffer);
+                               trace_seq_destroy(&p);
                                arg = arg->next;
                                break;
                        default:
index 791c539374c726b7e4d3ad8f6f75aad575692787..feab942816343aba5023d1b6ab588322e7d45506 100644 (file)
@@ -876,8 +876,8 @@ struct event_filter {
 struct event_filter *pevent_filter_alloc(struct pevent *pevent);
 
 /* for backward compatibility */
-#define FILTER_NONE            PEVENT_ERRNO__FILTER_NOT_FOUND
-#define FILTER_NOEXIST         PEVENT_ERRNO__NO_FILTER
+#define FILTER_NONE            PEVENT_ERRNO__NO_FILTER
+#define FILTER_NOEXIST         PEVENT_ERRNO__FILTER_NOT_FOUND
 #define FILTER_MISS            PEVENT_ERRNO__FILTER_MISS
 #define FILTER_MATCH           PEVENT_ERRNO__FILTER_MATCH
 
index bb31813e43ddca8bd2bad4544e593d8c6df418f2..9a287bec695a3630bc43f19e3376d8cff36d2856 100644 (file)
@@ -820,7 +820,7 @@ do_div:
                r->A &= r->X;
                break;
        case BPF_ALU_AND | BPF_K:
-               r->A &= r->X;
+               r->A &= K;
                break;
        case BPF_ALU_OR | BPF_X:
                r->A |= r->X;
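The one-line fix in this hunk concerns the two operand forms of a classic BPF ALU instruction: the ...|BPF_K variant must use the immediate K, while ...|BPF_X uses the X register, and the buggy AND-with-immediate case applied r->X for both. A minimal, self-contained sketch of the distinction (hypothetical mini-interpreter, not the patched code):

    #include <stdint.h>
    #include <stdio.h>

    enum { OP_AND_K, OP_AND_X };            /* immediate vs. register operand */

    struct insn { int op; uint32_t k; };

    static uint32_t run_and(const struct insn *i, uint32_t A, uint32_t X)
    {
            switch (i->op) {
            case OP_AND_K: A &= i->k; break;   /* A &= K */
            case OP_AND_X: A &= X;    break;   /* A &= X */
            }
            return A;
    }

    int main(void)
    {
            struct insn and_ff = { OP_AND_K, 0xff };

            /* With the bug, the result would depend on X instead of K. */
            printf("0x%x\n", (unsigned)run_and(&and_ff, 0x1234, 0x0)); /* 0x34 */
            return 0;
    }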
index cfe0cdcda3de990793b70d128e2267f53baac5ba..c5baf9c591b7bb5a2c280e173f4e0a2b561285fa 100644 (file)
@@ -43,8 +43,7 @@ static void get_exec_path(char *tpath, size_t size)
        free(path);
 }
 
-static void get_asm_insns(uint8_t *image, size_t len, unsigned long base,
-                         int opcodes)
+static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
 {
        int count, i, pc = 0;
        char tpath[256];
@@ -107,13 +106,13 @@ static void put_klog_buff(char *buff)
 }
 
 static int get_last_jit_image(char *haystack, size_t hlen,
-                             uint8_t *image, size_t ilen,
-                             unsigned long *base)
+                             uint8_t *image, size_t ilen)
 {
        char *ptr, *pptr, *tmp;
        off_t off = 0;
        int ret, flen, proglen, pass, ulen = 0;
        regmatch_t pmatch[1];
+       unsigned long base;
        regex_t regex;
 
        if (hlen == 0)
@@ -136,7 +135,7 @@ static int get_last_jit_image(char *haystack, size_t hlen,
 
        ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
        ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
-                    &flen, &proglen, &pass, base);
+                    &flen, &proglen, &pass, &base);
        if (ret != 4)
                return 0;
 
@@ -162,7 +161,7 @@ static int get_last_jit_image(char *haystack, size_t hlen,
        assert(ulen == proglen);
        printf("%d bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
               proglen, pass, flen);
-       printf("%lx + <x>:\n", *base);
+       printf("%lx + <x>:\n", base);
 
        regfree(&regex);
        return ulen;
@@ -172,8 +171,7 @@ int main(int argc, char **argv)
 {
        int len, klen, opcodes = 0;
        char *kbuff;
-       unsigned long base;
-       uint8_t image[4096];
+       static uint8_t image[32768];
 
        if (argc > 1) {
                if (!strncmp("-o", argv[argc - 1], 2)) {
@@ -189,9 +187,9 @@ int main(int argc, char **argv)
 
        kbuff = get_klog_buff(&klen);
 
-       len = get_last_jit_image(kbuff, klen, image, sizeof(image), &base);
-       if (len > 0 && base > 0)
-               get_asm_insns(image, len, base, opcodes);
+       len = get_last_jit_image(kbuff, klen, image, sizeof(image));
+       if (len > 0)
+               get_asm_insns(image, len, opcodes);
 
        put_klog_buff(kbuff);
 
index e96923310d5780e2fe62e45736b2511f43aa907d..895edd32930ce7283cbda2df8914ba44462fc7ef 100644 (file)
@@ -589,7 +589,7 @@ $(GTK_OBJS): $(OUTPUT)%.o: %.c $(LIB_H)
        $(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $<
 
 $(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS)
-       $(QUIET_LINK)$(CC) -o $@ -shared $(ALL_LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
+       $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
 
 $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
index b602ad93ce630ae3f611ee74f0bb6277c367a09b..83bc2385e6d3c2820958b279f77c4d08342b1e06 100644 (file)
@@ -23,9 +23,10 @@ static int sample_ustack(struct perf_sample *sample,
 
        sp = (unsigned long) regs[PERF_REG_X86_SP];
 
-       map = map_groups__find(&thread->mg, MAP__FUNCTION, (u64) sp);
+       map = map_groups__find(&thread->mg, MAP__VARIABLE, (u64) sp);
        if (!map) {
                pr_debug("failed to get stack map\n");
+               free(buf);
                return -1;
        }
 
index 99167bf644eaa8b376060b6b6d42977e9cd09425..60875d5c556c217d3433487d65cf66382a78d25b 100644 (file)
@@ -1,4 +1,3 @@
-
 #include <linux/linkage.h>
 
 #define AX      0
@@ -90,3 +89,10 @@ ENTRY(perf_regs_load)
        ret
 ENDPROC(perf_regs_load)
 #endif
+
+/*
+ * We need to provide a .note.GNU-stack section to state that we do not
+ * want an executable stack. Otherwise the final link will assume that
+ * the ELF stack should not be restricted at all and mark it RWX.
+ */
+.section .note.GNU-stack,"",@progbits
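Compilers emit this note automatically for C translation units; only hand-written assembly has to spell it out, as the hunk above does. Purely as an illustration (not part of the patch), the same note can also be produced from C with a file-scope asm statement:

    /* Illustrative: emit the "no executable stack needed" marker from C.
     * gcc/clang normally add it on their own for C files. */
    __asm__(".section .note.GNU-stack,\"\",@progbits");

    int main(void)
    {
            return 0;
    }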
index 21c164b8f9db2a70a9467e1341fbb6a49a3536a1..0f1e5a2f6ad71651ad1be15fb98a6174ed53646a 100644 (file)
@@ -404,6 +404,7 @@ static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
        }
 
        event->key = *key;
+       init_stats(&event->total.stats);
        return event;
 }
 
index eb524f91bffe5d9098d582e07050734a1c3e5343..8ce62ef7f6c387e3e23a18c51b9ec1cf82e2a189 100644 (file)
@@ -374,7 +374,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 
        session = perf_session__new(file, false, NULL);
        if (session == NULL) {
-               pr_err("Not enough memory for reading perf file header\n");
+               pr_err("Perf session creation failed.\n");
                return -1;
        }
 
index ee21fa95ebcf60c2067b32f3e9b4575974f930ad..802cf544202b7b06720afc9abb1c9580cf362892 100644 (file)
@@ -34,6 +34,14 @@ ifeq ($(ARCH),arm)
   LIBUNWIND_LIBS = -lunwind -lunwind-arm
 endif
 
+# So far there's only x86 libdw unwind support merged in perf.
+# Disable it on all other architectures in case libdw unwind
+# support is detected on the system. Add supported architectures
+# to the check.
+ifneq ($(ARCH),x86)
+  NO_LIBDW_DWARF_UNWIND := 1
+endif
+
 ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
 else
@@ -109,6 +117,10 @@ CFLAGS += -Wall
 CFLAGS += -Wextra
 CFLAGS += -std=gnu99
 
+# Enforce a non-executable stack, as we may regress (again) in the future by
+# adding assembler files missing the .GNU-stack linker note.
+LDFLAGS += -Wl,-z,noexecstack
+
 EXTLIBS = -lelf -lpthread -lrt -lm -ldl
 
 ifneq ($(OUTPUT),)
@@ -186,7 +198,10 @@ VF_FEATURE_TESTS =                 \
        stackprotector-all              \
        timerfd                         \
        libunwind-debug-frame           \
-       bionic
+       bionic                          \
+       liberty                         \
+       liberty-z                       \
+       cplus-demangle
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
@@ -504,7 +519,21 @@ else
 endif
 
 ifeq ($(feature-libbfd), 1)
-  EXTLIBS += -lbfd -lz -liberty
+  EXTLIBS += -lbfd
+
+  # call all detections now so we get correct
+  # status in VF output
+  $(call feature_check,liberty)
+  $(call feature_check,liberty-z)
+  $(call feature_check,cplus-demangle)
+
+  ifeq ($(feature-liberty), 1)
+    EXTLIBS += -liberty
+  else
+    ifeq ($(feature-liberty-z), 1)
+      EXTLIBS += -liberty -lz
+    endif
+  endif
 endif
 
 ifdef NO_DEMANGLE
@@ -515,15 +544,10 @@ else
     CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
   else
     ifneq ($(feature-libbfd), 1)
-      $(call feature_check,liberty)
-      ifeq ($(feature-liberty), 1)
-        EXTLIBS += -lbfd -liberty
-      else
-        $(call feature_check,liberty-z)
-        ifeq ($(feature-liberty-z), 1)
-          EXTLIBS += -lbfd -liberty -lz
-        else
-          $(call feature_check,cplus-demangle)
+      ifneq ($(feature-liberty), 1)
+        ifneq ($(feature-liberty-z), 1)
+          # we have neither HAVE_CPLUS_DEMANGLE_SUPPORT
+          # nor any of the 'bfd iberty z' trinity
           ifeq ($(feature-cplus-demangle), 1)
             EXTLIBS += -liberty
             CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
index 5daeae1cb4c01b3a87f4c54ee4018199c12aeeb6..2f92d6e7ee007bea58636fe8509757c4f626d77d 100644 (file)
@@ -46,6 +46,7 @@ make_install_man    := install-man
 make_install_html   := install-html
 make_install_info   := install-info
 make_install_pdf    := install-pdf
+make_static         := LDFLAGS=-static
 
 # all the NO_* variable combined
 make_minimal        := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
@@ -87,6 +88,7 @@ run += make_install_bin
 # run += make_install_info
 # run += make_install_pdf
 run += make_minimal
+run += make_static
 
 ifneq ($(call has,ctags),)
 run += make_tags
index 1fbcd8bdc11b8b387c4f9f2f1a57ae6807e9e92c..55de44ecebefb5aa74365eb0b04294cfb186585a 100644 (file)
@@ -86,10 +86,17 @@ static int open_file_read(struct perf_data_file *file)
 
 static int open_file_write(struct perf_data_file *file)
 {
+       int fd;
+
        if (check_backup(file))
                return -1;
 
-       return open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR);
+       fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR);
+
+       if (fd < 0)
+               pr_err("failed to open %s : %s\n", file->path, strerror(errno));
+
+       return fd;
 }
 
 static int open_file(struct perf_data_file *file)
index a53cd0b8c151cdb898d3711c36e5081846813a15..27c2a5efe4504945bf9c8492b62b8256abc0be33 100644 (file)
@@ -717,7 +717,7 @@ static char *get_kernel_version(const char *root_dir)
 }
 
 static int map_groups__set_modules_path_dir(struct map_groups *mg,
-                               const char *dir_name)
+                               const char *dir_name, int depth)
 {
        struct dirent *dent;
        DIR *dir = opendir(dir_name);
@@ -742,7 +742,15 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
                            !strcmp(dent->d_name, ".."))
                                continue;
 
-                       ret = map_groups__set_modules_path_dir(mg, path);
+                       /* Do not follow top-level source and build symlinks */
+                       if (depth == 0) {
+                               if (!strcmp(dent->d_name, "source") ||
+                                   !strcmp(dent->d_name, "build"))
+                                       continue;
+                       }
+
+                       ret = map_groups__set_modules_path_dir(mg, path,
+                                                              depth + 1);
                        if (ret < 0)
                                goto out;
                } else {
@@ -786,11 +794,11 @@ static int machine__set_modules_path(struct machine *machine)
        if (!version)
                return -1;
 
-       snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+       snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
                 machine->root_dir, version);
        free(version);
 
-       return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
+       return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
 }
 
 static int machine__create_module(void *arg, const char *name, u64 start)
index 3b7dbf51d4a93bd425fc41363d98a47acd7288ff..6864661a79dd03a6543d54320f470543304bdb1b 100644 (file)
@@ -6,6 +6,7 @@
 #include <inttypes.h>
 
 #include "symbol.h"
+#include "vdso.h"
 #include <symbol/kallsyms.h>
 #include "debug.h"
 
@@ -618,6 +619,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
                GElf_Shdr shdr;
                ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
                                ehdr.e_type == ET_REL ||
+                               is_vdso_map(dso->short_name) ||
                                elf_section_by_name(elf, &ehdr, &shdr,
                                                     ".gnu.prelink_undo",
                                                     NULL) != NULL);
index d9186a2fdf0696f1a319ba6cac0377a66c1fe112..c2c0f20067a5028ebf304ca4758c830b80ffbe35 100644 (file)
@@ -89,15 +89,6 @@ else
        STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
 endif
 
-# if DEBUG is enabled, then we do not strip or optimize
-ifeq ($(strip $(DEBUG)),true)
-       CFLAGS += -O1 -g -DDEBUG
-       STRIPCMD = /bin/true -Since_we_are_debugging
-else
-       CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
-       STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
-endif
-
 # --- ACPIDUMP BEGIN ---
 
 vpath %.c \
@@ -128,7 +119,7 @@ clean:
        -rm -f $(OUTPUT)acpidump
 
 install-tools:
-       $(INSTALL) -d $(DESTDIR)${bindir}
+       $(INSTALL) -d $(DESTDIR)${sbindir}
        $(INSTALL_PROGRAM) $(OUTPUT)acpidump $(DESTDIR)${sbindir}
 
 install-man:
index 750512ba2c8846d2d1275abb796b425fdd4a987d..c7493b8f9b0ee5b951d73d64e7502e809be2d5ed 100644 (file)
@@ -14,6 +14,12 @@ all: $(NET_PROGS)
 run_tests: all
        @/bin/sh ./run_netsocktests || echo "sockettests: [FAIL]"
        @/bin/sh ./run_afpackettests || echo "afpackettests: [FAIL]"
-
+       @if /sbin/modprobe test_bpf ; then \
+               /sbin/rmmod test_bpf; \
+               echo "test_bpf: ok"; \
+       else \
+               echo "test_bpf: [FAIL]"; \
+               exit 1; \
+       fi
 clean:
        $(RM) $(NET_PROGS)
index 47b29834a6b61def09f6340013cc9b2927c03cd9..56ff9bebb577df935200aacfc1e8251ae0800bcc 100644 (file)
@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
        u32 val;
        u32 *reg;
 
-       offset >>= 1;
        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-                                 vcpu->vcpu_id, offset);
+                                 vcpu->vcpu_id, offset >> 1);
 
-       if (offset & 2)
+       if (offset & 4)
                val = *reg >> 16;
        else
                val = *reg & 0xffff;
@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
        vgic_reg_access(mmio, &val, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
-               if (offset < 4) {
+               if (offset < 8) {
                        *reg = ~0U; /* Force PPIs/SGIs to 1 */
                        return false;
                }
 
                val = vgic_cfg_compress(val);
-               if (offset & 2) {
+               if (offset & 4) {
                        *reg &= 0xffff;
                        *reg |= val << 16;
                } else {
@@ -916,6 +915,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
        case 0:
                if (!target_cpus)
                        return;
+               break;
 
        case 1:
                target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
@@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
        if (addr + size < addr)
                return -EINVAL;
 
+       *ioaddr = addr;
        ret = vgic_ioaddr_overlap(kvm);
        if (ret)
-               return ret;
-       *ioaddr = addr;
+               *ioaddr = VGIC_ADDR_UNDEF;
+
        return ret;
 }
 
index 8db43701016f30cebeb37ab7e4d581166606b86f..bf06577fea51c22ab944edb9560e56f01aae2f94 100644 (file)
@@ -395,7 +395,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
        if (dev->entries_nr == 0)
                return r;
 
-       r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
+       r = pci_enable_msix_exact(dev->dev,
+                                 dev->host_msix_entries, dev->entries_nr);
        if (r)
                return r;
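A short note on the API difference relied on here (context, not from the patch): the old pci_enable_msix() could also return a positive value meaning "only this many vectors could be allocated", a case the simple 'if (r)' check did not distinguish from failure; pci_enable_msix_exact() returns 0 only when exactly entries_nr vectors are set up, and a negative errno otherwise, so the plain error check is now sufficient.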
 
index 10df100c4514e856d1ca5509f87f118e37726f57..06e6401d6ef45326edcbce4c8ff96e13286d2940 100644 (file)
@@ -101,7 +101,7 @@ static void async_pf_execute(struct work_struct *work)
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
 
-       mmdrop(mm);
+       mmput(mm);
        kvm_put_kvm(vcpu->kvm);
 }
 
@@ -118,7 +118,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                flush_work(&work->work);
 #else
                if (cancel_work_sync(&work->work)) {
-                       mmdrop(work->mm);
+                       mmput(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
                }
@@ -183,7 +183,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
-       atomic_inc(&work->mm->mm_count);
+       atomic_inc(&work->mm->mm_users);
        kvm_get_kvm(work->vcpu->kvm);
 
        /* this can't really happen otherwise gfn_to_pfn_async
@@ -201,7 +201,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
        return 1;
 retry_sync:
        kvm_put_kvm(work->vcpu->kvm);
-       mmdrop(work->mm);
+       mmput(work->mm);
        kmem_cache_free(async_pf_cache, work);
        return 0;
 }
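For context on the conversion above (not part of the patch text): mm->mm_users is the reference count that keeps the whole address space (VMAs, page tables) alive and is dropped with mmput(), while mm->mm_count only pins struct mm_struct itself and is dropped with mmdrop(). The async page-fault worker has to fault user pages into that address space on the guest's behalf, which needs live VMAs and page tables, hence the switch to taking an mm_users reference paired with mmput().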