Merge tag 'drm-fixes-for-4.8-rc7' of git://people.freedesktop.org/~airlied/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 19 Sep 2016 19:27:31 +0000 (12:27 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 19 Sep 2016 19:27:31 +0000 (12:27 -0700)
Pull exynos and one stable ABI fix from Dave Airlie:
 "One important drm 32/64 ABI fix came in so I'll dequeue what I have,
  the rest is just exynos runtime pm fixes, but the net removal of code
  seems like a win to me.

  I'm going to be sporadic this week due to school holidays, so if
  anything urgent turns up, Daniel will take care of it"

* tag 'drm-fixes-for-4.8-rc7' of git://people.freedesktop.org/~airlied/linux:
  drm: Only use compat ioctl for addfb2 on X86/IA64
  Subject: [PATCH, RESEND] drm: exynos: avoid unused function warning
  drm/exynos: g2d: fix system and runtime pm integration
  drm/exynos: rotator: fix system and runtime pm integration
  drm/exynos: gsc: fix system and runtime pm integration
  drm/exynos: fimc: fix system and runtime pm integration
  exynos-drm: Fix unsupported GEM memory type error message to be clear

729 files changed:
.mailmap
Documentation/ABI/stable/sysfs-devices
Documentation/PCI/pci.txt
Documentation/arm/CCN.txt
Documentation/cpu-freq/cpufreq-stats.txt
Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
Documentation/devicetree/bindings/mmc/sdhci-st.txt
Documentation/devicetree/bindings/serial/8250.txt
Documentation/filesystems/overlayfs.txt
Documentation/i2c/slave-interface
Documentation/networking/dsa/dsa.txt
Documentation/powerpc/transactional_memory.txt
Documentation/rapidio/mport_cdev.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/uaccess.h
arch/arc/include/asm/uaccess.h
arch/arm/boot/dts/am335x-baltos.dtsi
arch/arm/boot/dts/am335x-igep0033.dtsi
arch/arm/boot/dts/am335x-phycore-som.dtsi
arch/arm/boot/dts/armada-388-clearfog.dts
arch/arm/boot/dts/bcm2835-rpi.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/exynos5410-odroidxu.dts
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sx-sabreauto.dts
arch/arm/boot/dts/imx7d-sdb.dts
arch/arm/boot/dts/kirkwood-ib62x0.dts
arch/arm/boot/dts/kirkwood-openrd.dtsi
arch/arm/boot/dts/logicpd-som-lv.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/omap3-overo-base.dtsi
arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
arch/arm/boot/dts/rk3066a.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/rk3xxx.dtsi
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/boot/dts/stih410.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/tegra114-dalmore.dts
arch/arm/boot/dts/tegra114-roth.dts
arch/arm/boot/dts/tegra114-tn7.dts
arch/arm/boot/dts/tegra124-jetson-tk1.dts
arch/arm/common/locomo.c
arch/arm/common/sa1111.c
arch/arm/configs/keystone_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/include/asm/pgtable-2level-hwdef.h
arch/arm/include/asm/pgtable-3level-hwdef.h
arch/arm/kernel/hyp-stub.S
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm/mach-exynos/suspend.c
arch/arm/mach-imx/mach-imx6ul.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-omap2/cm33xx.c
arch/arm/mach-omap2/cminst44xx.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-pxa/idp.c
arch/arm/mach-pxa/lubbock.c
arch/arm/mach-pxa/xcep.c
arch/arm/mach-realview/core.c
arch/arm/mach-sa1100/clock.c
arch/arm/mach-sa1100/generic.c
arch/arm/mach-sa1100/generic.h
arch/arm/mach-sa1100/pleb.c
arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
arch/arm/xen/enlighten.c
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi [new symlink]
arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
arch/arm64/boot/dts/broadcom/bcm2837.dtsi
arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi [new symlink]
arch/arm64/boot/dts/broadcom/bcm283x.dtsi [new symlink]
arch/arm64/boot/dts/broadcom/ns2.dtsi
arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
arch/arm64/boot/dts/exynos/exynos7.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
arch/arm64/boot/dts/marvell/armada-ap806.dtsi
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
arch/arm64/boot/dts/xilinx/zynqmp.dtsi
arch/arm64/include/asm/percpu.h
arch/arm64/include/asm/spinlock.h
arch/arm64/mm/proc.S
arch/avr32/include/asm/uaccess.h
arch/avr32/kernel/avr32_ksyms.c
arch/avr32/lib/copy_user.S
arch/blackfin/include/asm/uaccess.h
arch/blackfin/mach-bf561/boards/cm_bf561.c
arch/blackfin/mach-bf561/boards/ezkit.c
arch/cris/include/asm/uaccess.h
arch/frv/include/asm/uaccess.h
arch/hexagon/include/asm/uaccess.h
arch/ia64/include/asm/uaccess.h
arch/m32r/include/asm/uaccess.h
arch/metag/include/asm/uaccess.h
arch/microblaze/include/asm/uaccess.h
arch/mips/include/asm/uaccess.h
arch/mn10300/include/asm/uaccess.h
arch/mn10300/lib/usercopy.c
arch/nios2/include/asm/uaccess.h
arch/openrisc/include/asm/uaccess.h
arch/parisc/Kconfig
arch/parisc/configs/c8000_defconfig
arch/parisc/configs/generic-64bit_defconfig
arch/parisc/include/asm/uaccess.h
arch/powerpc/include/asm/cpu_has_feature.h
arch/powerpc/include/asm/cputhreads.h
arch/powerpc/include/asm/hmi.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/hmi.c [deleted file]
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/traps.c
arch/powerpc/kvm/Makefile
arch/powerpc/kvm/book3s_hv_hmi.c [new file with mode: 0644]
arch/powerpc/lib/checksum_32.S
arch/powerpc/mm/fault.c
arch/powerpc/mm/slb_low.S
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
arch/powerpc/platforms/embedded6xx/holly.c
arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
arch/powerpc/platforms/powernv/opal-dump.c
arch/powerpc/platforms/powernv/opal-elog.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/pseries/pci.c
arch/powerpc/platforms/pseries/pci_dlpar.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/cpm1.c
arch/powerpc/sysdev/cpm_common.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/sysdev/xics/icp-opal.c
arch/s390/Kconfig
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/defconfig
arch/s390/include/asm/uaccess.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c
arch/score/include/asm/uaccess.h
arch/sh/include/asm/uaccess.h
arch/sh/include/asm/uaccess_64.h
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/tile/Kconfig
arch/tile/include/asm/uaccess.h
arch/um/kernel/skas/syscall.c
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/configs/tiny.config
arch/x86/events/amd/core.c
arch/x86/events/amd/uncore.c
arch/x86/events/intel/bts.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cqm.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/pt.c
arch/x86/include/asm/uaccess.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/paravirt.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/pmu_amd.c
arch/x86/kvm/x86.c
arch/x86/mm/pat.c
arch/x86/pci/fixup.c
arch/x86/um/ptrace_32.c
arch/x86/um/ptrace_64.c
crypto/cryptd.c
drivers/acpi/nfit/mce.c
drivers/acpi/scan.c
drivers/ata/libahci.c
drivers/ata/pata_ninja32.c
drivers/base/power/runtime.c
drivers/base/regmap/regcache-rbtree.c
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap.c
drivers/bus/arm-cci.c
drivers/bus/arm-ccn.c
drivers/bus/vexpress-config.c
drivers/char/hw_random/Kconfig
drivers/char/tpm/tpm2-cmd.c
drivers/char/virtio_console.c
drivers/clk/renesas/r8a7795-cpg-mssr.c
drivers/clk/rockchip/clk-rk3399.c
drivers/clk/sunxi-ng/ccu-sun8i-h3.c
drivers/clk/sunxi-ng/ccu_common.c
drivers/clk/sunxi-ng/ccu_nk.c
drivers/clk/sunxi/clk-a10-pll2.c
drivers/clk/sunxi/clk-sun8i-mbus.c
drivers/clk/tegra/clk-tegra114.c
drivers/clocksource/timer-atmel-pit.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/crypto/caam/caamalg.c
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/vmx/aes_xts.c
drivers/dax/dax.c
drivers/dma/at_xdmac.c
drivers/dma/fsl_raid.c
drivers/dma/img-mdc-dma.c
drivers/dma/pxa_dma.c
drivers/dma/sh/usb-dmac.c
drivers/firmware/arm_scpi.c
drivers/firmware/dmi-id.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/libstub/random.c
drivers/gpio/Kconfig
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-sa1100.c
drivers/gpio/gpiolib-of.c
drivers/hwmon/it87.c
drivers/i2c/busses/i2c-bcm-kona.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/muxes/i2c-demux-pinctrl.c
drivers/iio/accel/Kconfig
drivers/iio/accel/bma220_spi.c
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/accel/kxsd9.c
drivers/iio/adc/Kconfig
drivers/iio/adc/ad799x.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/rockchip_saradc.c
drivers/iio/adc/ti-ads1015.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/chemical/atlas-ph-sensor.c
drivers/iio/common/hid-sensors/hid-sensor-attributes.c
drivers/iio/dac/stx104.c
drivers/iio/humidity/Kconfig
drivers/iio/humidity/am2315.c
drivers/iio/humidity/hdc100x.c
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/iio/light/Kconfig
drivers/iio/pressure/bmp280-core.c
drivers/iio/proximity/as3935.c
drivers/infiniband/core/multicast.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/chip.h
drivers/infiniband/hw/hfi1/debugfs.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/mad.c
drivers/infiniband/hw/hfi1/pio_copy.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/i40iw/i40iw_hw.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mcg.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rdmavt/mr.c
drivers/infiniband/sw/rxe/rxe.c
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_net.h
drivers/infiniband/sw/rxe/rxe_recv.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/irqchip/irq-atmel-aic.c
drivers/irqchip/irq-atmel-aic5.c
drivers/macintosh/ams/ams-i2c.c
drivers/macintosh/windfarm_pm112.c
drivers/macintosh/windfarm_pm72.c
drivers/macintosh/windfarm_rm31.c
drivers/mailbox/Kconfig
drivers/mailbox/bcm-pdc-mailbox.c
drivers/md/bitmap.c
drivers/md/dm-bufio.c
drivers/md/dm-crypt.c
drivers/md/dm-log-writes.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/memory/omap-gpmc.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/bh1780gli.c [deleted file]
drivers/misc/cxl/vphb.c
drivers/misc/lkdtm_rodata.c
drivers/misc/lkdtm_usercopy.c
drivers/misc/mei/hw-me.c
drivers/misc/mei/pci-me.c
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-st.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/bcm_sf2.h
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/alx/reg.h
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nic_reg.h
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/intel/i40e/i40e_client.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlxsw/port.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/synopsys/dwc_eth_qos.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/phy/Kconfig
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/team/team_mode_loadbalance.c
drivers/net/tun.c
drivers/net/usb/kaweth.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/marvell/mwifiex/11n_aggr.c
drivers/nvdimm/bus.c
drivers/nvme/host/Kconfig
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/Kconfig
drivers/nvme/target/loop.c
drivers/nvme/target/rdma.c
drivers/pci/host-bridge.c
drivers/pci/quirks.c
drivers/pci/remove.c
drivers/pcmcia/ds.c
drivers/pcmcia/pxa2xx_base.c
drivers/pcmcia/pxa2xx_base.h
drivers/pcmcia/sa1111_badge4.c
drivers/pcmcia/sa1111_generic.c
drivers/pcmcia/sa1111_jornada720.c
drivers/pcmcia/sa1111_lubbock.c
drivers/pcmcia/sa1111_neponset.c
drivers/pcmcia/sa11xx_base.c
drivers/pcmcia/soc_common.c
drivers/perf/arm_pmu.c
drivers/phy/phy-brcm-sata.c
drivers/phy/phy-sun4i-usb.c
drivers/phy/phy-sun9i-usb.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinctrl-pistachio.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
drivers/platform/olpc/olpc-ec.c
drivers/platform/x86/intel_pmic_gpio.c
drivers/rapidio/devices/tsi721.c
drivers/regulator/max14577-regulator.c
drivers/regulator/max77693-regulator.c
drivers/regulator/qcom_smd-regulator.c
drivers/scsi/constants.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/ses.c
drivers/scsi/wd719x.c
drivers/spi/spi-img-spfi.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-pxa2xx-pci.c
drivers/spi/spi-qup.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi.c
drivers/staging/comedi/drivers/adv_pci1760.c
drivers/staging/comedi/drivers/comedi_test.c
drivers/staging/comedi/drivers/daqboard2000.c
drivers/staging/comedi/drivers/dt2811.c
drivers/staging/comedi/drivers/ni_mio_common.c
drivers/staging/iio/impedance-analyzer/ad5933.c
drivers/staging/lustre/lustre/llite/namei.c
drivers/staging/wilc1000/host_interface.c
drivers/staging/wilc1000/linux_wlan.c
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
drivers/thermal/rcar_thermal.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/switch.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_fintek.c
drivers/tty/serial/8250/8250_mid.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/Kconfig
drivers/usb/chipidea/udc.c
drivers/usb/core/config.c
drivers/usb/core/devio.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/debug.h
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_eem.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/fsl_qe_udc.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/xhci-ring.c
drivers/usb/musb/Kconfig
drivers/usb/musb/musb_virthub.c
drivers/usb/phy/phy-generic.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/serial/usb-serial-simple.c
drivers/virtio/virtio_ring.c
fs/aio.c
fs/binfmt_elf.c
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/tree-log.c
fs/ceph/dir.c
fs/cifs/cifsfs.c
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/crypto/policy.c
fs/devpts/inode.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/ext4/xattr.h
fs/f2fs/file.c
fs/fuse/file.c
fs/ioctl.c
fs/iomap.c
fs/kernfs/file.c
fs/nfs/blocklayout/blocklayout.c
fs/nfs/blocklayout/blocklayout.h
fs/nfs/blocklayout/extent_tree.c
fs/nfs/callback.c
fs/nfs/callback_proc.c
fs/nfs/client.c
fs/nfs/file.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/flexfilelayout/flexfilelayout.h
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/internal.h
fs/nfs/nfs42proc.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/nfs/pnfs.c
fs/nfs/super.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/sysfs/file.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_defer.c
fs/xfs/libxfs/xfs_defer.h
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_sb.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_super.c
fs/xfs/xfs_trace.h
include/asm-generic/uaccess.h
include/linux/acpi.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/cpuhotplug.h
include/linux/efi.h
include/linux/fence.h
include/linux/fs.h
include/linux/fscrypto.h
include/linux/iio/sw_trigger.h
include/linux/iomap.h
include/linux/irq.h
include/linux/mempolicy.h
include/linux/mfd/da8xx-cfgchip.h [new file with mode: 0644]
include/linux/mfd/ti_am335x_tscadc.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/netdevice.h
include/linux/netfilter/nfnetlink_acct.h
include/linux/nvme.h
include/linux/pci.h
include/linux/serial_8250.h
include/linux/smc91x.h
include/linux/thread_info.h
include/linux/uio.h
include/net/af_unix.h
include/net/cfg80211.h
include/net/ip_fib.h
include/net/netfilter/nft_meta.h
include/net/netfilter/nft_reject.h
include/net/tcp.h
include/scsi/scsi_transport_sas.h
include/uapi/linux/atm_zatm.h
include/uapi/linux/if_pppol2tp.h
include/uapi/linux/if_pppox.h
include/uapi/linux/if_tunnel.h
include/uapi/linux/ipx.h
include/uapi/linux/libc-compat.h
include/uapi/linux/openvswitch.h
kernel/audit_watch.c
kernel/configs/tiny.config
kernel/cpuset.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/exit.c
kernel/fork.c
kernel/kexec_file.c
kernel/memremap.c
kernel/power/qos.c
kernel/printk/nmi.c
kernel/sched/core.c
kernel/seccomp.c
kernel/time/tick-sched.c
lib/Kconfig.debug
lib/Makefile
lib/iov_iter.c
lib/rhashtable.c
lib/test_hash.c
lib/usercopy.c [deleted file]
mm/huge_memory.c
mm/mempolicy.c
mm/page_alloc.c
mm/usercopy.c
mm/vmscan.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_request.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nft_meta_bridge.c
net/core/dev.c
net/core/flow_dissector.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/ip_tunnel_core.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_yeah.c
net/ipv4/udp.c
net/ipv4/udplite.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/ip6_tunnel.c
net/ipv6/netfilter/nft_reject_ipv6.c
net/ipv6/ping.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/udplite.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_policy.c
net/kcm/kcmsock.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ppp.c
net/mac80211/tdls.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_tables_netdev.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nft_meta.c
net/netfilter/nft_reject.c
net/netfilter/nft_reject_inet.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_nfacct.c
net/sched/act_ife.c
net/sched/sch_generic.c
net/sctp/input.c
net/sctp/inqueue.c
net/sctp/output.c
net/sctp/sctp_diag.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/clnt.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/sunrpc/xprtsock.c
net/tipc/name_distr.c
net/tipc/udp_media.c
net/unix/af_unix.c
net/wireless/wext-core.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
scripts/checkpatch.pl
scripts/faddr2line [new file with mode: 0755]
scripts/package/builddeb
scripts/tags.sh
security/Kconfig
sound/core/rawmidi.c
sound/core/timer.c
sound/firewire/fireworks/fireworks.h
sound/firewire/fireworks/fireworks_hwdep.c
sound/firewire/fireworks/fireworks_proc.c
sound/firewire/fireworks/fireworks_transaction.c
sound/firewire/tascam/tascam-hwdep.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks.c
tools/iio/iio_generic_buffer.c
tools/lguest/lguest.c

index 2a91c14c80bf5315730eadceb7896bcb9d681aaf..de22daefd9daecbf956d9e874aa5664355416597 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -88,6 +88,7 @@ Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
@@ -158,6 +159,8 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
index 43f78b88da28beaa556b3bba509f1ac97fa44c16..df449d79b563ebe850f1eeb28b0e2e91d517c430 100644 (file)
--- a/Documentation/ABI/stable/sysfs-devices
+++ b/Documentation/ABI/stable/sysfs-devices
@@ -1,7 +1,7 @@
 # Note: This documents additional properties of any device beyond what
 # is documented in Documentation/sysfs-rules.txt
 
-What:          /sys/devices/*/of_path
+What:          /sys/devices/*/of_node
 Date:          February 2015
 Contact:       Device Tree mailing list <devicetree@vger.kernel.org>
 Description:
index 123881f62219d444303b047154b25d39363fb9f8..77f49dc5be23585fe671e8058ec9d73cec1f04ff 100644 (file)
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -124,7 +124,6 @@ initialization with a pointer to a structure describing the driver
 
 The ID table is an array of struct pci_device_id entries ending with an
 all-zero entry.  Definitions with static const are generally preferred.
-Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided.
 
 Each entry consists of:
 
index ffca443a19b4265993493141328b2bd58fdd91ae..15cdb7bc57c3672fa92acfabdbf2de72834d4207 100644 (file)
--- a/Documentation/arm/CCN.txt
+++ b/Documentation/arm/CCN.txt
@@ -18,13 +18,17 @@ and config2 fields of the perf_event_attr structure. The "events"
 directory provides configuration templates for all documented
 events, that can be used with perf tool. For example "xp_valid_flit"
 is an equivalent of "type=0x8,event=0x4". Other parameters must be
-explicitly specified. For events originating from device, "node"
-defines its index. All crosspoint events require "xp" (index),
-"port" (device port number) and "vc" (virtual channel ID) and
-"dir" (direction). Watchpoints (special "event" value 0xfe) also
-require comparator values ("cmp_l" and "cmp_h") and "mask", being
-index of the comparator mask.
+explicitly specified.
 
+For events originating from device, "node" defines its index.
+
+Crosspoint PMU events require "xp" (index), "bus" (bus number)
+and "vc" (virtual channel ID).
+
+Crosspoint watchpoint-based events (special "event" value 0xfe)
+require "xp" and "vc" as as above plus "port" (device port index),
+"dir" (transmit/receive direction), comparator values ("cmp_l"
+and "cmp_h") and "mask", being index of the comparator mask.
 Masks are defined separately from the event description
 (due to limited number of the config values) in the "cmp_mask"
 directory, with first 8 configurable by user and additional
index fc647492e940539be1f37b02335ac1bbc7829820..8d9773f23550cbc4e8d97ebecb79d55a161e6d58 100644 (file)
--- a/Documentation/cpu-freq/cpufreq-stats.txt
+++ b/Documentation/cpu-freq/cpufreq-stats.txt
@@ -103,7 +103,7 @@ Config Main Menu
        Power management options (ACPI, APM)  --->
                CPU Frequency scaling  --->
                        [*] CPU Frequency scaling
-                       <*>   CPU frequency translation statistics 
+                       [*]   CPU frequency translation statistics
                        [*]     CPU frequency translation statistics details
 
 
index bf99e2f24788001557194d56d92224fe8411a1e8..205593f56fe759706ea6fd321c08e66059ebd489 100644 (file)
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
@@ -16,6 +16,11 @@ Required properties:
 - vref-supply: The regulator supply ADC reference voltage.
 - #io-channel-cells: Should be 1, see ../iio-bindings.txt
 
+Optional properties:
+- resets: Must contain an entry for each entry in reset-names if need support
+         this option. See ../reset/reset.txt for details.
+- reset-names: Must include the name "saradc-apb".
+
 Example:
        saradc: saradc@2006c000 {
                compatible = "rockchip,saradc";
@@ -23,6 +28,8 @@ Example:
                interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                #io-channel-cells = <1>;
                vref-supply = <&vcc18>;
        };
index 88faa91125bf5c92e76afcb6f0c9cdfc01031827..3cd4c43a32601957038eea8595c1e5d42651f94d 100644 (file)
--- a/Documentation/devicetree/bindings/mmc/sdhci-st.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-st.txt
@@ -10,7 +10,7 @@ Required properties:
                        subsystem (mmcss) inside the FlashSS (available in STiH407 SoC
                        family).
 
-- clock-names:         Should be "mmc".
+- clock-names:         Should be "mmc" and "icn".  (NB: The latter is not compulsory)
                        See: Documentation/devicetree/bindings/resource-names.txt
 - clocks:              Phandle to the clock.
                        See: Documentation/devicetree/bindings/clock/clock-bindings.txt
index f5561ac7e17ed358ce7e6a00c7c0f8913f3464c6..936ab5b87324ce7cf022320a777342d2507dfd19 100644 (file)
--- a/Documentation/devicetree/bindings/serial/8250.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
@@ -42,9 +42,6 @@ Optional properties:
 - auto-flow-control: one way to enable automatic flow control support. The
   driver is allowed to detect support for the capability even without this
   property.
-- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
-  line respectively. It will use specified GPIO instead of the peripheral
-  function pin for the UART feature. If unsure, don't specify this property.
 
 Note:
 * fsl,ns16550:
@@ -66,19 +63,3 @@ Example:
                interrupts = <10>;
                reg-shift = <2>;
        };
-
-Example for OMAP UART using GPIO-based modem control signals:
-
-       uart4: serial@49042000 {
-               compatible = "ti,omap3-uart";
-               reg = <0x49042000 0x400>;
-               interrupts = <80>;
-               ti,hwmods = "uart4";
-               clock-frequency = <48000000>;
-               cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
-               rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
-               dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
-               dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
-               dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
-               rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
-       };
index d6259c7863165939074088908323500e2fcd0fe8..bcbf9710e4af1a1c9f46d9fa9526851ebf9dc944 100644 (file)
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -183,12 +183,10 @@ The copy_up operation essentially creates a new, identical file and
 moves it over to the old name.  The new file may be on a different
 filesystem, so both st_dev and st_ino of the file may change.
 
-Any open files referring to this inode will access the old data and
-metadata.  Similarly any file locks obtained before copy_up will not
-apply to the copied up file.
+Any open files referring to this inode will access the old data.
 
-On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and
-fsetxattr(2) will fail with EROFS.
+Any file locks (and leases) obtained before copy_up will not apply
+to the copied up file.
 
 If a file with multiple hard links is copied up, then this will
 "break" the link.  Changes will not be propagated to other names
index 80807adb8ded52c6ad0482b0abab86e79921a8e5..7e2a228f21bccd8ab12cec93bda73831f66827cb 100644 (file)
--- a/Documentation/i2c/slave-interface
+++ b/Documentation/i2c/slave-interface
@@ -145,6 +145,11 @@ If you want to add slave support to the bus driver:
 
 * Catch the slave interrupts and send appropriate i2c_slave_events to the backend.
 
+Note that most hardware supports being master _and_ slave on the same bus. So,
+if you extend a bus driver, please make sure that the driver supports that as
+well. In almost all cases, slave support does not need to disable the master
+functionality.
+
 Check the i2c-rcar driver as an example.
 
 
index 9d05ed7f7da51648cafeee84a2a21d9711c19dde..f20c884c048afe16409a566bb36c589b39864209 100644 (file)
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -587,26 +587,6 @@ of DSA, would be the its port-based VLAN, used by the associated bridge device.
 TODO
 ====
 
-The platform device problem
----------------------------
-DSA is currently implemented as a platform device driver which is far from ideal
-as was discussed in this thread:
-
-http://permalink.gmane.org/gmane.linux.network/329848
-
-This basically prevents the device driver model to be properly used and applied,
-and support non-MDIO, non-MMIO Ethernet connected switches.
-
-Another problem with the platform device driver approach is that it prevents the
-use of a modular switch drivers build due to a circular dependency, illustrated
-here:
-
-http://comments.gmane.org/gmane.linux.network/345803
-
-Attempts of reworking this has been done here:
-
-https://lwn.net/Articles/643149/
-
 Making SWITCHDEV and DSA converge towards an unified codebase
 -------------------------------------------------------------
 
index ba0a2a4a54ba1ffcb484786381b91f5113a62ad6..e32fdbb4c9a7a284976d624037df1ff4c655958c 100644 (file)
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -167,6 +167,8 @@ signal will be rolled back anyway.
 For signals taken in non-TM or suspended mode, we use the
 normal/non-checkpointed stack pointer.
 
+Any transaction initiated inside a sighandler and suspended on return
+from the sighandler to the kernel will get reclaimed and discarded.
 
 Failure cause codes used by kernel
 ==================================
index 6e491a6624618cd30cb67e717fd60fed50f2eaa7..a53f786ee2e9ccf7de7d48a8b5d4e5e8b09d00e5 100644 (file)
--- a/Documentation/rapidio/mport_cdev.txt
+++ b/Documentation/rapidio/mport_cdev.txt
@@ -80,6 +80,10 @@ functionality of their platform when planning to use this driver:
 
 III. Module parameters
 
+- 'dma_timeout' - DMA transfer completion timeout (in msec, default value 3000).
+        This parameter set a maximum completion wait time for SYNC mode DMA
+        transfer requests and for RIO_WAIT_FOR_ASYNC ioctl requests.
+
 - 'dbg_level' - This parameter allows to control amount of debug information
         generated by this device driver. This parameter is formed by set of
         bit masks that correspond to the specific functional blocks.
index 71aa5daeae8f43f616ce5680320889fdf0666fa7..644ff65d336d3d62c04083fb6dd30a75e5a1bbb3 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -798,6 +798,7 @@ M:  Laura Abbott <labbott@redhat.com>
 M:     Sumit Semwal <sumit.semwal@linaro.org>
 L:     devel@driverdev.osuosl.org
 S:     Supported
+F:     Documentation/devicetree/bindings/staging/ion/
 F:     drivers/staging/android/ion
 F:     drivers/staging/android/uapi/ion.h
 F:     drivers/staging/android/uapi/ion_test.h
@@ -1623,7 +1624,8 @@ N:        rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
@@ -1643,7 +1645,6 @@ F:        drivers/*/*s3c64xx*
 F:     drivers/*/*s5pv210*
 F:     drivers/memory/samsung/*
 F:     drivers/soc/samsung/*
-F:     drivers/spi/spi-s3c*
 F:     Documentation/arm/Samsung/
 F:     Documentation/devicetree/bindings/arm/samsung/
 F:     Documentation/devicetree/bindings/sram/samsung-sram.txt
@@ -1831,6 +1832,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 ARM/UNIPHIER ARCHITECTURE
 M:     Masahiro Yamada <yamada.masahiro@socionext.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git
 S:     Maintained
 F:     arch/arm/boot/dts/uniphier*
 F:     arch/arm/include/asm/hardware/cache-uniphier.h
@@ -2484,7 +2486,7 @@ F:        include/net/bluetooth/
 BONDING DRIVER
 M:     Jay Vosburgh <j.vosburgh@gmail.com>
 M:     Veaceslav Falico <vfalico@gmail.com>
-M:     Andy Gospodarek <gospo@cumulusnetworks.com>
+M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
@@ -3247,7 +3249,7 @@ F:        kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:     Johannes Weiner <hannes@cmpxchg.org>
 M:     Michal Hocko <mhocko@kernel.org>
-M:     Vladimir Davydov <vdavydov@virtuozzo.com>
+M:     Vladimir Davydov <vdavydov.dev@gmail.com>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
 S:     Maintained
@@ -3268,7 +3270,7 @@ S:        Maintained
 F:     drivers/net/wan/cosa*
 
 CPMAC ETHERNET DRIVER
-M:     Florian Fainelli <florian@openwrt.org>
+M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/ti/cpmac.c
@@ -7464,7 +7466,8 @@ F:        Documentation/devicetree/bindings/sound/max9860.txt
 F:     sound/soc/codecs/max9860.*
 
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     drivers/power/max14577_charger.c
@@ -7480,7 +7483,8 @@ F:        include/dt-bindings/*/*max77802.h
 
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:     Chanwoo Choi <cw00.choi@samsung.com>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     drivers/*/max14577*.c
@@ -9246,7 +9250,7 @@ F:        drivers/pinctrl/sh-pfc/
 
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <tomasz.figa@gmail.com>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -10179,7 +10183,7 @@ S:      Maintained
 F:     drivers/platform/x86/samsung-laptop.c
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sangbeom Kim <sbkim73@samsung.com>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -10194,7 +10198,8 @@ F:      drivers/video/fbdev/s3c-fb.c
 
 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
 M:     Sangbeom Kim <sbkim73@samsung.com>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Supported
@@ -10253,6 +10258,17 @@ S:     Supported
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 F:     drivers/clk/samsung/
 
+SAMSUNG SPI DRIVERS
+M:     Kukjin Kim <kgene@kernel.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Andi Shyti <andi.shyti@samsung.com>
+L:     linux-spi@vger.kernel.org
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:     Maintained
+F:     Documentation/devicetree/bindings/spi/spi-samsung.txt
+F:     drivers/spi/spi-s3c*
+F:     include/linux/platform_data/spi-s3c64xx.h
+
 SAMSUNG SXGBE DRIVERS
 M:     Byungho An <bh74.an@samsung.com>
 M:     Girish K S <ks.giri@samsung.com>
@@ -11232,12 +11248,8 @@ S:     Odd Fixes
 F:     drivers/staging/vt665?/
 
 STAGING - WILC1000 WIFI DRIVER
-M:     Johnny Kim <johnny.kim@atmel.com>
-M:     Austin Shin <austin.shin@atmel.com>
-M:     Chris Park <chris.park@atmel.com>
-M:     Tony Cho <tony.cho@atmel.com>
-M:     Glen Lee <glen.lee@atmel.com>
-M:     Leo Kim <leo.kim@atmel.com>
+M:     Aditya Shankar <aditya.shankar@microchip.com>
+M:     Ganesh Krishna <ganesh.krishna@microchip.com>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/staging/wilc1000/
index 67f42d57e6e7a9e07d698f830f1a4d31246f1db4..74e22c2f408bc9c208aaaaabe370e2725f615535 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
index e9c9334507ddd57f2fd787f2faa3dac71edc18a7..fd6e9712af81b5bab38f871665d2a2df66d8468d 100644 (file)
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -336,17 +336,6 @@ config HAVE_ARCH_SECCOMP_FILTER
            results in the system call being skipped immediately.
          - seccomp syscall wired up
 
-         For best performance, an arch should use seccomp_phase1 and
-         seccomp_phase2 directly.  It should call seccomp_phase1 for all
-         syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
-         need to be called from a ptrace-safe context.  It must then
-         call seccomp_phase2 if seccomp_phase1 returns anything other
-         than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.
-
-         As an additional optimization, an arch may provide seccomp_data
-         directly to seccomp_phase1; this avoids multiple calls
-         to the syscall_xyz helpers for every syscall.
-
 config SECCOMP_FILTER
        def_bool y
        depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
index c419b43c461dc3326638870919d8f00ec95e7ae6..466e42e96bfaf29d46567e45268dc4844ac81ac9 100644 (file)
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
        return __cu_len;
 }
 
-extern inline long
-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
-{
-       if (__access_ok((unsigned long)validate, len, get_fs()))
-               len = __copy_tofrom_user_nocheck(to, from, len);
-       return len;
-}
-
 #define __copy_to_user(to, from, n)                                    \
 ({                                                                     \
        __chk_user_ptr(to);                                             \
@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-
 extern inline long
 copy_to_user(void __user *to, const void *from, long n)
 {
-       return __copy_tofrom_user((__force void *)to, from, n, to);
+       if (likely(__access_ok((unsigned long)to, n, get_fs())))
+               n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
+       return n;
 }
 
 extern inline long
 copy_from_user(void *to, const void __user *from, long n)
 {
-       return __copy_tofrom_user(to, (__force void *)from, n, from);
+       if (likely(__access_ok((unsigned long)from, n, get_fs())))
+               n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
+       else
+               memset(to, 0, n);
+       return n;
 }
 
 extern void __do_clear_user(void);
index a78d5670884f32490d2341b2468bf606258b6654..41faf17cd28d2ced7461f4e6860d2456fb49f8ae 100644 (file)
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
        "2:     ;nop\n"                         \
        "       .section .fixup, \"ax\"\n"      \
        "       .align 4\n"                     \
-       "3:     mov %0, %3\n"                   \
+       "3:     # return -EFAULT\n"             \
+       "       mov %0, %3\n"                   \
+       "       # zero out dst ptr\n"           \
+       "       mov %1,  0\n"                   \
        "       j   2b\n"                       \
        "       .previous\n"                    \
        "       .section __ex_table, \"a\"\n"   \
        "2:     ;nop\n"                         \
        "       .section .fixup, \"ax\"\n"      \
        "       .align 4\n"                     \
-       "3:     mov %0, %3\n"                   \
+       "3:     # return -EFAULT\n"             \
+       "       mov %0, %3\n"                   \
+       "       # zero out dst ptr\n"           \
+       "       mov %1,  0\n"                   \
+       "       mov %R1, 0\n"                   \
        "       j   2b\n"                       \
        "       .previous\n"                    \
        "       .section __ex_table, \"a\"\n"   \
index c8609d8d2c55ff9f53f2e57c76a372b130500e7e..b689172632efbb48516201b7814fbbc68d925b19 100644 (file)
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
 
                #address-cells = <1>;
                #size-cells = <1>;
-               elm_id = <&elm>;
+               ti,elm-id = <&elm>;
        };
 };
 
index df63484ef9b37726218d5db98d671f6eaa29647e..e7d9ca1305faef3854ba7efb4da9efc20346c7ce 100644 (file)
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
 
                #address-cells = <1>;
                #size-cells = <1>;
-               elm_id = <&elm>;
+               ti,elm-id = <&elm>;
 
                /* MTD partition table */
                partition@0 {
index 86f773165d5c40f1102131a827e336c6c7ea2c3b..1263c9d4cba3b5644800dcdc2c172077a83cdfe2 100644 (file)
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
                gpmc,wr-access-ns = <30>;
                gpmc,wr-data-mux-bus-ns = <0>;
 
-               elm_id = <&elm>;
+               ti,elm-id = <&elm>;
 
                #address-cells = <1>;
                #size-cells = <1>;
index 2e0556af6e5eef508fe29f7ec9cdc91452253455..d3e6bd80500619ab87267ae334a084c8778554ad 100644 (file)
--- a/arch/arm/boot/dts/armada-388-clearfog.dts
+++ b/arch/arm/boot/dts/armada-388-clearfog.dts
 
                        port@0 {
                                reg = <0>;
-                               label = "lan1";
+                               label = "lan5";
                        };
 
                        port@1 {
                                reg = <1>;
-                               label = "lan2";
+                               label = "lan4";
                        };
 
                        port@2 {
 
                        port@3 {
                                reg = <3>;
-                               label = "lan4";
+                               label = "lan2";
                        };
 
                        port@4 {
                                reg = <4>;
-                               label = "lan5";
+                               label = "lan1";
                        };
 
                        port@5 {
index caf2707680c1b2bc126b823574efe0120f9baad2..e9b47b2bbc3390edc2541f251ae5167640a445d9 100644 (file)
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -2,6 +2,7 @@
 
 / {
        memory {
+               device_type = "memory";
                reg = <0 0x10000000>;
        };
 
index b98252232d200ab476c2e8eaf8dc07e637ffc62f..445624a1a1de2fd3d1fda6fd9811a015e26d3dd3 100644 (file)
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -2,7 +2,6 @@
 #include <dt-bindings/clock/bcm2835.h>
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
 
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
@@ -13,6 +12,8 @@
        compatible = "brcm,bcm2835";
        model = "BCM2835";
        interrupt-parent = <&intc>;
+       #address-cells = <1>;
+       #size-cells = <1>;
 
        chosen {
                bootargs = "earlyprintk console=ttyAMA0";
index d9499310a301b735c475ec6a5c3bb955165079be..f6d135245a4b9b3785fec6beda2e83564a7fa7a0 100644 (file)
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
        samsung,dw-mshc-ciu-div = <3>;
        samsung,dw-mshc-sdr-timing = <0 4>;
        samsung,dw-mshc-ddr-timing = <0 2>;
-       samsung,dw-mshc-hs400-timing = <0 2>;
-       samsung,read-strobe-delay = <90>;
        pinctrl-names = "default";
        pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_cd>;
        bus-width = <8>;
        cap-mmc-highspeed;
        mmc-hs200-1_8v;
-       mmc-hs400-1_8v;
        vmmc-supply = <&ldo20_reg>;
        vqmmc-supply = <&ldo11_reg>;
 };
index b620ac884cfd0ac53808129443e13fcc45a423ab..b13b0b2db88163eec89f9b4fb4c5b241b41577c9 100644 (file)
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
                                        clocks = <&clks IMX6QDL_CLK_SPDIF_GCLK>, <&clks IMX6QDL_CLK_OSC>,
                                                 <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_ASRC>,
                                                 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_ESAI_EXTAL>,
-                                                <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_MLB>,
+                                                <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_DUMMY>,
                                                 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_SPBA>;
                                        clock-names = "core",  "rxtx0",
                                                      "rxtx1", "rxtx2",
index 96ea936eeeb0a1dce450696b38893ed3b341ed48..240a2864d044feeed2fe649729f760220c7a8684 100644 (file)
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -64,7 +64,7 @@
        cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
        no-1-8-v;
        keep-power-in-suspend;
-       enable-sdio-wakup;
+       wakeup-source;
        status = "okay";
 };
 
index 95ee268ed51013436e22955ddf923ef0f204a852..2f33c463cbce4f1eff456ea75666b22efb3ae5cc 100644 (file)
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
                ti,y-min = /bits/ 16 <0>;
                ti,y-max = /bits/ 16 <0>;
                ti,pressure-max = /bits/ 16 <0>;
-               ti,x-plat-ohms = /bits/ 16 <400>;
+               ti,x-plate-ohms = /bits/ 16 <400>;
                wakeup-source;
        };
 };
index ef84d8699a767ca1f5fbc72c88d5e49a03576c4d..5bf62897014cd2c505258203812b757931916365 100644 (file)
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
 
        partition@e0000 {
                label = "u-boot environment";
-               reg = <0xe0000 0x100000>;
+               reg = <0xe0000 0x20000>;
        };
 
        partition@100000 {
index e4ecab1126011f220601ed7f31e931ec6336bebb..7175511a92da20af1afe844a248e06dc49abe06a 100644 (file)
--- a/arch/arm/boot/dts/kirkwood-openrd.dtsi
+++ b/arch/arm/boot/dts/kirkwood-openrd.dtsi
        };
 };
 
+&pciec {
+       status = "okay";
+};
+
 &pcie0 {
        status = "okay";
 };
index 365f39ff58bb8a512debeaa1645fc1feb980f75b..0ff1c2de95bfc1a172cd2c7a98e5baf8d93260d1 100644 (file)
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
        ranges = <0 0 0x00000000 0x1000000>;    /* CS0: 16MB for NAND */
 
        nand@0,0 {
-               linux,mtd-name = "micron,mt29f4g16abbda3w";
+               compatible = "ti,omap2-nand";
                reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+               interrupt-parent = <&gpmc>;
+               interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
+                            <1 IRQ_TYPE_NONE>; /* termcount */
+               linux,mtd-name = "micron,mt29f4g16abbda3w";
                nand-bus-width = <16>;
                ti,nand-ecc-opt = "bch8";
+               rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <44>;
                gpmc,wr-access-ns = <40>;
                gpmc,wr-data-mux-bus-ns = <0>;
                gpmc,device-width = <2>;
-
-               gpmc,page-burst-access-ns = <5>;
-               gpmc,cycle2cycle-delay-ns = <50>;
-
                #address-cells = <1>;
                #size-cells = <1>;
 
index 5e9a13c0eaf7f4ca515942438cc1c8cdc331eafd..1c2c74655416868237d3d40692217b0beaf2d557 100644 (file)
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -46,6 +46,7 @@
                linux,mtd-name = "micron,mt29f4g16abbda3w";
                nand-bus-width = <16>;
                ti,nand-ecc-opt = "bch8";
+               rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <44>;
index de256fa8da4870bb5325e21603b24a4b3a5583ea..3e946cac55f3ead97d21726beb96d57e748eaffc 100644 (file)
--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
 };
 
 &gpmc {
-       ranges = <0 0 0x00000000 0x20000000>;
+       ranges = <0 0 0x30000000 0x1000000>,    /* CS0 */
+                <4 0 0x2b000000 0x1000000>,    /* CS4 */
+                <5 0 0x2c000000 0x1000000>;    /* CS5 */
 
        nand@0,0 {
                compatible = "ti,omap2-nand";
index 7df27926ead215be3fdb1e0f8b02ce078ecb935b..4f4c6efbd51892abd031de7fb3a407ac2a0be1a4 100644 (file)
--- a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
@@ -55,8 +55,6 @@
 #include "omap-gpmc-smsc9221.dtsi"
 
 &gpmc {
-       ranges = <5 0 0x2c000000 0x1000000>;    /* CS5 */
-
        ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
index 9e24b6a1d07b8e4d02b6d0704858680015f0069b..1b304e2f1bd2fd4fd5be23dafcd0f364991fd131 100644 (file)
--- a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
@@ -27,8 +27,6 @@
 #include "omap-gpmc-smsc9221.dtsi"
 
 &gpmc {
-       ranges = <5 0 0x2c000000 0x1000000>;    /* CS5 */
-
        ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
index 334109e14613d6eb3a406f69eb56dce2f9f445c4..82e98ee3023ada85319d96c22da2ac985216b04f 100644 (file)
--- a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
@@ -15,9 +15,6 @@
 #include "omap-gpmc-smsc9221.dtsi"
 
 &gpmc {
-       ranges = <4 0 0x2b000000 0x1000000>,    /* CS4 */
-                <5 0 0x2c000000 0x1000000>;    /* CS5 */
-
        smsc1: ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
index c0ba86c3a2abf1f1463d6644d12f28cb6870d7cb..0d0dae3a16949f58c3a62c2826365118a1691cee 100644 (file)
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
                clock-names = "saradc", "apb_pclk";
                interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
                #io-channel-cells = <1>;
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index cd33f017089001469df88ff6077253cb477afdb7..91c4b3c7a8d51def2cd4ef18b3e1a5ec7628fb9f 100644 (file)
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
                #io-channel-cells = <1>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index 99bbcc2c9b8978b28be5a253f6be236a8896a315..e2cd683b4e4b3d19825adf4f28da4b356179e43d 100644 (file)
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
                #io-channel-cells = <1>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index d294e82447a292bb24e0989191494693f7ee0ac3..8b063ab10c19e9d2faff0f4e9b9a62421e091f45 100644 (file)
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
                        interrupt-names = "mmcirq";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_mmc0>;
-                       clock-names = "mmc";
-                       clocks = <&clk_s_c0_flexgen CLK_MMC_0>;
+                       clock-names = "mmc", "icn";
+                       clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
                        bus-width = <8>;
                        non-removable;
                };
                        interrupt-names = "mmcirq";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_sd1>;
-                       clock-names = "mmc";
-                       clocks = <&clk_s_c0_flexgen CLK_MMC_1>;
+                       clock-names = "mmc", "icn";
+                       clocks = <&clk_s_c0_flexgen CLK_MMC_1>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
                        resets = <&softreset STIH407_MMC1_SOFTRESET>;
                        bus-width = <4>;
                };
index 18ed1ad10d32bbb251746e3011c541f9cfcbc861..40318869c733668a15e3d318ff291c3221336125 100644 (file)
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -41,7 +41,8 @@
                        compatible = "st,st-ohci-300x";
                        reg = <0x9a03c00 0x100>;
                        interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
                        reset-names = "power", "softreset";
@@ -57,7 +58,8 @@
                        interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_usb0>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
                        reset-names = "power", "softreset";
@@ -71,7 +73,8 @@
                        compatible = "st,st-ohci-300x";
                        reg = <0x9a83c00 0x100>;
                        interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
                        reset-names = "power", "softreset";
@@ -87,7 +90,8 @@
                        interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_usb1>;
-                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+                                <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
                        resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
                                 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
                        reset-names = "power", "softreset";
index e012890e0cf2f9b998697deaacff1fca74a56769..a17ba0243db3920b2ca5f50a8bbf8e40b5222759 100644 (file)
@@ -84,7 +84,7 @@
                        trips {
                                cpu_alert0: cpu_alert0 {
                                        /* milliCelsius */
-                                       temperature = <850000>;
+                                       temperature = <85000>;
                                        hysteresis = <2000>;
                                        type = "passive";
                                };
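
The trip-point change above is a unit fix: thermal trip temperatures are specified in millidegrees Celsius, so 85000 / 1000 = 85 °C is a usable passive-cooling threshold, while the previous 850000 would have meant 850 °C and the trip could never fire.
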
index 1dfc492cc004735094f4e1888a868e9d9e43fbe1..1444fbd543e724e367b0091669a7642724023ed1 100644 (file)
                palmas: tps65913@58 {
                        compatible = "ti,palmas";
                        reg = <0x58>;
-                       interrupts = <0 86 IRQ_TYPE_LEVEL_LOW>;
+                       interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
 
                        #interrupt-cells = <2>;
                        interrupt-controller;
index 70cf40996c3f433a12393ac14e989ff27688e8bf..966a7fc044af193295d5888af3e868ff8f3c70da 100644 (file)
                palmas: pmic@58 {
                        compatible = "ti,palmas";
                        reg = <0x58>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 
                        #interrupt-cells = <2>;
                        interrupt-controller;
index 17dd14545862cef60bcb72105be8466e5d7ff364..a161fa1dfb6136dcd598ba8064630049a2c832b3 100644 (file)
@@ -63,7 +63,7 @@
                palmas: pmic@58 {
                        compatible = "ti,palmas";
                        reg = <0x58>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 
                        #interrupt-cells = <2>;
                        interrupt-controller;
index 6403e0de540e842b952b3bb02b5673bbbfb3e683..e52b82449a79528bc362d96fabe7b2d688abd78e 100644 (file)
         *   Pin 41: BR_UART1_TXD
         *   Pin 44: BR_UART1_RXD
         */
-       serial@0,70006000 {
+       serial@70006000 {
                compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
                status = "okay";
        };
         *   Pin 71: UART2_CTS_L
         *   Pin 74: UART2_RTS_L
         */
-       serial@0,70006040 {
+       serial@70006040 {
                compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
                status = "okay";
        };
index 0e97b4b871f9b045fb8b01d4e9a8cd686f775fd6..6c7b06854fced378be0aa62efe1c9929f183979b 100644 (file)
@@ -140,7 +140,7 @@ static struct locomo_dev_info locomo_devices[] = {
 
 static void locomo_handler(struct irq_desc *desc)
 {
-       struct locomo *lchip = irq_desc_get_chip_data(desc);
+       struct locomo *lchip = irq_desc_get_handler_data(desc);
        int req, i;
 
        /* Acknowledge the parent IRQ */
@@ -200,8 +200,7 @@ static void locomo_setup_irq(struct locomo *lchip)
         * Install handler for IRQ_LOCOMO_HW.
         */
        irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
-       irq_set_chip_data(lchip->irq, lchip);
-       irq_set_chained_handler(lchip->irq, locomo_handler);
+       irq_set_chained_handler_and_data(lchip->irq, locomo_handler, lchip);
 
        /* Install handlers for IRQ_LOCOMO_* */
        for ( ; irq <= lchip->irq_base + 3; irq++) {
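
The locomo hunks above fold the separate chip-data and chained-handler setup into one call and fetch the cookie back with irq_desc_get_handler_data() in the flow handler. A minimal sketch of the same pattern for a hypothetical cascaded interrupt controller (the names are made up):

    #include <linux/irq.h>
    #include <linux/irqdesc.h>

    struct my_chip {
            unsigned int irq;               /* parent (cascade) interrupt */
            unsigned int irq_base;          /* first demuxed child interrupt */
    };

    static void my_chip_handler(struct irq_desc *desc)
    {
            /* returns the pointer passed as the last argument below */
            struct my_chip *chip = irq_desc_get_handler_data(desc);

            /* a real handler would also ack the parent and read a status register */
            generic_handle_irq(chip->irq_base);
    }

    static void my_chip_setup(struct my_chip *chip)
    {
            irq_set_chained_handler_and_data(chip->irq, my_chip_handler, chip);
    }
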
index fb0a0a4dfea4da26fd635ccbad4a06de0a72ce94..2e076c492005b57b5ce978fc87b03e1c2292b41b 100644 (file)
@@ -472,8 +472,8 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
         * specifies that S0ReadyInt and S1ReadyInt should be '1'.
         */
        sa1111_writel(0, irqbase + SA1111_INTPOL0);
-       sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
-                     SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
+       sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
+                     BIT(IRQ_S1_READY_NINT & 31),
                      irqbase + SA1111_INTPOL1);
 
        /* clear all IRQs */
@@ -754,7 +754,7 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
        if (sachip->irq != NO_IRQ) {
                ret = sa1111_setup_irq(sachip, pd->irq_base);
                if (ret)
-                       goto err_unmap;
+                       goto err_clk;
        }
 
 #ifdef CONFIG_ARCH_SA1100
@@ -799,6 +799,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
 
        return 0;
 
+ err_clk:
+       clk_disable(sachip->clk);
  err_unmap:
        iounmap(sachip->base);
  err_clk_unprep:
@@ -869,9 +871,9 @@ struct sa1111_save_data {
 
 #ifdef CONFIG_PM
 
-static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
+static int sa1111_suspend_noirq(struct device *dev)
 {
-       struct sa1111 *sachip = platform_get_drvdata(dev);
+       struct sa1111 *sachip = dev_get_drvdata(dev);
        struct sa1111_save_data *save;
        unsigned long flags;
        unsigned int val;
@@ -934,9 +936,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
  *     restored by their respective drivers, and must be called
  *     via LDM after this function.
  */
-static int sa1111_resume(struct platform_device *dev)
+static int sa1111_resume_noirq(struct device *dev)
 {
-       struct sa1111 *sachip = platform_get_drvdata(dev);
+       struct sa1111 *sachip = dev_get_drvdata(dev);
        struct sa1111_save_data *save;
        unsigned long flags, id;
        void __iomem *base;
@@ -952,7 +954,7 @@ static int sa1111_resume(struct platform_device *dev)
        id = sa1111_readl(sachip->base + SA1111_SKID);
        if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
                __sa1111_remove(sachip);
-               platform_set_drvdata(dev, NULL);
+               dev_set_drvdata(dev, NULL);
                kfree(save);
                return 0;
        }
@@ -1003,8 +1005,8 @@ static int sa1111_resume(struct platform_device *dev)
 }
 
 #else
-#define sa1111_suspend NULL
-#define sa1111_resume  NULL
+#define sa1111_suspend_noirq NULL
+#define sa1111_resume_noirq  NULL
 #endif
 
 static int sa1111_probe(struct platform_device *pdev)
@@ -1017,7 +1019,7 @@ static int sa1111_probe(struct platform_device *pdev)
                return -EINVAL;
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
-               return -ENXIO;
+               return irq;
 
        return __sa1111_probe(&pdev->dev, mem, irq);
 }
@@ -1038,6 +1040,11 @@ static int sa1111_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct dev_pm_ops sa1111_pm_ops = {
+       .suspend_noirq = sa1111_suspend_noirq,
+       .resume_noirq = sa1111_resume_noirq,
+};
+
 /*
  *     Not sure if this should be on the system bus or not yet.
  *     We really want some way to register a system device at
@@ -1050,10 +1057,9 @@ static int sa1111_remove(struct platform_device *pdev)
 static struct platform_driver sa1111_device_driver = {
        .probe          = sa1111_probe,
        .remove         = sa1111_remove,
-       .suspend        = sa1111_suspend,
-       .resume         = sa1111_resume,
        .driver         = {
                .name   = "sa1111",
+               .pm     = &sa1111_pm_ops,
        },
 };
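
The sa1111 conversion above drops the legacy platform_driver .suspend/.resume hooks in favour of dev_pm_ops noirq callbacks, which take a struct device and run in the late "noirq" phase, after device interrupt handlers have been suspended. A stripped-down sketch of that shape for a hypothetical driver, not the sa1111 code itself:

    #include <linux/platform_device.h>
    #include <linux/pm.h>
    #include <linux/types.h>

    struct my_state {
            u32 saved_ctrl;
    };

    static int my_suspend_noirq(struct device *dev)
    {
            struct my_state *state = dev_get_drvdata(dev);

            /* save hardware context into *state */
            (void)state;
            return 0;
    }

    static int my_resume_noirq(struct device *dev)
    {
            struct my_state *state = dev_get_drvdata(dev);

            /* restore hardware context from *state */
            (void)state;
            return 0;
    }

    static const struct dev_pm_ops my_pm_ops = {
            .suspend_noirq  = my_suspend_noirq,
            .resume_noirq   = my_resume_noirq,
    };

    static struct platform_driver my_driver = {
            .driver = {
                    .name   = "my-device",
                    .pm     = &my_pm_ops,
            },
    };
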
 
index 71b42e66488a13746692d0706504c4d737019d92..78cd2f197e015bf83e7ed4db934f957dabe137ea 100644 (file)
@@ -161,6 +161,7 @@ CONFIG_USB_MON=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_KEYSTONE_USB_PHY=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
index 2c8665cd9dc5981e723cfed6136065c0dfa71bca..ea3566fb92e22e22358e61fb33184f1dde120cc4 100644 (file)
@@ -781,7 +781,7 @@ CONFIG_MXS_DMA=y
 CONFIG_DMA_BCM2835=y
 CONFIG_DMA_OMAP=y
 CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_VDMA=y
+CONFIG_XILINX_DMA=y
 CONFIG_DMA_SUN6I=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y
index d0131ee6f6af920d5c3bc6479c2e515e1d706539..3f82e9da7cec03604059bcdcddb8217635138065 100644 (file)
@@ -47,6 +47,7 @@
 #define PMD_SECT_WB            (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_MINICACHE     (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
 #define PMD_SECT_WBWA          (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_CACHE_MASK    (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
 
 /*
index f8f1cff62065b7548bb54ce9bc653c684ddf5eb4..4cd664abfcd327ed530b3876e0ee774c2f06b0ad 100644 (file)
@@ -62,6 +62,7 @@
 #define PMD_SECT_WT            (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
 #define PMD_SECT_WB            (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
 #define PMD_SECT_WBWA          (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
+#define PMD_SECT_CACHE_MASK    (_AT(pmdval_t, 7) << 2)
 
 /*
  * + Level 3 descriptor (PTE)
index 0b1e4a93d67edfe15899220087a4bdc53966f90c..15d073ae5da2a2123980b2fc67c3986161cd0839 100644 (file)
@@ -142,6 +142,19 @@ ARM_BE8(orr        r7, r7, #(1 << 25))     @ HSCTLR.EE
        and     r7, #0x1f               @ Preserve HPMN
        mcr     p15, 4, r7, c1, c1, 1   @ HDCR
 
+       @ Make sure NS-SVC is initialised appropriately
+       mrc     p15, 0, r7, c1, c0, 0   @ SCTLR
+       orr     r7, #(1 << 5)           @ CP15 barriers enabled
+       bic     r7, #(3 << 7)           @ Clear SED/ITD for v8 (RES0 for v7)
+       bic     r7, #(3 << 19)          @ WXN and UWXN disabled
+       mcr     p15, 0, r7, c1, c0, 0   @ SCTLR
+
+       mrc     p15, 0, r7, c0, c0, 0   @ MIDR
+       mcr     p15, 4, r7, c0, c0, 0   @ VPIDR
+
+       mrc     p15, 0, r7, c0, c0, 5   @ MPIDR
+       mcr     p15, 4, r7, c0, c0, 5   @ VMPIDR
+
 #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
        @ make CNTP_* and CNTPCT accessible from PL1
        mrc     p15, 0, r7, c0, c1, 1   @ ID_PFR1
index 75f130ef650413036625c59f34ab864c1649052e..c94b90d437724f43c60e8748f7fe0f028e3672d3 100644 (file)
@@ -158,8 +158,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        int i;
 
-       kvm_free_stage2_pgd(kvm);
-
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
index 29d0b23af2a9dec3b2199ac3851c9ae74b0731ee..e9a5c0e0c11543fa6918ed618dff6840ff03055e 100644 (file)
@@ -1714,7 +1714,8 @@ int kvm_mmu_init(void)
                 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
        if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
-           hyp_idmap_start <  kern_hyp_va(~0UL)) {
+           hyp_idmap_start <  kern_hyp_va(~0UL) &&
+           hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
                /*
                 * The idmap page is intersecting with the VA space,
                 * it is not safe to continue further.
@@ -1893,6 +1894,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
+       kvm_free_stage2_pgd(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
index 3750575c73c5f040247ca3d096552e718e2d613c..06332f6265652e16dbd5503dcce3f45425e4fefb 100644 (file)
@@ -255,6 +255,12 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
                return -ENOMEM;
        }
 
+       /*
+        * Clear the OF_POPULATED flag set in of_irq_init so that
+        * later the Exynos PMU platform device won't be skipped.
+        */
+       of_node_clear_flag(node, OF_POPULATED);
+
        return 0;
 }
 
index 5d9bfab279dd8cc3bba37c0bbfe2f2d5904f3850..6bb7d9cf1e389e55b517cdd7eff6b8ed99b9944c 100644 (file)
@@ -64,6 +64,7 @@ static void __init imx6ul_init_machine(void)
        if (parent == NULL)
                pr_warn("failed to initialize soc device\n");
 
+       of_platform_default_populate(NULL, NULL, parent);
        imx6ul_enet_init();
        imx_anatop_init();
        imx6ul_pm_init();
index 58924b3844df551a270aac552b0803fb5eedf8dc..fe708e26d021d30293d0e5a4a8b9cce3a67ae976 100644 (file)
@@ -295,7 +295,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
                val &= ~BM_CLPCR_SBYOS;
                if (cpu_is_imx6sl())
                        val |= BM_CLPCR_BYPASS_PMIC_READY;
-               if (cpu_is_imx6sl() || cpu_is_imx6sx())
+               if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
                        val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
                else
                        val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
@@ -310,7 +310,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
                val |= 0x3 << BP_CLPCR_STBY_COUNT;
                val |= BM_CLPCR_VSTBY;
                val |= BM_CLPCR_SBYOS;
-               if (cpu_is_imx6sl())
+               if (cpu_is_imx6sl() || cpu_is_imx6sx())
                        val |= BM_CLPCR_BYPASS_PMIC_READY;
                if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
                        val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
index c073fb57dd13102dfc239c67cd6860fcc281ed2c..6f2d0aec05139fc1f09b5a57a973423c7cf90882 100644 (file)
@@ -220,9 +220,6 @@ static int am33xx_cm_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout(_is_module_ready(inst, clkctrl_offs),
                          MAX_MODULE_READY_TIME, i);
 
@@ -246,9 +243,6 @@ static int am33xx_cm_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) ==
                                CLKCTRL_IDLEST_DISABLED),
                                MAX_MODULE_READY_TIME, i);
index 2c0e07ed6b995b77b2c3a01aaaba457173251335..2ab27ade136a2dbca7e225102cb9700d35674504 100644 (file)
@@ -278,9 +278,6 @@ static int omap4_cminst_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs),
                          MAX_MODULE_READY_TIME, i);
 
@@ -304,9 +301,6 @@ static int omap4_cminst_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout((_clkctrl_idlest(part, inst, clkctrl_offs) ==
                           CLKCTRL_IDLEST_DISABLED),
                          MAX_MODULE_DISABLE_TIME, i);
index 5b709383381c8ae0bdafe014253537804043d97d..1052b29697b8946a5407e919369e2aaf5efba409 100644 (file)
@@ -1053,6 +1053,10 @@ static int _omap4_wait_target_disable(struct omap_hwmod *oh)
        if (oh->flags & HWMOD_NO_IDLEST)
                return 0;
 
+       if (!oh->prcm.omap4.clkctrl_offs &&
+           !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
+               return 0;
+
        return omap_cm_wait_module_idle(oh->clkdm->prcm_partition,
                                        oh->clkdm->cm_inst,
                                        oh->prcm.omap4.clkctrl_offs, 0);
@@ -2971,6 +2975,10 @@ static int _omap4_wait_target_ready(struct omap_hwmod *oh)
        if (!_find_mpu_rt_port(oh))
                return 0;
 
+       if (!oh->prcm.omap4.clkctrl_offs &&
+           !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
+               return 0;
+
        /* XXX check module SIDLEMODE, hardreset status */
 
        return omap_cm_wait_module_ready(oh->clkdm->prcm_partition,
index 4041bad79a9abbe9bc4ca74abd29027c01bf5d57..78904017f18ce03669fdf396cbedf31d735b8f19 100644 (file)
@@ -443,8 +443,12 @@ struct omap_hwmod_omap2_prcm {
  * HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT: Some IP blocks don't have a PRCM
  *     module-level context loss register associated with them; this
  *     flag bit should be set in those cases
+ * HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET: Some IP blocks have a valid CLKCTRL
+ *     offset of zero; this flag bit should be set in those cases to
+ *     distinguish from hwmods that have no clkctrl offset.
  */
 #define HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT                (1 << 0)
+#define HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET                (1 << 1)
 
 /**
  * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
index 55c5878577f44c36da9284beae1b367e69094799..e2d84aa7f595ff3fa8672a3c6f87412a13b3e095 100644 (file)
@@ -29,6 +29,7 @@
 #define CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl))
 #define RSTCTRL(oh, rstctrl) ((oh).prcm.omap4.rstctrl_offs = (rstctrl))
 #define RSTST(oh, rstst) ((oh).prcm.omap4.rstst_offs = (rstst))
+#define PRCM_FLAGS(oh, flag) ((oh).prcm.omap4.flags = (flag))
 
 /*
  * 'l3' class
@@ -1296,6 +1297,7 @@ static void omap_hwmod_am33xx_clkctrl(void)
        CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET);
+       PRCM_FLAGS(am33xx_rtc_hwmod, HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET);
index d72ee6185d5e0b377f14adb7ac5a780a5287cbea..1cc4a6f3954e1936c30005a9b61bcda65bbccd69 100644 (file)
@@ -722,8 +722,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
  * display serial interface controller
  */
 
+static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
 static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
        .name = "dsi",
+       .sysc   = &omap3xxx_dsi_sysc,
 };
 
 static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
index c410d84b243dbe59ed35b147416b473dfac9743e..66070acaa888cc5778f5651592691c34a0adfca5 100644 (file)
@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-       .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                SMC91X_USE_DMA | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {
index 7245f3359564f27b58722d5c14e635929f627a36..d6159f8ef0c245405d5b0c00d32497562f280a05 100644 (file)
@@ -137,6 +137,18 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
        // no D+ pullup; lubbock can't connect/disconnect in software
 };
 
+static void lubbock_init_pcmcia(void)
+{
+       struct clk *clk;
+
+       /* Add an alias for the SA1111 PCMCIA clock */
+       clk = clk_get_sys("pxa2xx-pcmcia", NULL);
+       if (!IS_ERR(clk)) {
+               clkdev_create(clk, NULL, "1800");
+               clk_put(clk);
+       }
+}
+
 static struct resource sa1111_resources[] = {
        [0] = {
                .start  = 0x10000000,
@@ -467,6 +479,8 @@ static void __init lubbock_init(void)
        pxa_set_btuart_info(NULL);
        pxa_set_stuart_info(NULL);
 
+       lubbock_init_pcmcia();
+
        clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
        pxa_set_udc_info(&udc_info);
        pxa_set_fb_info(NULL, &sharp_lm8v31);
index 3f06cd90567a7136abe266cf579bc8d4899eb86b..056369ef250e8a4447b592efbd0775b0b7b3710f 100644 (file)
@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata xcep_smc91x_info = {
-       .flags  = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
+       .flags  = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                 SMC91X_NOWAIT | SMC91X_USE_DMA,
 };
 
 static struct platform_device smc91x_device = {
index baf174542e36fdf096e11deca2032bab1015ef99..a0ead0ae23d64387e0dee144f81537be00475ac9 100644 (file)
@@ -93,7 +93,8 @@ static struct smsc911x_platform_config smsc911x_config = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-       .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                SMC91X_NOWAIT,
 };
 
 static struct platform_device realview_eth_device = {
index cbf53bb9c814dc328bb209ad3ee580da7f2f6a6f..0db46895c82a4729d40b6c94c62482cd00ae1c9b 100644 (file)
@@ -125,6 +125,8 @@ static unsigned long clk_36864_get_rate(struct clk *clk)
 }
 
 static struct clkops clk_36864_ops = {
+       .enable         = clk_cpu_enable,
+       .disable        = clk_cpu_disable,
        .get_rate       = clk_36864_get_rate,
 };
 
@@ -140,9 +142,8 @@ static struct clk_lookup sa11xx_clkregs[] = {
        CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864),
 };
 
-static int __init sa11xx_clk_init(void)
+int __init sa11xx_clk_init(void)
 {
        clkdev_add_table(sa11xx_clkregs, ARRAY_SIZE(sa11xx_clkregs));
        return 0;
 }
-core_initcall(sa11xx_clk_init);
index 345e63f4eb71f53d0038adc005c8a0f052714b2b..3e09beddb6e8f4e84e0863691c7d33c91bae404a 100644 (file)
@@ -34,6 +34,7 @@
 
 #include <mach/hardware.h>
 #include <mach/irqs.h>
+#include <mach/reset.h>
 
 #include "generic.h"
 #include <clocksource/pxa.h>
@@ -95,6 +96,8 @@ static void sa1100_power_off(void)
 
 void sa11x0_restart(enum reboot_mode mode, const char *cmd)
 {
+       clear_reset_status(RESET_STATUS_ALL);
+
        if (mode == REBOOT_SOFT) {
                /* Jump into ROM at address 0 */
                soft_restart(0);
@@ -388,6 +391,7 @@ void __init sa1100_init_irq(void)
        sa11x0_init_irq_nodt(IRQ_GPIO0_SC, irq_resource.start);
 
        sa1100_init_gpio();
+       sa11xx_clk_init();
 }
 
 /*
index 0d92e119b36b13d28c2ab8036c21f23963a54ff8..68199b603ff717f7b5dfb514abd62c3b21704d1c 100644 (file)
@@ -44,3 +44,5 @@ int sa11x0_pm_init(void);
 #else
 static inline int sa11x0_pm_init(void) { return 0; }
 #endif
+
+int sa11xx_clk_init(void);
index 1525d7b5f1b74b6d06ac1276a56a45c23781e060..88149f85bc49dfa7a62c6b395bdf7a3e3f770e7b 100644 (file)
@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {
index 62437b57813eab3c1cff4fc275386b37378dc9d8..73e3adbc133096eca9cdf652d37ff110f5e35850 100644 (file)
 
 #define REGULATOR_IRQ_MASK     BIT(2)  /* IRQ2, active low */
 
-static void __iomem *irqc;
-
-static const u8 da9063_mask_regs[] = {
-       DA9063_REG_IRQ_MASK_A,
-       DA9063_REG_IRQ_MASK_B,
-       DA9063_REG_IRQ_MASK_C,
-       DA9063_REG_IRQ_MASK_D,
-};
-
-/* DA9210 System Control and Event Registers */
+/* start of DA9210 System Control and Event Registers */
 #define DA9210_REG_MASK_A              0x54
-#define DA9210_REG_MASK_B              0x55
-
-static const u8 da9210_mask_regs[] = {
-       DA9210_REG_MASK_A,
-       DA9210_REG_MASK_B,
-};
-
-static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
-                            unsigned int nregs)
-{
-       unsigned int i;
 
-       dev_info(&client->dev, "Masking %s interrupt sources\n", client->name);
+static void __iomem *irqc;
 
-       for (i = 0; i < nregs; i++) {
-               int error = i2c_smbus_write_byte_data(client, regs[i], ~0);
-               if (error) {
-                       dev_err(&client->dev, "i2c error %d\n", error);
-                       return;
-               }
-       }
-}
+/* first byte sets the memory pointer, following are consecutive reg values */
+static u8 da9063_irq_clr[] = { DA9063_REG_IRQ_MASK_A, 0xff, 0xff, 0xff, 0xff };
+static u8 da9210_irq_clr[] = { DA9210_REG_MASK_A, 0xff, 0xff };
+
+static struct i2c_msg da9xxx_msgs[2] = {
+       {
+               .addr = 0x58,
+               .len = ARRAY_SIZE(da9063_irq_clr),
+               .buf = da9063_irq_clr,
+       }, {
+               .addr = 0x68,
+               .len = ARRAY_SIZE(da9210_irq_clr),
+               .buf = da9210_irq_clr,
+       },
+};
 
 static int regulator_quirk_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
@@ -93,12 +80,15 @@ static int regulator_quirk_notify(struct notifier_block *nb,
        client = to_i2c_client(dev);
        dev_dbg(dev, "Detected %s\n", client->name);
 
-       if ((client->addr == 0x58 && !strcmp(client->name, "da9063")))
-               da9xxx_mask_irqs(client, da9063_mask_regs,
-                                ARRAY_SIZE(da9063_mask_regs));
-       else if (client->addr == 0x68 && !strcmp(client->name, "da9210"))
-               da9xxx_mask_irqs(client, da9210_mask_regs,
-                                ARRAY_SIZE(da9210_mask_regs));
+       if ((client->addr == 0x58 && !strcmp(client->name, "da9063")) ||
+           (client->addr == 0x68 && !strcmp(client->name, "da9210"))) {
+               int ret;
+
+               dev_info(&client->dev, "clearing da9063/da9210 interrupts\n");
+               ret = i2c_transfer(client->adapter, da9xxx_msgs, ARRAY_SIZE(da9xxx_msgs));
+               if (ret != ARRAY_SIZE(da9xxx_msgs))
+                       dev_err(&client->dev, "i2c error %d\n", ret);
+       }
 
        mon = ioread32(irqc + IRQC_MONITOR);
        if (mon & REGULATOR_IRQ_MASK)
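
The quirk rework above replaces per-register i2c_smbus_write_byte_data() calls with a single i2c_transfer() of two write messages, each buffer holding the starting register address followed by the values for the consecutive registers. A small sketch of that message layout (the slave address and register number here are illustrative only):

    #include <linux/errno.h>
    #include <linux/i2c.h>
    #include <linux/kernel.h>

    /* byte 0 selects the register; the rest land in consecutive registers */
    static u8 pmic_mask_all_buf[] = { 0x54, 0xff, 0xff };

    static int pmic_mask_all(struct i2c_adapter *adap)
    {
            struct i2c_msg msg = {
                    .addr   = 0x68,                         /* 7-bit slave address */
                    .flags  = 0,                            /* 0 == write */
                    .len    = ARRAY_SIZE(pmic_mask_all_buf),
                    .buf    = pmic_mask_all_buf,
            };
            int ret = i2c_transfer(adap, &msg, 1);

            /* i2c_transfer() returns the number of messages actually transferred */
            if (ret < 0)
                    return ret;
            return ret == 1 ? 0 : -EIO;
    }
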
index 6344913f0804a20ccbad6539febbbfc981e921a5..30fe03f95c85b9b4b3700337f402290d10e320e4 100644 (file)
@@ -137,7 +137,7 @@ void __init init_default_cache_policy(unsigned long pmd)
 
        initial_pmd_value = pmd;
 
-       pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
+       pmd &= PMD_SECT_CACHE_MASK;
 
        for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
                if (cache_policies[i].pmd == pmd) {
index a7123b4e129dfc40f9c0d0df4417d086e23ee9a0..d00d52c9de3e0228c34aeebb0ec86d4b280220f9 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/hwcap.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
+#include <asm/memory.h>
 
 #include "proc-macros.S"
 
index 3d2cef6488ea292155b2f2a2cee43046c70873ae..f193414d0f6ff78a2ac91b48160390935fa68546 100644 (file)
@@ -170,9 +170,6 @@ static int xen_starting_cpu(unsigned int cpu)
        pr_info("Xen: initializing cpu%d\n", cpu);
        vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
 
-       /* Direct vCPU id mapping for ARM guests. */
-       per_cpu(xen_vcpu_id, cpu) = cpu;
-
        info.mfn = virt_to_gfn(vcpup);
        info.offset = xen_offset_in_page(vcpup);
 
@@ -330,6 +327,7 @@ static int __init xen_guest_init(void)
 {
        struct xen_add_to_physmap xatp;
        struct shared_info *shared_info_page = NULL;
+       int cpu;
 
        if (!xen_domain())
                return 0;
@@ -380,7 +378,8 @@ static int __init xen_guest_init(void)
                return -ENOMEM;
 
        /* Direct vCPU id mapping for ARM guests. */
-       per_cpu(xen_vcpu_id, 0) = 0;
+       for_each_possible_cpu(cpu)
+               per_cpu(xen_vcpu_id, cpu) = cpu;
 
        xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
        if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
index 445aa678f9141e2398e2cd3dbfe82da0258bb5be..c2b9bcb0ef61ac42189d8fcadc423c30878f3842 100644 (file)
                /* Local timer */
                timer {
                        compatible = "arm,armv8-timer";
-                       interrupts = <1 13 0xf01>,
-                                    <1 14 0xf01>,
-                                    <1 11 0xf01>,
-                                    <1 10 0xf01>;
+                       interrupts = <1 13 0xf08>,
+                                    <1 14 0xf08>,
+                                    <1 11 0xf08>,
+                                    <1 10 0xf08>;
                };
 
                timer0: timer0@ffc03000 {
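
Several arm64 timer hunks in this pull, starting here, only touch the third interrupt cell: the architected timer PPIs are level-triggered, so edge-rising encodings are replaced with level-sensitive trigger values (with the PPI CPU mask, where one is used, kept in bits 15:8 of the same cell, e.g. 0xf08 = CPUs 0-3, level low). For reference, a sketch of the standard trigger constants these cells are built from, assuming the usual dt-bindings irq.h values:

    /* include/dt-bindings/interrupt-controller/irq.h */
    #define IRQ_TYPE_NONE           0
    #define IRQ_TYPE_EDGE_RISING    1
    #define IRQ_TYPE_EDGE_FALLING   2
    #define IRQ_TYPE_EDGE_BOTH      (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
    #define IRQ_TYPE_LEVEL_HIGH     4
    #define IRQ_TYPE_LEVEL_LOW      8
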
index e502c24b0ac79cca089c7f22e19c83e3ea9bf2d4..bf6c8d0510028b472efc4e7a34033e8ca0d8f7c8 100644 (file)
        timer {
                compatible = "arm,armv8-timer";
                interrupts = <GIC_PPI 13
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 14
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 11
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 10
-                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>;
+                       (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        xtal: xtal-clk {
index f1c2c713f9b0896b2dc777a960f84fe741d43ff8..c29dab9d18345100e17cd8f8db97a4db9e4427a2 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 0 0xff01>,      /* Secure Phys IRQ */
-                            <1 13 0xff01>,     /* Non-secure Phys IRQ */
-                            <1 14 0xff01>,     /* Virt IRQ */
-                            <1 15 0xff01>;     /* Hyp IRQ */
+               interrupts = <1 0 0xff08>,      /* Secure Phys IRQ */
+                            <1 13 0xff08>,     /* Non-secure Phys IRQ */
+                            <1 14 0xff08>,     /* Virt IRQ */
+                            <1 15 0xff08>;     /* Hyp IRQ */
                clock-frequency = <50000000>;
        };
 
diff --git a/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi b/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi
new file mode 120000 (symlink)
index 0000000..3937b77
--- /dev/null
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm2835-rpi.dtsi
\ No newline at end of file
index 6f47dd2bb1db4342b45165c5e88b01edc1deb556..7841b724e340e1441867254c9a89c53c014eb031 100644 (file)
@@ -1,7 +1,7 @@
 /dts-v1/;
 #include "bcm2837.dtsi"
-#include "../../../../arm/boot/dts/bcm2835-rpi.dtsi"
-#include "../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi"
+#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"
 
 / {
        compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
index f2a31d06845d84a33bbd575ca6bf634e94a4f773..8216bbb29fe0758ff315098fb1319d97fea91469 100644 (file)
@@ -1,4 +1,4 @@
-#include "../../../../arm/boot/dts/bcm283x.dtsi"
+#include "bcm283x.dtsi"
 
 / {
        compatible = "brcm,bcm2836";
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi
new file mode 120000 (symlink)
index 0000000..dca7c05
--- /dev/null
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x.dtsi
new file mode 120000 (symlink)
index 0000000..5f54e4c
--- /dev/null
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x.dtsi
\ No newline at end of file
index f53b0955bfd31c44ab05dc328bef1e4bd3dadcf4..d4a12fad8afdeecbb522557aedf1f8013b55748d 100644 (file)
        timer {
                compatible = "arm,armv8-timer";
                interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>,
+                             IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>,
+                             IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>,
+                             IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
-                             IRQ_TYPE_EDGE_RISING)>;
+                             IRQ_TYPE_LEVEL_LOW)>;
        };
 
        pmu {
index 2eb9b225f0bcc193c27520a332a7e68ff55410dc..04dc8a8d15399ea3eb9e63f8215fcbc8840c56f3 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0xff01>,
-                            <1 14 0xff01>,
-                            <1 11 0xff01>,
-                            <1 10 0xff01>;
+               interrupts = <1 13 4>,
+                            <1 14 4>,
+                            <1 11 4>,
+                            <1 10 4>;
        };
 
        pmu {
index ca663dfe51891f11e4cd2964048c3590a6e330fe..162831546e183d192459fee3ae40df1b6bf2929d 100644 (file)
 
                timer {
                        compatible = "arm,armv8-timer";
-                       interrupts = <1 13 0xff01>,
-                                    <1 14 0xff01>,
-                                    <1 11 0xff01>,
-                                    <1 10 0xff01>;
+                       interrupts = <1 13 0xff08>,
+                                    <1 14 0xff08>,
+                                    <1 11 0xff08>,
+                                    <1 10 0xff08>;
                };
 
                pmu_system_controller: system-controller@105c0000 {
index e669fbd7f9c36dee51c70dc68a50dd681e4cf886..a67e210e201935a65768b0505e683801624bc3b1 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0x1>, /* Physical Secure PPI */
-                            <1 14 0x1>, /* Physical Non-Secure PPI */
-                            <1 11 0x1>, /* Virtual PPI */
-                            <1 10 0x1>; /* Hypervisor PPI */
+               interrupts = <1 13 0xf08>, /* Physical Secure PPI */
+                            <1 14 0xf08>, /* Physical Non-Secure PPI */
+                            <1 11 0xf08>, /* Virtual PPI */
+                            <1 10 0xf08>; /* Hypervisor PPI */
        };
 
        pmu {
index 21023a388c29d81ec7b7ae2f3a809b8d93d1a523..e3b6034ea5d9084190cddfcf76fdc2e7283cb0d9 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0x8>, /* Physical Secure PPI, active-low */
-                            <1 14 0x8>, /* Physical Non-Secure PPI, active-low */
-                            <1 11 0x8>, /* Virtual PPI, active-low */
-                            <1 10 0x8>; /* Hypervisor PPI, active-low */
+               interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+                            <1 14 4>, /* Physical Non-Secure PPI, active-low */
+                            <1 11 4>, /* Virtual PPI, active-low */
+                            <1 10 4>; /* Hypervisor PPI, active-low */
        };
 
        pmu {
index eab1a42fb934d7c4219c9eb7230b9c244236bdb9..c2a6745f168cbd32116f478d8c76bcc18d23104e 100644 (file)
 
                        timer {
                                compatible = "arm,armv8-timer";
-                               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-                                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-                                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-                                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+                               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
                        };
 
                        odmi: odmi@300000 {
index d02a900378e1037f8750ac3f993dc6be58c61aeb..4f44d1191bfd348f2969339d557141d43da278cf 100644 (file)
                #io-channel-cells = <1>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index c223915f0907df01b095e3d0ccc1cdbb41a44e39..d73bdc8c91156dc0ceb5b7245990cc0abe72d34b 100644 (file)
 
        timer {
                compatible = "arm,armv8-timer";
-               interrupts = <1 13 0xf01>,
-                            <1 14 0xf01>,
-                            <1 11 0xf01>,
-                            <1 10 0xf01>;
+               interrupts = <1 13 4>,
+                            <1 14 4>,
+                            <1 11 4>,
+                            <1 10 4>;
        };
 
        soc {
index e595f22e7e4b67521c5e1720ea0fafeb7273a1c0..3e2e51fbd2bce2d3abdf8390625d1c833bb449c5 100644 (file)
        timer {
                compatible = "arm,armv8-timer";
                interrupt-parent = <&gic>;
-               interrupts = <1 13 0xf01>,
-                            <1 14 0xf01>,
-                            <1 11 0xf01>,
-                            <1 10 0xf01>;
+               interrupts = <1 13 0xf08>,
+                            <1 14 0xf08>,
+                            <1 11 0xf08>,
+                            <1 10 0xf08>;
        };
 
        amba_apu {
index 0a456bef8c792d91a30a4c2caf24cb6fb96546e2..2fee2f59288c94d70814771ed06fced11ede369d 100644 (file)
@@ -199,19 +199,19 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 #define _percpu_read(pcp)                                              \
 ({                                                                     \
        typeof(pcp) __retval;                                           \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),      \
                                              sizeof(pcp));             \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        __retval;                                                       \
 })
 
 #define _percpu_write(pcp, val)                                                \
 do {                                                                   \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),       \
                                sizeof(pcp));                           \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
 } while(0)                                                             \
 
 #define _pcp_protect(operation, pcp, val)                      \
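
The switch to the _notrace variants above matters because preempt_disable()/preempt_enable() are themselves hooked by the tracer; per-cpu accessors built on the traced forms can recurse when called from ftrace callbacks that touch per-cpu data. A tiny sketch of the same idea in a hypothetical helper that has to stay callable from tracing code:

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static inline unsigned long read_stat_notrace(unsigned long __percpu *stat)
    {
            unsigned long v;

            preempt_disable_notrace();      /* no preempt-tracer hook taken here */
            v = *raw_cpu_ptr(stat);
            preempt_enable_notrace();
            return v;
    }
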
index e875a5a551d7debeab3c34ed2d52e9beae7c8f7d..89206b568cd42365090a8be788da1bbd35466f72 100644 (file)
@@ -363,4 +363,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
+/*
+ * Accesses appearing in program order before a spin_lock() operation
+ * can be reordered with accesses inside the critical section, by virtue
+ * of arch_spin_lock being constructed using acquire semantics.
+ *
+ * In cases where this is problematic (e.g. try_to_wake_up), an
+ * smp_mb__before_spinlock() can restore the required ordering.
+ */
+#define smp_mb__before_spinlock()      smp_mb()
+
 #endif /* __ASM_SPINLOCK_H */
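
The comment and definition added above deal with stores issued before a lock acquisition being reordered into the critical section, since arch_spin_lock only carries acquire semantics. A hedged illustration of the kind of pattern it protects (simplified from the try_to_wake_up-style usage the comment alludes to; the names are made up):

    #include <linux/compiler.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(wake_lock);
    static int work_pending;

    static void queue_and_kick(void)
    {
            WRITE_ONCE(work_pending, 1);    /* store issued before taking the lock */
            smp_mb__before_spinlock();      /* keep it from slipping into the critical section */
            spin_lock(&wake_lock);
            /* accesses in here are ordered after the work_pending store */
            spin_unlock(&wake_lock);
    }
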
index 5bb61de2320172c806ee58959e3f721b2b243a99..9d37e967fa198779e01a5cffb35782da5ef2f6f6 100644 (file)
@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
 
        msr     tcr_el1, x8
        msr     vbar_el1, x9
+
+       /*
+        * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+        * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+        * exception. Mask them until local_dbg_restore() in cpu_suspend()
+        * resets them.
+        */
+       disable_dbg
        msr     mdscr_el1, x10
+
        msr     sctlr_el1, x12
        /*
         * Restore oslsr_el1 by writing oslar_el1
index 68cf638faf4867aef2d92b91f9ab48770100f132..b1ec1fa064632258ac1a4bb50b3b22416bd43c0f 100644 (file)
@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
 
 extern __kernel_size_t copy_to_user(void __user *to, const void *from,
                                    __kernel_size_t n);
-extern __kernel_size_t copy_from_user(void *to, const void __user *from,
+extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
                                      __kernel_size_t n);
 
 static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
 {
        return __copy_user(to, (const void __force *)from, n);
 }
+static inline __kernel_size_t copy_from_user(void *to,
+                                              const void __user *from,
+                                              __kernel_size_t n)
+{
+       size_t res = ___copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
+}
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
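
The avr32 rework above, like the uaccess hunks for several other architectures further down, is part of the same hardening series: user accessors must not skip their access checks or leave kernel destination buffers holding stale data when the userspace access fails or is cut short. The most common shape of the fix, sketched around a hypothetical __arch_copy_from_user() helper that returns the number of bytes left uncopied:

    /* hypothetical raw copier: returns the number of bytes NOT copied */
    extern unsigned long __arch_copy_from_user(void *to, const void __user *from,
                                               unsigned long n);

    static inline unsigned long
    copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            unsigned long res = n;

            if (likely(access_ok(VERIFY_READ, from, n)))
                    res = __arch_copy_from_user(to, from, n);
            if (unlikely(res))
                    memset(to + (n - res), 0, res); /* never leak stale kernel bytes */
            return res;
    }
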
index d93ead02daeda4635e772268b328dc652f1d86df..7c6cf14f09854aacd6f31cafedc39a88d451d18a 100644 (file)
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
 /*
  * Userspace access stuff.
  */
-EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(___copy_from_user);
 EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(strncpy_from_user);
index ea59c04b07de8e3a09ed486b8c48a9ffe25ad5c1..075373471da11d011871ab4681e7ded58732ddfb 100644 (file)
         */
        .text
        .align  1
-       .global copy_from_user
-       .type   copy_from_user, @function
-copy_from_user:
+       .global ___copy_from_user
+       .type   ___copy_from_user, @function
+___copy_from_user:
        branch_if_kernel r8, __copy_user
        ret_if_privileged r8, r11, r10, r10
        rjmp    __copy_user
-       .size   copy_from_user, . - copy_from_user
+       .size   ___copy_from_user, . - ___copy_from_user
 
        .global copy_to_user
        .type   copy_to_user, @function
index 12f5d6851bbcb3e28c7ba5421a849df3cabbf4fd..0a2a70096d8b2dc7345b6e1ca85c68b3cdd39b9e 100644 (file)
@@ -171,11 +171,12 @@ static inline int bad_user_access_length(void)
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (access_ok(VERIFY_READ, from, n))
+       if (likely(access_ok(VERIFY_READ, from, n))) {
                memcpy(to, (const void __force *)from, n);
-       else
-               return n;
-       return 0;
+               return 0;
+       }
+       memset(to, 0, n);
+       return n;
 }
 
 static inline unsigned long __must_check
index c6db52ba3a06653e8b6a22a277c607d82c69de46..10c57771822d5d8f25eed0ccc64ceacfb2dca5ca 100644 (file)
@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-       .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                SMC91X_NOWAIT,
        .leda = RPC_LED_100_10,
        .ledb = RPC_LED_TX_RX,
 };
index f35525b5581991689f0e4b178c5672a2dadbe060..57d1c43726d928600d777940d9836d9a7865c1f3 100644 (file)
@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-       .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+       .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+                SMC91X_NOWAIT,
        .leda = RPC_LED_100_10,
        .ledb = RPC_LED_TX_RX,
 };
index e3530d0f13ee74e9c4b2bdfbaf10e226499af2e9..56c7d5750abd66c0b75e7af2d6f0738a18236b6e 100644 (file)
@@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
 extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
 extern unsigned long __do_clear_user(void __user *to, unsigned long n);
 
-static inline unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       if (access_ok(VERIFY_WRITE, to, n))
-               return __copy_user(to, from, n);
-       return n;
-}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-       if (access_ok(VERIFY_READ, from, n))
-               return __copy_user_zeroing(to, from, n);
-       return n;
-}
-
-static inline unsigned long
-__generic_clear_user(void __user *to, unsigned long n)
-{
-       if (access_ok(VERIFY_WRITE, to, n))
-               return __do_clear_user(to, n);
-       return n;
-}
-
 static inline long
 __strncpy_from_user(char *dst, const char __user *src, long count)
 {
@@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
        else if (n == 24)
                __asm_copy_from_user_24(to, from, ret);
        else
-               ret = __generic_copy_from_user(to, from, n);
+               ret = __copy_user_zeroing(to, from, n);
 
        return ret;
 }
@@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
        else if (n == 24)
                __asm_copy_to_user_24(to, from, ret);
        else
-               ret = __generic_copy_to_user(to, from, n);
+               ret = __copy_user(to, from, n);
 
        return ret;
 }
@@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n)
        else if (n == 24)
                __asm_clear_24(to, ret);
        else
-               ret = __generic_clear_user(to, n);
+               ret = __do_clear_user(to, n);
 
        return ret;
 }
 
 
-#define clear_user(to, n)                              \
-       (__builtin_constant_p(n) ?                      \
-        __constant_clear_user(to, n) :                 \
-        __generic_clear_user(to, n))
+static inline size_t clear_user(void __user *to, size_t n)
+{
+       if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+               return n;
+       if (__builtin_constant_p(n))
+               return __constant_clear_user(to, n);
+       else
+               return __do_clear_user(to, n);
+}
 
-#define copy_from_user(to, from, n)                    \
-       (__builtin_constant_p(n) ?                      \
-        __constant_copy_from_user(to, from, n) :       \
-        __generic_copy_from_user(to, from, n))
+static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
+{
+       if (unlikely(!access_ok(VERIFY_READ, from, n))) {
+               memset(to, 0, n);
+               return n;
+       }
+       if (__builtin_constant_p(n))
+               return __constant_copy_from_user(to, from, n);
+       else
+               return __copy_user_zeroing(to, from, n);
+}
 
-#define copy_to_user(to, from, n)                      \
-       (__builtin_constant_p(n) ?                      \
-        __constant_copy_to_user(to, from, n) :         \
-        __generic_copy_to_user(to, from, n))
+static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
+{
+       if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+               return n;
+       if (__builtin_constant_p(n))
+               return __constant_copy_to_user(to, from, n);
+       else
+               return __copy_user(to, from, n);
+}
 
 /* We let the __ versions of copy_from/to_user inline, because they're often
  * used in fast paths and have only a small space overhead.
index 3ac9a59d65d4129142d06ba9516455e0c5dc96db..87d9e34c5df866072ced6de336ea07e3755853eb 100644 (file)
@@ -263,19 +263,25 @@ do {                                                      \
 extern long __memset_user(void *dst, unsigned long count);
 extern long __memcpy_user(void *dst, const void *src, unsigned long count);
 
-#define clear_user(dst,count)                  __memset_user(____force(dst), (count))
+#define __clear_user(dst,count)                        __memset_user(____force(dst), (count))
 #define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
 #define __copy_to_user_inatomic(to, from, n)   __memcpy_user(____force(to), (from), (n))
 
 #else
 
-#define clear_user(dst,count)                  (memset(____force(dst), 0, (count)), 0)
+#define __clear_user(dst,count)                        (memset(____force(dst), 0, (count)), 0)
 #define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
 #define __copy_to_user_inatomic(to, from, n)   (memcpy(____force(to), (from), (n)), 0)
 
 #endif
 
-#define __clear_user clear_user
+static inline unsigned long __must_check
+clear_user(void __user *to, unsigned long n)
+{
+       if (likely(__access_ok(to, n)))
+               n = __clear_user(to, n);
+       return n;
+}
 
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
index f000a382bc7f62f28dfe980ab184904c70681914..f61cfb28e9f283800f968c2953705851da429e68 100644 (file)
@@ -103,7 +103,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
 {
        long res = __strnlen_user(src, n);
 
-       /* return from strnlen can't be zero -- that would be rubbish. */
+       if (unlikely(!res))
+               return -EFAULT;
 
        if (res > n) {
                copy_from_user(dst, src, n);
index 465c70982f40d8960925bcefa29bb9402167aba9..bfe13196f7708f6e574f2ecdbbb34c2acc29855e 100644 (file)
@@ -241,8 +241,7 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
-       if (!__builtin_constant_p(count))
-               check_object_size(from, count, true);
+       check_object_size(from, count, true);
 
        return __copy_user(to, (__force void __user *) from, count);
 }
@@ -250,8 +249,7 @@ __copy_to_user (void __user *to, const void *from, unsigned long count)
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
-       if (!__builtin_constant_p(count))
-               check_object_size(to, count, false);
+       check_object_size(to, count, false);
 
        return __copy_user((__force void __user *) to, from, count);
 }
@@ -265,27 +263,22 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
        long __cu_len = (n);                                                            \
                                                                                        \
        if (__access_ok(__cu_to, __cu_len, get_fs())) {                                 \
-               if (!__builtin_constant_p(n))                                           \
-                       check_object_size(__cu_from, __cu_len, true);                   \
+               check_object_size(__cu_from, __cu_len, true);                   \
                __cu_len = __copy_user(__cu_to, (__force void __user *)  __cu_from, __cu_len);  \
        }                                                                               \
        __cu_len;                                                                       \
 })
 
-#define copy_from_user(to, from, n)                                                    \
-({                                                                                     \
-       void *__cu_to = (to);                                                           \
-       const void __user *__cu_from = (from);                                          \
-       long __cu_len = (n);                                                            \
-                                                                                       \
-       __chk_user_ptr(__cu_from);                                                      \
-       if (__access_ok(__cu_from, __cu_len, get_fs())) {                               \
-               if (!__builtin_constant_p(n))                                           \
-                       check_object_size(__cu_to, __cu_len, false);                    \
-               __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);   \
-       }                                                                               \
-       __cu_len;                                                                       \
-})
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       check_object_size(to, n, false);
+       if (likely(__access_ok(from, n, get_fs())))
+               n = __copy_user((__force void __user *) to, from, n);
+       else
+               memset(to, 0, n);
+       return n;
+}
 
 #define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
 
index cac7014daef3aa6949d17a72824a757dfdbee5b4..6f8982157a75849b491c8461c0c3b34d4c7c4088 100644 (file)
@@ -219,7 +219,7 @@ extern int fixup_exception(struct pt_regs *regs);
 #define __get_user_nocheck(x, ptr, size)                               \
 ({                                                                     \
        long __gu_err = 0;                                              \
-       unsigned long __gu_val;                                         \
+       unsigned long __gu_val = 0;                                     \
        might_fault();                                                  \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);             \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
index 8282cbce7e399a84488e675af0751341d24f0205..273e61225c277ae67ba28dfae4ef9123e3ef34a9 100644 (file)
@@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (access_ok(VERIFY_READ, from, n))
+       if (likely(access_ok(VERIFY_READ, from, n)))
                return __copy_user_zeroing(to, from, n);
+       memset(to, 0, n);
        return n;
 }
 
index 331b0d35f89ce301ad9ba876be7322f417d7c909..826676778094f23a8ced8264b5c448184c0fa006 100644 (file)
@@ -227,7 +227,7 @@ extern long __user_bad(void);
 
 #define __get_user(x, ptr)                                             \
 ({                                                                     \
-       unsigned long __gu_val;                                         \
+       unsigned long __gu_val = 0;                                     \
        /*unsigned long __gu_ptr = (unsigned long)(ptr);*/              \
        long __gu_err;                                                  \
        switch (sizeof(*(ptr))) {                                       \
@@ -373,10 +373,13 @@ extern long __user_bad(void);
 static inline long copy_from_user(void *to,
                const void __user *from, unsigned long n)
 {
+       unsigned long res = n;
        might_fault();
-       if (access_ok(VERIFY_READ, from, n))
-               return __copy_from_user(to, from, n);
-       return n;
+       if (likely(access_ok(VERIFY_READ, from, n)))
+               res = __copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
 #define __copy_to_user(to, from, n)    \
index 11b965f98d9589f7ba1686877a9f054a4c1ae42e..21a2aaba20d5ed4ae9eb551b19ab208dbf5aa3c2 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
+#include <linux/string.h>
 #include <asm/asm-eva.h>
 
 /*
@@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
                        __cu_len = __invoke_copy_from_user(__cu_to,     \
                                                           __cu_from,   \
                                                           __cu_len);   \
+               } else {                                                \
+                       memset(__cu_to, 0, __cu_len);                   \
                }                                                       \
        }                                                               \
        __cu_len;                                                       \
index 20f7bf6de384d06d5beebaeb216d2e6d4f675350..d012e877a95a02993ab57f9b7e6d73f767978344 100644 (file)
@@ -166,6 +166,7 @@ struct __large_struct { unsigned long buf[100]; };
                "2:\n"                                          \
                "       .section        .fixup,\"ax\"\n"        \
                "3:\n\t"                                        \
+               "       mov             0,%1\n"                 \
                "       mov             %3,%0\n"                \
                "       jmp             2b\n"                   \
                "       .previous\n"                            \
index 7826e6c364e74f0075e43081e5db367062baef30..ce8899e5e171370ebc88c08a91727ef9232febf9 100644 (file)
@@ -9,7 +9,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 unsigned long
 __generic_copy_to_user(void *to, const void *from, unsigned long n)
@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 {
        if (access_ok(VERIFY_READ, from, n))
                __copy_user_zeroing(to, from, n);
+       else
+               memset(to, 0, n);
        return n;
 }
 
index caa51ff85a3c7d0ac7c57cf034758185aad64724..0ab82324c8174559ddd478aa0a2dd171b652b8c0 100644 (file)
@@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
 static inline long copy_from_user(void *to, const void __user *from,
                                unsigned long n)
 {
-       if (!access_ok(VERIFY_READ, from, n))
-               return n;
-       return __copy_from_user(to, from, n);
+       unsigned long res = n;
+       if (access_ok(VERIFY_READ, from, n))
+               res = __copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
 static inline long copy_to_user(void __user *to, const void *from,
@@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n);
 
 #define __get_user_unknown(val, size, ptr, err) do {                   \
        err = 0;                                                        \
-       if (copy_from_user(&(val), ptr, size)) {                        \
+       if (__copy_from_user(&(val), ptr, size)) {                      \
                err = -EFAULT;                                          \
        }                                                               \
        } while (0)
@@ -166,7 +169,7 @@ do {                                                                        \
        ({                                                              \
        long __gu_err = -EFAULT;                                        \
        const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);              \
-       unsigned long __gu_val;                                         \
+       unsigned long __gu_val = 0;                                     \
        __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
        (x) = (__force __typeof__(x))__gu_val;                          \
        __gu_err;                                                       \
index a6bd07ca3d6c08dc49af6ab4738fe85e1ddcd485..5cc6b4f1b79516a7a5882886b59884d03529df6d 100644 (file)
@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
 static inline unsigned long
 copy_from_user(void *to, const void *from, unsigned long n)
 {
-       unsigned long over;
-
-       if (access_ok(VERIFY_READ, from, n))
-               return __copy_tofrom_user(to, from, n);
-       if ((unsigned long)from < TASK_SIZE) {
-               over = (unsigned long)from + n - TASK_SIZE;
-               return __copy_tofrom_user(to, from, n - over) + over;
-       }
-       return n;
+       unsigned long res = n;
+
+       if (likely(access_ok(VERIFY_READ, from, n)))
+               res = __copy_tofrom_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
 static inline unsigned long
 copy_to_user(void *to, const void *from, unsigned long n)
 {
-       unsigned long over;
-
-       if (access_ok(VERIFY_WRITE, to, n))
-               return __copy_tofrom_user(to, from, n);
-       if ((unsigned long)to < TASK_SIZE) {
-               over = (unsigned long)to + n - TASK_SIZE;
-               return __copy_tofrom_user(to, from, n - over) + over;
-       }
+       if (likely(access_ok(VERIFY_WRITE, to, n)))
+               n = __copy_tofrom_user(to, from, n);
        return n;
 }
 
@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
 static inline __must_check unsigned long
 clear_user(void *addr, unsigned long size)
 {
-
-       if (access_ok(VERIFY_WRITE, addr, size))
-               return __clear_user(addr, size);
-       if ((unsigned long)addr < TASK_SIZE) {
-               unsigned long over = (unsigned long)addr + size - TASK_SIZE;
-               return __clear_user(addr, size - over) + over;
-       }
+       if (likely(access_ok(VERIFY_WRITE, addr, size)))
+               size = __clear_user(addr, size);
        return size;
 }
 
index cd87781031653e78693610f7539151537a3c6398..af12c2db9bb8536649c514f7fc78c2a10a4e2e23 100644 (file)
@@ -1,6 +1,5 @@
 config PARISC
        def_bool y
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_IDE
        select HAVE_OPROFILE
index 1a8f6f95689e645676b4aa58a30e0c4f88f6c176..f6a4c016304b657149d8c87f23e0c564e875a36b 100644 (file)
@@ -245,7 +245,6 @@ CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_KEYS=y
 # CONFIG_CRYPTO_HW is not set
 CONFIG_FONTS=y
index 7e0792658952bbc3fa3e5ec2a7d21276171c8929..c564e6e1fa23424c39efa967cce4a42ac9ae4f2f 100644 (file)
@@ -291,7 +291,6 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
index 0f59fd9ca20526d8a27066c724ab2499965ec62c..482847865dac26ffd890ca529be741cddea4e747 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm-generic/uaccess-unaligned.h>
 
 #include <linux/bug.h>
+#include <linux/string.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -208,26 +209,30 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-extern void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-        __compiletime_error("copy_from_user() buffer size is not provably correct")
-#else
-        __compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
 
 static inline unsigned long __must_check copy_from_user(void *to,
                                           const void __user *from,
                                           unsigned long n)
 {
         int sz = __compiletime_object_size(to);
-        int ret = -EFAULT;
+        unsigned long ret = n;
 
-        if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+        if (likely(sz == -1 || sz >= n))
                 ret = __copy_from_user(to, from, n);
-        else
-                copy_from_user_overflow();
+        else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
+       else
+                __bad_copy_user();
 
+       if (unlikely(ret))
+               memset(to + (n - ret), 0, ret);
         return ret;
 }
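
The copy_from_user() above (and the similar hunk further down that adds the same __bad_copy_user()/copy_user_overflow() pair) splits the old overflow hook in two: a hard compile-time error when the overflowing length is a compile-time constant, and a runtime warning otherwise, while still zeroing whatever was not copied. A plain-C sketch of that decision tree using the GCC builtins the kernel macros wrap; the helper names are placeholders and, as in the kernel, the compile-time check only bites when building with optimization enabled.

#include <string.h>

/*
 * Stand-ins for the kernel pieces: __arch_copy_from_user() is the raw
 * copier (returns bytes NOT copied), report_overflow() plays the role of
 * the WARN()-based copy_user_overflow() helper, and bad_copy_user()
 * mirrors __bad_copy_user(), whose error attribute fails the build if a
 * call to it survives optimization.
 */
extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n);
extern void report_overflow(long size, unsigned long count);
extern void __attribute__((error("usercopy buffer size is too small"))) bad_copy_user(void);

static inline unsigned long sketch_checked_copy_from_user(void *to, const void *from,
							   unsigned long n)
{
	long sz = __builtin_object_size(to, 0);	/* -1 when the size is not provable */
	unsigned long uncopied = n;

	if (sz == -1 || (unsigned long)sz >= n)
		uncopied = __arch_copy_from_user(to, from, n);	/* size fine or unknown */
	else if (!__builtin_constant_p(n))
		report_overflow(sz, n);		/* overflow only provable at run time: warn */
	else
		bad_copy_user();		/* constant overflow: fail the build */

	if (uncopied)				/* zero whatever was not copied */
		memset((char *)to + (n - uncopied), 0, uncopied);

	return uncopied;
}
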
 
index 2ef55f8968a2b1fba3f34ce7312e07ce8eb42287..b312b152461b0539a22c5939ba7728a38f5d8d32 100644 (file)
@@ -15,7 +15,7 @@ static inline bool early_cpu_has_feature(unsigned long feature)
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
 #include <linux/jump_label.h>
 
-#define NUM_CPU_FTR_KEYS       64
+#define NUM_CPU_FTR_KEYS       BITS_PER_LONG
 
 extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
 
index 666bef4ebfae72a88c4bceebacbc18a7bfa7b390..9377bdf42eb8aafa983b038afc15a2e96bf33873 100644 (file)
@@ -3,6 +3,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * Mapping of threads to cores
index 88b4901ac4eef45edd2b48c4305f005abcc68847..85b7a1a21e228571df158782f36a79e20728cff9 100644 (file)
@@ -21,7 +21,7 @@
 #ifndef __ASM_PPC64_HMI_H__
 #define __ASM_PPC64_HMI_H__
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 
 #define        CORE_TB_RESYNC_REQ_BIT          63
 #define MAX_SUBCORE_PER_CORE           4
index 148303e7771ff6f2a438b31a07984cd11c9e48dd..6a6792bb39fbc8616034732322bd497ed30c5961 100644 (file)
@@ -183,11 +183,6 @@ struct paca_struct {
         */
        u16 in_mce;
        u8 hmi_event_available;          /* HMI event is available */
-       /*
-        * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
-        * more details
-        */
-       struct sibling_subcore_state *sibling_subcore_state;
 #endif
 
        /* Stuff for accurate time accounting */
@@ -202,6 +197,13 @@ struct paca_struct {
        struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 #endif
        struct kvmppc_host_state kvm_hstate;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+       /*
+        * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
+        * more details
+        */
+       struct sibling_subcore_state *sibling_subcore_state;
+#endif
 #endif
 };
 
index b5e88e4a171ab3e5bb005125373bc82050ae10ba..c0309c59bed8dd7e9ad3311eb3d792dc865c766b 100644 (file)
@@ -301,6 +301,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 /* Allocate & free a PCI host bridge structure */
 extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
 extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_free_controller_deferred(struct pci_host_bridge *bridge);
 
 #ifdef CONFIG_PCI
 extern int pcibios_vaddr_is_ioport(void __iomem *address);
index c1dc6c14deb84a261ab19e509f12078a271937d1..c266227fdd5ba5320b2d0cfb318feab1b7815cb8 100644 (file)
@@ -308,40 +308,21 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long copy_from_user(void *to,
                const void __user *from, unsigned long n)
 {
-       unsigned long over;
-
-       if (access_ok(VERIFY_READ, from, n)) {
-               if (!__builtin_constant_p(n))
-                       check_object_size(to, n, false);
+       if (likely(access_ok(VERIFY_READ, from, n))) {
+               check_object_size(to, n, false);
                return __copy_tofrom_user((__force void __user *)to, from, n);
        }
-       if ((unsigned long)from < TASK_SIZE) {
-               over = (unsigned long)from + n - TASK_SIZE;
-               if (!__builtin_constant_p(n - over))
-                       check_object_size(to, n - over, false);
-               return __copy_tofrom_user((__force void __user *)to, from,
-                               n - over) + over;
-       }
+       memset(to, 0, n);
        return n;
 }
 
 static inline unsigned long copy_to_user(void __user *to,
                const void *from, unsigned long n)
 {
-       unsigned long over;
-
        if (access_ok(VERIFY_WRITE, to, n)) {
-               if (!__builtin_constant_p(n))
-                       check_object_size(from, n, true);
+               check_object_size(from, n, true);
                return __copy_tofrom_user(to, (__force void __user *)from, n);
        }
-       if ((unsigned long)to < TASK_SIZE) {
-               over = (unsigned long)to + n - TASK_SIZE;
-               if (!__builtin_constant_p(n))
-                       check_object_size(from, n - over, true);
-               return __copy_tofrom_user(to, (__force void __user *)from,
-                               n - over) + over;
-       }
        return n;
 }
 
@@ -383,8 +364,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
                        return 0;
        }
 
-       if (!__builtin_constant_p(n))
-               check_object_size(to, n, false);
+       check_object_size(to, n, false);
 
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
@@ -412,8 +392,8 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
-       if (!__builtin_constant_p(n))
-               check_object_size(from, n, true);
+
+       check_object_size(from, n, true);
 
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
@@ -439,10 +419,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
        might_fault();
        if (likely(access_ok(VERIFY_WRITE, addr, size)))
                return __clear_user(addr, size);
-       if ((unsigned long)addr < TASK_SIZE) {
-               unsigned long over = (unsigned long)addr + size - TASK_SIZE;
-               return __clear_user(addr, size - over) + over;
-       }
        return size;
 }
 
index b2027a5cf50817649667cb148087912324b87893..fe4c075bcf50eda75905e7c53b4830914cef5b00 100644 (file)
@@ -41,7 +41,7 @@ obj-$(CONFIG_VDSO32)          += vdso32/
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)       += hw_breakpoint.o
 obj-$(CONFIG_PPC_BOOK3S_64)    += cpu_setup_ppc970.o cpu_setup_pa6t.o
 obj-$(CONFIG_PPC_BOOK3S_64)    += cpu_setup_power.o
-obj-$(CONFIG_PPC_BOOK3S_64)    += mce.o mce_power.o hmi.o
+obj-$(CONFIG_PPC_BOOK3S_64)    += mce.o mce_power.o
 obj-$(CONFIG_PPC_BOOK3E_64)    += exceptions-64e.o idle_book3e.o
 obj-$(CONFIG_PPC64)            += vdso64/
 obj-$(CONFIG_ALTIVEC)          += vecemu.o
index 6b8bc0dd09d4a6e769adae004867b839bb6f4c6f..5afd03e5e8b865e3dc0b08b2293364296a884eff 100644 (file)
@@ -368,13 +368,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 tabort_syscall:
        /* Firstly we need to enable TM in the kernel */
        mfmsr   r10
-       li      r13, 1
-       rldimi  r10, r13, MSR_TM_LG, 63-MSR_TM_LG
+       li      r9, 1
+       rldimi  r10, r9, MSR_TM_LG, 63-MSR_TM_LG
        mtmsrd  r10, 0
 
        /* tabort, this dooms the transaction, nothing else */
-       li      r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-       TABORT(R13)
+       li      r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+       TABORT(R9)
 
        /*
         * Return directly to userspace. We have corrupted user register state,
@@ -382,8 +382,8 @@ tabort_syscall:
         * resume after the tbegin of the aborted transaction with the
         * checkpointed register state.
         */
-       li      r13, MSR_RI
-       andc    r10, r10, r13
+       li      r9, MSR_RI
+       andc    r10, r10, r9
        mtmsrd  r10, 1
        mtspr   SPRN_SRR0, r11
        mtspr   SPRN_SRR1, r12
index df6d45eb41150185e7cc17baa82feead23fa27f3..bffec73dbffc19695765a6e9b5bf95f19bbabc2a 100644 (file)
@@ -485,7 +485,23 @@ machine_check_fwnmi:
        EXCEPTION_PROLOG_0(PACA_EXMC)
 machine_check_pSeries_0:
        EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
-       EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
+       /*
+        * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
+        * difference that MSR_RI is not enabled, because PACA_EXMC is being
+        * used, so a nested machine check would corrupt it.
+        * machine_check_common enables MSR_RI.
+        */
+       ld      r12,PACAKBASE(r13)
+       ld      r10,PACAKMSR(r13)
+       xori    r10,r10,MSR_RI
+       mfspr   r11,SPRN_SRR0
+       LOAD_HANDLER(r12, machine_check_common)
+       mtspr   SPRN_SRR0,r12
+       mfspr   r12,SPRN_SRR1
+       mtspr   SPRN_SRR1,r10
+       rfid
+       b       .       /* prevent speculative execution */
+
        KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
        KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
@@ -969,14 +985,17 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 machine_check_common:
 
        mfspr   r10,SPRN_DAR
-       std     r10,PACA_EXGEN+EX_DAR(r13)
+       std     r10,PACA_EXMC+EX_DAR(r13)
        mfspr   r10,SPRN_DSISR
-       stw     r10,PACA_EXGEN+EX_DSISR(r13)
+       stw     r10,PACA_EXMC+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
        FINISH_NAP
        RECONCILE_IRQ_STATE(r10, r11)
-       ld      r3,PACA_EXGEN+EX_DAR(r13)
-       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
+       ld      r3,PACA_EXMC+EX_DAR(r13)
+       lwz     r4,PACA_EXMC+EX_DSISR(r13)
+       /* Enable MSR_RI when finished with PACA_EXMC */
+       li      r10,MSR_RI
+       mtmsrd  r10,1
        std     r3,_DAR(r1)
        std     r4,_DSISR(r1)
        bl      save_nvgprs
diff --git a/arch/powerpc/kernel/hmi.c b/arch/powerpc/kernel/hmi.c
deleted file mode 100644 (file)
index e3f738e..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Hypervisor Maintenance Interrupt (HMI) handling.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
- * Copyright 2015 IBM Corporation
- * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
- */
-
-#undef DEBUG
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <asm/paca.h>
-#include <asm/hmi.h>
-
-void wait_for_subcore_guest_exit(void)
-{
-       int i;
-
-       /*
-        * NULL bitmap pointer indicates that KVM module hasn't
-        * been loaded yet and hence no guests are running.
-        * If no KVM is in use, no need to co-ordinate among threads
-        * as all of them will always be in host and no one is going
-        * to modify TB other than the opal hmi handler.
-        * Hence, just return from here.
-        */
-       if (!local_paca->sibling_subcore_state)
-               return;
-
-       for (i = 0; i < MAX_SUBCORE_PER_CORE; i++)
-               while (local_paca->sibling_subcore_state->in_guest[i])
-                       cpu_relax();
-}
-
-void wait_for_tb_resync(void)
-{
-       if (!local_paca->sibling_subcore_state)
-               return;
-
-       while (test_bit(CORE_TB_RESYNC_REQ_BIT,
-                               &local_paca->sibling_subcore_state->flags))
-               cpu_relax();
-}
index 2265c6398a17ec4af7fc5983df9dad7443ac2fce..bd739fed26e3203aae73807399c43a939c248d38 100644 (file)
@@ -411,7 +411,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  *
  * r13 - PACA
  * cr3 - gt if waking up with partial/complete hypervisor state loss
- * cr4 - eq if waking up from complete hypervisor state loss.
+ * cr4 - gt or eq if waking up from complete hypervisor state loss.
  */
 _GLOBAL(pnv_wakeup_tb_loss)
        ld      r1,PACAR1(r13)
@@ -453,7 +453,7 @@ lwarx_loop2:
         * At this stage
         * cr2 - eq if first thread to wakeup in core
         * cr3-  gt if waking up with partial/complete hypervisor state loss
-        * cr4 - eq if waking up from complete hypervisor state loss.
+        * cr4 - gt or eq if waking up from complete hypervisor state loss.
         */
 
        ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
@@ -481,7 +481,7 @@ first_thread_in_subcore:
         * If waking up from sleep, subcore state is not lost. Hence
         * skip subcore state restore
         */
-       bne     cr4,subcore_state_restored
+       blt     cr4,subcore_state_restored
 
        /* Restore per-subcore state */
        ld      r4,_SDR1(r1)
@@ -526,7 +526,7 @@ timebase_resync:
         * If waking up from sleep, per core state is not lost, skip to
         * clear_lock.
         */
-       bne     cr4,clear_lock
+       blt     cr4,clear_lock
 
        /*
         * First thread in the core to wake up and its waking up with
@@ -557,7 +557,7 @@ common_exit:
         * If waking up from sleep, hypervisor state is not lost. Hence
         * skip hypervisor state restore.
         */
-       bne     cr4,hypervisor_state_restored
+       blt     cr4,hypervisor_state_restored
 
        /* Waking up from winkle */
 
index 3ed8ec09b5c93292ca1c584c9ed637870aa1369a..e785cc9e1ecd8bb0e442168412278bdbcf70afdd 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
 #include <linux/preempt.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
 #include <asm/code-patching.h>
index 7fdf324d5b51f4ebb8716dfadda4ed40fb34d23d..e58908066b0e4e3fd3c2729ce7a7c49cad7f8d0e 100644 (file)
@@ -153,6 +153,42 @@ void pcibios_free_controller(struct pci_controller *phb)
 }
 EXPORT_SYMBOL_GPL(pcibios_free_controller);
 
+/*
+ * This function is used to call pcibios_free_controller()
+ * in a deferred manner: a callback from the PCI subsystem.
+ *
+ * _*DO NOT*_ call pcibios_free_controller() explicitly if
+ * this is used (or it may access an invalid *phb pointer).
+ *
+ * The callback occurs when all references to the root bus
+ * are dropped (e.g., child buses/devices and their users).
+ *
+ * It's called as .release_fn() of 'struct pci_host_bridge'
+ * which is associated with the 'struct pci_controller.bus'
+ * (root bus) - it expects .release_data to hold a pointer
+ * to 'struct pci_controller'.
+ *
+ * In order to use it, register .release_fn()/release_data
+ * like this:
+ *
+ * pci_set_host_bridge_release(bridge,
+ *                             pcibios_free_controller_deferred,
+ *                             (void *) phb);
+ *
+ * e.g. in the pcibios_root_bridge_prepare() callback from
+ * pci_create_root_bus().
+ */
+void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
+{
+       struct pci_controller *phb = (struct pci_controller *)
+                                        bridge->release_data;
+
+       pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
+
+       pcibios_free_controller(phb);
+}
+EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
+
 /*
  * The function is used to return the minimal alignment
  * for memory or I/O windows of the associated P2P bridge.
index 4e74fc588a3f6497177fa82e1d148b32383ffcb9..d3eff99e938c121f8a1ec5f68ab2f2b0a75d1604 100644 (file)
@@ -695,7 +695,7 @@ unsigned char ibm_architecture_vec[] = {
        OV4_MIN_ENT_CAP,                /* minimum VP entitled capacity */
 
        /* option vector 5: PAPR/OF options */
-       VECTOR_LENGTH(18),              /* length */
+       VECTOR_LENGTH(21),              /* length */
        0,                              /* don't ignore, don't halt */
        OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
        OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
@@ -726,8 +726,11 @@ unsigned char ibm_architecture_vec[] = {
        0,
        0,
        OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
-       OV5_FEAT(OV5_PFO_HW_842),
-       OV5_FEAT(OV5_SUB_PROCESSORS),
+       OV5_FEAT(OV5_PFO_HW_842),                               /* Byte 17 */
+       0,                                                      /* Byte 18 */
+       0,                                                      /* Byte 19 */
+       0,                                                      /* Byte 20 */
+       OV5_FEAT(OV5_SUB_PROCESSORS),                           /* Byte 21 */
 
        /* option vector 6: IBM PAPR hints */
        VECTOR_LENGTH(3),               /* length */
index b6aa378aff636800a668ac67e525827fefd1ae76..a7daf749b97f272ba4819646b9f2515fb845c29a 100644 (file)
@@ -1226,7 +1226,21 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
                (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
        if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
                goto bad;
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       /*
+        * If there is a transactional state then throw it away.
+        * The purpose of a sigreturn is to destroy all traces of the
+        * signal frame; this includes any transactional state created
+        * within it. We only check for suspended because we can never be
+        * active in the kernel; if we are, there is nothing better to do
+        * than go ahead and hit the Bad Thing later.
+        * The cause is not important as there will never be a
+        * recheckpoint so it's not user visible.
+        */
+       if (MSR_TM_SUSPENDED(mfmsr()))
+               tm_reclaim_current(0);
+
        if (__get_user(tmp, &rt_sf->uc.uc_link))
                goto bad;
        uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
index 7e49984d4331543de9a3d88f24d23a67b48e76df..70409bb90a957f8d62a5f1b5c1be69aee12e6c91 100644 (file)
@@ -676,7 +676,21 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
        if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
                goto badframe;
        set_current_blocked(&set);
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       /*
+        * If there is a transactional state then throw it away.
+        * The purpose of a sigreturn is to destroy all traces of the
+        * signal frame; this includes any transactional state created
+        * within it. We only check for suspended because we can never be
+        * active in the kernel; if we are, there is nothing better to do
+        * than go ahead and hit the Bad Thing later.
+        * The cause is not important as there will never be a
+        * recheckpoint so it's not user visible.
+        */
+       if (MSR_TM_SUSPENDED(mfmsr()))
+               tm_reclaim_current(0);
+
        if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
                goto badframe;
        if (MSR_TM_ACTIVE(msr)) {
index 25a39052bf6b5653c3ab0317298864a2873fd911..9c6f3fd580597e5fdfcc5fc46031d9be0aaac74b 100644 (file)
@@ -830,7 +830,7 @@ int __cpu_disable(void)
 
        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
-       for (i = 0; i < threads_per_core; i++) {
+       for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
                cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
index 2cb589264cb748869e2228ff1806e2067af65fe4..62859ebe0062630bae83eaa328ea0ace0654a713 100644 (file)
@@ -25,7 +25,8 @@
 #include <linux/user.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/module.h>      /* print_modules */
 #include <linux/prctl.h>
 #include <linux/delay.h>
 #include <linux/kprobes.h>
index 1f9e5529e692555a5d76f3519fb8c87875a73793..855d4b95d75255a328e5320765427125ea8b49de 100644 (file)
@@ -78,6 +78,7 @@ kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
 
 ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+       book3s_hv_hmi.o \
        book3s_hv_rmhandlers.o \
        book3s_hv_rm_mmu.o \
        book3s_hv_ras.o \
diff --git a/arch/powerpc/kvm/book3s_hv_hmi.c b/arch/powerpc/kvm/book3s_hv_hmi.c
new file mode 100644 (file)
index 0000000..e3f738e
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Hypervisor Maintenance Interrupt (HMI) handling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * Copyright 2015 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/paca.h>
+#include <asm/hmi.h>
+
+void wait_for_subcore_guest_exit(void)
+{
+       int i;
+
+       /*
+        * NULL bitmap pointer indicates that KVM module hasn't
+        * been loaded yet and hence no guests are running.
+        * If no KVM is in use, no need to co-ordinate among threads
+        * as all of them will always be in host and no one is going
+        * to modify TB other than the opal hmi handler.
+        * Hence, just return from here.
+        */
+       if (!local_paca->sibling_subcore_state)
+               return;
+
+       for (i = 0; i < MAX_SUBCORE_PER_CORE; i++)
+               while (local_paca->sibling_subcore_state->in_guest[i])
+                       cpu_relax();
+}
+
+void wait_for_tb_resync(void)
+{
+       if (!local_paca->sibling_subcore_state)
+               return;
+
+       while (test_bit(CORE_TB_RESYNC_REQ_BIT,
+                               &local_paca->sibling_subcore_state->flags))
+               cpu_relax();
+}
index 0a57fe6d49ccf43a07224c4f7da1f597e36cc10c..aa8214f30c920e05c6bf62349c6ea52e0ca68dc9 100644 (file)
@@ -127,18 +127,19 @@ _GLOBAL(csum_partial_copy_generic)
        stw     r7,12(r1)
        stw     r8,8(r1)
 
-       rlwinm  r0,r4,3,0x8
-       rlwnm   r6,r6,r0,0,31   /* odd destination address: rotate one byte */
-       cmplwi  cr7,r0,0        /* is destination address even ? */
        addic   r12,r6,0
        addi    r6,r4,-4
        neg     r0,r4
        addi    r4,r3,-4
        andi.   r0,r0,CACHELINE_MASK    /* # bytes to start of cache line */
+       crset   4*cr7+eq
        beq     58f
 
        cmplw   0,r5,r0                 /* is this more than total to do? */
        blt     63f                     /* if not much to do */
+       rlwinm  r7,r6,3,0x8
+       rlwnm   r12,r12,r7,0,31 /* odd destination address: rotate one byte */
+       cmplwi  cr7,r7,0        /* is destination address even ? */
        andi.   r8,r0,3                 /* get it word-aligned first */
        mtctr   r8
        beq+    61f
index a4db22f6502150a25a082769edc54ee58bf7211d..bb1ffc559f38b799b5f93b9aec9a931d0088143f 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>
index dfdb90cb44039f364d92cecd2e920eccb25d07e5..9f1983404e1a8fdcf6d9c7c809263d243eadf1e7 100644 (file)
@@ -113,7 +113,12 @@ BEGIN_FTR_SECTION
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
        b       slb_finish_load_1T
 
-0:
+0:     /*
+        * For userspace addresses, make sure this is region 0.
+        */
+       cmpdi   r9, 0
+       bne     8f
+
        /* when using slices, we extract the psize off the slice bitmaps
         * and then we need to get the sllp encoding off the mmu_psize_defs
         * array.
index 8eb82b043dd859195179d267b177f2fd2331639c..d93dd4acf40bd277f0af1668d9fed183ffd8dc52 100644 (file)
@@ -528,7 +528,6 @@ static struct platform_driver mpc512x_lpbfifo_driver = {
        .remove = mpc512x_lpbfifo_remove,
        .driver = {
                .name = DRV_NAME,
-               .owner = THIS_MODULE,
                .of_match_table = mpc512x_lpbfifo_match,
        },
 };
index dbcd0303afed591803e660cea96591fa421eaa81..63c5ab6489c94d4c3e3e14c1f381bcee519c9c29 100644 (file)
@@ -222,7 +222,6 @@ static const struct of_device_id mcu_of_match_table[] = {
 static struct i2c_driver mcu_driver = {
        .driver = {
                .name = "mcu-mpc8349emitx",
-               .owner = THIS_MODULE,
                .of_match_table = mcu_of_match_table,
        },
        .probe = mcu_probe,
index dafba1057a477b12d4d13b1a028b4071e9c8bf33..dfd310031549d9007252a0416688a0605ebccd4d 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/tty.h>
 #include <linux/serial_core.h>
 #include <linux/of_platform.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
index 80804f9916eee7f8957dd6d9bda81fd4b086c0df..f97bab8e37a2688e4d3ef46fcf6f8150d1cf0255 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/pci.h>
 #include <linux/kdev_t.h>
 #include <linux/console.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/seq_file.h>
index 2ee96431f7360e1d8d63ece45520416860c62428..4c827826c05eb13b09f4e0cd9f9c73d6967ec9a9 100644 (file)
@@ -370,6 +370,7 @@ static irqreturn_t process_dump(int irq, void *data)
        uint32_t dump_id, dump_size, dump_type;
        struct dump_obj *dump;
        char name[22];
+       struct kobject *kobj;
 
        rc = dump_read_info(&dump_id, &dump_size, &dump_type);
        if (rc != OPAL_SUCCESS)
@@ -381,8 +382,12 @@ static irqreturn_t process_dump(int irq, void *data)
         * that gracefully and not create two conflicting
         * entries.
         */
-       if (kset_find_obj(dump_kset, name))
+       kobj = kset_find_obj(dump_kset, name);
+       if (kobj) {
+               /* Drop reference added by kset_find_obj() */
+               kobject_put(kobj);
                return 0;
+       }
 
        dump = create_dump_obj(dump_id, dump_size, dump_type);
        if (!dump)
index 37f959bf392e72a8a4bc7dee92181846b1383458..f2344cbd2f464cb93a3afbcbf21dc8263069b555 100644 (file)
@@ -247,6 +247,7 @@ static irqreturn_t elog_event(int irq, void *data)
        uint64_t elog_type;
        int rc;
        char name[2+16+1];
+       struct kobject *kobj;
 
        rc = opal_get_elog_size(&id, &size, &type);
        if (rc != OPAL_SUCCESS) {
@@ -269,8 +270,12 @@ static irqreturn_t elog_event(int irq, void *data)
         * that gracefully and not create two conflicting
         * entries.
         */
-       if (kset_find_obj(elog_kset, name))
+       kobj = kset_find_obj(elog_kset, name);
+       if (kobj) {
+               /* Drop reference added by kset_find_obj() */
+               kobject_put(kobj);
                return IRQ_HANDLED;
+       }
 
        create_elog_obj(log_id, elog_size, elog_type);
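
Both kset_find_obj() hunks above (the dump and elog interrupt handlers) fix the same leak: kset_find_obj() returns the matching kobject with its reference count already raised, so even a caller that only wants to know whether the entry exists must drop that reference. An illustrative helper, not part of the patch, showing the corrected pattern:

#include <linux/kobject.h>
#include <linux/errno.h>

/*
 * Illustrative only: returns -EEXIST if an entry named @name is already
 * registered in @kset, dropping the reference kset_find_obj() took.
 */
static int entry_already_exists(struct kset *kset, const char *name)
{
	struct kobject *kobj = kset_find_obj(kset, name);

	if (kobj) {
		kobject_put(kobj);	/* drop the lookup reference */
		return -EEXIST;
	}
	return 0;
}
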
 
index fd9444f9fb0c24e0ed02dcb722c0e824ddc218e2..bc0c91e84ca0dfa9dd50a11c1fe610fb19c120b5 100644 (file)
@@ -149,7 +149,7 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
 
 static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
-       unsigned long pe = phb->ioda.total_pe_num - 1;
+       long pe;
 
        for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
                if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
@@ -162,11 +162,12 @@ static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
 static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
 {
        struct pnv_phb *phb = pe->phb;
+       unsigned int pe_num = pe->pe_number;
 
        WARN_ON(pe->pdev);
 
        memset(pe, 0, sizeof(struct pnv_ioda_pe));
-       clear_bit(pe->pe_number, phb->ioda.pe_alloc);
+       clear_bit(pe_num, phb->ioda.pe_alloc);
 }
 
 /* The default M64 BAR is shared by all PEs */
@@ -2216,7 +2217,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
 
        pnv_pci_link_table_and_group(phb->hose->node, num,
                        tbl, &pe->table_group);
-       pnv_pci_phb3_tce_invalidate_pe(pe);
+       pnv_pci_ioda2_tce_invalidate_pe(pe);
 
        return 0;
 }
@@ -2354,7 +2355,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
        if (ret)
                pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
        else
-               pnv_pci_phb3_tce_invalidate_pe(pe);
+               pnv_pci_ioda2_tce_invalidate_pe(pe);
 
        pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
 
@@ -3402,12 +3403,6 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
        struct pnv_phb *phb = pe->phb;
        struct pnv_ioda_pe *slave, *tmp;
 
-       /* Release slave PEs in compound PE */
-       if (pe->flags & PNV_IODA_PE_MASTER) {
-               list_for_each_entry_safe(slave, tmp, &pe->slaves, list)
-                       pnv_ioda_release_pe(slave);
-       }
-
        list_del(&pe->list);
        switch (phb->type) {
        case PNV_PHB_IODA1:
@@ -3422,7 +3417,26 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
 
        pnv_ioda_release_pe_seg(pe);
        pnv_ioda_deconfigure_pe(pe->phb, pe);
-       pnv_ioda_free_pe(pe);
+
+       /* Release slave PEs in the compound PE */
+       if (pe->flags & PNV_IODA_PE_MASTER) {
+               list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
+                       list_del(&slave->list);
+                       pnv_ioda_free_pe(slave);
+               }
+       }
+
+       /*
+        * The PE for the root bus can be removed because of hotplug during
+        * EEH recovery for a fenced PHB error. Mark the PE dead so that it
+        * can be populated again in the PCI hot add path. The PE itself
+        * shouldn't be destroyed as it's a globally reserved resource.
+        */
+       if (phb->ioda.root_pe_populated &&
+           phb->ioda.root_pe_idx == pe->pe_number)
+               phb->ioda.root_pe_populated = false;
+       else
+               pnv_ioda_free_pe(pe);
 }
 
 static void pnv_pci_release_device(struct pci_dev *pdev)
@@ -3438,7 +3452,17 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
        if (!pdn || pdn->pe_number == IODA_INVALID_PE)
                return;
 
+       /*
+        * PCI hotplug can happen as part of EEH error recovery. The @pdn
+        * isn't removed and added afterwards in this scenario. We should
+        * set the PE number in @pdn to an invalid one. Otherwise, the PE's
+        * device count is decreased on removing devices while failing to
+        * be increased on adding devices. That leads to an unbalanced PE
+        * device count and eventually breaks the normal PCI hotplug path.
+        */
        pe = &phb->ioda.pe_array[pdn->pe_number];
+       pdn->pe_number = IODA_INVALID_PE;
+
        WARN_ON(--pe->device_count < 0);
        if (pe->device_count == 0)
                pnv_ioda_release_pe(pe);
index fe16a50700de3d370ac07bad040d6d55cee6b9ef..09eba5a9929afc6818dac60bac41079c89f6330b 100644 (file)
@@ -119,6 +119,10 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 
        bus = bridge->bus;
 
+       /* Rely on the pcibios_free_controller_deferred() callback. */
+       pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred,
+                                       (void *) pci_bus_to_host(bus));
+
        dn = pcibios_get_phb_of_node(bus);
        if (!dn)
                return 0;
index 906dbaa97fe2850abba20b597b55b4a2a14627ae..547fd13e4f8e88a1d6d3e49fb270649c349fea5b 100644 (file)
@@ -106,8 +106,11 @@ int remove_phb_dynamic(struct pci_controller *phb)
                release_resource(res);
        }
 
-       /* Free pci_controller data structure */
-       pcibios_free_controller(phb);
+       /*
+        * The pci_controller data structure is freed by
+        * the pcibios_free_controller_deferred() callback;
+        * see pseries_root_bridge_prepare().
+        */
 
        return 0;
 }
index 4ffcaa6f867095cdcabfa51c9a206c0efd138f0c..a39d20e8623d4ae7684a5ebb43fc50f0ba793488 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/root_dev.h>
 #include <linux/of.h>
 #include <linux/of_pci.h>
-#include <linux/kexec.h>
 
 #include <asm/mmu.h>
 #include <asm/processor.h>
@@ -66,6 +65,7 @@
 #include <asm/eeh.h>
 #include <asm/reg.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
 
 #include "pseries.h"
 
index 6c110994d902f858a82006fe18fb46658f3fc2b0..81d49476c47e0aa9d44a0981cb458723b1d760e9 100644 (file)
@@ -534,7 +534,8 @@ struct cpm1_gpio16_chip {
 
 static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+       struct cpm1_gpio16_chip *cpm1_gc =
+               container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
        struct cpm_ioport16 __iomem *iop = mm_gc->regs;
 
        cpm1_gc->cpdata = in_be16(&iop->dat);
@@ -649,7 +650,8 @@ struct cpm1_gpio32_chip {
 
 static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+       struct cpm1_gpio32_chip *cpm1_gc =
+               container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
        struct cpm_ioport32b __iomem *iop = mm_gc->regs;
 
        cpm1_gc->cpdata = in_be32(&iop->dat);
index 911456d177130f2f325b2879751da82b1ce092cb..947f42007734c1c22d5f6eb02ae72f110cd5f44b 100644 (file)
@@ -94,7 +94,8 @@ struct cpm2_gpio32_chip {
 
 static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc);
+       struct cpm2_gpio32_chip *cpm2_gc =
+               container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
        struct cpm2_ioports __iomem *iop = mm_gc->regs;
 
        cpm2_gc->cpdata = in_be32(&iop->dat);
index 68e7c0dd2e45551143b6afc079fd185d2ca89a80..3cc7cace194aecc3645eab70ec585c6c4b0088a9 100644 (file)
@@ -23,7 +23,7 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
index 57d72f10a97f69de868b6053beccd7533760870e..9114243fa1b5a4752eab8a85469a18e6de2a324d 100644 (file)
 
 static void icp_opal_teardown_cpu(void)
 {
-       int cpu = smp_processor_id();
+       int hw_cpu = hard_smp_processor_id();
 
        /* Clear any pending IPI */
-       opal_int_set_mfrr(cpu, 0xff);
+       opal_int_set_mfrr(hw_cpu, 0xff);
 }
 
 static void icp_opal_flush_ipi(void)
@@ -101,14 +101,16 @@ static void icp_opal_eoi(struct irq_data *d)
 
 static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
-       opal_int_set_mfrr(cpu, IPI_PRIORITY);
+       int hw_cpu = get_hard_smp_processor_id(cpu);
+
+       opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-       int cpu = smp_processor_id();
+       int hw_cpu = hard_smp_processor_id();
 
-       opal_int_set_mfrr(cpu, 0xff);
+       opal_int_set_mfrr(hw_cpu, 0xff);
 
        return smp_ipi_demux();
 }
index e751fe25d6ab670428f5c97b8464d9102baddbe6..c109f073d454af9ec0a137dccf4df81a9484bf2b 100644 (file)
@@ -68,7 +68,6 @@ config DEBUG_RODATA
 config S390
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_GCOV_PROFILE_ALL
index 26e0c7f0881417349438c44a0155a378ab638131..412b1bd21029bb997715e5e5ff7778bc69813ee1 100644 (file)
@@ -602,7 +602,6 @@ CONFIG_FAIL_FUTEX=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_IRQSOFF_TRACER=y
 CONFIG_PREEMPT_TRACER=y
 CONFIG_SCHED_TRACER=y
index 24879dab47bc1e1b4a71c3f22232c83d1028ef10..bec279eb4b936b9a84f21f7d8c2de8e8769dffa5 100644 (file)
@@ -552,7 +552,6 @@ CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
 CONFIG_TRACE_ENUM_MAP_FILE=y
index a5c1e5f2a0cab667501e83825d7c6aaeb4447206..1751446a5bbb8ed9ca519d87d8f18fe70801f3fe 100644 (file)
@@ -549,7 +549,6 @@ CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
index 73610f2e3b4fbbdbd9c030a8f689ecd71aaa08b5..2d40ef0a6295d9a93c3527d6592eba2780736a2e 100644 (file)
@@ -172,7 +172,6 @@ CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
index 9b49cf1daa8f9ee11d3536343fe76325085efa64..52d7c8709279fcd937add7062ac62910ce40fdcf 100644 (file)
@@ -266,28 +266,28 @@ int __put_user_bad(void) __attribute__((noreturn));
        __chk_user_ptr(ptr);                                    \
        switch (sizeof(*(ptr))) {                               \
        case 1: {                                               \
-               unsigned char __x;                              \
+               unsigned char __x = 0;                          \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 2: {                                               \
-               unsigned short __x;                             \
+               unsigned short __x = 0;                         \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 4: {                                               \
-               unsigned int __x;                               \
+               unsigned int __x = 0;                           \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 8: {                                               \
-               unsigned long long __x;                         \
+               unsigned long long __x = 0;                     \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
@@ -311,6 +311,14 @@ int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
@@ -332,12 +340,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        return __copy_to_user(to, from, n);
 }
 
-void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
 /**
  * copy_from_user: - Copy a block of data from user space.
  * @to:   Destination address, in kernel space.
@@ -362,7 +364,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
        might_fault();
        if (unlikely(sz != -1 && sz < n)) {
-               copy_from_user_overflow();
+               if (!__builtin_constant_p(n))
+                       copy_user_overflow(sz, n);
+               else
+                       __bad_copy_user();
                return n;
        }
        return __copy_from_user(to, from, n);
index f142215ed30dd1dc4ae7b14d57d72732a04b82b7..607ec91966c7226babedb064703f13cf9be7daab 100644 (file)
@@ -2231,9 +2231,10 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
                return -EINVAL;
        current->thread.fpu.fpc = fpu->fpc;
        if (MACHINE_HAS_VX)
-               convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+               convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
+                                (freg_t *) fpu->fprs);
        else
-               memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+               memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        return 0;
 }
 
@@ -2242,9 +2243,10 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
        /* make sure we have the latest values */
        save_fpu_regs();
        if (MACHINE_HAS_VX)
-               convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+               convert_vx_to_fp((freg_t *) fpu->fprs,
+                                (__vector128 *) vcpu->run->s.regs.vrs);
        else
-               memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+               memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
        fpu->fpc = current->thread.fpu.fpc;
        return 0;
 }
index c106488b41371b2c8cc188df7ba5e8e5d7c06f31..d8673e243f13bf7315e36203890401cae191cea2 100644 (file)
@@ -584,7 +584,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                /* Validity 0x0044 will be checked by SIE */
                if (rc)
                        goto unpin;
-               scb_s->gvrd = hpa;
+               scb_s->riccbd = hpa;
        }
        return 0;
 unpin:
index 20a3591225ccea9aa3d93acc11a97a34bf254501..01aec8ccde834119b0cf893c6d8ff4b81a41cb20 100644 (file)
@@ -163,7 +163,7 @@ do {                                                                        \
                __get_user_asm(val, "lw", ptr);                         \
                 break;                                                 \
        case 8:                                                         \
-               if ((copy_from_user((void *)&val, ptr, 8)) == 0)        \
+               if (__copy_from_user((void *)&val, ptr, 8) == 0)        \
                        __gu_err = 0;                                   \
                else                                                    \
                        __gu_err = -EFAULT;                             \
@@ -188,6 +188,8 @@ do {                                                                        \
                                                                        \
        if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))             \
                __get_user_common((x), size, __gu_ptr);                 \
+       else                                                            \
+               (x) = 0;                                                \
                                                                        \
        __gu_err;                                                       \
 })
@@ -201,6 +203,7 @@ do {                                                                        \
                "2:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "3:li   %0, %4\n"                                       \
+               "li     %1, 0\n"                                        \
                "j      2b\n"                                           \
                ".previous\n"                                           \
                ".section __ex_table,\"a\"\n"                           \
@@ -298,35 +301,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
 static inline unsigned long
 copy_from_user(void *to, const void *from, unsigned long len)
 {
-       unsigned long over;
+       unsigned long res = len;
 
-       if (access_ok(VERIFY_READ, from, len))
-               return __copy_tofrom_user(to, from, len);
+       if (likely(access_ok(VERIFY_READ, from, len)))
+               res = __copy_tofrom_user(to, from, len);
 
-       if ((unsigned long)from < TASK_SIZE) {
-               over = (unsigned long)from + len - TASK_SIZE;
-               return __copy_tofrom_user(to, from, len - over) + over;
-       }
-       return len;
+       if (unlikely(res))
+               memset(to + (len - res), 0, res);
+
+       return res;
 }
 
 static inline unsigned long
 copy_to_user(void *to, const void *from, unsigned long len)
 {
-       unsigned long over;
-
-       if (access_ok(VERIFY_WRITE, to, len))
-               return __copy_tofrom_user(to, from, len);
+       if (likely(access_ok(VERIFY_WRITE, to, len)))
+               len = __copy_tofrom_user(to, from, len);
 
-       if ((unsigned long)to < TASK_SIZE) {
-               over = (unsigned long)to + len - TASK_SIZE;
-               return __copy_tofrom_user(to, from, len - over) + over;
-       }
        return len;
 }
 
-#define __copy_from_user(to, from, len)        \
-               __copy_tofrom_user((to), (from), (len))
+static inline unsigned long
+__copy_from_user(void *to, const void *from, unsigned long len)
+{
+       unsigned long left = __copy_tofrom_user(to, from, len);
+       if (unlikely(left))
+               memset(to + (len - left), 0, left);
+       return left;
+}
 
 #define __copy_to_user(to, from, len)          \
                __copy_tofrom_user((to), (from), (len))
@@ -340,17 +342,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
 static inline unsigned long
 __copy_from_user_inatomic(void *to, const void *from, unsigned long len)
 {
-       return __copy_from_user(to, from, len);
+       return __copy_tofrom_user(to, from, len);
 }
 
-#define __copy_in_user(to, from, len)  __copy_from_user(to, from, len)
+#define __copy_in_user(to, from, len)  __copy_tofrom_user(to, from, len)
 
 static inline unsigned long
 copy_in_user(void *to, const void *from, unsigned long len)
 {
        if (access_ok(VERIFY_READ, from, len) &&
                      access_ok(VERFITY_WRITE, to, len))
-               return copy_from_user(to, from, len);
+               return __copy_tofrom_user(to, from, len);
 }
 
 /*
index a49635c512665ddc6b9e70e5d944c57e26d349d6..92ade79ac4272ea22657b3aac6adfb90b1d4105f 100644 (file)
@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
        __kernel_size_t __copy_size = (__kernel_size_t) n;
 
        if (__copy_size && __access_ok(__copy_from, __copy_size))
-               return __copy_user(to, from, __copy_size);
+               __copy_size = __copy_user(to, from, __copy_size);
+
+       if (unlikely(__copy_size))
+               memset(to + (n - __copy_size), 0, __copy_size);
 
        return __copy_size;
 }
index c01376c76b868727b55bd117551cc1b96b93d8f1..ca5073dd459674d1ad835406fc46baa1957d01ac 100644 (file)
@@ -24,6 +24,7 @@
 #define __get_user_size(x,ptr,size,retval)                     \
 do {                                                           \
        retval = 0;                                             \
+       x = 0;                                                  \
        switch (size) {                                         \
        case 1:                                                 \
                retval = __get_user_asm_b((void *)&x,           \
index 341a5a133f4837b98f5c9928865ccf6d088af419..ea55f86d7ccd7d54c22236b20300ea8358d45cda 100644 (file)
@@ -249,8 +249,7 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        if (n && __access_ok((unsigned long) to, n)) {
-               if (!__builtin_constant_p(n))
-                       check_object_size(from, n, true);
+               check_object_size(from, n, true);
                return __copy_user(to, (__force void __user *) from, n);
        } else
                return n;
@@ -258,19 +257,19 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (!__builtin_constant_p(n))
-               check_object_size(from, n, true);
+       check_object_size(from, n, true);
        return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        if (n && __access_ok((unsigned long) from, n)) {
-               if (!__builtin_constant_p(n))
-                       check_object_size(to, n, false);
+               check_object_size(to, n, false);
                return __copy_user((__force void __user *) to, from, n);
-       } else
+       } else {
+               memset(to, 0, n);
                return n;
+       }
 }
 
 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
index 8bda94fab8e8cc52baca57952391cbb7665afd1f..37a315d0ddd4b205c8a54d77c354d7cb116aba15 100644 (file)
@@ -212,8 +212,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
        unsigned long ret;
 
-       if (!__builtin_constant_p(size))
-               check_object_size(to, size, false);
+       check_object_size(to, size, false);
 
        ret = ___copy_from_user(to, from, size);
        if (unlikely(ret))
@@ -233,8 +232,8 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
        unsigned long ret;
 
-       if (!__builtin_constant_p(size))
-               check_object_size(from, size, true);
+       check_object_size(from, size, true);
+
        ret = ___copy_to_user(to, from, size);
        if (unlikely(ret))
                ret = copy_to_user_fixup(to, from, size);
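
The sparc hunks drop the __builtin_constant_p() guard so check_object_size() runs for constant sizes as well; for provably-in-bounds constant copies the check folds away, and for everything else the hardened-usercopy bounds check now always executes. A rough user-space sketch of the kind of bounds check being made unconditional — object_bounds_ok() is a made-up stand-in, not the kernel helper, and it rejects instead of BUGing:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct pkt { char hdr[16]; char payload[64]; };

/* Stand-in for check_object_size(): refuse a copy that would spill
 * past the sub-object that was handed in. */
static bool object_bounds_ok(size_t obj_size, size_t n)
{
        return n <= obj_size;
}

/* Returns the number of bytes NOT copied, like copy_to_user(). */
static size_t copy_out_sketch(void *dst, const struct pkt *p, size_t n)
{
        /* The check now runs unconditionally, constant size or not. */
        if (!object_bounds_ok(sizeof(p->hdr), n))
                return n;
        memcpy(dst, p->hdr, n);
        return 0;
}

int main(void)
{
        struct pkt p = { .hdr = "hdr" };
        char buf[64];

        printf("ok:       left=%zu\n", copy_out_sketch(buf, &p, 8));
        printf("overflow: left=%zu\n", copy_out_sketch(buf, &p, 32));
        return 0;
}
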
index 4820a02838ace2e1d05f8e9ed5e43e18b48a5569..78da75b670bcaa67e1cb42ccf5e53ab63421a863 100644 (file)
@@ -4,7 +4,6 @@
 config TILE
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_WANT_FRAME_POINTERS
index 0a9c4265763bd1a26f5f38aa20460c475670cea5..a77369e91e5416c08ee25eaf04cb2e1df7c4fabe 100644 (file)
@@ -416,14 +416,13 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
        return n;
 }
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-/*
- * There are still unprovable places in the generic code as of 2.6.34, so this
- * option is not really compatible with -Werror, which is more useful in
- * general.
- */
-extern void copy_from_user_overflow(void)
-       __compiletime_warning("copy_from_user() size is not provably correct");
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
 
 static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
@@ -433,14 +432,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
 
        if (likely(sz == -1 || sz >= n))
                n = _copy_from_user(to, from, n);
+       else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
        else
-               copy_from_user_overflow();
+               __bad_copy_user();
 
        return n;
 }
-#else
-#define copy_from_user _copy_from_user
-#endif
 
 #ifdef __tilegx__
 /**
index ef4b8f949b516df373b2d016e4a1366b53e1bc75..b783ac87d98a65a9bdafc871b4f0fc40e261567b 100644 (file)
@@ -21,21 +21,17 @@ void handle_syscall(struct uml_pt_regs *r)
        PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
 
        if (syscall_trace_enter(regs))
-               return;
+               goto out;
 
        /* Do the seccomp check after ptrace; failures should be fast. */
        if (secure_computing(NULL) == -1)
-               return;
+               goto out;
 
-       /* Update the syscall number after orig_ax has potentially been updated
-        * with ptrace.
-        */
-       UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
        syscall = UPT_SYSCALL_NR(r);
-
        if (syscall >= 0 && syscall <= __NR_syscall_max)
                PT_REGS_SET_SYSCALL_RETURN(regs,
                                EXECUTE_SYSCALL(syscall, regs));
 
+out:
        syscall_trace_leave(regs);
 }
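
The handle_syscall() change replaces early returns with a goto so that syscall_trace_leave() runs on every exit path, including the ptrace-abort and seccomp cases. The same single-exit shape in a self-contained sketch; the trace and seccomp hooks here are stubs for illustration only:

#include <stdbool.h>
#include <stdio.h>

static bool trace_enter_aborts(int nr) { return nr < 0; }    /* stub */
static bool seccomp_kills(int nr)      { return nr == 999; } /* stub */
static void trace_leave(int nr)        { printf("leave %d\n", nr); }

static void handle_syscall_sketch(int nr)
{
        int ret = -38;                  /* pretend -ENOSYS by default */

        if (trace_enter_aborts(nr))
                goto out;
        if (seccomp_kills(nr))
                goto out;

        ret = nr * 2;                   /* pretend to execute the syscall */
out:
        /* Always reached, whichever path was taken above. */
        printf("ret %d, ", ret);
        trace_leave(nr);
}

int main(void)
{
        handle_syscall_sketch(5);
        handle_syscall_sketch(-1);
        handle_syscall_sketch(999);
        return 0;
}
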
index c580d8c33562ec5eba4dbfc273ae3ed7b6b67072..2a1f0ce7c59acac6e1a6543eeda61ccb3792f277 100644 (file)
@@ -24,7 +24,6 @@ config X86
        select ARCH_DISCARD_MEMBLOCK
        select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FAST_MULTIPLIER
index ff574dad95ccaf2739d0edfe85c94acea1e15134..94dd4a31f5b39a88f205fbda2a452f1228cd9516 100644 (file)
@@ -1004,79 +1004,87 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
        return status;
 }
 
-static efi_status_t exit_boot(struct boot_params *boot_params,
-                             void *handle, bool is64)
-{
-       struct efi_info *efi = &boot_params->efi_info;
-       unsigned long map_sz, key, desc_size;
-       efi_memory_desc_t *mem_map;
+struct exit_boot_struct {
+       struct boot_params *boot_params;
+       struct efi_info *efi;
        struct setup_data *e820ext;
-       const char *signature;
        __u32 e820ext_size;
-       __u32 nr_desc, prev_nr_desc;
-       efi_status_t status;
-       __u32 desc_version;
-       bool called_exit = false;
-       u8 nr_entries;
-       int i;
-
-       nr_desc = 0;
-       e820ext = NULL;
-       e820ext_size = 0;
-
-get_map:
-       status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size,
-                                   &desc_version, &key);
-
-       if (status != EFI_SUCCESS)
-               return status;
-
-       prev_nr_desc = nr_desc;
-       nr_desc = map_sz / desc_size;
-       if (nr_desc > prev_nr_desc &&
-           nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
-               u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
-
-               status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
-               if (status != EFI_SUCCESS)
-                       goto free_mem_map;
+       bool is64;
+};
 
-               efi_call_early(free_pool, mem_map);
-               goto get_map; /* Allocated memory, get map again */
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+                                  struct efi_boot_memmap *map,
+                                  void *priv)
+{
+       static bool first = true;
+       const char *signature;
+       __u32 nr_desc;
+       efi_status_t status;
+       struct exit_boot_struct *p = priv;
+
+       if (first) {
+               nr_desc = *map->buff_size / *map->desc_size;
+               if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
+                       u32 nr_e820ext = nr_desc -
+                                       ARRAY_SIZE(p->boot_params->e820_map);
+
+                       status = alloc_e820ext(nr_e820ext, &p->e820ext,
+                                              &p->e820ext_size);
+                       if (status != EFI_SUCCESS)
+                               return status;
+               }
+               first = false;
        }
 
-       signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
-       memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
+       signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+       memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
 
-       efi->efi_systab = (unsigned long)sys_table;
-       efi->efi_memdesc_size = desc_size;
-       efi->efi_memdesc_version = desc_version;
-       efi->efi_memmap = (unsigned long)mem_map;
-       efi->efi_memmap_size = map_sz;
+       p->efi->efi_systab = (unsigned long)sys_table_arg;
+       p->efi->efi_memdesc_size = *map->desc_size;
+       p->efi->efi_memdesc_version = *map->desc_ver;
+       p->efi->efi_memmap = (unsigned long)*map->map;
+       p->efi->efi_memmap_size = *map->map_size;
 
 #ifdef CONFIG_X86_64
-       efi->efi_systab_hi = (unsigned long)sys_table >> 32;
-       efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
+       p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
+       p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
 #endif
 
+       return EFI_SUCCESS;
+}
+
+static efi_status_t exit_boot(struct boot_params *boot_params,
+                             void *handle, bool is64)
+{
+       unsigned long map_sz, key, desc_size, buff_size;
+       efi_memory_desc_t *mem_map;
+       struct setup_data *e820ext;
+       __u32 e820ext_size;
+       efi_status_t status;
+       __u32 desc_version;
+       struct efi_boot_memmap map;
+       struct exit_boot_struct priv;
+
+       map.map =               &mem_map;
+       map.map_size =          &map_sz;
+       map.desc_size =         &desc_size;
+       map.desc_ver =          &desc_version;
+       map.key_ptr =           &key;
+       map.buff_size =         &buff_size;
+       priv.boot_params =      boot_params;
+       priv.efi =              &boot_params->efi_info;
+       priv.e820ext =          NULL;
+       priv.e820ext_size =     0;
+       priv.is64 =             is64;
+
        /* Might as well exit boot services now */
-       status = efi_call_early(exit_boot_services, handle, key);
-       if (status != EFI_SUCCESS) {
-               /*
-                * ExitBootServices() will fail if any of the event
-                * handlers change the memory map. In which case, we
-                * must be prepared to retry, but only once so that
-                * we're guaranteed to exit on repeated failures instead
-                * of spinning forever.
-                */
-               if (called_exit)
-                       goto free_mem_map;
-
-               called_exit = true;
-               efi_call_early(free_pool, mem_map);
-               goto get_map;
-       }
+       status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+                                       exit_boot_func);
+       if (status != EFI_SUCCESS)
+               return status;
 
+       e820ext = priv.e820ext;
+       e820ext_size = priv.e820ext_size;
        /* Historic? */
        boot_params->alt_mem_k = 32 * 1024;
 
@@ -1085,10 +1093,6 @@ get_map:
                return status;
 
        return EFI_SUCCESS;
-
-free_mem_map:
-       efi_call_early(free_pool, mem_map);
-       return status;
 }
 
 /*
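
The exit_boot() rewrite above hands the get-map/retry/ExitBootServices loop to a shared efi_exit_boot_services() helper and passes the per-boot state through a priv pointer into exit_boot_func(). A reduced model of that inversion of control in plain C; the names only mirror the diff, and the one-retry commit policy shown is a simplified assumption rather than the real EFI flow:

#include <stdio.h>

struct memmap { int key; };                  /* stand-in for the EFI memory map     */
struct boot_priv { int e820ext_allocs; };    /* stand-in for struct exit_boot_struct */

typedef int (*prepare_fn)(struct memmap *map, void *priv);

static int try_commit(const struct memmap *map)
{
        return map->key == 0 ? -1 : 0;       /* pretend the first attempt races     */
}

/* Simplified model of the shared helper: snapshot the map, let the
 * caller's callback record what it needs via priv, then try to commit,
 * retrying once with a fresh map if the commit fails. */
static int exit_boot_services_sketch(prepare_fn prepare, void *priv)
{
        for (int attempt = 0; attempt < 2; attempt++) {
                struct memmap map = { .key = attempt };
                int ret = prepare(&map, priv);

                if (ret)
                        return ret;
                if (try_commit(&map) == 0)
                        return 0;
        }
        return -1;
}

static int exit_boot_func_sketch(struct memmap *map, void *priv)
{
        struct boot_priv *p = priv;

        if (p->e820ext_allocs == 0)          /* first-pass work, as in the diff     */
                p->e820ext_allocs++;
        printf("callback ran, map key=%d\n", map->key);
        return 0;
}

int main(void)
{
        struct boot_priv priv = { 0 };

        printf("ret=%d allocs=%d\n",
               exit_boot_services_sketch(exit_boot_func_sketch, &priv),
               priv.e820ext_allocs);
        return 0;
}

Keeping the retry loop in one helper means the x86 stub no longer has its own called_exit bookkeeping; the callback only records state and never frees or refetches the map itself.
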
index 4e2ecfa23c15978faf88416ad11ca859e1201718..4b429df40d7a2fb9ee9fce2eb5315b592d1200e9 100644 (file)
@@ -1 +1,3 @@
 CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set
index e07a22bb9308d0d030f0052f2f8195a8fbb48df5..f5f4b3fbbbc2924cbac3fe24d45d949e0997dc8e 100644 (file)
@@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
   [PERF_COUNT_HW_CPU_CYCLES]                   = 0x0076,
   [PERF_COUNT_HW_INSTRUCTIONS]                 = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]             = 0x0080,
-  [PERF_COUNT_HW_CACHE_MISSES]                 = 0x0081,
+  [PERF_COUNT_HW_CACHE_REFERENCES]             = 0x077d,
+  [PERF_COUNT_HW_CACHE_MISSES]                 = 0x077e,
   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]          = 0x00c2,
   [PERF_COUNT_HW_BRANCH_MISSES]                        = 0x00c3,
   [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]      = 0x00d0, /* "Decoder empty" event */
index e6131d4454e67005f91047d9e966243ae86aaf80..65577f081d072c5f774f763adc19a0e7a87e5627 100644 (file)
@@ -29,6 +29,8 @@
 
 #define COUNTER_SHIFT          16
 
+static HLIST_HEAD(uncore_unused_list);
+
 struct amd_uncore {
        int id;
        int refcnt;
@@ -39,7 +41,7 @@ struct amd_uncore {
        cpumask_t *active_mask;
        struct pmu *pmu;
        struct perf_event *events[MAX_COUNTERS];
-       struct amd_uncore *free_when_cpu_online;
+       struct hlist_node node;
 };
 
 static struct amd_uncore * __percpu *amd_uncore_nb;
@@ -306,6 +308,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
                uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                uncore_nb->active_mask = &amd_nb_active_mask;
                uncore_nb->pmu = &amd_nb_pmu;
+               uncore_nb->id = -1;
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }
 
@@ -319,6 +322,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
                uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
                uncore_l2->active_mask = &amd_l2_active_mask;
                uncore_l2->pmu = &amd_l2_pmu;
+               uncore_l2->id = -1;
                *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
        }
 
@@ -348,7 +352,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
                        continue;
 
                if (this->id == that->id) {
-                       that->free_when_cpu_online = this;
+                       hlist_add_head(&this->node, &uncore_unused_list);
                        this = that;
                        break;
                }
@@ -388,13 +392,23 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
        return 0;
 }
 
+static void uncore_clean_online(void)
+{
+       struct amd_uncore *uncore;
+       struct hlist_node *n;
+
+       hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
+               hlist_del(&uncore->node);
+               kfree(uncore);
+       }
+}
+
 static void uncore_online(unsigned int cpu,
                          struct amd_uncore * __percpu *uncores)
 {
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
-       kfree(uncore->free_when_cpu_online);
-       uncore->free_when_cpu_online = NULL;
+       uncore_clean_online();
 
        if (cpu == uncore->cpu)
                cpumask_set_cpu(cpu, uncore->active_mask);
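
Rather than each amd_uncore remembering a single sibling to kfree via free_when_cpu_online, duplicates are now parked on a global hlist and reaped in one pass when a CPU comes online, which also copes with several duplicates accumulating. The deferral pattern, sketched with a hand-rolled singly linked list in user space instead of the kernel's hlist macros:

#include <stdio.h>
#include <stdlib.h>

struct uncore {
        int id;
        struct uncore *next;            /* stands in for struct hlist_node */
};

static struct uncore *unused_list;      /* stands in for uncore_unused_list */

static void park_duplicate(struct uncore *u)
{
        u->next = unused_list;          /* hlist_add_head() equivalent */
        unused_list = u;
}

static void clean_unused(void)          /* uncore_clean_online() equivalent */
{
        while (unused_list) {
                struct uncore *u = unused_list;

                unused_list = u->next;
                printf("freeing duplicate id=%d\n", u->id);
                free(u);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct uncore *u = calloc(1, sizeof(*u));

                u->id = i;
                park_duplicate(u);      /* several duplicates can queue up */
        }
        clean_unused();                 /* reaped in one pass at online time */
        return 0;
}
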
index 0a6e393a2e6298bb9240831714d5915ab26abc5a..bdcd6510992c3338c5a3efba35ef3ef7c606bc6a 100644 (file)
 struct bts_ctx {
        struct perf_output_handle       handle;
        struct debug_store              ds_back;
-       int                             started;
+       int                             state;
+};
+
+/* BTS context states: */
+enum {
+       /* no ongoing AUX transactions */
+       BTS_STATE_STOPPED = 0,
+       /* AUX transaction is on, BTS tracing is disabled */
+       BTS_STATE_INACTIVE,
+       /* AUX transaction is on, BTS tracing is running */
+       BTS_STATE_ACTIVE,
 };
 
 static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
@@ -204,6 +214,15 @@ static void bts_update(struct bts_ctx *bts)
 static int
 bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
 
+/*
+ * Ordering PMU callbacks wrt themselves and the PMI is done by means
+ * of bts::state, which:
+ *  - is set when bts::handle::event is valid, that is, between
+ *    perf_aux_output_begin() and perf_aux_output_end();
+ *  - is zero otherwise;
+ *  - is ordered against bts::handle::event with a compiler barrier.
+ */
+
 static void __bts_event_start(struct perf_event *event)
 {
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
@@ -221,10 +240,13 @@ static void __bts_event_start(struct perf_event *event)
 
        /*
         * local barrier to make sure that ds configuration made it
-        * before we enable BTS
+        * before we enable BTS and bts::state goes ACTIVE
         */
        wmb();
 
+       /* INACTIVE/STOPPED -> ACTIVE */
+       WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);
+
        intel_pmu_enable_bts(config);
 
 }
@@ -251,9 +273,6 @@ static void bts_event_start(struct perf_event *event, int flags)
 
        __bts_event_start(event);
 
-       /* PMI handler: this counter is running and likely generating PMIs */
-       ACCESS_ONCE(bts->started) = 1;
-
        return;
 
 fail_end_stop:
@@ -263,30 +282,34 @@ fail_stop:
        event->hw.state = PERF_HES_STOPPED;
 }
 
-static void __bts_event_stop(struct perf_event *event)
+static void __bts_event_stop(struct perf_event *event, int state)
 {
+       struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+       /* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
+       WRITE_ONCE(bts->state, state);
+
        /*
         * No extra synchronization is mandated by the documentation to have
         * BTS data stores globally visible.
         */
        intel_pmu_disable_bts();
-
-       if (event->hw.state & PERF_HES_STOPPED)
-               return;
-
-       ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
 }
 
 static void bts_event_stop(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
-       struct bts_buffer *buf = perf_get_aux(&bts->handle);
+       struct bts_buffer *buf = NULL;
+       int state = READ_ONCE(bts->state);
 
-       /* PMI handler: don't restart this counter */
-       ACCESS_ONCE(bts->started) = 0;
+       if (state == BTS_STATE_ACTIVE)
+               __bts_event_stop(event, BTS_STATE_STOPPED);
 
-       __bts_event_stop(event);
+       if (state != BTS_STATE_STOPPED)
+               buf = perf_get_aux(&bts->handle);
+
+       event->hw.state |= PERF_HES_STOPPED;
 
        if (flags & PERF_EF_UPDATE) {
                bts_update(bts);
@@ -296,6 +319,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
                                bts->handle.head =
                                        local_xchg(&buf->data_size,
                                                   buf->nr_pages << PAGE_SHIFT);
+
                        perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
                                            !!local_xchg(&buf->lost, 0));
                }
@@ -310,8 +334,20 @@ static void bts_event_stop(struct perf_event *event, int flags)
 void intel_bts_enable_local(void)
 {
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+       int state = READ_ONCE(bts->state);
+
+       /*
+        * Here we transition from INACTIVE to ACTIVE;
+        * if we instead are STOPPED from the interrupt handler,
+        * stay that way. Can't be ACTIVE here though.
+        */
+       if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
+               return;
+
+       if (state == BTS_STATE_STOPPED)
+               return;
 
-       if (bts->handle.event && bts->started)
+       if (bts->handle.event)
                __bts_event_start(bts->handle.event);
 }
 
@@ -319,8 +355,15 @@ void intel_bts_disable_local(void)
 {
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
 
+       /*
+        * Here we transition from ACTIVE to INACTIVE;
+        * do nothing for STOPPED or INACTIVE.
+        */
+       if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
+               return;
+
        if (bts->handle.event)
-               __bts_event_stop(bts->handle.event);
+               __bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
 }
 
 static int
@@ -335,8 +378,6 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
                return 0;
 
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
-       if (WARN_ON_ONCE(head != local_read(&buf->head)))
-               return -EINVAL;
 
        phys = &buf->buf[buf->cur_buf];
        space = phys->offset + phys->displacement + phys->size - head;
@@ -403,22 +444,37 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
 
 int intel_bts_interrupt(void)
 {
+       struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
        struct perf_event *event = bts->handle.event;
        struct bts_buffer *buf;
        s64 old_head;
-       int err;
+       int err = -ENOSPC, handled = 0;
 
-       if (!event || !bts->started)
-               return 0;
+       /*
+        * The only surefire way of knowing if this NMI is ours is by checking
+        * the write ptr against the PMI threshold.
+        */
+       if (ds->bts_index >= ds->bts_interrupt_threshold)
+               handled = 1;
+
+       /*
+        * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
+        * so we can only be INACTIVE or STOPPED
+        */
+       if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
+               return handled;
 
        buf = perf_get_aux(&bts->handle);
+       if (!buf)
+               return handled;
+
        /*
         * Skip snapshot counters: they don't use the interrupt, but
         * there's no other way of telling, because the pointer will
         * keep moving
         */
-       if (!buf || buf->snapshot)
+       if (buf->snapshot)
                return 0;
 
        old_head = local_read(&buf->head);
@@ -426,18 +482,27 @@ int intel_bts_interrupt(void)
 
        /* no new data */
        if (old_head == local_read(&buf->head))
-               return 0;
+               return handled;
 
        perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
                            !!local_xchg(&buf->lost, 0));
 
        buf = perf_aux_output_begin(&bts->handle, event);
-       if (!buf)
-               return 1;
+       if (buf)
+               err = bts_buffer_reset(buf, &bts->handle);
+
+       if (err) {
+               WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
 
-       err = bts_buffer_reset(buf, &bts->handle);
-       if (err)
-               perf_aux_output_end(&bts->handle, 0, false);
+               if (buf) {
+                       /*
+                        * BTS_STATE_STOPPED should be visible before
+                        * cleared handle::event
+                        */
+                       barrier();
+                       perf_aux_output_end(&bts->handle, 0, false);
+               }
+       }
 
        return 1;
 }
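
The BTS changes replace the started flag with a three-state machine (STOPPED / INACTIVE / ACTIVE) published through WRITE_ONCE()/READ_ONCE(), so the PMI handler, ->stop() and the enable/disable_local() helpers agree on whether tracing may be restarted. A compressed, single-threaded model of those transitions; the _ONCE accessors and the real handle bookkeeping are omitted, only the state names follow the diff:

#include <assert.h>
#include <stdio.h>

enum bts_state { BTS_STATE_STOPPED, BTS_STATE_INACTIVE, BTS_STATE_ACTIVE };

static enum bts_state state = BTS_STATE_STOPPED;

static void event_start(void)             { state = BTS_STATE_ACTIVE; }
static void event_stop(enum bts_state s)  { state = s; }

static void disable_local(void)           /* wraps the PMI handler */
{
        if (state != BTS_STATE_ACTIVE)
                return;                   /* nothing to pause */
        event_stop(BTS_STATE_INACTIVE);
}

static void enable_local(void)
{
        assert(state != BTS_STATE_ACTIVE);      /* WARN_ON_ONCE in the diff */
        if (state == BTS_STATE_STOPPED)
                return;                   /* the PMI handler shut us down  */
        event_start();
}

int main(void)
{
        event_start();                    /* pmu::start()  -> ACTIVE   */
        disable_local();                  /* PMI entry     -> INACTIVE */
        enable_local();                   /* PMI exit      -> ACTIVE   */
        disable_local();
        event_stop(BTS_STATE_STOPPED);    /* pmu::stop()   -> STOPPED  */
        enable_local();                   /* stays STOPPED             */
        printf("final state=%d\n", state);
        return 0;
}
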
index 2cbde2f449aa8ced63adf14b14f9ceb3d464068c..4c9a79b9cd691ad8b44650ca8ea1b3b1295c0d3d 100644 (file)
@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
  * disabled state if called consecutively.
  *
  * During consecutive calls, the same disable value will be written to related
- * registers, so the PMU state remains unchanged. hw.state in
- * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
- * calls.
+ * registers, so the PMU state remains unchanged.
+ *
+ * intel_bts events don't coexist with intel PMU's BTS events because of
+ * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
+ * disabled around intel PMU's event batching etc, only inside the PMI handler.
  */
 static void __intel_pmu_disable_all(void)
 {
@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
 
        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
-       else
-               intel_bts_disable_local();
 
        intel_pmu_pebs_disable_all();
 }
@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
                        return;
 
                intel_pmu_enable_bts(event->hw.config);
-       } else
-               intel_bts_enable_local();
+       }
 }
 
 static void intel_pmu_enable_all(int added)
@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
         */
        if (!x86_pmu.late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
+       intel_bts_disable_local();
        __intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        handled += intel_bts_interrupt();
@@ -2172,6 +2172,7 @@ done:
        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
        if (cpuc->enabled)
                __intel_pmu_enable_all(0, true);
+       intel_bts_enable_local();
 
        /*
         * Only unmask the NMI after the overflow counters
index 783c49ddef29c2336d458d1741fbdde5f69fecec..8f82b02934fa701a451ee5e7d8f76193eaefc2fa 100644 (file)
@@ -458,6 +458,11 @@ static void __intel_cqm_event_count(void *info);
 static void init_mbm_sample(u32 rmid, u32 evt_type);
 static void __intel_mbm_event_count(void *info);
 
+static bool is_cqm_event(int e)
+{
+       return (e == QOS_L3_OCCUP_EVENT_ID);
+}
+
 static bool is_mbm_event(int e)
 {
        return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
@@ -1366,6 +1371,10 @@ static int intel_cqm_event_init(struct perf_event *event)
             (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
                return -EINVAL;
 
+       if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
+           (is_mbm_event(event->attr.config) && !mbm_enabled))
+               return -EINVAL;
+
        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
index 7ce9f3f669e63d2bd4bbf15db801cf8bc5fa7565..9b983a474253768d172c5db3dee8a1d50098bf4f 100644 (file)
@@ -1274,18 +1274,18 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                struct pebs_record_nhm *p = at;
                u64 pebs_status;
 
-               /* PEBS v3 has accurate status bits */
+               pebs_status = p->status & cpuc->pebs_enabled;
+               pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+
+               /* PEBS v3 has more accurate status bits */
                if (x86_pmu.intel_cap.pebs_format >= 3) {
-                       for_each_set_bit(bit, (unsigned long *)&p->status,
-                                        MAX_PEBS_EVENTS)
+                       for_each_set_bit(bit, (unsigned long *)&pebs_status,
+                                        x86_pmu.max_pebs_events)
                                counts[bit]++;
 
                        continue;
                }
 
-               pebs_status = p->status & cpuc->pebs_enabled;
-               pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
-
                /*
                 * On some CPUs the PEBS status can be zero when PEBS is
                 * racing with clearing of GLOBAL_STATUS.
@@ -1333,8 +1333,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                        continue;
 
                event = cpuc->events[bit];
-               WARN_ON_ONCE(!event);
-               WARN_ON_ONCE(!event->attr.precise_ip);
+               if (WARN_ON_ONCE(!event))
+                       continue;
+
+               if (WARN_ON_ONCE(!event->attr.precise_ip))
+                       continue;
 
                /* log dropped samples number */
                if (error[bit])
index 04bb5fb5a8d7a13308fdcc20d2c1a5930146f07f..861a7d9cb60f6c1c19b560a4912c7eef1effd8b3 100644 (file)
@@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
        event->hw.addr_filters = NULL;
 }
 
+static inline bool valid_kernel_ip(unsigned long ip)
+{
+       return virt_addr_valid(ip) && kernel_ip(ip);
+}
+
 static int pt_event_addr_filters_validate(struct list_head *filters)
 {
        struct perf_addr_filter *filter;
@@ -1081,11 +1086,16 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
 
        list_for_each_entry(filter, filters, entry) {
                /* PT doesn't support single address triggers */
-               if (!filter->range)
+               if (!filter->range || !filter->size)
                        return -EOPNOTSUPP;
 
-               if (!filter->inode && !kernel_ip(filter->offset))
-                       return -EINVAL;
+               if (!filter->inode) {
+                       if (!valid_kernel_ip(filter->offset))
+                               return -EINVAL;
+
+                       if (!valid_kernel_ip(filter->offset + filter->size))
+                               return -EINVAL;
+               }
 
                if (++range > pt_cap_get(PT_CAP_num_address_ranges))
                        return -EOPNOTSUPP;
@@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
                } else {
                        /* apply the offset */
                        msr_a = filter->offset + offs[range];
-                       msr_b = filter->size + msr_a;
+                       msr_b = filter->size + msr_a - 1;
                }
 
                filters->filter[range].msr_a  = msr_a;
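
The PT address-filter fix makes the MSR_B limit inclusive: a filter of size bytes starting at msr_a ends at msr_a + size - 1, not msr_a + size, and zero-size ranges are rejected up front. The arithmetic in isolation:

#include <stdio.h>

/* Inclusive end of an address-range filter of 'size' bytes at 'base'.
 * With size == 0x1000 and base == 0x401000 this is 0x401fff, so the
 * following page is no longer accidentally covered by the filter. */
static unsigned long range_end(unsigned long base, unsigned long size)
{
        return base + size - 1;
}

int main(void)
{
        unsigned long msr_a = 0x401000, size = 0x1000;

        printf("msr_a=%#lx msr_b=%#lx\n", msr_a, range_end(msr_a, size));
        return 0;
}
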
index a0ae610b9280183fc1ca42ddd3fcc45333b433c5..2131c4ce7d8a10698c45626e2781dd8f1cb2f9b9 100644 (file)
@@ -433,7 +433,11 @@ do {                                                                       \
 #define __get_user_asm_ex(x, addr, itype, rtype, ltype)                        \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
-                    _ASM_EXTABLE_EX(1b, 2b)                            \
+                    ".section .fixup,\"ax\"\n"                         \
+                     "3:xor"itype" %"rtype"0,%"rtype"0\n"              \
+                    "  jmp 2b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE_EX(1b, 3b)                            \
                     : ltype(x) : "m" (__m(addr)))
 
 #define __put_user_nocheck(x, ptr, size)                       \
@@ -697,44 +701,15 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
 unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-# define copy_user_diag __compiletime_error
-#else
-# define copy_user_diag __compiletime_warning
-#endif
-
-extern void copy_user_diag("copy_from_user() buffer size is too small")
-copy_from_user_overflow(void);
-extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-
-#undef copy_user_diag
-
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-
-extern void
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
-
-extern void
-__compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
 
-#else
-
-static inline void
-__copy_from_user_overflow(int size, unsigned long count)
+static inline void copy_user_overflow(int size, unsigned long count)
 {
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }
 
-#define __copy_to_user_overflow __copy_from_user_overflow
-
-#endif
-
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        int sz = __compiletime_object_size(to);
@@ -743,36 +718,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
        kasan_check_write(to, n);
 
-       /*
-        * While we would like to have the compiler do the checking for us
-        * even in the non-constant size case, any false positives there are
-        * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
-        * without - the [hopefully] dangerous looking nature of the warning
-        * would make people go look at the respecitive call sites over and
-        * over again just to find that there's no problem).
-        *
-        * And there are cases where it's just not realistic for the compiler
-        * to prove the count to be in range. For example when multiple call
-        * sites of a helper function - perhaps in different source files -
-        * all doing proper range checking, yet the helper function not doing
-        * so again.
-        *
-        * Therefore limit the compile time checking to the constant size
-        * case, and do only runtime checking for non-constant sizes.
-        */
-
        if (likely(sz < 0 || sz >= n)) {
                check_object_size(to, n, false);
                n = _copy_from_user(to, from, n);
-       } else if (__builtin_constant_p(n))
-               copy_from_user_overflow();
+       } else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
        else
-               __copy_from_user_overflow(sz, n);
+               __bad_copy_user();
 
        return n;
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        int sz = __compiletime_object_size(from);
@@ -781,21 +738,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 
        might_fault();
 
-       /* See the comment in copy_from_user() above. */
        if (likely(sz < 0 || sz >= n)) {
                check_object_size(from, n, true);
                n = _copy_to_user(to, from, n);
-       } else if (__builtin_constant_p(n))
-               copy_to_user_overflow();
+       } else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
        else
-               __copy_to_user_overflow(sz, n);
+               __bad_copy_user();
 
        return n;
 }
 
-#undef __copy_from_user_overflow
-#undef __copy_to_user_overflow
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
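
On x86 the old copy_{from,to}_user_overflow() helpers collapse into one scheme: a provably-bad constant size becomes a hard build failure via the never-defined __bad_copy_user() (__compiletime_error), while a non-constant bad size falls back to a runtime WARN from copy_user_overflow(). A user-space approximation of that split using GCC's error attribute and __builtin_constant_p(); build with -O2 for the dead-code elimination the trick relies on, and note copy_checked(), bad_copy() and runtime_overflow() are made-up names for this sketch:

#include <stdio.h>
#include <string.h>

/* Never defined: if a call survives dead-code elimination the build
 * fails with this message, like __bad_copy_user() in the diff. */
extern void bad_copy(void) __attribute__((error("copy size is too small")));

static void runtime_overflow(size_t size, size_t count)
{
        fprintf(stderr, "Buffer overflow detected (%zu < %zu)!\n", size, count);
}

#define copy_checked(dst, src, len)                                          \
do {                                                                         \
        if (sizeof(dst) >= (len))                                            \
                memcpy((dst), (src), (len));                                 \
        else if (!__builtin_constant_p(len))                                 \
                runtime_overflow(sizeof(dst), (len));  /* runtime WARN path */ \
        else                                                                 \
                bad_copy();               /* constant overflow: build error */ \
} while (0)

int main(int argc, char **argv)
{
        char small[8];
        const char src[32] = "payload";
        size_t n = (size_t)argc + 3;            /* genuinely not a constant */

        copy_checked(small, src, n);            /* fits for typical argc    */
        copy_checked(small, src, n + 16);       /* runtime diagnostic path  */
        /* copy_checked(small, src, 16); */     /* would fail the build     */
        (void)argv;
        return 0;
}
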
index 50c95af0f017d7ab73a5cad5dc226b1bf5d9cf60..f3e9b2df4b165d9d1a31322960778a2e9ef68bf3 100644 (file)
@@ -2093,7 +2093,6 @@ int generic_processor_info(int apicid, int version)
                return -EINVAL;
        }
 
-       num_processors++;
        if (apicid == boot_cpu_physical_apicid) {
                /*
                 * x86_bios_cpu_apicid is required to have processors listed
@@ -2116,10 +2115,13 @@ int generic_processor_info(int apicid, int version)
 
                pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n",
                           thiscpu, apicid);
+
                disabled_cpus++;
                return -ENOSPC;
        }
 
+       num_processors++;
+
        /*
         * Validate version
         */
index f5c69d8974e176e44a995bd6f91512c7d030b001..b81fe2d63e15751c2cb7e61fd10dc85cc7f906b0 100644 (file)
@@ -669,6 +669,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
                set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 }
 
+#define MSR_AMD64_DE_CFG       0xC0011029
+
+static void init_amd_ln(struct cpuinfo_x86 *c)
+{
+       /*
+        * Apply erratum 665 fix unconditionally so machines without a BIOS
+        * fix work.
+        */
+       msr_set_bit(MSR_AMD64_DE_CFG, 31);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
        u64 value;
@@ -726,6 +737,7 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 6:    init_amd_k7(c); break;
        case 0xf:  init_amd_k8(c); break;
        case 0x10: init_amd_gh(c); break;
+       case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
        }
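
init_amd_ln() applies the erratum 665 workaround by setting bit 31 of MSR 0xC0011029 whether or not the BIOS already did, and msr_set_bit() is a read-modify-write on the MSR. The same idea as a stand-alone sketch with the rdmsr/wrmsr accessors stubbed out; the real helper lives in arch/x86 and, like this sketch, skips the write when the bit is already set:

#include <stdint.h>
#include <stdio.h>

#define MSR_AMD64_DE_CFG 0xC0011029u

static uint64_t fake_msr;               /* stands in for the hardware register */

static uint64_t rdmsr_stub(uint32_t msr)              { (void)msr; return fake_msr; }
static void     wrmsr_stub(uint32_t msr, uint64_t v)  { (void)msr; fake_msr = v; }

/* msr_set_bit()-style read-modify-write; returns 1 if the value changed. */
static int msr_set_bit_sketch(uint32_t msr, int bit)
{
        uint64_t v = rdmsr_stub(msr);
        uint64_t want = v | (1ULL << bit);

        if (want == v)
                return 0;               /* already set, skip the write */
        wrmsr_stub(msr, want);
        return 1;
}

int main(void)
{
        printf("changed=%d value=%#llx\n",
               msr_set_bit_sketch(MSR_AMD64_DE_CFG, 31),
               (unsigned long long)fake_msr);
        return 0;
}
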
 
index b816971f5da4cd45d1461831283aafb39578f296..620ab06bcf4571c8841b70e35a57657dda2e9985 100644 (file)
@@ -54,6 +54,7 @@ static LIST_HEAD(pcache);
  */
 static u8 *container;
 static size_t container_size;
+static bool ucode_builtin;
 
 static u32 ucode_new_rev;
 static u8 amd_ucode_patch[PATCH_MAX_SIZE];
@@ -281,18 +282,22 @@ static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
 void __init load_ucode_amd_bsp(unsigned int family)
 {
        struct cpio_data cp;
+       bool *builtin;
        void **data;
        size_t *size;
 
 #ifdef CONFIG_X86_32
        data =  (void **)__pa_nodebug(&ucode_cpio.data);
        size = (size_t *)__pa_nodebug(&ucode_cpio.size);
+       builtin = (bool *)__pa_nodebug(&ucode_builtin);
 #else
        data = &ucode_cpio.data;
        size = &ucode_cpio.size;
+       builtin = &ucode_builtin;
 #endif
 
-       if (!load_builtin_amd_microcode(&cp, family))
+       *builtin = load_builtin_amd_microcode(&cp, family);
+       if (!*builtin)
                cp = find_ucode_in_initrd();
 
        if (!(cp.data && cp.size))
@@ -373,7 +378,8 @@ void load_ucode_amd_ap(void)
                return;
 
        /* Add CONFIG_RANDOMIZE_MEMORY offset. */
-       cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+       if (!ucode_builtin)
+               cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
 
        eax = cpuid_eax(0x00000001);
        eq  = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
@@ -439,7 +445,8 @@ int __init save_microcode_in_initrd_amd(void)
                container = cont_va;
 
        /* Add CONFIG_RANDOMIZE_MEMORY offset. */
-       container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+       if (!ucode_builtin)
+               container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
 
        eax   = cpuid_eax(0x00000001);
        eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
index 1d39bfbd26bb3d0918271f13af86c94385983150..3692249a70f1edde4dbe52eae56c46db29b82748 100644 (file)
@@ -289,6 +289,7 @@ void __init kvmclock_init(void)
        put_cpu();
 
        x86_platform.calibrate_tsc = kvm_get_tsc_khz;
+       x86_platform.calibrate_cpu = kvm_get_tsc_khz;
        x86_platform.get_wallclock = kvm_get_wallclock;
        x86_platform.set_wallclock = kvm_set_wallclock;
 #ifdef CONFIG_X86_LOCAL_APIC
index ad5bc9578a7336f031bf580b415fe8354780d060..1acfd76e3e26b73d5e874d3e48fb2e0b9efa9ca8 100644 (file)
@@ -56,12 +56,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
      ".popsection");
 
 /* identity function, which can be inlined */
-u32 _paravirt_ident_32(u32 x)
+u32 notrace _paravirt_ident_32(u32 x)
 {
        return x;
 }
 
-u64 _paravirt_ident_64(u64 x)
+u64 notrace _paravirt_ident_64(u64 x)
 {
        return x;
 }
index 5f42d038fcb4cf501455c29e0014f8d1e1e0dacf..c7220ba94aa776dceb3db4413ea9dec4ebace324 100644 (file)
@@ -109,6 +109,7 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 {
        bool new_val, old_val;
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+       struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
        union kvm_ioapic_redirect_entry *e;
 
        e = &ioapic->redirtbl[RTC_GSI];
@@ -117,16 +118,17 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
                return;
 
        new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
-       old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+       old_val = test_bit(vcpu->vcpu_id, dest_map->map);
 
        if (new_val == old_val)
                return;
 
        if (new_val) {
-               __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+               __set_bit(vcpu->vcpu_id, dest_map->map);
+               dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
                ioapic->rtc_status.pending_eoi++;
        } else {
-               __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+               __clear_bit(vcpu->vcpu_id, dest_map->map);
                ioapic->rtc_status.pending_eoi--;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
index 39b91127ef07a48a3cf4313d2d0340088bf8b7c6..cd944435dfbd73583a4b2d532f6f47aae022c84e 100644 (file)
@@ -23,8 +23,8 @@
 static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
-       [2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
-       [3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
+       [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
+       [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
        [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
index 19f9f9e05c2a812fd07d07ce0e69223ec2accc25..699f8726539a2044616b82bd9cf792a8617f8001 100644 (file)
@@ -2743,16 +2743,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
 
-               if (kvm_lapic_hv_timer_in_use(vcpu) &&
-                               kvm_x86_ops->set_hv_timer(vcpu,
-                                       kvm_get_lapic_tscdeadline_msr(vcpu)))
-                       kvm_lapic_switch_to_sw_timer(vcpu);
                if (check_tsc_unstable()) {
                        u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
                        kvm_x86_ops->write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;
                }
+               if (kvm_lapic_hv_timer_in_use(vcpu) &&
+                               kvm_x86_ops->set_hv_timer(vcpu,
+                                       kvm_get_lapic_tscdeadline_msr(vcpu)))
+                       kvm_lapic_switch_to_sw_timer(vcpu);
                /*
                 * On a host with synchronized TSC, there is no need to update
                 * kvmclock on vcpu->cpu migration
index ecb1b69c1651323ea412914c1701712ea77d41ca..170cc4ff057b398382bef3dd635d6a9115460d88 100644 (file)
@@ -927,9 +927,10 @@ int track_pfn_copy(struct vm_area_struct *vma)
 }
 
 /*
- * prot is passed in as a parameter for the new mapping. If the vma has a
- * linear pfn mapping for the entire range reserve the entire vma range with
- * single reserve_pfn_range call.
+ * prot is passed in as a parameter for the new mapping. If the vma has
+ * a linear pfn mapping for the entire range, or no vma is provided,
+ * reserve the entire pfn + size range with single reserve_pfn_range
+ * call.
  */
 int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                    unsigned long pfn, unsigned long addr, unsigned long size)
@@ -938,11 +939,12 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
        enum page_cache_mode pcm;
 
        /* reserve the whole chunk starting from paddr */
-       if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
+       if (!vma || (addr == vma->vm_start
+                               && size == (vma->vm_end - vma->vm_start))) {
                int ret;
 
                ret = reserve_pfn_range(paddr, size, prot, 0);
-               if (!ret)
+               if (ret == 0 && vma)
                        vma->vm_flags |= VM_PAT;
                return ret;
        }
@@ -997,7 +999,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
        resource_size_t paddr;
        unsigned long prot;
 
-       if (!(vma->vm_flags & VM_PAT))
+       if (vma && !(vma->vm_flags & VM_PAT))
                return;
 
        /* free the chunk starting from pfn or the whole chunk */
@@ -1011,7 +1013,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                size = vma->vm_end - vma->vm_start;
        }
        free_pfn_range(paddr, size);
-       vma->vm_flags &= ~VM_PAT;
+       if (vma)
+               vma->vm_flags &= ~VM_PAT;
 }
 
 /*
index 837ea36a837d3063754c34c7fbb38be662b91226..6d52b94f4bb915d119e16da1628743b93fa7af39 100644 (file)
@@ -553,15 +553,21 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
 
 /*
- * Broadwell EP Home Agent BARs erroneously return non-zero values when read.
+ * Device [8086:2fc0]
+ * Erratum HSE43
+ * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
+ * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
  *
- * See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
- * entry BDF2.
+ * Devices [8086:6f60,6fa0,6fc0]
+ * Erratum BDF2
+ * PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
+ * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
  */
-static void pci_bdwep_bar(struct pci_dev *dev)
+static void pci_invalid_bar(struct pci_dev *dev)
 {
        dev->non_compliant_bars = 1;
 }
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
index ebd4dd6ef73b0e09a7fc70d8e605354be67a61cb..a7ef7b131e2564fd50d1614e8047f95841335c46 100644 (file)
@@ -84,7 +84,10 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
        case EAX:
        case EIP:
        case UESP:
+               break;
        case ORIG_EAX:
+               /* Update the syscall number. */
+               UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
                break;
        case FS:
                if (value && (value & 3) != 3)
index faab418876ce7deb2e4889a9ff3ab30efd7ac852..0b5c184dd5b3b80894c3db60ead6432ad20dca57 100644 (file)
@@ -78,7 +78,11 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
        case RSI:
        case RDI:
        case RBP:
+               break;
+
        case ORIG_RAX:
+               /* Update the syscall number. */
+               UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
                break;
 
        case FS:
index cf8037a87b2d124259df01378df3ac1b43973336..0c654e59f2157bc13e387a3ebfdcdc8b07b8170a 100644 (file)
@@ -631,9 +631,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
 
 static int cryptd_hash_import(struct ahash_request *req, const void *in)
 {
-       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct shash_desc *desc = cryptd_shash_desc(req);
+
+       desc->tfm = ctx->child;
+       desc->flags = req->base.flags;
 
-       return crypto_shash_import(&rctx->desc, in);
+       return crypto_shash_import(desc, in);
 }
 
 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
@@ -733,13 +738,14 @@ static void cryptd_aead_crypt(struct aead_request *req,
        rctx = aead_request_ctx(req);
        compl = rctx->complete;
 
+       tfm = crypto_aead_reqtfm(req);
+
        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt( req );
 
 out:
-       tfm = crypto_aead_reqtfm(req);
        ctx = crypto_aead_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);
 
index 4c745bf389fea19b0cd049533183087be8873a5d..161f91539ae62b5d05547a939825321015580943 100644 (file)
@@ -42,7 +42,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
                list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                        struct acpi_nfit_system_address *spa = nfit_spa->spa;
 
-                       if (nfit_spa_type(spa) == NFIT_SPA_PM)
+                       if (nfit_spa_type(spa) != NFIT_SPA_PM)
                                continue;
                        /* find the spa that covers the mce addr */
                        if (spa->address > mce->addr)
index ad9fc84a8601206cec6cc1fb15cb36e5c23250cb..e878fc799af792aa7ebd77076d57306eea2d4d0b 100644 (file)
@@ -2054,7 +2054,7 @@ int __init acpi_scan_init(void)
 
 static struct acpi_probe_entry *ape;
 static int acpi_probe_count;
-static DEFINE_SPINLOCK(acpi_probe_lock);
+static DEFINE_MUTEX(acpi_probe_mutex);
 
 static int __init acpi_match_madt(struct acpi_subtable_header *header,
                                  const unsigned long end)
@@ -2073,7 +2073,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
        if (acpi_disabled)
                return 0;
 
-       spin_lock(&acpi_probe_lock);
+       mutex_lock(&acpi_probe_mutex);
        for (ape = ap_head; nr; ape++, nr--) {
                if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
                        acpi_probe_count = 0;
@@ -2086,7 +2086,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
                                count++;
                }
        }
-       spin_unlock(&acpi_probe_lock);
+       mutex_unlock(&acpi_probe_mutex);
 
        return count;
 }
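
acpi_probe_lock becomes a mutex because the probe callbacks invoked under it may sleep, which a spinlock forbids. The shape of the conversion, shown with pthread mutexes in a user-space sketch where probe_one() is a made-up callback that blocks:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t probe_mutex = PTHREAD_MUTEX_INITIALIZER;

static int probe_one(int id)
{
        usleep(1000);                   /* probing may block, so no spinlock */
        printf("probed %d\n", id);
        return 1;
}

static int probe_table(int nr)
{
        int count = 0;

        pthread_mutex_lock(&probe_mutex);       /* was spin_lock()   */
        for (int i = 0; i < nr; i++)
                count += probe_one(i);
        pthread_mutex_unlock(&probe_mutex);     /* was spin_unlock() */
        return count;
}

int main(void)
{
        printf("count=%d\n", probe_table(3));
        return 0;
}
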
index 7461a587b39b4e9d953e3a0063b9f1fb8cad0319..dcf2c724fd066c33cf74a3f502d2ae6ba3d276bc 100644 (file)
@@ -2524,7 +2524,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
 
                /* Do not receive interrupts sent by dummy ports */
                if (!pp) {
-                       disable_irq(irq + i);
+                       disable_irq(irq);
                        continue;
                }
 
index 633aa2934a18f5c05f5e03cdaf5742a9b0735f99..44f97ad3c88d5234ca78e6fd348402393e4ab47f 100644 (file)
@@ -144,7 +144,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
        ap->ioaddr.altstatus_addr = base + 0x1E;
        ap->ioaddr.bmdma_addr = base;
        ata_sff_std_ports(&ap->ioaddr);
-       ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+       ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
 
        ninja32_program(base);
        /* FIXME: Should we disable them at remove ? */
index e097d355cc04a2040a129b2ffbea13c404169182..82a081ea43176ca8e1d8accd886ad7065287a8aa 100644 (file)
@@ -301,7 +301,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
        int (*callback)(struct device *);
        int retval;
 
-       trace_rpm_idle(dev, rpmflags);
+       trace_rpm_idle_rcuidle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */
@@ -337,7 +337,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
-               trace_rpm_return_int(dev, _THIS_IP_, 0);
+               trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
                return 0;
        }
 
@@ -352,7 +352,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
        wake_up_all(&dev->power.wait_queue);
 
  out:
-       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
        return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 }
 
@@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
        struct device *parent = NULL;
        int retval;
 
-       trace_rpm_suspend(dev, rpmflags);
+       trace_rpm_suspend_rcuidle(dev, rpmflags);
 
  repeat:
        retval = rpm_check_suspend_allowed(dev);
@@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
        }
 
  out:
-       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 
        return retval;
 
@@ -601,7 +601,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
        struct device *parent = NULL;
        int retval = 0;
 
-       trace_rpm_resume(dev, rpmflags);
+       trace_rpm_resume_rcuidle(dev, rpmflags);
 
  repeat:
        if (dev->power.runtime_error)
@@ -764,7 +764,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
                spin_lock_irq(&dev->power.lock);
        }
 
-       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 
        return retval;
 }
index aa56af87d94118338befbbf62aa7f7b64cfd6570..b11af3f2c1dbf2e1d6faf31cb7bc38e663b2285c 100644 (file)
@@ -404,6 +404,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                unsigned int new_base_reg, new_top_reg;
                unsigned int min, max;
                unsigned int max_dist;
+               unsigned int dist, best_dist = UINT_MAX;
 
                max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
                        map->cache_word_size;
@@ -423,24 +424,41 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                &base_reg, &top_reg);
 
                        if (base_reg <= max && top_reg >= min) {
-                               new_base_reg = min(reg, base_reg);
-                               new_top_reg = max(reg, top_reg);
-                       } else {
-                               if (max < base_reg)
-                                       node = node->rb_left;
+                               if (reg < base_reg)
+                                       dist = base_reg - reg;
+                               else if (reg > top_reg)
+                                       dist = reg - top_reg;
                                else
-                                       node = node->rb_right;
-
-                               continue;
+                                       dist = 0;
+                               if (dist < best_dist) {
+                                       rbnode = rbnode_tmp;
+                                       best_dist = dist;
+                                       new_base_reg = min(reg, base_reg);
+                                       new_top_reg = max(reg, top_reg);
+                               }
                        }
 
-                       ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+                       /*
+                        * Keep looking, we want to choose the closest block,
+                        * otherwise we might end up creating overlapping
+                        * blocks, which breaks the rbtree.
+                        */
+                       if (reg < base_reg)
+                               node = node->rb_left;
+                       else if (reg > top_reg)
+                               node = node->rb_right;
+                       else
+                               break;
+               }
+
+               if (rbnode) {
+                       ret = regcache_rbtree_insert_to_block(map, rbnode,
                                                              new_base_reg,
                                                              new_top_reg, reg,
                                                              value);
                        if (ret)
                                return ret;
-                       rbtree_ctx->cached_rbnode = rbnode_tmp;
+                       rbtree_ctx->cached_rbnode = rbnode;
                        return 0;
                }
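
regcache_rbtree_write() no longer extends the first block whose padded range overlaps the new register; it keeps descending and remembers the candidate whose existing range is closest, so a register that falls between two blocks is merged into the nearer one instead of creating overlapping nodes that break the rbtree. The selection logic, demonstrated on a plain array of [base, top] ranges as a simplified stand-in for the tree walk:

#include <stdio.h>

struct block { unsigned int base, top; };

/* Distance from reg to a block's range; 0 if reg already falls inside. */
static unsigned int dist(unsigned int reg, const struct block *b)
{
        if (reg < b->base)
                return b->base - reg;
        if (reg > b->top)
                return reg - b->top;
        return 0;
}

static int closest_block(unsigned int reg, const struct block *blocks, int n)
{
        unsigned int best_dist = ~0u;
        int best = -1;

        for (int i = 0; i < n; i++) {   /* the diff walks the rbtree instead */
                unsigned int d = dist(reg, &blocks[i]);

                if (d < best_dist) {
                        best_dist = d;
                        best = i;
                }
        }
        return best;
}

int main(void)
{
        struct block blocks[] = { { 0x00, 0x0f }, { 0x40, 0x4f } };
        unsigned int reg = 0x3c;

        /* 0x3c is 4 away from block 1 but 45 away from block 0. */
        printf("merge reg %#x into block %d\n", reg,
               closest_block(reg, blocks, 2));
        return 0;
}
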
 
index df7ff72908216f5d7a97f03af3f06ae4fd5adefd..4e582561e1e7a0cdffeaf8b3901d3a56d66a37ad 100644 (file)
@@ -38,10 +38,11 @@ static int regcache_hw_init(struct regmap *map)
 
        /* calculate the size of reg_defaults */
        for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
-               if (!regmap_volatile(map, i * map->reg_stride))
+               if (regmap_readable(map, i * map->reg_stride) &&
+                   !regmap_volatile(map, i * map->reg_stride))
                        count++;
 
-       /* all registers are volatile, so just bypass */
+       /* all registers are unreadable or volatile, so just bypass */
        if (!count) {
                map->cache_bypass = true;
                return 0;
index 51fa7d66a393bf5bee6818c1432211dfd76582ce..25d26bb18970694d808a3d679e613a5487fa8236 100644 (file)
@@ -1474,6 +1474,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                ret = map->bus->write(map->bus_context, buf, len);
 
                kfree(buf);
+       } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
+               regcache_drop_region(map, reg, reg + 1);
        }
 
        trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
index 5755907f836f8d7a6aa5c892e8eef542e6e9c678..ffa7c9dcbd7a1be41ea07e4bf7e976fb494f65d6 100644 (file)
@@ -551,7 +551,7 @@ static struct attribute *cci5xx_pmu_event_attrs[] = {
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
-       CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
+       CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
        NULL
 };
index 97a9185af433f052f315ae99e12f41232a1b1a3d..884c0305e29033085ff4131434f29f07bd5d78ae 100644 (file)
@@ -187,6 +187,7 @@ struct arm_ccn {
        struct arm_ccn_component *xp;
 
        struct arm_ccn_dt dt;
+       int mn_id;
 };
 
 static DEFINE_MUTEX(arm_ccn_mutex);
@@ -212,6 +213,7 @@ static int arm_ccn_node_to_xp_port(int node)
 #define CCN_CONFIG_TYPE(_config)       (((_config) >> 8) & 0xff)
 #define CCN_CONFIG_EVENT(_config)      (((_config) >> 16) & 0xff)
 #define CCN_CONFIG_PORT(_config)       (((_config) >> 24) & 0x3)
+#define CCN_CONFIG_BUS(_config)                (((_config) >> 24) & 0x3)
 #define CCN_CONFIG_VC(_config)         (((_config) >> 26) & 0x7)
 #define CCN_CONFIG_DIR(_config)                (((_config) >> 29) & 0x1)
 #define CCN_CONFIG_MASK(_config)       (((_config) >> 30) & 0xf)
@@ -241,6 +243,7 @@ static CCN_FORMAT_ATTR(xp, "config:0-7");
 static CCN_FORMAT_ATTR(type, "config:8-15");
 static CCN_FORMAT_ATTR(event, "config:16-23");
 static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(bus, "config:24-25");
 static CCN_FORMAT_ATTR(vc, "config:26-28");
 static CCN_FORMAT_ATTR(dir, "config:29-29");
 static CCN_FORMAT_ATTR(mask, "config:30-33");
@@ -253,6 +256,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
        &arm_ccn_pmu_format_attr_type.attr.attr,
        &arm_ccn_pmu_format_attr_event.attr.attr,
        &arm_ccn_pmu_format_attr_port.attr.attr,
+       &arm_ccn_pmu_format_attr_bus.attr.attr,
        &arm_ccn_pmu_format_attr_vc.attr.attr,
        &arm_ccn_pmu_format_attr_dir.attr.attr,
        &arm_ccn_pmu_format_attr_mask.attr.attr,
@@ -328,6 +332,7 @@ struct arm_ccn_pmu_event {
 static ssize_t arm_ccn_pmu_event_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
+       struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
        struct arm_ccn_pmu_event *event = container_of(attr,
                        struct arm_ccn_pmu_event, attr);
        ssize_t res;
@@ -349,10 +354,17 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
                break;
        case CCN_TYPE_XP:
                res += snprintf(buf + res, PAGE_SIZE - res,
-                               ",xp=?,port=?,vc=?,dir=?");
+                               ",xp=?,vc=?");
                if (event->event == CCN_EVENT_WATCHPOINT)
                        res += snprintf(buf + res, PAGE_SIZE - res,
-                                       ",cmp_l=?,cmp_h=?,mask=?");
+                                       ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
+               else
+                       res += snprintf(buf + res, PAGE_SIZE - res,
+                                       ",bus=?");
+
+               break;
+       case CCN_TYPE_MN:
+               res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
                break;
        default:
                res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
@@ -383,9 +395,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
 }
 
 static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
-       CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
-       CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
-       CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+       CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+       CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+       CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
        CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
@@ -733,9 +745,10 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 
        if (has_branch_stack(event) || event->attr.exclude_user ||
                        event->attr.exclude_kernel || event->attr.exclude_hv ||
-                       event->attr.exclude_idle) {
+                       event->attr.exclude_idle || event->attr.exclude_host ||
+                       event->attr.exclude_guest) {
                dev_warn(ccn->dev, "Can't exclude execution levels!\n");
-               return -EOPNOTSUPP;
+               return -EINVAL;
        }
 
        if (event->cpu < 0) {
@@ -759,6 +772,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 
        /* Validate node/xp vs topology */
        switch (type) {
+       case CCN_TYPE_MN:
+               if (node_xp != ccn->mn_id) {
+                       dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
+                       return -EINVAL;
+               }
+               break;
        case CCN_TYPE_XP:
                if (node_xp >= ccn->num_xps) {
                        dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
@@ -886,6 +905,10 @@ static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
        struct arm_ccn_component *xp;
        u32 val, dt_cfg;
 
+       /* Nothing to do for cycle counter */
+       if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+               return;
+
        if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
                xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
        else
@@ -917,38 +940,17 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
                        arm_ccn_pmu_read_counter(ccn, hw->idx));
        hw->state = 0;
 
-       /*
-        * Pin the timer, so that the overflows are handled by the chosen
-        * event->cpu (this is the same one as presented in "cpumask"
-        * attribute).
-        */
-       if (!ccn->irq)
-               hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
-                               HRTIMER_MODE_REL_PINNED);
-
        /* Set the DT bus input, engaging the counter */
        arm_ccn_pmu_xp_dt_config(event, 1);
 }
 
 static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
 {
-       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
-       u64 timeout;
 
        /* Disable counting, setting the DT bus to pass-through mode */
        arm_ccn_pmu_xp_dt_config(event, 0);
 
-       if (!ccn->irq)
-               hrtimer_cancel(&ccn->dt.hrtimer);
-
-       /* Let the DT bus drain */
-       timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
-                       ccn->num_xps;
-       while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
-                       timeout)
-               cpu_relax();
-
        if (flags & PERF_EF_UPDATE)
                arm_ccn_pmu_event_update(event);
 
@@ -988,7 +990,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 
        /* Comparison values */
        writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
-       writel((cmp_l >> 32) & 0xefffffff,
+       writel((cmp_l >> 32) & 0x7fffffff,
                        source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
        writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
        writel((cmp_h >> 32) & 0x0fffffff,
@@ -996,7 +998,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 
        /* Mask */
        writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
-       writel((mask_l >> 32) & 0xefffffff,
+       writel((mask_l >> 32) & 0x7fffffff,
                        source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
        writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
        writel((mask_h >> 32) & 0x0fffffff,
@@ -1014,7 +1016,7 @@ static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
        hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
 
        id = (CCN_CONFIG_VC(event->attr.config) << 4) |
-                       (CCN_CONFIG_PORT(event->attr.config) << 3) |
+                       (CCN_CONFIG_BUS(event->attr.config) << 3) |
                        (CCN_CONFIG_EVENT(event->attr.config) << 0);
 
        val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
@@ -1099,15 +1101,31 @@ static void arm_ccn_pmu_event_config(struct perf_event *event)
        spin_unlock(&ccn->dt.config_lock);
 }
 
+static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
+{
+       return bitmap_weight(ccn->dt.pmu_counters_mask,
+                            CCN_NUM_PMU_EVENT_COUNTERS + 1);
+}
+
 static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 {
        int err;
        struct hw_perf_event *hw = &event->hw;
+       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 
        err = arm_ccn_pmu_event_alloc(event);
        if (err)
                return err;
 
+       /*
+        * Pin the timer, so that the overflows are handled by the chosen
+        * event->cpu (this is the same one as presented in "cpumask"
+        * attribute).
+        */
+       if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
+               hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+                             HRTIMER_MODE_REL_PINNED);
+
        arm_ccn_pmu_event_config(event);
 
        hw->state = PERF_HES_STOPPED;
@@ -1120,9 +1138,14 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 
 static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
 {
+       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+
        arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
 
        arm_ccn_pmu_event_release(event);
+
+       if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
+               hrtimer_cancel(&ccn->dt.hrtimer);
 }
 
 static void arm_ccn_pmu_event_read(struct perf_event *event)
@@ -1130,6 +1153,24 @@ static void arm_ccn_pmu_event_read(struct perf_event *event)
        arm_ccn_pmu_event_update(event);
 }
 
+static void arm_ccn_pmu_enable(struct pmu *pmu)
+{
+       struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+       u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+       val |= CCN_DT_PMCR__PMU_EN;
+       writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
+static void arm_ccn_pmu_disable(struct pmu *pmu)
+{
+       struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+       u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+       val &= ~CCN_DT_PMCR__PMU_EN;
+       writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
 static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
 {
        u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
@@ -1252,6 +1293,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
                .start = arm_ccn_pmu_event_start,
                .stop = arm_ccn_pmu_event_stop,
                .read = arm_ccn_pmu_event_read,
+               .pmu_enable = arm_ccn_pmu_enable,
+               .pmu_disable = arm_ccn_pmu_disable,
        };
 
        /* No overflow interrupt? Have to use a timer instead. */
@@ -1361,6 +1404,8 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
 
        switch (type) {
        case CCN_TYPE_MN:
+               ccn->mn_id = id;
+               return 0;
        case CCN_TYPE_DT:
                return 0;
        case CCN_TYPE_XP:
@@ -1471,8 +1516,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
                /* Can set 'disable' bits, so can acknowledge interrupts */
                writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
                                ccn->base + CCN_MN_ERRINT_STATUS);
-               err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
-                               dev_name(ccn->dev), ccn);
+               err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
+                                      IRQF_NOBALANCING | IRQF_NO_THREAD,
+                                      dev_name(ccn->dev), ccn);
                if (err)
                        return err;
 
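[Editor's note] The arm-ccn hunks above stop starting and cancelling the
polling hrtimer in event_start()/event_stop() and instead tie it to the first
counter added and the last counter removed. A hedged sketch of that lifetime
pattern; active_counters, timer_arm() and timer_disarm() are illustrative
stand-ins for bitmap_weight() on pmu_counters_mask, hrtimer_start() and
hrtimer_cancel():

	static unsigned int active_counters;

	static void timer_arm(void)    { /* hrtimer_start(...) in the driver */ }
	static void timer_disarm(void) { /* hrtimer_cancel(...) in the driver */ }

	static void counter_add(void)
	{
		if (++active_counters == 1)
			timer_arm();     /* first user: start polling for overflows */
	}

	static void counter_del(void)
	{
		if (--active_counters == 0)
			timer_disarm();  /* last user gone: stop the polling timer */
	}
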
index c3cb76b363c63c54d67343dacf4c9ec20032a95f..9efdf1de4035e226825c8435f72f07e8055584d2 100644 (file)
@@ -178,6 +178,7 @@ static int vexpress_config_populate(struct device_node *node)
 
        parent = class_find_device(vexpress_config_class, NULL, bridge,
                        vexpress_config_node_match);
+       of_node_put(bridge);
        if (WARN_ON(!parent))
                return -ENODEV;
 
index 56ad5a5936a9a3fb17f6480dceb7acd37e3c8be6..8c0770bf8881351c1d956d310c5d670684090770 100644 (file)
@@ -244,7 +244,7 @@ config HW_RANDOM_TX4939
 
 config HW_RANDOM_MXC_RNGA
        tristate "Freescale i.MX RNGA Random Number Generator"
-       depends on ARCH_HAS_RNGA
+       depends on SOC_IMX31
        default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
index 08c7e23ed535bd665d3a9a6db68293d2ae93860b..0c75c3f1689fc2a053daeaab7f6e862f451c331b 100644 (file)
@@ -957,7 +957,7 @@ int tpm2_auto_startup(struct tpm_chip *chip)
                goto out;
 
        rc = tpm2_do_selftest(chip);
-       if (rc != TPM2_RC_INITIALIZE) {
+       if (rc != 0 && rc != TPM2_RC_INITIALIZE) {
                dev_err(&chip->dev, "TPM self test failed\n");
                goto out;
        }
@@ -974,7 +974,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
                }
        }
 
-       return rc;
 out:
        if (rc > 0)
                rc = -ENODEV;
index d2406fe2553396a807a169ddd93b656a4876a537..5da47e26a0124262734357853eb8290d6e817712 100644 (file)
@@ -165,6 +165,12 @@ struct ports_device {
         */
        struct virtqueue *c_ivq, *c_ovq;
 
+       /*
+        * A control packet buffer for guest->host requests, protected
+        * by c_ovq_lock.
+        */
+       struct virtio_console_control cpkt;
+
        /* Array of per-port IO virtqueues */
        struct virtqueue **in_vqs, **out_vqs;
 
@@ -560,28 +566,29 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
                                  unsigned int event, unsigned int value)
 {
        struct scatterlist sg[1];
-       struct virtio_console_control cpkt;
        struct virtqueue *vq;
        unsigned int len;
 
        if (!use_multiport(portdev))
                return 0;
 
-       cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
-       cpkt.event = cpu_to_virtio16(portdev->vdev, event);
-       cpkt.value = cpu_to_virtio16(portdev->vdev, value);
-
        vq = portdev->c_ovq;
 
-       sg_init_one(sg, &cpkt, sizeof(cpkt));
-
        spin_lock(&portdev->c_ovq_lock);
-       if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
+
+       portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
+       portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
+       portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
+
+       sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
+
+       if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
                virtqueue_kick(vq);
                while (!virtqueue_get_buf(vq, &len)
                        && !virtqueue_is_broken(vq))
                        cpu_relax();
        }
+
        spin_unlock(&portdev->c_ovq_lock);
        return 0;
 }
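[Editor's note] The virtio_console hunk above moves the control packet off the
caller's stack into the long-lived ports_device and fills it only while
c_ovq_lock is held. A hedged userspace sketch of the underlying rule -- a
buffer handed to an asynchronous consumer must outlive the function that
queues it and be serialized with the queue itself; the pthread mutex and
queue_ctrl() are stand-ins for the spinlock and the virtqueue add/kick/poll
sequence:

	#include <pthread.h>

	struct dev_state {
		pthread_mutex_t lock;                        /* stands in for c_ovq_lock */
		struct { unsigned id, event, value; } cpkt;  /* long-lived control buffer */
	};

	/* Placeholder for virtqueue_add_outbuf() + kick + wait-for-completion. */
	static void queue_ctrl(struct dev_state *d) { (void)d; }

	static void send_control_msg(struct dev_state *d, unsigned id,
				     unsigned event, unsigned value)
	{
		pthread_mutex_lock(&d->lock);
		/* Fill the buffer only under the lock that serializes senders. */
		d->cpkt.id = id;
		d->cpkt.event = event;
		d->cpkt.value = value;
		queue_ctrl(d);  /* consumer sees storage that never goes out of scope */
		pthread_mutex_unlock(&d->lock);
	}
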
index d359c92e13a65c00725f3a1178a1096babc0e226..e38bf60c0ff4e28cfd0005ba888f3280962619a7 100644 (file)
@@ -69,6 +69,7 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
        DEF_FIXED(".s1",        CLK_S1,            CLK_PLL1_DIV2,  3, 1),
        DEF_FIXED(".s2",        CLK_S2,            CLK_PLL1_DIV2,  4, 1),
        DEF_FIXED(".s3",        CLK_S3,            CLK_PLL1_DIV2,  6, 1),
+       DEF_FIXED(".sdsrc",     CLK_SDSRC,         CLK_PLL1_DIV2,  2, 1),
 
        /* Core Clock Outputs */
        DEF_FIXED("ztr",        R8A7795_CLK_ZTR,   CLK_PLL1_DIV2,  6, 1),
@@ -87,10 +88,10 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
        DEF_FIXED("s3d2",       R8A7795_CLK_S3D2,  CLK_S3,         2, 1),
        DEF_FIXED("s3d4",       R8A7795_CLK_S3D4,  CLK_S3,         4, 1),
 
-       DEF_GEN3_SD("sd0",      R8A7795_CLK_SD0,   CLK_PLL1_DIV2, 0x0074),
-       DEF_GEN3_SD("sd1",      R8A7795_CLK_SD1,   CLK_PLL1_DIV2, 0x0078),
-       DEF_GEN3_SD("sd2",      R8A7795_CLK_SD2,   CLK_PLL1_DIV2, 0x0268),
-       DEF_GEN3_SD("sd3",      R8A7795_CLK_SD3,   CLK_PLL1_DIV2, 0x026c),
+       DEF_GEN3_SD("sd0",      R8A7795_CLK_SD0,   CLK_SDSRC,     0x0074),
+       DEF_GEN3_SD("sd1",      R8A7795_CLK_SD1,   CLK_SDSRC,     0x0078),
+       DEF_GEN3_SD("sd2",      R8A7795_CLK_SD2,   CLK_SDSRC,     0x0268),
+       DEF_GEN3_SD("sd3",      R8A7795_CLK_SD3,   CLK_SDSRC,     0x026c),
 
        DEF_FIXED("cl",         R8A7795_CLK_CL,    CLK_PLL1_DIV2, 48, 1),
        DEF_FIXED("cp",         R8A7795_CLK_CP,    CLK_EXTAL,      2, 1),
index c109d80e7a8a96340f187a81b3699ae115928a71..cdfabeb9a034c5fec840dc3c952a385b36536df5 100644 (file)
@@ -833,9 +833,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
 
        /* perihp */
        GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
-                       RK3399_CLKGATE_CON(5), 0, GFLAGS),
-       GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(5), 1, GFLAGS),
+       GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+                       RK3399_CLKGATE_CON(5), 0, GFLAGS),
        COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
                        RK3399_CLKGATE_CON(5), 2, GFLAGS),
@@ -923,9 +923,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
                        RK3399_CLKGATE_CON(6), 14, GFLAGS),
 
        GATE(0, "cpll_aclk_emmc_src", "cpll", CLK_IGNORE_UNUSED,
-                       RK3399_CLKGATE_CON(6), 12, GFLAGS),
-       GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(6), 13, GFLAGS),
+       GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
+                       RK3399_CLKGATE_CON(6), 12, GFLAGS),
        COMPOSITE_NOGATE(ACLK_EMMC, "aclk_emmc", mux_aclk_emmc_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS),
        GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED,
@@ -1071,7 +1071,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
        /* vio */
        COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS,
-                       RK3399_CLKGATE_CON(11), 10, GFLAGS),
+                       RK3399_CLKGATE_CON(11), 0, GFLAGS),
        COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0,
                        RK3399_CLKSEL_CON(43), 0, 5, DFLAGS,
                        RK3399_CLKGATE_CON(11), 1, GFLAGS),
@@ -1484,6 +1484,7 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
        "hclk_perilp1",
        "hclk_perilp1_noc",
        "aclk_dmac0_perilp",
+       "aclk_emmc_noc",
        "gpll_hclk_perilp1_src",
        "gpll_aclk_perilp0_src",
        "gpll_aclk_perihp_src",
index 9af359544110b59fd61dca76e69a7b922107fb15..267f99523fbe176d890e17a43a19c84abe733fef 100644 (file)
@@ -783,14 +783,14 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
        [RST_BUS_I2S1]          =  { 0x2d0, BIT(13) },
        [RST_BUS_I2S2]          =  { 0x2d0, BIT(14) },
 
-       [RST_BUS_I2C0]          =  { 0x2d4, BIT(0) },
-       [RST_BUS_I2C1]          =  { 0x2d4, BIT(1) },
-       [RST_BUS_I2C2]          =  { 0x2d4, BIT(2) },
-       [RST_BUS_UART0]         =  { 0x2d4, BIT(16) },
-       [RST_BUS_UART1]         =  { 0x2d4, BIT(17) },
-       [RST_BUS_UART2]         =  { 0x2d4, BIT(18) },
-       [RST_BUS_UART3]         =  { 0x2d4, BIT(19) },
-       [RST_BUS_SCR]           =  { 0x2d4, BIT(20) },
+       [RST_BUS_I2C0]          =  { 0x2d8, BIT(0) },
+       [RST_BUS_I2C1]          =  { 0x2d8, BIT(1) },
+       [RST_BUS_I2C2]          =  { 0x2d8, BIT(2) },
+       [RST_BUS_UART0]         =  { 0x2d8, BIT(16) },
+       [RST_BUS_UART1]         =  { 0x2d8, BIT(17) },
+       [RST_BUS_UART2]         =  { 0x2d8, BIT(18) },
+       [RST_BUS_UART3]         =  { 0x2d8, BIT(19) },
+       [RST_BUS_SCR]           =  { 0x2d8, BIT(20) },
 };
 
 static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
index fc17b5295e1694629a2c48d5647ea03f8879433f..51d4bac97ab301f9d0213ff40327325622fd2109 100644 (file)
@@ -31,7 +31,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
                return;
 
        WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg,
-                                          !(reg & lock), 100, 70000));
+                                          reg & lock, 100, 70000));
 }
 
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
index 4470ffc8cf0d3f2cd68e0587eb2f873e87dac008..d6fafb397489caaebee47a7c086bdccda7c1244f 100644 (file)
@@ -14,9 +14,9 @@
 #include "ccu_gate.h"
 #include "ccu_nk.h"
 
-void ccu_nk_find_best(unsigned long parent, unsigned long rate,
-                     unsigned int max_n, unsigned int max_k,
-                     unsigned int *n, unsigned int *k)
+static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
+                            unsigned int max_n, unsigned int max_k,
+                            unsigned int *n, unsigned int *k)
 {
        unsigned long best_rate = 0;
        unsigned int best_k = 0, best_n = 0;
index 0ee1f363e4be154749d4b37a07e215dbd705d76e..d8eab90ae661c245507fc31d11139ff23ff28745 100644 (file)
@@ -73,7 +73,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
                                          SUN4I_PLL2_PRE_DIV_WIDTH,
                                          CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
                                          &sun4i_a10_pll2_lock);
-       if (!prediv_clk) {
+       if (IS_ERR(prediv_clk)) {
                pr_err("Couldn't register the prediv clock\n");
                goto err_free_array;
        }
@@ -106,7 +106,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
                                          &mult->hw, &clk_multiplier_ops,
                                          &gate->hw, &clk_gate_ops,
                                          CLK_SET_RATE_PARENT);
-       if (!base_clk) {
+       if (IS_ERR(base_clk)) {
                pr_err("Couldn't register the base multiplier clock\n");
                goto err_free_multiplier;
        }
index 411d3033a96e9a729fd9b289cf390344a17f415f..b200ebf159eec42b3c46970c318fef053cfa468e 100644 (file)
@@ -48,7 +48,7 @@ static void __init sun8i_a23_mbus_setup(struct device_node *node)
                return;
 
        reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (!reg) {
+       if (IS_ERR(reg)) {
                pr_err("Could not get registers for sun8i-mbus-clk\n");
                goto err_free_parents;
        }
index 64da7b79a6e47fd4acf9098f1ff6c17ad07741da..933b5dd698b8cc86ddc924fd65aee483b53a2aec 100644 (file)
@@ -428,7 +428,7 @@ static struct tegra_clk_pll_params pll_d_params = {
        .div_nmp = &pllp_nmp,
        .freq_table = pll_d_freq_table,
        .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
-                TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+                TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
 static struct tegra_clk_pll_params pll_d2_params = {
@@ -446,7 +446,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
        .div_nmp = &pllp_nmp,
        .freq_table = pll_d_freq_table,
        .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
-                TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+                TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
 static const struct pdiv_map pllu_p[] = {
index 3494bc5a21d556bef082b1b7e44297db93ed98fb..7f0f5b26d8c5ddc5a953d6f12a4b75fb23058b27 100644 (file)
@@ -240,6 +240,7 @@ static int __init at91sam926x_pit_common_init(struct pit_data *data)
 static int __init at91sam926x_pit_dt_init(struct device_node *node)
 {
        struct pit_data *data;
+       int ret;
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
index 0bb44d5b5df49d385b5bcfd43297b750417461a7..2ee40fd360caaa1ffd3a60700b1bc31a3a533322 100644 (file)
@@ -74,6 +74,8 @@ static const struct of_device_id machines[] __initconst = {
        { .compatible = "ti,omap5", },
 
        { .compatible = "xlnx,zynq-7000", },
+
+       { }
 };
 
 static int __init cpufreq_dt_platdev_init(void)
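[Editor's note] The cpufreq-dt-platdev hunk above appends an empty "{ }" entry
because OF match tables are walked until an all-zero sentinel; without one the
walk runs past the end of the array. A hedged illustration of the convention,
with struct id and find_compatible() standing in for struct of_device_id and
the OF match helpers:

	#include <stddef.h>
	#include <string.h>

	struct id { const char *compatible; };

	static const struct id machines[] = {
		{ .compatible = "ti,omap5" },
		{ .compatible = "xlnx,zynq-7000" },
		{ }                             /* sentinel: terminates the walk below */
	};

	static const struct id *find_compatible(const struct id *tbl, const char *compat)
	{
		for (; tbl->compatible; tbl++)  /* relies on the empty sentinel */
			if (!strcmp(tbl->compatible, compat))
				return tbl;
		return NULL;
	}
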
index 6dc597126b79e06e7410296576ef0705f8fe37ce..b3044219772cd7ac57e0bf2559eb7dea8caeca08 100644 (file)
@@ -556,7 +556,10 @@ skip_enc:
 
        /* Read and write assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+       if (alg->caam.geniv)
+               append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+       else
+               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -565,6 +568,14 @@ skip_enc:
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);
 
+       if (alg->caam.geniv) {
+               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+                               LDST_SRCDST_BYTE_CONTEXT |
+                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
+               append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+                           (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+       }
+
        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -2150,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
 
        init_aead_job(req, edesc, all_contig, encrypt);
 
-       if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+       if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
                append_load_as_imm(desc, req->iv, ivsize,
                                   LDST_CLASS_1_CCB |
                                   LDST_SRCDST_BYTE_CONTEXT |
@@ -2537,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
        return ret;
 }
 
-static int aead_givdecrypt(struct aead_request *req)
-{
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       unsigned int ivsize = crypto_aead_ivsize(aead);
-
-       if (req->cryptlen < ivsize)
-               return -EINVAL;
-
-       req->cryptlen -= ivsize;
-       req->assoclen += ivsize;
-
-       return aead_decrypt(req);
-}
-
 /*
  * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
@@ -3210,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -3256,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -3302,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -3348,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -3394,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -3440,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
@@ -3486,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -3534,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -3582,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -3630,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -3678,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -3726,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
@@ -3772,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -3818,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -3864,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -3910,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -3956,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -4002,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
@@ -4051,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -4102,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -4153,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -4204,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -4255,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -4306,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
index 769148dbaeb34689e20f44fba4ab5c788648a5a3..20f35df8a01fafefbb6f5da84903fa4a2eefc3a7 100644 (file)
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { {
                        .setkey = qat_alg_ablkcipher_xts_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
-                       .min_keysize = AES_MIN_KEY_SIZE,
-                       .max_keysize = AES_MAX_KEY_SIZE,
+                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
+                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
index cfb25413917c380e997d497fe82343cab03a88fc..24353ec336c5bc815e499fb1a38b08d21ec12ebd 100644 (file)
@@ -129,8 +129,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 
                blkcipher_walk_init(&walk, dst, src, nbytes);
 
-               iv = (u8 *)walk.iv;
                ret = blkcipher_walk_virt(desc, &walk);
+               iv = walk.iv;
                memset(tweak, 0, AES_BLOCK_SIZE);
                aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
index 803f3953b341a42aa47adcc4f8405f5dfa501a06..29f600f2c447159e84b17bc43bdcd66ca8bbcc7b 100644 (file)
@@ -459,7 +459,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
        }
 
        pgoff = linear_page_index(vma, pmd_addr);
-       phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE);
+       phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
                                pgoff);
index e434ffe7bc5c57cfd22f7b9f163f0819a1294981..832cbd6471450a0f8f571145c603696f6e19687c 100644 (file)
@@ -2067,7 +2067,7 @@ err_dma_unregister:
 err_clk_disable:
        clk_disable_unprepare(atxdmac->clk);
 err_free_irq:
-       free_irq(atxdmac->irq, atxdmac->dma.dev);
+       free_irq(atxdmac->irq, atxdmac);
        return ret;
 }
 
@@ -2081,7 +2081,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
        dma_async_device_unregister(&atxdmac->dma);
        clk_disable_unprepare(atxdmac->clk);
 
-       free_irq(atxdmac->irq, atxdmac->dma.dev);
+       free_irq(atxdmac->irq, atxdmac);
 
        for (i = 0; i < atxdmac->dma.chancnt; i++) {
                struct at_xdmac_chan *atchan = &atxdmac->chan[i];
index aad167eaaee80b29e9386574c183533d0e8f0d83..de2a2a2b1d7526d99f00766e307f1a0d222b11ec 100644 (file)
@@ -836,6 +836,7 @@ static int fsl_re_probe(struct platform_device *ofdev)
                rc = of_property_read_u32(np, "reg", &off);
                if (rc) {
                        dev_err(dev, "Reg property not found in JQ node\n");
+                       of_node_put(np);
                        return -ENODEV;
                }
                /* Find out the Job Rings present under each JQ */
index a4c53be482cf3ce6ba639c510911e2060f07c616..624f1e1e9c55b0eb872c2b39f8debb8e381adb9f 100644 (file)
@@ -861,7 +861,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
 {
        struct mdc_dma *mdma;
        struct resource *res;
-       const struct of_device_id *match;
        unsigned int i;
        u32 val;
        int ret;
@@ -871,8 +870,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
                return -ENOMEM;
        platform_set_drvdata(pdev, mdma);
 
-       match = of_match_device(mdc_dma_of_match, &pdev->dev);
-       mdma->soc = match->data;
+       mdma->soc = of_device_get_match_data(&pdev->dev);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mdma->regs = devm_ioremap_resource(&pdev->dev, res);
index dc7850a422b8d4212495d067c4ea31c26e0af8a2..3f56f9ca44824d90d67c47ce78841d223a963ba3 100644 (file)
@@ -638,7 +638,7 @@ static bool pxad_try_hotchain(struct virt_dma_chan *vc,
                vd_last_issued = list_entry(vc->desc_issued.prev,
                                            struct virt_dma_desc, node);
                pxad_desc_chain(vd_last_issued, vd);
-               if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
+               if (is_chan_running(chan) || is_desc_completed(vd))
                        return true;
        }
 
@@ -671,6 +671,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
        struct virt_dma_desc *vd, *tmp;
        unsigned int dcsr;
        unsigned long flags;
+       bool vd_completed;
        dma_cookie_t last_started = 0;
 
        BUG_ON(!chan);
@@ -681,15 +682,17 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 
        spin_lock_irqsave(&chan->vc.lock, flags);
        list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
+               vd_completed = is_desc_completed(vd);
                dev_dbg(&chan->vc.chan.dev->device,
-                       "%s(): checking txd %p[%x]: completed=%d\n",
-                       __func__, vd, vd->tx.cookie, is_desc_completed(vd));
+                       "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
+                       __func__, vd, vd->tx.cookie, vd_completed,
+                       dcsr);
                last_started = vd->tx.cookie;
                if (to_pxad_sw_desc(vd)->cyclic) {
                        vchan_cyclic_callback(vd);
                        break;
                }
-               if (is_desc_completed(vd)) {
+               if (vd_completed) {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
                } else {
index 749f1bd5d65d77c2c9f200b437ecd6d98e445361..06ecdc38cee0a32dfa312a6ada4d807a560aa47d 100644 (file)
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
 {
        struct usb_dmac_chan *chan = dev;
        irqreturn_t ret = IRQ_NONE;
-       u32 mask = USB_DMACHCR_TE;
-       u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+       u32 mask = 0;
        u32 chcr;
+       bool xfer_end = false;
 
        spin_lock(&chan->vc.lock);
 
        chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
-       if (chcr & check_bits)
-               mask |= USB_DMACHCR_DE | check_bits;
+       if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+               mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+               if (chcr & USB_DMACHCR_DE)
+                       xfer_end = true;
+               ret |= IRQ_HANDLED;
+       }
        if (chcr & USB_DMACHCR_NULL) {
                /* An interruption of TE will happen after we set FTE */
                mask |= USB_DMACHCR_NULL;
                chcr |= USB_DMACHCR_FTE;
                ret |= IRQ_HANDLED;
        }
-       usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+       if (mask)
+               usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
 
-       if (chcr & check_bits) {
+       if (xfer_end)
                usb_dmac_isr_transfer_end(chan);
-               ret |= IRQ_HANDLED;
-       }
 
        spin_unlock(&chan->vc.lock);
 
index 438893762076ce9f025243f810de8a11e329adfd..ce2bc2a38101afdaed96bebedd186a528508f8cd 100644 (file)
@@ -709,9 +709,10 @@ static int scpi_probe(struct platform_device *pdev)
                struct mbox_client *cl = &pchan->cl;
                struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
 
-               if (of_address_to_resource(shmem, 0, &res)) {
+               ret = of_address_to_resource(shmem, 0, &res);
+               of_node_put(shmem);
+               if (ret) {
                        dev_err(dev, "failed to get SCPI payload mem resource\n");
-                       ret = -EINVAL;
                        goto err;
                }
 
index 94a58a082b9930a2acc05192273c6941ef003ef6..44c01390d0353fd3170fc797eb4ce6393229bd14 100644 (file)
@@ -229,14 +229,14 @@ static int __init dmi_id_init(void)
 
        ret = device_register(dmi_dev);
        if (ret)
-               goto fail_free_dmi_dev;
+               goto fail_put_dmi_dev;
 
        return 0;
 
-fail_free_dmi_dev:
-       kfree(dmi_dev);
-fail_class_unregister:
+fail_put_dmi_dev:
+       put_device(dmi_dev);
 
+fail_class_unregister:
        class_unregister(&dmi_class);
 
        return ret;
index 5a2631af7410782dc8f7ba993ab0b1139bf71abb..7dd2e2d372317f8a4ddb1fe464b29f662a001a28 100644 (file)
@@ -657,9 +657,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
                }
 
                if (subnode) {
-                       node = of_get_flat_dt_subnode_by_name(node, subnode);
-                       if (node < 0)
+                       int err = of_get_flat_dt_subnode_by_name(node, subnode);
+
+                       if (err < 0)
                                return 0;
+
+                       node = err;
                }
 
                return __find_uefi_params(node, info, dt_params[i].params);
index 3bd127f953151dff89d0563db13deb12046e4315..aded10662020493390e29cb74a1910a4c51e4172 100644 (file)
@@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
 #define EFI_ALLOC_ALIGN                EFI_PAGE_SIZE
 #endif
 
+#define EFI_MMAP_NR_SLACK_SLOTS        8
+
 struct file_info {
        efi_file_handle_t *handle;
        u64 size;
@@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
        }
 }
 
+static inline bool mmap_has_headroom(unsigned long buff_size,
+                                    unsigned long map_size,
+                                    unsigned long desc_size)
+{
+       unsigned long slack = buff_size - map_size;
+
+       return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
+
 efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
-                               efi_memory_desc_t **map,
-                               unsigned long *map_size,
-                               unsigned long *desc_size,
-                               u32 *desc_ver,
-                               unsigned long *key_ptr)
+                               struct efi_boot_memmap *map)
 {
        efi_memory_desc_t *m = NULL;
        efi_status_t status;
        unsigned long key;
        u32 desc_version;
 
-       *map_size = sizeof(*m) * 32;
+       *map->desc_size =       sizeof(*m);
+       *map->map_size =        *map->desc_size * 32;
+       *map->buff_size =       *map->map_size;
 again:
-       /*
-        * Add an additional efi_memory_desc_t because we're doing an
-        * allocation which may be in a new descriptor region.
-        */
-       *map_size += sizeof(*m);
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               *map_size, (void **)&m);
+                               *map->map_size, (void **)&m);
        if (status != EFI_SUCCESS)
                goto fail;
 
-       *desc_size = 0;
+       *map->desc_size = 0;
        key = 0;
-       status = efi_call_early(get_memory_map, map_size, m,
-                               &key, desc_size, &desc_version);
-       if (status == EFI_BUFFER_TOO_SMALL) {
+       status = efi_call_early(get_memory_map, map->map_size, m,
+                               &key, map->desc_size, &desc_version);
+       if (status == EFI_BUFFER_TOO_SMALL ||
+           !mmap_has_headroom(*map->buff_size, *map->map_size,
+                              *map->desc_size)) {
                efi_call_early(free_pool, m);
+               /*
+                * Make sure there are some entries of headroom so that the
+                * buffer can be reused for a new map after allocations are
+                * no longer permitted.  It's unlikely that the map will grow to
+                * exceed this headroom once we are ready to trigger
+                * ExitBootServices()
+                */
+               *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+               *map->buff_size = *map->map_size;
                goto again;
        }
 
        if (status != EFI_SUCCESS)
                efi_call_early(free_pool, m);
 
-       if (key_ptr && status == EFI_SUCCESS)
-               *key_ptr = key;
-       if (desc_ver && status == EFI_SUCCESS)
-               *desc_ver = desc_version;
+       if (map->key_ptr && status == EFI_SUCCESS)
+               *map->key_ptr = key;
+       if (map->desc_ver && status == EFI_SUCCESS)
+               *map->desc_ver = desc_version;
 
 fail:
-       *map = m;
+       *map->map = m;
        return status;
 }
 
@@ -113,13 +128,20 @@ fail:
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
 {
        efi_status_t status;
-       unsigned long map_size;
+       unsigned long map_size, buff_size;
        unsigned long membase  = EFI_ERROR;
        struct efi_memory_map map;
        efi_memory_desc_t *md;
+       struct efi_boot_memmap boot_map;
 
-       status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map,
-                                   &map_size, &map.desc_size, NULL, NULL);
+       boot_map.map =          (efi_memory_desc_t **)&map.map;
+       boot_map.map_size =     &map_size;
+       boot_map.desc_size =    &map.desc_size;
+       boot_map.desc_ver =     NULL;
+       boot_map.key_ptr =      NULL;
+       boot_map.buff_size =    &buff_size;
+
+       status = efi_get_memory_map(sys_table_arg, &boot_map);
        if (status != EFI_SUCCESS)
                return membase;
 
@@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
                            unsigned long size, unsigned long align,
                            unsigned long *addr, unsigned long max)
 {
-       unsigned long map_size, desc_size;
+       unsigned long map_size, desc_size, buff_size;
        efi_memory_desc_t *map;
        efi_status_t status;
        unsigned long nr_pages;
        u64 max_addr = 0;
        int i;
+       struct efi_boot_memmap boot_map;
+
+       boot_map.map =          &map;
+       boot_map.map_size =     &map_size;
+       boot_map.desc_size =    &desc_size;
+       boot_map.desc_ver =     NULL;
+       boot_map.key_ptr =      NULL;
+       boot_map.buff_size =    &buff_size;
 
-       status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
-                                   NULL, NULL);
+       status = efi_get_memory_map(sys_table_arg, &boot_map);
        if (status != EFI_SUCCESS)
                goto fail;
 
@@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                           unsigned long size, unsigned long align,
                           unsigned long *addr)
 {
-       unsigned long map_size, desc_size;
+       unsigned long map_size, desc_size, buff_size;
        efi_memory_desc_t *map;
        efi_status_t status;
        unsigned long nr_pages;
        int i;
+       struct efi_boot_memmap boot_map;
 
-       status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
-                                   NULL, NULL);
+       boot_map.map =          &map;
+       boot_map.map_size =     &map_size;
+       boot_map.desc_size =    &desc_size;
+       boot_map.desc_ver =     NULL;
+       boot_map.key_ptr =      NULL;
+       boot_map.buff_size =    &buff_size;
+
+       status = efi_get_memory_map(sys_table_arg, &boot_map);
        if (status != EFI_SUCCESS)
                goto fail;
 
@@ -704,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
        *cmd_line_len = options_bytes;
        return (char *)cmdline_addr;
 }
+
+/*
+ * Handle calling ExitBootServices according to the requirements set out by the
+ * spec.  Obtains the current memory map, and returns that info after calling
+ * ExitBootServices.  The client must specify a function to perform any
+ * processing of the memory map data prior to ExitBootServices.  A client
+ * specific structure may be passed to the function via priv.  The client
+ * function may be called multiple times.
+ */
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
+                                   void *handle,
+                                   struct efi_boot_memmap *map,
+                                   void *priv,
+                                   efi_exit_boot_map_processing priv_func)
+{
+       efi_status_t status;
+
+       status = efi_get_memory_map(sys_table_arg, map);
+
+       if (status != EFI_SUCCESS)
+               goto fail;
+
+       status = priv_func(sys_table_arg, map, priv);
+       if (status != EFI_SUCCESS)
+               goto free_map;
+
+       status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+
+       if (status == EFI_INVALID_PARAMETER) {
+               /*
+                * The memory map changed between efi_get_memory_map() and
+                * exit_boot_services().  Per the UEFI Spec v2.6, Section 6.4:
+                * EFI_BOOT_SERVICES.ExitBootServices we need to get the
+                * updated map, and try again.  The spec implies one retry
+                * should be sufficient, which is confirmed against the EDK2
+                * implementation.  Per the spec, we can only invoke
+                * get_memory_map() and exit_boot_services() - we cannot alloc
+                * so efi_get_memory_map() cannot be used, and we must reuse
+                * the buffer.  For all practical purposes, the headroom in the
+                * buffer should account for any changes in the map so the call
+                * to get_memory_map() is expected to succeed here.
+                */
+               *map->map_size = *map->buff_size;
+               status = efi_call_early(get_memory_map,
+                                       map->map_size,
+                                       *map->map,
+                                       map->key_ptr,
+                                       map->desc_size,
+                                       map->desc_ver);
+
+               /* exit_boot_services() was called, thus cannot free */
+               if (status != EFI_SUCCESS)
+                       goto fail;
+
+               status = priv_func(sys_table_arg, map, priv);
+               /* exit_boot_services() was called, thus cannot free */
+               if (status != EFI_SUCCESS)
+                       goto fail;
+
+               status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+       }
+
+       /* exit_boot_services() was called, thus cannot free */
+       if (status != EFI_SUCCESS)
+               goto fail;
+
+       return EFI_SUCCESS;
+
+free_map:
+       efi_call_early(free_pool, *map->map);
+fail:
+       return status;
+}
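
For orientation, a minimal sketch of how a stub-side caller is expected to wire up the new helper follows; the struct efi_boot_memmap layout and the efi_exit_boot_services() signature come from the hunks above, while the function and callback names here are purely illustrative (a real caller, allocate_new_fdt_and_exit_boot(), is converted further down in this diff).

static efi_status_t example_map_func(efi_system_table_t *sys_table_arg,
				     struct efi_boot_memmap *map,
				     void *priv)
{
	/*
	 * Inspect or snapshot *map->map here.  This may run a second time
	 * after a failed ExitBootServices attempt, so it must not allocate.
	 */
	return EFI_SUCCESS;
}

static efi_status_t example_exit(efi_system_table_t *sys_table_arg,
				 void *handle)
{
	unsigned long map_size, desc_size, buff_size, mmap_key;
	efi_memory_desc_t *memory_map;
	u32 desc_ver;
	struct efi_boot_memmap map = {
		.map		= &memory_map,
		.map_size	= &map_size,
		.desc_size	= &desc_size,
		.desc_ver	= &desc_ver,
		.key_ptr	= &mmap_key,
		.buff_size	= &buff_size,
	};

	/* Fills the map, runs the callback, then calls ExitBootServices. */
	return efi_exit_boot_services(sys_table_arg, handle, &map,
				      NULL, example_map_func);
}
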
index e58abfa953cc5a30f49e4bdbd5f2857c6bcf876d..a6a93116a8f053f6c14911376ffa6da7f1dff44e 100644 (file)
@@ -152,6 +152,27 @@ fdt_set_fail:
 #define EFI_FDT_ALIGN EFI_PAGE_SIZE
 #endif
 
+struct exit_boot_struct {
+       efi_memory_desc_t *runtime_map;
+       int *runtime_entry_count;
+};
+
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+                                  struct efi_boot_memmap *map,
+                                  void *priv)
+{
+       struct exit_boot_struct *p = priv;
+       /*
+        * Update the memory map with virtual addresses. The function will also
+        * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+        * entries so that we can pass it straight to SetVirtualAddressMap()
+        */
+       efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+                       p->runtime_map, p->runtime_entry_count);
+
+       return EFI_SUCCESS;
+}
+
 /*
  * Allocate memory for a new FDT, then add EFI, commandline, and
  * initrd related fields to the FDT.  This routine increases the
@@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                                            unsigned long fdt_addr,
                                            unsigned long fdt_size)
 {
-       unsigned long map_size, desc_size;
+       unsigned long map_size, desc_size, buff_size;
        u32 desc_ver;
        unsigned long mmap_key;
        efi_memory_desc_t *memory_map, *runtime_map;
        unsigned long new_fdt_size;
        efi_status_t status;
        int runtime_entry_count = 0;
+       struct efi_boot_memmap map;
+       struct exit_boot_struct priv;
+
+       map.map =       &runtime_map;
+       map.map_size =  &map_size;
+       map.desc_size = &desc_size;
+       map.desc_ver =  &desc_ver;
+       map.key_ptr =   &mmap_key;
+       map.buff_size = &buff_size;
 
        /*
         * Get a copy of the current memory map that we will use to prepare
@@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
         * subsequent allocations adding entries, since they could not affect
         * the number of EFI_MEMORY_RUNTIME regions.
         */
-       status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
-                                   &desc_size, &desc_ver, &mmap_key);
+       status = efi_get_memory_map(sys_table, &map);
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
                return status;
@@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
        pr_efi(sys_table,
               "Exiting boot services and installing virtual address map...\n");
 
+       map.map = &memory_map;
        /*
         * Estimate size of new FDT, and allocate memory for it. We
         * will allocate a bigger buffer if this ends up being too
@@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                 * we can get the memory map key needed for
                 * exit_boot_services().
                 */
-               status = efi_get_memory_map(sys_table, &memory_map, &map_size,
-                                           &desc_size, &desc_ver, &mmap_key);
+               status = efi_get_memory_map(sys_table, &map);
                if (status != EFI_SUCCESS)
                        goto fail_free_new_fdt;
 
@@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                }
        }
 
-       /*
-        * Update the memory map with virtual addresses. The function will also
-        * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
-        * entries so that we can pass it straight into SetVirtualAddressMap()
-        */
-       efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
-                       &runtime_entry_count);
-
-       /* Now we are ready to exit_boot_services.*/
-       status = sys_table->boottime->exit_boot_services(handle, mmap_key);
+       sys_table->boottime->free_pool(memory_map);
+       priv.runtime_map = runtime_map;
+       priv.runtime_entry_count = &runtime_entry_count;
+       status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+                                       exit_boot_func);
 
        if (status == EFI_SUCCESS) {
                efi_set_virtual_address_map_t *svam;
index 53f6d3fe6d8621519634d3aa2299aeaf5f2ca90d..0c9f58c5ba501157f0c8826ac0d80b485d86b716 100644 (file)
@@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
                              unsigned long random_seed)
 {
        unsigned long map_size, desc_size, total_slots = 0, target_slot;
+       unsigned long buff_size;
        efi_status_t status;
        efi_memory_desc_t *memory_map;
        int map_offset;
+       struct efi_boot_memmap map;
 
-       status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
-                                   &desc_size, NULL, NULL);
+       map.map =       &memory_map;
+       map.map_size =  &map_size;
+       map.desc_size = &desc_size;
+       map.desc_ver =  NULL;
+       map.key_ptr =   NULL;
+       map.buff_size = &buff_size;
+
+       status = efi_get_memory_map(sys_table_arg, &map);
        if (status != EFI_SUCCESS)
                return status;
 
index 66a94103798bbed169fd9632692fa074549c8006..24caedb00a7a34f141bced4062f07a8d72439a63 100644 (file)
@@ -1131,6 +1131,7 @@ menu "SPI or I2C GPIO expanders"
 
 config GPIO_MCP23S08
        tristate "Microchip MCP23xxx I/O expander"
+       depends on OF_GPIO
        select GPIOLIB_IRQCHIP
        help
          SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
index ac22efc1840ee020f750359b13239ff59d1b20dc..99d37b56c258a24b67505022431ceb1e92403d68 100644 (file)
@@ -564,7 +564,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
        mcp->chip.direction_output = mcp23s08_direction_output;
        mcp->chip.set = mcp23s08_set;
        mcp->chip.dbg_show = mcp23s08_dbg_show;
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_GPIO
        mcp->chip.of_gpio_n_cells = 2;
        mcp->chip.of_node = dev->of_node;
 #endif
index 0c99e8fb9af3822350ffd660ab64b678a11bac65..8d8ee0ebf14c03dc96b95b8d98c2f2d2c73352c9 100644 (file)
@@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d,
 {
        irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
                                 handle_edge_irq);
-       irq_set_noprobe(irq);
+       irq_set_probe(irq);
 
        return 0;
 }
index 75e7b3919ea7756f0d417eee62855145ca678957..a28feb3edf33350f5e7de8986cdca6c3ea2ee764 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/io.h>
-#include <linux/io-mapping.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
index d0203a115effdd913d5fad1ed25775bad958f878..4667012b46b7fb35119fab2eaeb17b65e8afa980 100644 (file)
@@ -2015,6 +2015,7 @@ static struct attribute *it87_attributes_in[] = {
        &sensor_dev_attr_in10_input.dev_attr.attr,      /* 41 */
        &sensor_dev_attr_in11_input.dev_attr.attr,      /* 41 */
        &sensor_dev_attr_in12_input.dev_attr.attr,      /* 41 */
+       NULL
 };
 
 static const struct attribute_group it87_group_in = {
index f98743277e3c7491ac7063fa9c57cddc5dc1d229..258cb9a40ab3491b7c872432e07a2a1a435da78b 100644 (file)
@@ -643,7 +643,7 @@ static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter,
                        if (rc < 0) {
                                dev_err(dev->device,
                                        "restart cmd failed rc = %d\n", rc);
-                                       goto xfer_send_stop;
+                               goto xfer_send_stop;
                        }
                }
 
index 90bbd9f9dd8f778422d54d1cc988b67409588f08..3c16a2f7c673cb808b4113a65d41493bd167ca89 100644 (file)
@@ -767,7 +767,7 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
  * depending on the scaling direction.
  *
  * Return:     NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- *             to acknowedge the change, NOTIFY_DONE if the notification is
+ *             to acknowledge the change, NOTIFY_DONE if the notification is
  *             considered irrelevant.
  */
 static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
index c6922b806fb72516726816214134a69120f386f2..fcd973d5131e8f767cbe2af45cfac7bad74aed24 100644 (file)
@@ -367,13 +367,17 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
 
        /* Configure SDA Hold Time if required */
-       if (dev->sda_hold_time) {
-               reg = dw_readl(dev, DW_IC_COMP_VERSION);
-               if (reg >= DW_IC_SDA_HOLD_MIN_VERS)
+       reg = dw_readl(dev, DW_IC_COMP_VERSION);
+       if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
+               if (dev->sda_hold_time) {
                        dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
-               else
-                       dev_warn(dev->dev,
-                               "Hardware too old to adjust SDA hold time.");
+               } else {
+                       /* Keep previous hold time setting if no one set it */
+                       dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
+               }
+       } else {
+               dev_warn(dev->dev,
+                       "Hardware too old to adjust SDA hold time.\n");
        }
 
        /* Configure Tx/Rx FIFO threshold levels */
index 52407f3c9e1cce3f9a4c0e9d010ad0dddb6eee7c..9bd849dacee8724f1cbc3607f3bca013ee91e94b 100644 (file)
@@ -378,7 +378,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
        }
 
        dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
-       if (dma_mapping_error(dev, dma_addr)) {
+       if (dma_mapping_error(chan->device->dev, dma_addr)) {
                dev_dbg(dev, "dma map failed, using PIO\n");
                return;
        }
index 2bc8b01153d61985814591a962851b19bb0bb2b1..5c5b7cada8beef28d2d9e5c94bfa5d0c355d8af0 100644 (file)
@@ -918,7 +918,7 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
  * Code adapted from i2c-cadence.c.
  *
  * Return:     NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- *             to acknowedge the change, NOTIFY_DONE if the notification is
+ *             to acknowledge the change, NOTIFY_DONE if the notification is
  *             considered irrelevant.
  */
 static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
@@ -1111,6 +1111,15 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
        return ret < 0 ? ret : num;
 }
 
+static __maybe_unused int rk3x_i2c_resume(struct device *dev)
+{
+       struct rk3x_i2c *i2c = dev_get_drvdata(dev);
+
+       rk3x_i2c_adapt_div(i2c, clk_get_rate(i2c->clk));
+
+       return 0;
+}
+
 static u32 rk3x_i2c_func(struct i2c_adapter *adap)
 {
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
@@ -1334,12 +1343,15 @@ static int rk3x_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume);
+
 static struct platform_driver rk3x_i2c_driver = {
        .probe   = rk3x_i2c_probe,
        .remove  = rk3x_i2c_remove,
        .driver  = {
                .name  = "rk3x-i2c",
                .of_match_table = rk3x_i2c_match,
+               .pm = &rk3x_i2c_pm_ops,
        },
 };
 
index 6fb3e2645992297937635c0ffd0f791ed5c92b1e..05b1eeab9cf5f6a58b8af543a2c515cf7ee07498 100644 (file)
@@ -610,7 +610,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
                return;
 
        dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
-       if (dma_mapping_error(pd->dev, dma_addr)) {
+       if (dma_mapping_error(chan->device->dev, dma_addr)) {
                dev_dbg(pd->dev, "dma map failed, using PIO\n");
                return;
        }
index 215ac87f606d2d0f59bc17720ca2ec8fd60fae09..b3893f6282ba5b38920388657d5d1a70129a0148 100644 (file)
@@ -37,8 +37,6 @@ struct i2c_demux_pinctrl_priv {
        struct i2c_demux_pinctrl_chan chan[];
 };
 
-static struct property status_okay = { .name = "status", .length = 3, .value = "ok" };
-
 static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 {
        struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
@@ -107,6 +105,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
        of_changeset_revert(&priv->chan[new_chan].chgset);
  err:
        dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
+       priv->cur_chan = -EINVAL;
        return ret;
 }
 
@@ -192,6 +191,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct i2c_demux_pinctrl_priv *priv;
+       struct property *props;
        int num_chan, i, j, err;
 
        num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL);
@@ -202,7 +202,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
                           + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
-       if (!priv)
+
+       props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL);
+
+       if (!priv || !props)
                return -ENOMEM;
 
        err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
@@ -220,8 +223,12 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
                }
                priv->chan[i].parent_np = adap_np;
 
+               props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
+               props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+               props[i].length = 3;
+
                of_changeset_init(&priv->chan[i].chgset);
-               of_changeset_update_property(&priv->chan[i].chgset, adap_np, &status_okay);
+               of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]);
        }
 
        priv->num_chan = num_chan;
index 89d78208de3f30d6396162a208eb0f29460087b7..78f148ea9d9f92e42164c26fe980e53871ed1815 100644 (file)
@@ -20,6 +20,8 @@ config BMA180
 config BMA220
     tristate "Bosch BMA220 3-Axis Accelerometer Driver"
        depends on SPI
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
     help
       Say yes here to add support for the Bosch BMA220 triaxial
       acceleration sensor.
@@ -234,7 +236,8 @@ config STK8312
 config STK8BA50
        tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
        depends on I2C
-       depends on IIO_TRIGGER
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
          Say yes here to get support for the Sensortek STK8BA50 3-axis
          accelerometer.
index 1098d10df8e8e2bb6e6b36530a95655ac6d40b8a..5099f295dd378db1236ae5a07c288c2932cbb309 100644 (file)
@@ -253,7 +253,7 @@ static int bma220_probe(struct spi_device *spi)
        if (ret < 0)
                return ret;
 
-       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+       ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                                         bma220_trigger_handler, NULL);
        if (ret < 0) {
                dev_err(&spi->dev, "iio triggered buffer setup failed\n");
index bf17aae6614535b0ab3d68d3bfd84aec653a2f45..59b380dbf27f918297d83bd00ae585149baba5a2 100644 (file)
@@ -67,6 +67,9 @@
 #define BMC150_ACCEL_REG_PMU_BW                0x10
 #define BMC150_ACCEL_DEF_BW                    125
 
+#define BMC150_ACCEL_REG_RESET                 0x14
+#define BMC150_ACCEL_RESET_VAL                 0xB6
+
 #define BMC150_ACCEL_REG_INT_MAP_0             0x19
 #define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE       BIT(2)
 
@@ -1497,6 +1500,14 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
        int ret, i;
        unsigned int val;
 
+       /*
+        * Reset chip to get it in a known good state. A delay of 1.8ms after
+        * reset is required according to the data sheets of supported chips.
+        */
+       regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
+                    BMC150_ACCEL_RESET_VAL);
+       usleep_range(1800, 2500);
+
        ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
        if (ret < 0) {
                dev_err(dev, "Error: Reading chip id\n");
index 3a9f106787d28b2f85248402b68628fd02604460..9d72d4bcf5e9b316034219bb1b880fc55a72c076 100644 (file)
@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
                if (ret < 0)
                        goto error_ret;
                *val = ret;
+               ret = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SCALE:
                ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
                if (ret < 0)
                        goto error_ret;
+               *val = 0;
                *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
                ret = IIO_VAL_INT_PLUS_MICRO;
                break;
index 1de31bdd4ce4e2f12cda1c3e9c597df5c323038e..767577298ee3bd9f2cc06814f91a889d503aa589 100644 (file)
@@ -389,6 +389,7 @@ config QCOM_SPMI_VADC
 config ROCKCHIP_SARADC
        tristate "Rockchip SARADC driver"
        depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
+       depends on RESET_CONTROLLER
        help
          Say yes here to build support for the SARADC found in SoCs from
          Rockchip.
index b6163764489c677a2ec45dd5536464734ac641c6..9704090b79084f3206703c248aed3d774831744f 100644 (file)
@@ -527,6 +527,7 @@ static struct attribute_group ad799x_event_attrs_group = {
 static const struct iio_info ad7991_info = {
        .read_raw = &ad799x_read_raw,
        .driver_module = THIS_MODULE,
+       .update_scan_mode = ad799x_update_scan_mode,
 };
 
 static const struct iio_info ad7993_4_7_8_noirq_info = {
index 52430ba171f3893729701bbd0dd0c6cc2fb73dab..0438c68015e8fc77404633d1e6a16fa6982487e4 100644 (file)
@@ -381,8 +381,8 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
                st->ts_bufferedmeasure = false;
                input_report_key(st->ts_input, BTN_TOUCH, 0);
                input_sync(st->ts_input);
-       } else if (status & AT91_ADC_EOC(3)) {
-               /* Conversion finished */
+       } else if (status & AT91_ADC_EOC(3) && st->ts_input) {
+               /* Conversion finished and we have a touchscreen */
                if (st->ts_bufferedmeasure) {
                        /*
                         * Last measurement is always discarded, since it can
index f9ad6c2d68219234e250dcbdeb6d5909fb06bbe1..85d701291654074e1fa07978331eb01d89abd04a 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
 #include <linux/regulator/consumer.h>
 #include <linux/iio/iio.h>
 
@@ -53,6 +55,7 @@ struct rockchip_saradc {
        struct clk              *clk;
        struct completion       completion;
        struct regulator        *vref;
+       struct reset_control    *reset;
        const struct rockchip_saradc_data *data;
        u16                     last_val;
 };
@@ -190,6 +193,16 @@ static const struct of_device_id rockchip_saradc_match[] = {
 };
 MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
 
+/**
+ * Reset SARADC Controller.
+ */
+static void rockchip_saradc_reset_controller(struct reset_control *reset)
+{
+       reset_control_assert(reset);
+       usleep_range(10, 20);
+       reset_control_deassert(reset);
+}
+
 static int rockchip_saradc_probe(struct platform_device *pdev)
 {
        struct rockchip_saradc *info = NULL;
@@ -218,6 +231,20 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
        if (IS_ERR(info->regs))
                return PTR_ERR(info->regs);
 
+       /*
+        * The reset should be an optional property, as it should work
+        * with old devicetrees as well
+        */
+       info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb");
+       if (IS_ERR(info->reset)) {
+               ret = PTR_ERR(info->reset);
+               if (ret != -ENOENT)
+                       return ret;
+
+               dev_dbg(&pdev->dev, "no reset control found\n");
+               info->reset = NULL;
+       }
+
        init_completion(&info->completion);
 
        irq = platform_get_irq(pdev, 0);
@@ -252,6 +279,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
                return PTR_ERR(info->vref);
        }
 
+       if (info->reset)
+               rockchip_saradc_reset_controller(info->reset);
+
        /*
         * Use a default value for the converter clock.
         * This may become user-configurable in the future.
index 1ef398770a1f2c2dcbc760f911b5064ea4950954..066abaf8020157f2f247dc27d4ce55aec84a362d 100644 (file)
@@ -489,7 +489,8 @@ static struct iio_info ads1115_info = {
 #ifdef CONFIG_OF
 static int ads1015_get_channels_config_of(struct i2c_client *client)
 {
-       struct ads1015_data *data = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct ads1015_data *data = iio_priv(indio_dev);
        struct device_node *node;
 
        if (!client->dev.of_node ||
index 8a368756881b82431404f86766d70b4b8acdd236..c3cfacca25410c5660aaac5a13947f770fda05c0 100644 (file)
@@ -32,6 +32,7 @@
 
 struct tiadc_device {
        struct ti_tscadc_dev *mfd_tscadc;
+       struct mutex fifo1_lock; /* to protect fifo access */
        int channels;
        u8 channel_line[8];
        u8 channel_step[8];
@@ -359,6 +360,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
                int *val, int *val2, long mask)
 {
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
+       int ret = IIO_VAL_INT;
        int i, map_val;
        unsigned int fifo1count, read, stepid;
        bool found = false;
@@ -372,13 +374,14 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
        if (!step_en)
                return -EINVAL;
 
+       mutex_lock(&adc_dev->fifo1_lock);
        fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
        while (fifo1count--)
                tiadc_readl(adc_dev, REG_FIFO1);
 
        am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);
 
-       timeout = jiffies + usecs_to_jiffies
+       timeout = jiffies + msecs_to_jiffies
                                (IDLE_TIMEOUT * adc_dev->channels);
        /* Wait for Fifo threshold interrupt */
        while (1) {
@@ -388,7 +391,8 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
 
                if (time_after(jiffies, timeout)) {
                        am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
-                       return -EAGAIN;
+                       ret = -EAGAIN;
+                       goto err_unlock;
                }
        }
        map_val = adc_dev->channel_step[chan->scan_index];
@@ -414,8 +418,11 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
        am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
 
        if (found == false)
-               return -EBUSY;
-       return IIO_VAL_INT;
+               ret = -EBUSY;
+
+err_unlock:
+       mutex_unlock(&adc_dev->fifo1_lock);
+       return ret;
 }
 
 static const struct iio_info tiadc_info = {
@@ -483,6 +490,7 @@ static int tiadc_probe(struct platform_device *pdev)
 
        tiadc_step_config(indio_dev);
        tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
+       mutex_init(&adc_dev->fifo1_lock);
 
        err = tiadc_channel_init(indio_dev, adc_dev->channels);
        if (err < 0)
index ae038a59d256c1c9f6ff89acb65819090e66a829..407f141a1eee9785435cddb1d27b7922434295f3 100644 (file)
@@ -434,7 +434,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
                        break;
                case IIO_ELECTRICALCONDUCTIVITY:
                        *val = 1; /* 0.00001 */
-                       *val = 100000;
+                       *val2 = 100000;
                        break;
                case IIO_CONCENTRATION:
                        *val = 0; /* 0.000000001 */
index e81f434760f4c778c604c7d2e102bfa79adef9b2..dc33c1dd5191a57aaa8c3c66cdaa75a31866463c 100644 (file)
@@ -56,8 +56,8 @@ static struct {
        {HID_USAGE_SENSOR_ALS, 0, 1, 0},
        {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
 
-       {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0},
-       {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0},
+       {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
+       {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
 };
 
 static int pow_10(unsigned power)
index 792a97164cb28e04dd25d44cdffd8541b05f19c4..bebbd00304ce8486f0e3e5d6e03340806861925f 100644 (file)
@@ -65,6 +65,16 @@ struct stx104_gpio {
        unsigned int out_state;
 };
 
+/**
+ * struct stx104_dev - STX104 device private data structure
+ * @indio_dev: IIO device
+ * @chip:      instance of the gpio_chip
+ */
+struct stx104_dev {
+       struct iio_dev *indio_dev;
+       struct gpio_chip *chip;
+};
+
 static int stx104_read_raw(struct iio_dev *indio_dev,
        struct iio_chan_spec const *chan, int *val, int *val2, long mask)
 {
@@ -107,6 +117,7 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
 static int stx104_gpio_get_direction(struct gpio_chip *chip,
        unsigned int offset)
 {
+       /* GPIO 0-3 are input only, while the rest are output only */
        if (offset < 4)
                return 1;
 
@@ -169,6 +180,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
        struct iio_dev *indio_dev;
        struct stx104_iio *priv;
        struct stx104_gpio *stx104gpio;
+       struct stx104_dev *stx104dev;
        int err;
 
        indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
@@ -179,6 +191,10 @@ static int stx104_probe(struct device *dev, unsigned int id)
        if (!stx104gpio)
                return -ENOMEM;
 
+       stx104dev = devm_kzalloc(dev, sizeof(*stx104dev), GFP_KERNEL);
+       if (!stx104dev)
+               return -ENOMEM;
+
        if (!devm_request_region(dev, base[id], STX104_EXTENT,
                dev_name(dev))) {
                dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -199,12 +215,6 @@ static int stx104_probe(struct device *dev, unsigned int id)
        outw(0, base[id] + 4);
        outw(0, base[id] + 6);
 
-       err = devm_iio_device_register(dev, indio_dev);
-       if (err) {
-               dev_err(dev, "IIO device registering failed (%d)\n", err);
-               return err;
-       }
-
        stx104gpio->chip.label = dev_name(dev);
        stx104gpio->chip.parent = dev;
        stx104gpio->chip.owner = THIS_MODULE;
@@ -220,7 +230,9 @@ static int stx104_probe(struct device *dev, unsigned int id)
 
        spin_lock_init(&stx104gpio->lock);
 
-       dev_set_drvdata(dev, stx104gpio);
+       stx104dev->indio_dev = indio_dev;
+       stx104dev->chip = &stx104gpio->chip;
+       dev_set_drvdata(dev, stx104dev);
 
        err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
        if (err) {
@@ -228,14 +240,22 @@ static int stx104_probe(struct device *dev, unsigned int id)
                return err;
        }
 
+       err = iio_device_register(indio_dev);
+       if (err) {
+               dev_err(dev, "IIO device registering failed (%d)\n", err);
+               gpiochip_remove(&stx104gpio->chip);
+               return err;
+       }
+
        return 0;
 }
 
 static int stx104_remove(struct device *dev, unsigned int id)
 {
-       struct stx104_gpio *const stx104gpio = dev_get_drvdata(dev);
+       struct stx104_dev *const stx104dev = dev_get_drvdata(dev);
 
-       gpiochip_remove(&stx104gpio->chip);
+       iio_device_unregister(stx104dev->indio_dev);
+       gpiochip_remove(stx104dev->chip);
 
        return 0;
 }
index 738a86d9e4a9f8b17efc91a987a840169fe47f99..d04124345992c9bfcf857a17cdac19894df21a70 100644 (file)
@@ -6,6 +6,8 @@ menu "Humidity sensors"
 config AM2315
     tristate "Aosong AM2315 relative humidity and temperature sensor"
     depends on I2C
+    select IIO_BUFFER
+    select IIO_TRIGGERED_BUFFER
     help
       If you say yes here you get support for the Aosong AM2315
       relative humidity and ambient temperature sensor.
index 3e200f69e88657488a19de758c318f69ec314759..ff96b6d0fdae436f83e9620abcddcd60a958366e 100644 (file)
@@ -244,7 +244,7 @@ static int am2315_probe(struct i2c_client *client,
        indio_dev->channels = am2315_channels;
        indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
 
-       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+       ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                                         am2315_trigger_handler, NULL);
        if (ret < 0) {
                dev_err(&client->dev, "iio triggered buffer setup failed\n");
index a03832a5fc95aafd81b075710d32d57acf9f2677..e0c9c70c2a4ae6dcc1723abc114ad036511a0c97 100644 (file)
@@ -142,7 +142,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
        struct i2c_client *client = data->client;
        int delay = data->adc_int_us[chan->address];
        int ret;
-       int val;
+       __be16 val;
 
        /* start measurement */
        ret = i2c_smbus_write_byte(client, chan->address);
@@ -154,26 +154,13 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
        /* wait for integration time to pass */
        usleep_range(delay, delay + 1000);
 
-       /*
-        * i2c_smbus_read_word_data cannot() be used here due to the command
-        * value not being understood and causes NAKs preventing any reading
-        * from being accessed.
-        */
-       ret = i2c_smbus_read_byte(client);
+       /* read measurement */
+       ret = i2c_master_recv(data->client, (char *)&val, sizeof(val));
        if (ret < 0) {
-               dev_err(&client->dev, "cannot read high byte measurement");
+               dev_err(&client->dev, "cannot read sensor data\n");
                return ret;
        }
-       val = ret << 8;
-
-       ret = i2c_smbus_read_byte(client);
-       if (ret < 0) {
-               dev_err(&client->dev, "cannot read low byte measurement");
-               return ret;
-       }
-       val |= ret;
-
-       return val;
+       return be16_to_cpu(val);
 }
 
 static int hdc100x_get_heater_status(struct hdc100x_data *data)
@@ -272,8 +259,8 @@ static int hdc100x_probe(struct i2c_client *client,
        struct iio_dev *indio_dev;
        struct hdc100x_data *data;
 
-       if (!i2c_check_functionality(client->adapter,
-                               I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
+                                    I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
                return -EOPNOTSUPP;
 
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
index 90462fcf543698bab68378f5a0cb4b8d7262ef2c..158aaf44dd951047f7d6212b522dd8b921395b21 100644 (file)
@@ -107,9 +107,10 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 {
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        size_t datum_size;
        size_t to_wait;
-       int ret;
+       int ret = 0;
 
        if (!indio_dev->info)
                return -ENODEV;
@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
        else
                to_wait = min_t(size_t, n / datum_size, rb->watermark);
 
+       add_wait_queue(&rb->pollq, &wait);
        do {
-               ret = wait_event_interruptible(rb->pollq,
-                     iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
-               if (ret)
-                       return ret;
+               if (!indio_dev->info) {
+                       ret = -ENODEV;
+                       break;
+               }
 
-               if (!indio_dev->info)
-                       return -ENODEV;
+               if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
+                       if (signal_pending(current)) {
+                               ret = -ERESTARTSYS;
+                               break;
+                       }
+
+                       wait_woken(&wait, TASK_INTERRUPTIBLE,
+                                  MAX_SCHEDULE_TIMEOUT);
+                       continue;
+               }
 
                ret = rb->access->read_first_n(rb, n, buf);
                if (ret == 0 && (filp->f_flags & O_NONBLOCK))
                        ret = -EAGAIN;
-        } while (ret == 0);
+       } while (ret == 0);
+       remove_wait_queue(&rb->pollq, &wait);
 
        return ret;
 }
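
The rework above trades wait_event_interruptible() for an explicit wait_woken() loop, so the device-gone and readiness checks run with the task runnable and the waiter already queued, which keeps a wakeup from being lost between the test and the sleep.  The generic shape of that pattern, as a sketch (the wait queue and condition callback here are placeholders, not part of the IIO core):

#include <linux/wait.h>
#include <linux/sched.h>

static int wait_for_condition(wait_queue_head_t *wq,
			      bool (*cond)(void *data), void *data)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	add_wait_queue(wq, &wait);	/* queue first, then test */
	while (!cond(data)) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Sleep until a wake_up() on wq marks this entry woken. */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(wq, &wait);

	return ret;
}
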
index f914d5d140e4014f5a31011c7783959fe8022755..d2b889918c3e7ae99028cbb6d4e67961aa3a941a 100644 (file)
@@ -613,9 +613,8 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
                        return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
        case IIO_VAL_FRACTIONAL:
                tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
-               vals[1] = do_div(tmp, 1000000000LL);
-               vals[0] = tmp;
-               return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+               vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
+               return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
        case IIO_VAL_FRACTIONAL_LOG2:
                tmp = (s64)vals[0] * 1000000000LL >> vals[1];
                vals[1] = do_div(tmp, 1000000000LL);
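
The switch to div_s64_rem() matters for negative fractional values: do_div() performs an unsigned 64-bit division, so a negative intermediate yielded a bogus quotient/remainder pair.  A small stand-alone C program mirroring the arithmetic the new code performs (truncating signed division plus abs() on the remainder):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	/* IIO_VAL_FRACTIONAL with vals[0] = -3, vals[1] = 2, i.e. -1.5 */
	int64_t tmp = (int64_t)-3 * 1000000000LL / 2;	/* -1500000000 */

	int integer = (int)(tmp / 1000000000);	/* -1, truncates toward zero */
	int frac = (int)(tmp % 1000000000);	/* -500000000 */

	/* Mirrors: sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1])) */
	printf("%d.%09u\n", integer, (unsigned)abs(frac));	/* -1.500000000 */
	return 0;
}
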
index 7c566f516572957e7a6ab36764bc5ad79c16f058..3574945183fe8258fcb2bb98d6bc188b9aea5b9e 100644 (file)
@@ -76,7 +76,6 @@ config BH1750
 config BH1780
        tristate "ROHM BH1780 ambient light sensor"
        depends on I2C
-       depends on !SENSORS_BH1780
        help
         Say Y here to build support for the ROHM BH1780GLI ambient
         light sensor.
@@ -238,6 +237,8 @@ config MAX44000
        tristate "MAX44000 Ambient and Infrared Proximity Sensor"
        depends on I2C
        select REGMAP_I2C
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
         Say Y here if you want to build support for Maxim Integrated's
         MAX44000 ambient and infrared proximity sensor device.
index 6943688e66dfbebc5edae2959f4f43e558786c0a..e5a533cbd53fa58bbee3de2a7c624e2b252c62cc 100644 (file)
@@ -970,7 +970,7 @@ int bmp280_common_probe(struct device *dev,
        data->vdda = devm_regulator_get(dev, "vdda");
        if (IS_ERR(data->vdda)) {
                dev_err(dev, "failed to get VDDA regulator\n");
-               ret = PTR_ERR(data->vddd);
+               ret = PTR_ERR(data->vdda);
                goto out_disable_vddd;
        }
        ret = regulator_enable(data->vdda);
@@ -1079,7 +1079,8 @@ EXPORT_SYMBOL(bmp280_common_remove);
 #ifdef CONFIG_PM
 static int bmp280_runtime_suspend(struct device *dev)
 {
-       struct bmp280_data *data = dev_get_drvdata(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct bmp280_data *data = iio_priv(indio_dev);
        int ret;
 
        ret = regulator_disable(data->vdda);
@@ -1090,7 +1091,8 @@ static int bmp280_runtime_suspend(struct device *dev)
 
 static int bmp280_runtime_resume(struct device *dev)
 {
-       struct bmp280_data *data = dev_get_drvdata(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct bmp280_data *data = iio_priv(indio_dev);
        int ret;
 
        ret = regulator_enable(data->vddd);
index 2e3a70e1b24541677d80661d9dedb7742d1f3341..5656deb17261c63963bffddecaf8c48a98cd16e5 100644 (file)
@@ -397,7 +397,7 @@ static int as3935_probe(struct spi_device *spi)
                return ret;
        }
 
-       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+       ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                &as3935_trigger_handler, NULL);
 
        if (ret) {
index 3a3c5d73bbfc833a89c6143f0e732441c8488ab8..51c79b2fb0b837f4fbfe5a9672e98dd1dd304f61 100644 (file)
@@ -106,7 +106,6 @@ struct mcast_group {
        atomic_t                refcount;
        enum mcast_group_state  state;
        struct ib_sa_query      *query;
-       int                     query_id;
        u16                     pkey_index;
        u8                      leave_state;
        int                     retries;
@@ -340,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
                                       member->multicast.comp_mask,
                                       3000, GFP_KERNEL, join_handler, group,
                                       &group->query);
-       if (ret >= 0) {
-               group->query_id = ret;
-               ret = 0;
-       }
-       return ret;
+       return (ret > 0) ? 0 : ret;
 }
 
 static int send_leave(struct mcast_group *group, u8 leave_state)
@@ -364,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
                                       IB_SA_MCMEMBER_REC_JOIN_STATE,
                                       3000, GFP_KERNEL, leave_handler,
                                       group, &group->query);
-       if (ret >= 0) {
-               group->query_id = ret;
-               ret = 0;
-       }
-       return ret;
+       return (ret > 0) ? 0 : ret;
 }
 
 static void join_group(struct mcast_group *group, struct mcast_member *member,
index b6a953aed7e83d6058164ca66af2516ab0981e23..80f988984f4448ca42d53c1b9e2bba890948ec78 100644 (file)
@@ -333,6 +333,8 @@ static void remove_ep_tid(struct c4iw_ep *ep)
 
        spin_lock_irqsave(&ep->com.dev->lock, flags);
        _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+       if (idr_is_empty(&ep->com.dev->hwtid_idr))
+               wake_up(&ep->com.dev->wait);
        spin_unlock_irqrestore(&ep->com.dev->lock, flags);
 }
 
@@ -2117,8 +2119,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
                }
                ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
                                        n, pdev, rt_tos2priority(tos));
-               if (!ep->l2t)
+               if (!ep->l2t) {
+                       dev_put(pdev);
                        goto out;
+               }
                ep->mtu = pdev->mtu;
                ep->tx_chan = cxgb4_port_chan(pdev);
                ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
index 071d7332ec061489080a9ef55c1a6cbd9622af4d..3c4b2126e0d12e319a7779505673a651af689211 100644 (file)
@@ -872,9 +872,13 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 static void c4iw_dealloc(struct uld_ctx *ctx)
 {
        c4iw_rdev_close(&ctx->dev->rdev);
+       WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
        idr_destroy(&ctx->dev->cqidr);
+       WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
        idr_destroy(&ctx->dev->qpidr);
+       WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
        idr_destroy(&ctx->dev->mmidr);
+       wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
        idr_destroy(&ctx->dev->hwtid_idr);
        idr_destroy(&ctx->dev->stid_idr);
        idr_destroy(&ctx->dev->atid_idr);
@@ -992,6 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
        mutex_init(&devp->rdev.stats.lock);
        mutex_init(&devp->db_mutex);
        INIT_LIST_HEAD(&devp->db_fc_list);
+       init_waitqueue_head(&devp->wait);
        devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
 
        if (c4iw_debugfs_root) {
index aa47e0ae80bc4c42d7a0009714a26be8e0ff7bbb..4b83b84f7ddf4fe56f6c9d573b0a7f00a629db74 100644 (file)
@@ -263,6 +263,7 @@ struct c4iw_dev {
        struct idr stid_idr;
        struct list_head db_fc_list;
        u32 avail_ird;
+       wait_queue_head_t wait;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
index edb1172b6f54c14b991c1c3f76b36b612c841ca2..690435229be7a9afc17db2eb8c9a220b59546456 100644 (file)
@@ -683,7 +683,7 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
        return 0;
 }
 
-void _free_qp(struct kref *kref)
+static void _free_qp(struct kref *kref)
 {
        struct c4iw_qp *qhp;
 
index b32638d58ae82c73f920afee4a7fd0a4386581a8..cc38004cea42d2b56ce187fc1ba132b4e24398ea 100644 (file)
@@ -9490,6 +9490,78 @@ static void init_lcb(struct hfi1_devdata *dd)
        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
 }
 
+/*
+ * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
+ * on error.
+ */
+static int test_qsfp_read(struct hfi1_pportdata *ppd)
+{
+       int ret;
+       u8 status;
+
+       /* report success if not a QSFP */
+       if (ppd->port_type != PORT_TYPE_QSFP)
+               return 0;
+
+       /* read byte 2, the status byte */
+       ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
+       if (ret < 0)
+               return ret;
+       if (ret != 1)
+               return -EIO;
+
+       return 0; /* success */
+}
+
+/*
+ * Values for QSFP retry.
+ *
+ * Give up after 10s (20 x 500ms).  The overall timeout was empirically
+ * arrived at from experience on a large cluster.
+ */
+#define MAX_QSFP_RETRIES 20
+#define QSFP_RETRY_WAIT 500 /* msec */
+
+/*
+ * Try a QSFP read.  If it fails, schedule a retry for later.
+ * Called on first link activation after driver load.
+ */
+static void try_start_link(struct hfi1_pportdata *ppd)
+{
+       if (test_qsfp_read(ppd)) {
+               /* read failed */
+               if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
+                       dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
+                       return;
+               }
+               dd_dev_info(ppd->dd,
+                           "QSFP not responding, waiting and retrying %d\n",
+                           (int)ppd->qsfp_retry_count);
+               ppd->qsfp_retry_count++;
+               queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
+                                  msecs_to_jiffies(QSFP_RETRY_WAIT));
+               return;
+       }
+       ppd->qsfp_retry_count = 0;
+
+       /*
+        * Tune the SerDes to a ballpark setting for optimal signal and bit
+        * error rate.  Needs to be done before starting the link.
+        */
+       tune_serdes(ppd);
+       start_link(ppd);
+}
+
+/*
+ * Workqueue function to start the link after a delay.
+ */
+void handle_start_link(struct work_struct *work)
+{
+       struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+                                                 start_link_work.work);
+       try_start_link(ppd);
+}
+
 int bringup_serdes(struct hfi1_pportdata *ppd)
 {
        struct hfi1_devdata *dd = ppd->dd;
@@ -9525,14 +9597,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
                set_qsfp_int_n(ppd, 1);
        }
 
-       /*
-        * Tune the SerDes to a ballpark setting for
-        * optimal signal and bit error rate
-        * Needs to be done before starting the link
-        */
-       tune_serdes(ppd);
-
-       return start_link(ppd);
+       try_start_link(ppd);
+       return 0;
 }
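
The retry logic above is a bounded delayed-work loop: a failed QSFP probe reschedules itself up to MAX_QSFP_RETRIES times, QSFP_RETRY_WAIT ms apart, and the hfi1_quiet_serdes() hunk just below pins the count at the limit and flushes/cancels the work so no retry can slip in during teardown.  A generic sketch of that pattern (names and limits here are placeholders, not hfi1 API):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define RETRY_MAX	20
#define RETRY_WAIT_MS	500

struct retry_ctx {
	struct delayed_work work;	/* INIT_DELAYED_WORK(&work, retry_worker) at setup */
	unsigned int attempts;
	int (*try_once)(struct retry_ctx *ctx);	/* 0 on success */
};

static void retry_worker(struct work_struct *work)
{
	struct retry_ctx *ctx = container_of(to_delayed_work(work),
					     struct retry_ctx, work);

	if (!ctx->try_once(ctx)) {
		ctx->attempts = 0;	/* success, stop retrying */
		return;
	}
	if (ctx->attempts++ < RETRY_MAX)
		schedule_delayed_work(&ctx->work,
				      msecs_to_jiffies(RETRY_WAIT_MS));
}

static void retry_stop(struct retry_ctx *ctx)
{
	ctx->attempts = RETRY_MAX;	/* forbid further rescheduling */
	cancel_delayed_work_sync(&ctx->work);
}
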
 
 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
@@ -9549,6 +9615,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
        ppd->driver_link_ready = 0;
        ppd->link_enabled = 0;
 
+       ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
+       flush_delayed_work(&ppd->start_link_work);
+       cancel_delayed_work_sync(&ppd->start_link_work);
+
        ppd->offline_disabled_reason =
                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
        set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
@@ -12865,7 +12935,7 @@ fail:
  */
 static int set_up_context_variables(struct hfi1_devdata *dd)
 {
-       int num_kernel_contexts;
+       unsigned long num_kernel_contexts;
        int total_contexts;
        int ret;
        unsigned ngroups;
@@ -12894,9 +12964,9 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
         */
        if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
                dd_dev_err(dd,
-                          "Reducing # kernel rcv contexts to: %d, from %d\n",
+                          "Reducing # kernel rcv contexts to: %d, from %lu\n",
                           (int)(dd->chip_send_contexts - num_vls - 1),
-                          (int)num_kernel_contexts);
+                          num_kernel_contexts);
                num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
        }
        /*
index ed11107c50fe50614efc39b469a31cb7e240291c..e29573769efc04a6a059db8c06ae2b54a45d1d70 100644 (file)
@@ -706,6 +706,7 @@ void handle_link_up(struct work_struct *work);
 void handle_link_down(struct work_struct *work);
 void handle_link_downgrade(struct work_struct *work);
 void handle_link_bounce(struct work_struct *work);
+void handle_start_link(struct work_struct *work);
 void handle_sma_message(struct work_struct *work);
 void reset_qsfp(struct hfi1_pportdata *ppd);
 void qsfp_event(struct work_struct *work);
index a49cc88f08a2aa6b063903fa88291c070849143e..5e9be16f6cd3bc9fabd8f741972a24b1e00bb1e7 100644 (file)
 
 static struct dentry *hfi1_dbg_root;
 
+/* wrappers to enforce srcu in seq file */
+static ssize_t hfi1_seq_read(
+       struct file *file,
+       char __user *buf,
+       size_t size,
+       loff_t *ppos)
+{
+       struct dentry *d = file->f_path.dentry;
+       int srcu_idx;
+       ssize_t r;
+
+       r = debugfs_use_file_start(d, &srcu_idx);
+       if (likely(!r))
+               r = seq_read(file, buf, size, ppos);
+       debugfs_use_file_finish(srcu_idx);
+       return r;
+}
+
+static loff_t hfi1_seq_lseek(
+       struct file *file,
+       loff_t offset,
+       int whence)
+{
+       struct dentry *d = file->f_path.dentry;
+       int srcu_idx;
+       loff_t r;
+
+       r = debugfs_use_file_start(d, &srcu_idx);
+       if (likely(!r))
+               r = seq_lseek(file, offset, whence);
+       debugfs_use_file_finish(srcu_idx);
+       return r;
+}
+
 #define private2dd(file) (file_inode(file)->i_private)
 #define private2ppd(file) (file_inode(file)->i_private)
 
@@ -87,8 +121,8 @@ static int _##name##_open(struct inode *inode, struct file *s) \
 static const struct file_operations _##name##_file_ops = { \
        .owner   = THIS_MODULE, \
        .open    = _##name##_open, \
-       .read    = seq_read, \
-       .llseek  = seq_lseek, \
+       .read    = hfi1_seq_read, \
+       .llseek  = hfi1_seq_lseek, \
        .release = seq_release \
 }
 
@@ -105,11 +139,9 @@ do { \
        DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
 
 static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
        struct hfi1_opcode_stats_perctx *opstats;
 
-       rcu_read_lock();
        if (*pos >= ARRAY_SIZE(opstats->stats))
                return NULL;
        return pos;
@@ -126,9 +158,7 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static int _opcode_stats_seq_show(struct seq_file *s, void *v)
@@ -285,12 +315,10 @@ DEBUGFS_SEQ_FILE_OPEN(qp_stats)
 DEBUGFS_FILE_OPS(qp_stats);
 
 static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
        struct hfi1_ibdev *ibd;
        struct hfi1_devdata *dd;
 
-       rcu_read_lock();
        ibd = (struct hfi1_ibdev *)s->private;
        dd = dd_from_dev(ibd);
        if (!dd->per_sdma || *pos >= dd->num_sdma)
@@ -310,9 +338,7 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _sdes_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static int _sdes_seq_show(struct seq_file *s, void *v)
@@ -339,11 +365,9 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
        struct hfi1_devdata *dd;
        ssize_t rval;
 
-       rcu_read_lock();
        dd = private2dd(file);
        avail = hfi1_read_cntrs(dd, NULL, &counters);
        rval =  simple_read_from_buffer(buf, count, ppos, counters, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -356,11 +380,9 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
        struct hfi1_devdata *dd;
        ssize_t rval;
 
-       rcu_read_lock();
        dd = private2dd(file);
        avail = hfi1_read_cntrs(dd, &names, NULL);
        rval =  simple_read_from_buffer(buf, count, ppos, names, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -383,11 +405,9 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
        struct hfi1_devdata *dd;
        ssize_t rval;
 
-       rcu_read_lock();
        dd = private2dd(file);
        avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
        rval = simple_read_from_buffer(buf, count, ppos, names, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -400,11 +420,9 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
        struct hfi1_pportdata *ppd;
        ssize_t rval;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        avail = hfi1_read_portcntrs(ppd, NULL, &counters);
        rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -434,16 +452,13 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
        int used;
        int i;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        dd = ppd->dd;
        size = PAGE_SIZE;
        used = 0;
        tmp = kmalloc(size, GFP_KERNEL);
-       if (!tmp) {
-               rcu_read_unlock();
+       if (!tmp)
                return -ENOMEM;
-       }
 
        scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
        used += scnprintf(tmp + used, size - used,
@@ -470,7 +485,6 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
        used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
 
        ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
-       rcu_read_unlock();
        kfree(tmp);
        return ret;
 }
@@ -486,15 +500,12 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
        u64 scratch0;
        u64 clear;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        dd = ppd->dd;
 
        buff = kmalloc(count + 1, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto do_return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        ret = copy_from_user(buff, buf, count);
        if (ret > 0) {
@@ -527,8 +538,6 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 
  do_free:
        kfree(buff);
- do_return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -542,18 +551,14 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
        char *tmp;
        int ret;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!tmp) {
-               rcu_read_unlock();
+       if (!tmp)
                return -ENOMEM;
-       }
 
        ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
        if (ret > 0)
                ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
-       rcu_read_unlock();
        kfree(tmp);
        return ret;
 }
@@ -569,7 +574,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
        int offset;
        int total_written;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
 
        /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -577,16 +581,12 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
        offset = *ppos & 0xffff;
 
        /* explicitly reject invalid address 0 to catch cp and cat */
-       if (i2c_addr == 0) {
-               ret = -EINVAL;
-               goto _return;
-       }
+       if (i2c_addr == 0)
+               return -EINVAL;
 
        buff = kmalloc(count, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto _return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        ret = copy_from_user(buff, buf, count);
        if (ret > 0) {
@@ -606,8 +606,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 
  _free:
        kfree(buff);
- _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -636,7 +634,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
        int offset;
        int total_read;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
 
        /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -644,16 +641,12 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
        offset = *ppos & 0xffff;
 
        /* explicitly reject invalid address 0 to catch cp and cat */
-       if (i2c_addr == 0) {
-               ret = -EINVAL;
-               goto _return;
-       }
+       if (i2c_addr == 0)
+               return -EINVAL;
 
        buff = kmalloc(count, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto _return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
        if (total_read < 0) {
@@ -673,8 +666,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 
  _free:
        kfree(buff);
- _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -701,26 +692,20 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
        int ret;
        int total_written;
 
-       rcu_read_lock();
-       if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
-               ret = -EINVAL;
-               goto _return;
-       }
+       if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */
+               return -EINVAL;
 
        ppd = private2ppd(file);
 
        buff = kmalloc(count, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto _return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        ret = copy_from_user(buff, buf, count);
        if (ret > 0) {
                ret = -EFAULT;
                goto _free;
        }
-
        total_written = qsfp_write(ppd, target, *ppos, buff, count);
        if (total_written < 0) {
                ret = total_written;
@@ -733,8 +718,6 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
 
  _free:
        kfree(buff);
- _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -761,7 +744,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
        int ret;
        int total_read;
 
-       rcu_read_lock();
        if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
                ret = -EINVAL;
                goto _return;
@@ -794,7 +776,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
  _free:
        kfree(buff);
  _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1010,7 +991,6 @@ void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
        debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
 out:
        ibd->hfi1_ibdev_dbg = NULL;
-       synchronize_rcu();
 }
 
 /*
@@ -1035,9 +1015,7 @@ static const char * const hfi1_statnames[] = {
 };
 
 static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-       rcu_read_lock();
        if (*pos >= ARRAY_SIZE(hfi1_statnames))
                return NULL;
        return pos;
@@ -1055,9 +1033,7 @@ static void *_driver_stats_names_seq_next(
 }
 
 static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
@@ -1073,9 +1049,7 @@ DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
 DEBUGFS_FILE_OPS(driver_stats_names);
 
 static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-       rcu_read_lock();
        if (*pos >= ARRAY_SIZE(hfi1_statnames))
                return NULL;
        return pos;
@@ -1090,9 +1064,7 @@ static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _driver_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static u64 hfi1_sps_ints(void)
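
The hfi1 debugfs hunks above drop an rcu_read_lock()/rcu_read_unlock() pair around handlers that sleep (kmalloc() with GFP_KERNEL, copy_from_user(), bus transfers), which is not permitted inside an RCU read-side critical section; with the lock gone, the _return labels collapse into direct returns. A minimal sketch of the resulting handler shape, using hypothetical names rather than the hfi1 code:

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Hypothetical handler showing the pattern: no RCU section, so sleeping
 * calls are allowed and early error paths simply return. */
static ssize_t example_debugfs_write(struct file *file, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        u8 *buff;
        ssize_t ret;

        if (count == 0)
                return -EINVAL;                 /* direct return, nothing to unlock */

        buff = kmalloc(count, GFP_KERNEL);      /* may sleep */
        if (!buff)
                return -ENOMEM;

        if (copy_from_user(buff, buf, count)) {
                ret = -EFAULT;
                goto _free;
        }

        ret = count;
 _free:
        kfree(buff);
        return ret;
}
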
index a021e660d482dd5e4cbd87ae1c010d25f2e1975f..325ec211370fa233aed23ebc2480d4a091ad5214 100644 (file)
@@ -605,6 +605,7 @@ struct hfi1_pportdata {
        struct work_struct freeze_work;
        struct work_struct link_downgrade_work;
        struct work_struct link_bounce_work;
+       struct delayed_work start_link_work;
        /* host link state variables */
        struct mutex hls_lock;
        u32 host_link_state;
@@ -659,6 +660,7 @@ struct hfi1_pportdata {
        u8 linkinit_reason;
        u8 local_tx_rate;       /* rate given to 8051 firmware */
        u8 last_pstate;         /* info only */
+       u8 qsfp_retry_count;
 
        /* placeholders for IB MAD packet settings */
        u8 overrun_threshold;
@@ -1804,7 +1806,7 @@ extern unsigned int hfi1_max_mtu;
 extern unsigned int hfi1_cu;
 extern unsigned int user_credit_return_threshold;
 extern int num_user_contexts;
-extern unsigned n_krcvqs;
+extern unsigned long n_krcvqs;
 extern uint krcvqs[];
 extern int krcvqsset;
 extern uint kdeth_qp;
index b7935451093c6d833112e985d13421b408268b32..384b43d2fd49937f2390ca05da1782864396e306 100644 (file)
@@ -94,7 +94,7 @@ module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
 MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
 
 /* computed based on above array */
-unsigned n_krcvqs;
+unsigned long n_krcvqs;
 
 static unsigned hfi1_rcvarr_split = 25;
 module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
@@ -500,6 +500,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
        INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
        INIT_WORK(&ppd->sma_message_work, handle_sma_message);
        INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
+       INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
        INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
        INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
 
index 39e42c373a01c5e9afc276bb2073b214cde4515f..7ffc14f2152312f11d4a52e70b9bd9c084cdfd9a 100644 (file)
@@ -2604,7 +2604,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
        u8 lq, num_vls;
        u8 res_lli, res_ler;
        u64 port_mask;
-       unsigned long port_num;
+       u8 port_num;
        unsigned long vl;
        u32 vl_select_mask;
        int vfi;
@@ -2638,9 +2638,9 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
-       if ((u8)port_num != port) {
+       if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)pmp);
        }
@@ -2842,7 +2842,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
        if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3015,7 +3015,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
        if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3252,7 +3252,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
        if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
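
The four mad.c hunks above all fix the same bug: find_first_bit() takes its size argument in bits, so passing sizeof(port_mask) (8) only scanned bits 0-7 of the 64-bit port selection mask. A minimal sketch of the corrected call, wrapped in a hypothetical helper:

#include <linux/bitops.h>
#include <linux/types.h>

/* Scan all 64 bits of the selection mask; sizeof() alone would have
 * limited the search to the first byte, as the old code did. */
static u8 example_first_selected_port(u64 port_mask)
{
        return find_first_bit((unsigned long *)&port_mask,
                              sizeof(port_mask) * 8);
}
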
index 8c25e1b58849a17a22a96d6ca8c4f54de4992098..3a1ef3056282e176f20128b37c2790690b4f850f 100644 (file)
@@ -771,6 +771,9 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
                        read_extra_bytes(pbuf, from, to_fill);
                        from += to_fill;
                        nbytes -= to_fill;
+                       /* may not be enough valid bytes left to align */
+                       if (extra > nbytes)
+                               extra = nbytes;
 
                        /* ...now write carry */
                        dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
@@ -798,6 +801,15 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
                        read_low_bytes(pbuf, from, extra);
                        from += extra;
                        nbytes -= extra;
+                       /*
+                        * If no bytes are left, return early - we are done.
+                        * NOTE: This short-circuit is *required* because
+                        * "extra" may have been reduced in size and "from"
+                        * is not aligned, as required when leaving this
+                        * if block.
+                        */
+                       if (nbytes == 0)
+                               return;
                }
 
                /* at this point, from is QW aligned */
index 0ecf27903dc20f62ab2bd26c307d8be7afcfab5e..1694037d1eee287b34559cadf11ab2bf5019ea80 100644 (file)
@@ -114,6 +114,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
 #define KDETH_HCRC_LOWER_SHIFT    24
 #define KDETH_HCRC_LOWER_MASK     0xff
 
+#define AHG_KDETH_INTR_SHIFT 12
+
 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
 
@@ -1480,7 +1482,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
                /* Clear KDETH.SH on last packet */
                if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
                        val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
-                                                               INTR) >> 16);
+                                                    INTR) <<
+                                          AHG_KDETH_INTR_SHIFT);
                        val &= cpu_to_le16(~(1U << 13));
                        AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
                } else {
index 3ee0cad96bc688457475a1f727969e7f05336e62..0c92a40b3e8699f6d14c24bc51b0ac98e3e8ad45 100644 (file)
@@ -265,6 +265,7 @@ void i40iw_next_iw_state(struct i40iw_qp *iwqp,
                info.dont_send_fin = false;
        if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
                info.reset_tcp_conn = true;
+       iwqp->hw_iwarp_state = state;
        i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
 }
 
index 0cbbe40382982479e608aca0edbe5a9ab1c267d7..445e230d5ff88f6de4e1b0e5d0d81160028842cf 100644 (file)
@@ -100,7 +100,7 @@ static struct notifier_block i40iw_net_notifier = {
        .notifier_call = i40iw_net_event
 };
 
-static int i40iw_notifiers_registered;
+static atomic_t i40iw_notifiers_registered;
 
 /**
  * i40iw_find_i40e_handler - find a handler given a client info
@@ -1342,12 +1342,11 @@ exit:
  */
 static void i40iw_register_notifiers(void)
 {
-       if (!i40iw_notifiers_registered) {
+       if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
                register_inetaddr_notifier(&i40iw_inetaddr_notifier);
                register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
                register_netevent_notifier(&i40iw_net_notifier);
        }
-       i40iw_notifiers_registered++;
 }
 
 /**
@@ -1429,8 +1428,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
                        i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
                /* fallthrough */
        case INET_NOTIFIER:
-               if (i40iw_notifiers_registered > 0) {
-                       i40iw_notifiers_registered--;
+               if (!atomic_dec_return(&i40iw_notifiers_registered)) {
                        unregister_netevent_notifier(&i40iw_net_notifier);
                        unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
                        unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
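
The i40iw change switches the notifier bookkeeping from a plain int to an atomic_t so concurrent open/close paths cannot race on the count: the first increment registers, the final decrement unregisters. A small sketch of that pattern with hypothetical names (the driver also registers inetaddr and inet6addr notifiers the same way):

#include <linux/atomic.h>
#include <linux/notifier.h>
#include <net/netevent.h>

static atomic_t example_notifier_users = ATOMIC_INIT(0);

/* Only the first user actually registers. */
static void example_get_notifier(struct notifier_block *nb)
{
        if (atomic_inc_return(&example_notifier_users) == 1)
                register_netevent_notifier(nb);
}

/* Only the last user unregisters. */
static void example_put_notifier(struct notifier_block *nb)
{
        if (!atomic_dec_return(&example_notifier_users))
                unregister_netevent_notifier(nb);
}
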
index 006db6436e3b22511bcbb4d990a55e56ac8adcce..5df63dacaaa32f2b0a4944559181e3f877bb94b5 100644 (file)
@@ -687,12 +687,6 @@ repoll:
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;
 
-       if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
-                    is_send)) {
-               pr_warn("Completion for NOP opcode detected!\n");
-               return -EAGAIN;
-       }
-
        /* Resize CQ in progress */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
                if (cq->resize_buf) {
@@ -718,12 +712,6 @@ repoll:
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                       be32_to_cpu(cqe->vlan_my_qpn));
-               if (unlikely(!mqp)) {
-                       pr_warn("CQ %06x with entry for unknown QPN %06x\n",
-                              cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
-                       return -EAGAIN;
-               }
-
                *cur_qp = to_mibqp(mqp);
        }
 
@@ -736,11 +724,6 @@ repoll:
                /* SRQ is also in the radix tree */
                msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
                                       srq_num);
-               if (unlikely(!msrq)) {
-                       pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
-                               cq->mcq.cqn, srq_num);
-                       return -EAGAIN;
-               }
        }
 
        if (is_send) {
@@ -891,7 +874,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        struct mlx4_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
-       int err = 0;
        struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
 
        spin_lock_irqsave(&cq->lock, flags);
@@ -901,8 +883,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        }
 
        for (npolled = 0; npolled < num_entries; ++npolled) {
-               err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
-               if (err)
+               if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
                        break;
        }
 
@@ -911,10 +892,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 out:
        spin_unlock_irqrestore(&cq->lock, flags);
 
-       if (err == 0 || err == -EAGAIN)
-               return npolled;
-       else
-               return err;
+       return npolled;
 }
 
 int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
index 9c2e53d28f985740c3d22f9c13be776e58010567..0f21c3a25552d47b7305c0df2980a75b36a55970 100644 (file)
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 
                /* Generate GUID changed event */
                if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+                       if (mlx4_is_master(dev->dev)) {
+                               union ib_gid gid;
+                               int err = 0;
+
+                               if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+                                       err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+                               else
+                                       gid.global.subnet_prefix =
+                                               eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+                               if (err) {
+                                       pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+                                               port, err);
+                               } else {
+                                       pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+                                                port,
+                                                (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+                                                be64_to_cpu(gid.global.subnet_prefix));
+                                       atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+                                                    be64_to_cpu(gid.global.subnet_prefix));
+                               }
+                       }
                        mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
                        /*if master, notify all slaves*/
                        if (mlx4_is_master(dev->dev))
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
                if (err)
                        goto demux_err;
                dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+               atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+                            be64_to_cpu(gid.global.subnet_prefix));
                err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
                                      &dev->sriov.sqps[i]);
                if (err)
index 2af44c2de2624a75d90a675727b32f914ec53e05..87ba9bca4181c2b5115b0c455a691c2e9f66d649 100644 (file)
@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
        bool per_port = !!(ibdev->dev->caps.flags2 &
                MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
 
+       if (mlx4_is_slave(ibdev->dev))
+               return 0;
+
        for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
                /* i == 1 means we are building port counters */
                if (i && !per_port)
index 8f7ad07915b07e27ed7f512eabc07757269332c8..097bfcc4ee997eaab178f8d8353efe79fa00d0a7 100644 (file)
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
                if (!group->members[i])
                        leave_state |= (1 << i);
 
-       return leave_state & (group->rec.scope_join_state & 7);
+       return leave_state & (group->rec.scope_join_state & 0xf);
 }
 
 static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
                } else
                        mcg_warn_group(group, "DRIVER BUG\n");
        } else if (group->state == MCAST_LEAVE_SENT) {
-               if (group->rec.scope_join_state & 7)
-                       group->rec.scope_join_state &= 0xf8;
+               if (group->rec.scope_join_state & 0xf)
+                       group->rec.scope_join_state &= 0xf0;
                group->state = MCAST_IDLE;
                mutex_unlock(&group->lock);
                if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
 static int handle_join_req(struct mcast_group *group, u8 join_mask,
                           struct mcast_req *req)
 {
-       u8 group_join_state = group->rec.scope_join_state & 7;
+       u8 group_join_state = group->rec.scope_join_state & 0xf;
        int ref = 0;
        u16 status;
        struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
                        u8 cur_join_state;
 
                        resp_join_state = ((struct ib_sa_mcmember_data *)
-                                               group->response_sa_mad.data)->scope_join_state & 7;
-                       cur_join_state = group->rec.scope_join_state & 7;
+                                               group->response_sa_mad.data)->scope_join_state & 0xf;
+                       cur_join_state = group->rec.scope_join_state & 0xf;
 
                        if (method == IB_MGMT_METHOD_GET_RESP) {
                                /* successful join */
@@ -710,7 +710,7 @@ process_requests:
                req = list_first_entry(&group->pending_list, struct mcast_req,
                                       group_list);
                sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
-               req_join_state = sa_data->scope_join_state & 0x7;
+               req_join_state = sa_data->scope_join_state & 0xf;
 
                /* For a leave request, we will immediately answer the VF, and
                 * update our internal counters. The actual leave will be sent
index 7c5832ede4bd0cc213139900eeb3999532192858..686ab48ff644163879049533985e81b33e4a054f 100644 (file)
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
        struct workqueue_struct *wq;
        struct workqueue_struct *ud_wq;
        spinlock_t ud_lock;
-       __be64 subnet_prefix;
+       atomic64_t subnet_prefix;
        __be64 guid_cache[128];
        struct mlx4_ib_dev *dev;
        /* the following lock protects both mcg_table and mcg_mgid0_list */
index 768085f5956645869e65aa5f81edb3f0b972a99b..7fb9629bd12b9736c181d7c5e0935ff5b7524d9a 100644 (file)
@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
                sqp->ud_header.grh.flow_label    =
                        ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
                sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
-               if (is_eth)
+               if (is_eth) {
                        memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
-               else {
-               if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
-                       /* When multi-function is enabled, the ib_core gid
-                        * indexes don't necessarily match the hw ones, so
-                        * we must use our own cache */
-                       sqp->ud_header.grh.source_gid.global.subnet_prefix =
-                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                                                      subnet_prefix;
-                       sqp->ud_header.grh.source_gid.global.interface_id =
-                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                                              guid_cache[ah->av.ib.gid_index];
-               } else
-                       ib_get_cached_gid(ib_dev,
-                                         be32_to_cpu(ah->av.ib.port_pd) >> 24,
-                                         ah->av.ib.gid_index,
-                                         &sqp->ud_header.grh.source_gid, NULL);
+               } else {
+                       if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+                               /* When multi-function is enabled, the ib_core gid
+                                * indexes don't necessarily match the hw ones, so
+                                * we must use our own cache
+                                */
+                               sqp->ud_header.grh.source_gid.global.subnet_prefix =
+                                       cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+                                                                   demux[sqp->qp.port - 1].
+                                                                   subnet_prefix)));
+                               sqp->ud_header.grh.source_gid.global.interface_id =
+                                       to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+                                                      guid_cache[ah->av.ib.gid_index];
+                       } else {
+                               ib_get_cached_gid(ib_dev,
+                                                 be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                                                 ah->av.ib.gid_index,
+                                                 &sqp->ud_header.grh.source_gid, NULL);
+                       }
                }
                memcpy(sqp->ud_header.grh.destination_gid.raw,
                       ah->av.ib.dgid, 16);
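
Because the cached subnet prefix can now change at runtime (GID prefix change events), mlx4 stores it in an atomic64_t rather than a raw __be64; the convention in these hunks is to keep the value in host order inside the atomic and convert at the boundaries. A minimal sketch, with a hypothetical stand-in for the demux field:

#include <linux/atomic.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static atomic64_t example_subnet_prefix;        /* stand-in for demux[].subnet_prefix */

static void example_cache_prefix(__be64 wire_prefix)
{
        /* store in host order so plain 64-bit atomics can be used */
        atomic64_set(&example_subnet_prefix, be64_to_cpu(wire_prefix));
}

static __be64 example_read_prefix(void)
{
        /* convert back to wire order when building the GRH */
        return cpu_to_be64(atomic64_read(&example_subnet_prefix));
}
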
index 308a358e5b46416f42285f0738200f505495bf5c..e4fac9292e4aeb68f3a5f0eb79b55cc3cf8eba1b 100644 (file)
@@ -553,12 +553,6 @@ repoll:
                 * from the table.
                 */
                mqp = __mlx5_qp_lookup(dev->mdev, qpn);
-               if (unlikely(!mqp)) {
-                       mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
-                                    cq->mcq.cqn, qpn);
-                       return -EINVAL;
-               }
-
                *cur_qp = to_mibqp(mqp);
        }
 
@@ -619,13 +613,6 @@ repoll:
                read_lock(&dev->mdev->priv.mkey_table.lock);
                mmkey = __mlx5_mr_lookup(dev->mdev,
                                         mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
-               if (unlikely(!mmkey)) {
-                       read_unlock(&dev->mdev->priv.mkey_table.lock);
-                       mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
-                                    cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
-                       return -EINVAL;
-               }
-
                mr = to_mibmr(mmkey);
                get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
                mr->sig->sig_err_exists = true;
@@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        unsigned long flags;
        int soft_polled = 0;
        int npolled;
-       int err = 0;
 
        spin_lock_irqsave(&cq->lock, flags);
        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
                soft_polled = poll_soft_wc(cq, num_entries, wc);
 
        for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
-               err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
-               if (err)
+               if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
                        break;
        }
 
@@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 out:
        spin_unlock_irqrestore(&cq->lock, flags);
 
-       if (err == 0 || err == -EAGAIN)
-               return soft_polled + npolled;
-       else
-               return err;
+       return soft_polled + npolled;
 }
 
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
index 1b4094baa2deec0622f430aa57a44a7fb94724e7..e19537cf44ab3890e51f1262f0064378fecaf44b 100644 (file)
@@ -288,7 +288,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
 
 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
 {
-       return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+       if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+               return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+       return 0;
 }
 
 enum {
@@ -1428,6 +1430,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
                                             dmac_47_16),
                                ib_spec->eth.val.dst_mac);
 
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                                            smac_47_16),
+                               ib_spec->eth.mask.src_mac);
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                                            smac_47_16),
+                               ib_spec->eth.val.src_mac);
+
                if (ib_spec->eth.mask.vlan_tag) {
                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                                 vlan_tag, 1);
@@ -1849,6 +1858,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                                           int domain)
 {
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
+       struct mlx5_ib_qp *mqp = to_mqp(qp);
        struct mlx5_ib_flow_handler *handler = NULL;
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_ib_flow_prio *ft_prio;
@@ -1875,7 +1885,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
        }
 
        dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-       dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
+       if (mqp->flags & MLX5_IB_QP_RSS)
+               dst->tir_num = mqp->rss_qp.tirn;
+       else
+               dst->tir_num = mqp->raw_packet_qp.rq.tirn;
 
        if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
index 40df2cca0609a6c3dfff09e738bec5450685f8e0..996b54e366b03d031a7791e8f80c55aba232e2f1 100644 (file)
@@ -71,7 +71,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 
        addr = addr >> page_shift;
        tmp = (unsigned long)addr;
-       m = find_first_bit(&tmp, sizeof(tmp));
+       m = find_first_bit(&tmp, BITS_PER_LONG);
        skip = 1 << m;
        mask = skip - 1;
        i = 0;
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
                for (k = 0; k < len; k++) {
                        if (!(i & mask)) {
                                tmp = (unsigned long)pfn;
-                               m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
+                               m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
                                skip = 1 << m;
                                mask = skip - 1;
                                base = pfn;
@@ -89,7 +89,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
                        } else {
                                if (base + p != pfn) {
                                        tmp = (unsigned long)p;
-                                       m = find_first_bit(&tmp, sizeof(tmp));
+                                       m = find_first_bit(&tmp, BITS_PER_LONG);
                                        skip = 1 << m;
                                        mask = skip - 1;
                                        base = pfn;
index 372385d0f99384785123ba5e24fcb2ceddcc520e..95146f4aa3e3d7d2d3844b27c1a3bd8578bb4bbe 100644 (file)
@@ -402,6 +402,7 @@ enum mlx5_ib_qp_flags {
        /* QP uses 1 as its source QP number */
        MLX5_IB_QP_SQPN_QP1                     = 1 << 6,
        MLX5_IB_QP_CAP_SCATTER_FCS              = 1 << 7,
+       MLX5_IB_QP_RSS                          = 1 << 8,
 };
 
 struct mlx5_umr_wr {
index 0dd7d93cac95b9e2bba86749b18f83846fa36245..affc3f6598cac6c0d6e794dbca0594aace42500b 100644 (file)
@@ -1449,6 +1449,7 @@ create_tir:
        kvfree(in);
        /* qpn is reserved for that QP */
        qp->trans_qp.base.mqp.qpn = 0;
+       qp->flags |= MLX5_IB_QP_RSS;
        return 0;
 
 err:
@@ -3658,12 +3659,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                     struct ib_send_wr *wr, unsigned *idx,
                     int *size, int nreq)
 {
-       int err = 0;
-
-       if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
-               err = -ENOMEM;
-               return err;
-       }
+       if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+               return -ENOMEM;
 
        *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
        *seg = mlx5_get_send_wqe(qp, *idx);
@@ -3679,7 +3676,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
        *seg += sizeof(**ctrl);
        *size = sizeof(**ctrl) / 16;
 
-       return err;
+       return 0;
 }
 
 static void finish_wqe(struct mlx5_ib_qp *qp,
@@ -3758,7 +3755,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                num_sge = wr->num_sge;
                if (unlikely(num_sge > qp->sq.max_gs)) {
                        mlx5_ib_warn(dev, "\n");
-                       err = -ENOMEM;
+                       err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }
index 80c4b6b401b83af99d8a1dde1ada784daeb8b6cb..46b64970058ece11bf3c272ca5fb80d3c1cabff6 100644 (file)
@@ -294,7 +294,7 @@ static void __rvt_free_mr(struct rvt_mr *mr)
 {
        rvt_deinit_mregion(&mr->mr);
        rvt_free_lkey(&mr->mr);
-       vfree(mr);
+       kfree(mr);
 }
 
 /**
index 55f0e8f0ca7993f4f00993d7df9ac8525e021628..ddd59270ff6dea6ca4c5f4d032567141dcc78902 100644 (file)
@@ -362,15 +362,34 @@ static int __init rxe_module_init(void)
                return err;
        }
 
-       err = rxe_net_init();
+       err = rxe_net_ipv4_init();
        if (err) {
-               pr_err("rxe: unable to init\n");
+               pr_err("rxe: unable to init ipv4 tunnel\n");
                rxe_cache_exit();
-               return err;
+               goto exit;
+       }
+
+       err = rxe_net_ipv6_init();
+       if (err) {
+               pr_err("rxe: unable to init ipv6 tunnel\n");
+               rxe_cache_exit();
+               goto exit;
        }
+
+       err = register_netdevice_notifier(&rxe_net_notifier);
+       if (err) {
+               pr_err("rxe: Failed to register netdev notifier\n");
+               goto exit;
+       }
+
        pr_info("rxe: loaded\n");
 
        return 0;
+
+exit:
+       rxe_release_udp_tunnel(recv_sockets.sk4);
+       rxe_release_udp_tunnel(recv_sockets.sk6);
+       return err;
 }
 
 static void __exit rxe_module_exit(void)
index 36f67de44095701ecbd6e857a7312f78fb5dbc26..1c59ef2c67aaec74c93a2c27323214446219f6f1 100644 (file)
@@ -689,7 +689,14 @@ int rxe_completer(void *arg)
                                        qp->req.need_retry = 1;
                                        rxe_run_task(&qp->req.task, 1);
                                }
+
+                               if (pkt) {
+                                       rxe_drop_ref(pkt->qp);
+                                       kfree_skb(skb);
+                               }
+
                                goto exit;
+
                        } else {
                                wqe->status = IB_WC_RETRY_EXC_ERR;
                                state = COMPST_ERROR;
@@ -716,6 +723,12 @@ int rxe_completer(void *arg)
                case COMPST_ERROR:
                        do_complete(qp, wqe);
                        rxe_qp_error(qp);
+
+                       if (pkt) {
+                               rxe_drop_ref(pkt->qp);
+                               kfree_skb(skb);
+                       }
+
                        goto exit;
                }
        }
index 0b8d2ea8b41df86a4c07e57eff51b5fbca259f7a..eedf2f1cafdfa56f1db4223ebe42aaa74d51f4c9 100644 (file)
@@ -275,9 +275,10 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
        return sock;
 }
 
-static void rxe_release_udp_tunnel(struct socket *sk)
+void rxe_release_udp_tunnel(struct socket *sk)
 {
-       udp_tunnel_sock_release(sk);
+       if (sk)
+               udp_tunnel_sock_release(sk);
 }
 
 static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
@@ -658,51 +659,45 @@ out:
        return NOTIFY_OK;
 }
 
-static struct notifier_block rxe_net_notifier = {
+struct notifier_block rxe_net_notifier = {
        .notifier_call = rxe_notify,
 };
 
-int rxe_net_init(void)
+int rxe_net_ipv4_init(void)
 {
-       int err;
-
        spin_lock_init(&dev_list_lock);
 
-       recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
-                       htons(ROCE_V2_UDP_DPORT), true);
-       if (IS_ERR(recv_sockets.sk6)) {
-               recv_sockets.sk6 = NULL;
-               pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
-               return -1;
-       }
-
        recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
-                       htons(ROCE_V2_UDP_DPORT), false);
+                               htons(ROCE_V2_UDP_DPORT), false);
        if (IS_ERR(recv_sockets.sk4)) {
-               rxe_release_udp_tunnel(recv_sockets.sk6);
                recv_sockets.sk4 = NULL;
-               recv_sockets.sk6 = NULL;
                pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
                return -1;
        }
 
-       err = register_netdevice_notifier(&rxe_net_notifier);
-       if (err) {
-               rxe_release_udp_tunnel(recv_sockets.sk6);
-               rxe_release_udp_tunnel(recv_sockets.sk4);
-               pr_err("rxe: Failed to register netdev notifier\n");
-       }
-
-       return err;
+       return 0;
 }
 
-void rxe_net_exit(void)
+int rxe_net_ipv6_init(void)
 {
-       if (recv_sockets.sk6)
-               rxe_release_udp_tunnel(recv_sockets.sk6);
+#if IS_ENABLED(CONFIG_IPV6)
 
-       if (recv_sockets.sk4)
-               rxe_release_udp_tunnel(recv_sockets.sk4);
+       spin_lock_init(&dev_list_lock);
 
+       recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+                                               htons(ROCE_V2_UDP_DPORT), true);
+       if (IS_ERR(recv_sockets.sk6)) {
+               recv_sockets.sk6 = NULL;
+               pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+               return -1;
+       }
+#endif
+       return 0;
+}
+
+void rxe_net_exit(void)
+{
+       rxe_release_udp_tunnel(recv_sockets.sk6);
+       rxe_release_udp_tunnel(recv_sockets.sk4);
        unregister_netdevice_notifier(&rxe_net_notifier);
 }
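
Splitting rxe_net_init() into per-family helpers only works cleanly because rxe_release_udp_tunnel() is made tolerant of a NULL socket, so the module-init error path can release both tunnels unconditionally regardless of which family failed to set up. The idiom, restated as a standalone sketch with hypothetical socket variables:

#include <linux/net.h>
#include <net/udp_tunnel.h>

static struct socket *example_sk4, *example_sk6;        /* hypothetical tunnel sockets */

/* NULL-tolerant release, as rxe_release_udp_tunnel() now is. */
static void example_release(struct socket *sk)
{
        if (sk)
                udp_tunnel_sock_release(sk);
}

/* Error paths can unwind both sockets without tracking which init failed. */
static void example_unwind(void)
{
        example_release(example_sk4);
        example_release(example_sk6);
}
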
index 7b06f76d16cc6058ab83dc4051ac0a71f4229f67..0daf7f09e5b5857e44506dc7057ab40abcc7c3b8 100644 (file)
@@ -44,10 +44,13 @@ struct rxe_recv_sockets {
 };
 
 extern struct rxe_recv_sockets recv_sockets;
+extern struct notifier_block rxe_net_notifier;
+void rxe_release_udp_tunnel(struct socket *sk);
 
 struct rxe_dev *rxe_net_add(struct net_device *ndev);
 
-int rxe_net_init(void);
+int rxe_net_ipv4_init(void);
+int rxe_net_ipv6_init(void);
 void rxe_net_exit(void);
 
 #endif /* RXE_NET_H */
index 3d464c23e08bbbf0d55f3efbe922b44b12ca1442..144d2f129fcdf7f8a18b1fb8440ef6a597412fda 100644 (file)
@@ -312,7 +312,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
                 * make a copy of the skb to post to the next qp
                 */
                skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
-                               skb_clone(skb, GFP_KERNEL) : NULL;
+                               skb_clone(skb, GFP_ATOMIC) : NULL;
 
                pkt->qp = qp;
                rxe_add_ref(qp);
index 33b2d9d77021716c76bee6f149fa1835e456918a..13a848a518e8855e11d69cbd24e7c908bc89b23f 100644 (file)
@@ -511,24 +511,21 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 }
 
 static void update_wqe_state(struct rxe_qp *qp,
-                            struct rxe_send_wqe *wqe,
-                            struct rxe_pkt_info *pkt,
-                            enum wqe_state *prev_state)
+               struct rxe_send_wqe *wqe,
+               struct rxe_pkt_info *pkt)
 {
-       enum wqe_state prev_state_ = wqe->state;
-
        if (pkt->mask & RXE_END_MASK) {
                if (qp_type(qp) == IB_QPT_RC)
                        wqe->state = wqe_state_pending;
        } else {
                wqe->state = wqe_state_processing;
        }
-
-       *prev_state = prev_state_;
 }
 
-static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-                        struct rxe_pkt_info *pkt, int payload)
+static void update_wqe_psn(struct rxe_qp *qp,
+                          struct rxe_send_wqe *wqe,
+                          struct rxe_pkt_info *pkt,
+                          int payload)
 {
        /* number of packets left to send including current one */
        int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
@@ -546,9 +543,34 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
        else
                qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+}
 
-       qp->req.opcode = pkt->opcode;
+static void save_state(struct rxe_send_wqe *wqe,
+                      struct rxe_qp *qp,
+                      struct rxe_send_wqe *rollback_wqe,
+                      struct rxe_qp *rollback_qp)
+{
+       rollback_wqe->state     = wqe->state;
+       rollback_wqe->first_psn = wqe->first_psn;
+       rollback_wqe->last_psn  = wqe->last_psn;
+       rollback_qp->req.psn    = qp->req.psn;
+}
 
+static void rollback_state(struct rxe_send_wqe *wqe,
+                          struct rxe_qp *qp,
+                          struct rxe_send_wqe *rollback_wqe,
+                          struct rxe_qp *rollback_qp)
+{
+       wqe->state     = rollback_wqe->state;
+       wqe->first_psn = rollback_wqe->first_psn;
+       wqe->last_psn  = rollback_wqe->last_psn;
+       qp->req.psn    = rollback_qp->req.psn;
+}
+
+static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+                        struct rxe_pkt_info *pkt, int payload)
+{
+       qp->req.opcode = pkt->opcode;
 
        if (pkt->mask & RXE_END_MASK)
                qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
@@ -571,7 +593,8 @@ int rxe_requester(void *arg)
        int mtu;
        int opcode;
        int ret;
-       enum wqe_state prev_state;
+       struct rxe_qp rollback_qp;
+       struct rxe_send_wqe rollback_wqe;
 
 next_wqe:
        if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -688,13 +711,21 @@ next_wqe:
                goto err;
        }
 
-       update_wqe_state(qp, wqe, &pkt, &prev_state);
+       /*
+        * To prevent a race on wqe access between requester and completer,
+        * wqe members state and psn need to be set before calling
+        * rxe_xmit_packet().
+        * Otherwise, completer might initiate an unjustified retry flow.
+        */
+       save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+       update_wqe_state(qp, wqe, &pkt);
+       update_wqe_psn(qp, wqe, &pkt, payload);
        ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
        if (ret) {
                qp->need_req_skb = 1;
                kfree_skb(skb);
 
-               wqe->state = prev_state;
+               rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
 
                if (ret == -EAGAIN) {
                        rxe_run_task(&qp->req.task, 1);
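
The requester hunks reorder the update so that the wqe state and PSNs are published before rxe_xmit_packet() runs, and are restored from a saved copy if the transmit fails, closing the window in which the completer could see a half-updated wqe and start an unjustified retry. A compressed sketch of the save/rollback ordering, using hypothetical stand-in structures rather than the rxe types:

#include <linux/types.h>

/* Hypothetical stand-ins for the wqe/qp fields touched here. */
struct example_wqe { int state; u32 first_psn, last_psn; };
struct example_qp  { u32 req_psn; };

static void example_save(const struct example_wqe *wqe, const struct example_qp *qp,
                         struct example_wqe *rb_wqe, struct example_qp *rb_qp)
{
        *rb_wqe = *wqe;
        rb_qp->req_psn = qp->req_psn;
}

static void example_rollback(struct example_wqe *wqe, struct example_qp *qp,
                             const struct example_wqe *rb_wqe,
                             const struct example_qp *rb_qp)
{
        *wqe = *rb_wqe;
        qp->req_psn = rb_qp->req_psn;
}

/* usage: example_save(); update state and PSN; transmit;
 * on transmit failure call example_rollback(). */
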
index ebb03b46e2ad71e4ca881f1af4b8552eb8f331f6..3e0f0f2baace2518a92e1c7b8c6b995191730007 100644 (file)
@@ -972,11 +972,13 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
        free_rd_atomic_resource(qp, res);
        rxe_advance_resp_resource(qp);
 
+       memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+
        res->type = RXE_ATOMIC_MASK;
        res->atomic.skb = skb;
-       res->first_psn = qp->resp.psn;
-       res->last_psn = qp->resp.psn;
-       res->cur_psn = qp->resp.psn;
+       res->first_psn = ack_pkt.psn;
+       res->last_psn  = ack_pkt.psn;
+       res->cur_psn   = ack_pkt.psn;
 
        rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
        if (rc) {
@@ -1116,8 +1118,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
                                rc = RESPST_CLEANUP;
                                goto out;
                        }
-                       bth_set_psn(SKB_TO_PKT(skb_copy),
-                                   qp->resp.psn - 1);
+
                        /* Resend the result. */
                        rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
                                             pkt, skb_copy);
index 4f7d9b48df643c7ef5f69b80f870a0f15c6efe87..9dbfcc0ab577694c71f853f48d784ae5b9c4592a 100644 (file)
@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn);
 void ipoib_reap_ah(struct work_struct *work);
 
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
 void ipoib_mark_paths_invalid(struct net_device *dev);
 void ipoib_flush_paths(struct net_device *dev);
 int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
index 951d9abcca8b283e652368c7c9c96f1fa25f3df4..4ad297d3de897789141c87847d26d6fdf91a062e 100644 (file)
@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
        }
 }
 
+#define QPN_AND_OPTIONS_OFFSET 4
+
 static void ipoib_cm_tx_start(struct work_struct *work)
 {
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
        struct ipoib_neigh *neigh;
        struct ipoib_cm_tx *p;
        unsigned long flags;
+       struct ipoib_path *path;
        int ret;
 
        struct ib_sa_path_rec pathrec;
@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);
                neigh = p->neigh;
+
                qpn = IPOIB_QPN(neigh->daddr);
+               /*
+                * As long as the search is with these 2 locks,
+                * path existence indicates its validity.
+                */
+               path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+               if (!path) {
+                       pr_info("%s ignore not valid path %pI6\n",
+                               __func__,
+                               neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+                       goto free_neigh;
+               }
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
 
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
                spin_lock_irqsave(&priv->lock, flags);
 
                if (ret) {
+free_neigh:
                        neigh = p->neigh;
                        if (neigh) {
                                neigh->cm = NULL;
index dc6d241b9406e9661394c7e985fda4b0eacbfa6b..be11d5d5b8c1d9ca84ab884bac32000e018c4c60 100644 (file)
@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
        }
 
        if (level == IPOIB_FLUSH_LIGHT) {
+               int oper_up;
                ipoib_mark_paths_invalid(dev);
+               /* Set IPoIB operation as down to prevent races between:
+                * the flush flow which leaves MCG and on the fly joins
+                * which can happen during that time. mcast restart task
+                * should deal with join requests we missed.
+                */
+               oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_mcast_dev_flush(dev);
+               if (oper_up)
+                       set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_flush_ah(dev);
        }
 
index 74bcaa0642261e672b6747e731d9c9ca56befd07..cc1c1b062ea58d530756586ee1ec00144c79b38e 100644 (file)
@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
        return -EINVAL;
 }
 
-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
index 7914c14478cd190e24d30151a5560f01980e12d2..cae9bbcc27e7c81f2b3dfedc3e55987e428f43ed 100644 (file)
@@ -403,6 +403,7 @@ isert_init_conn(struct isert_conn *isert_conn)
        INIT_LIST_HEAD(&isert_conn->node);
        init_completion(&isert_conn->login_comp);
        init_completion(&isert_conn->login_req_comp);
+       init_waitqueue_head(&isert_conn->rem_wait);
        kref_init(&isert_conn->kref);
        mutex_init(&isert_conn->mutex);
        INIT_WORK(&isert_conn->release_work, isert_release_work);
@@ -578,7 +579,8 @@ isert_connect_release(struct isert_conn *isert_conn)
        BUG_ON(!device);
 
        isert_free_rx_descriptors(isert_conn);
-       if (isert_conn->cm_id)
+       if (isert_conn->cm_id &&
+           !isert_conn->dev_removed)
                rdma_destroy_id(isert_conn->cm_id);
 
        if (isert_conn->qp) {
@@ -593,7 +595,10 @@ isert_connect_release(struct isert_conn *isert_conn)
 
        isert_device_put(device);
 
-       kfree(isert_conn);
+       if (isert_conn->dev_removed)
+               wake_up_interruptible(&isert_conn->rem_wait);
+       else
+               kfree(isert_conn);
 }
 
 static void
@@ -753,6 +758,7 @@ static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
        struct isert_np *isert_np = cma_id->context;
+       struct isert_conn *isert_conn;
        int ret = 0;
 
        isert_info("%s (%d): status %d id %p np %p\n",
@@ -773,10 +779,21 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
        case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
-       case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
                ret = isert_disconnected_handler(cma_id, event->event);
                break;
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               isert_conn = cma_id->qp->qp_context;
+               isert_conn->dev_removed = true;
+               isert_disconnected_handler(cma_id, event->event);
+               wait_event_interruptible(isert_conn->rem_wait,
+                                        isert_conn->state == ISER_CONN_DOWN);
+               kfree(isert_conn);
+               /*
+                * return non-zero from the callback to destroy
+                * the rdma cm id
+                */
+               return 1;
        case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
        case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
        case RDMA_CM_EVENT_CONNECT_ERROR:
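
On RDMA_CM_EVENT_DEVICE_REMOVAL the handler now waits until connection teardown has finished before freeing the connection, and returns non-zero so the RDMA CM destroys the id itself. The underlying wait-queue handshake, sketched with hypothetical globals (the real code checks the connection state under its own locking):

#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_rem_wait);
static bool example_conn_down;

/* removal path: block until the release work reports the connection down */
static void example_wait_for_release(void)
{
        wait_event_interruptible(example_rem_wait, example_conn_down);
}

/* release path: record the state and wake the waiter */
static void example_release_done(void)
{
        example_conn_down = true;
        wake_up_interruptible(&example_rem_wait);
}
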
index fc791efe3a108178f1949f386548ed84ae81ebc8..c02ada57d7f5c4fa5b765a6401ac8e7bbd6096bb 100644 (file)
@@ -158,6 +158,8 @@ struct isert_conn {
        struct work_struct      release_work;
        bool                    logout_posted;
        bool                    snd_w_inv;
+       wait_queue_head_t       rem_wait;
+       bool                    dev_removed;
 };
 
 #define ISERT_MAX_CQ 64
index 112e17c2768be06587d340865ade9fe202adc7da..37f952dd9fc94bdc5faa6c2721822b8780dd46e1 100644 (file)
@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 {
        struct irq_domain_chip_generic *dgc = d->gc;
        struct irq_chip_generic *gc;
+       unsigned long flags;
        unsigned smr;
        int idx;
        int ret;
@@ -194,11 +195,11 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 
        gc = dgc->gc[idx];
 
-       irq_gc_lock(gc);
+       irq_gc_lock_irqsave(gc, flags);
        smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
        aic_common_set_priority(intspec[2], &smr);
        irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
-       irq_gc_unlock(gc);
+       irq_gc_unlock_irqrestore(gc, flags);
 
        return ret;
 }
index 4f0d068e1abec2bc068b5a2185bef0fd6df729ff..2a624d87a0356a963e2fa1d888cdff8e0668ae41 100644 (file)
@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
                                 unsigned int *out_type)
 {
        struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+       unsigned long flags;
        unsigned smr;
        int ret;
 
@@ -269,12 +270,12 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
        if (ret)
                return ret;
 
-       irq_gc_lock(bgc);
+       irq_gc_lock_irqsave(bgc, flags);
        irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
        smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
        aic_common_set_priority(intspec[2], &smr);
        irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
-       irq_gc_unlock(bgc);
+       irq_gc_unlock_irqrestore(bgc, flags);
 
        return ret;
 }
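
Both atmel-aic hunks convert the generic-chip lock to the irqsave variants so the translate path is safe whether or not the caller already disabled interrupts. A minimal sketch of the locking pattern, with a hypothetical register update standing in for the SMR programming:

#include <linux/irq.h>

static void example_set_smr(struct irq_chip_generic *gc, int smr_offset, u32 smr)
{
        unsigned long flags;

        /* save/restore the interrupt state instead of assuming it */
        irq_gc_lock_irqsave(gc, flags);
        irq_reg_writel(gc, smr, smr_offset);
        irq_gc_unlock_irqrestore(gc, flags);
}
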
index 978eda8d6678bce573b27bfa99dee94103708ce8..8a3ba565106fd22ecdb4dd655eb1fbaa697adfe1 100644 (file)
@@ -73,7 +73,6 @@ MODULE_DEVICE_TABLE(i2c, ams_id);
 static struct i2c_driver ams_i2c_driver = {
        .driver = {
                .name   = "ams",
-               .owner  = THIS_MODULE,
        },
        .probe          = ams_i2c_probe,
        .remove         = ams_i2c_remove,
index 3024685e4cca553d16ad4cd893925343f0ca5ba1..96d16fca68b2469f5a73c57369deaac3ed04e8ec 100644 (file)
@@ -668,7 +668,6 @@ static struct platform_driver wf_pm112_driver = {
        .remove = wf_pm112_remove,
        .driver = {
                .name = "windfarm",
-               .owner  = THIS_MODULE,
        },
 };
 
index 2f506b9d5a52bbc3ffa9cdf543b722641115e80d..e88cfb36a74d139b035574a9b078e16a13f7d35b 100644 (file)
@@ -789,7 +789,6 @@ static struct platform_driver wf_pm72_driver = {
        .remove = wf_pm72_remove,
        .driver = {
                .name = "windfarm",
-               .owner  = THIS_MODULE,
        },
 };
 
index 82fc86a90c1a436d94ab8500334b62f4cce47dbc..bdfcb8a8bfbb45004090ee9672c897cf5cf02db3 100644 (file)
@@ -682,7 +682,6 @@ static struct platform_driver wf_rm31_driver = {
        .remove = wf_rm31_remove,
        .driver = {
                .name = "windfarm",
-               .owner  = THIS_MODULE,
        },
 };
 
index 97c372908e78d23be7f0feed7c99c074f1e8ca34..7817d40d81e74ad282b1760b1dc50d090d10670d 100644 (file)
@@ -127,6 +127,7 @@ config XGENE_SLIMPRO_MBOX
 config BCM_PDC_MBOX
        tristate "Broadcom PDC Mailbox"
        depends on ARM64 || COMPILE_TEST
+       depends on HAS_DMA
        default ARCH_BCM_IPROC
        help
          Mailbox implementation for the Broadcom PDC ring manager,
index cbe0c1ee4ba9cca5d442cc9e181291e0a42909ce..c19dd820ea9b4baafb1b2d15ebb5031f464af9d0 100644 (file)
@@ -469,7 +469,7 @@ static const struct file_operations pdc_debugfs_stats = {
  * this directory for a SPU.
  * @pdcs: PDC state structure
  */
-void pdc_setup_debugfs(struct pdc_state *pdcs)
+static void pdc_setup_debugfs(struct pdc_state *pdcs)
 {
        char spu_stats_name[16];
 
@@ -485,7 +485,7 @@ void pdc_setup_debugfs(struct pdc_state *pdcs)
                                                  &pdc_debugfs_stats);
 }
 
-void pdc_free_debugfs(void)
+static void pdc_free_debugfs(void)
 {
        if (debugfs_dir && simple_empty(debugfs_dir)) {
                debugfs_remove_recursive(debugfs_dir);
@@ -1191,10 +1191,11 @@ static void pdc_shutdown(struct mbox_chan *chan)
 {
        struct pdc_state *pdcs = chan->con_priv;
 
-       if (pdcs)
-               dev_dbg(&pdcs->pdev->dev,
-                       "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
+       if (!pdcs)
+               return;
 
+       dev_dbg(&pdcs->pdev->dev,
+               "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
        pdc_ring_free(pdcs);
 }
 
index 6fff794e0c723c0d8e11d1f1899ba109c0bcef29..13041ee37ad6b504a4e1c1182a10cce9d77d0f9d 100644 (file)
@@ -2183,19 +2183,29 @@ location_show(struct mddev *mddev, char *page)
 static ssize_t
 location_store(struct mddev *mddev, const char *buf, size_t len)
 {
+       int rv;
 
+       rv = mddev_lock(mddev);
+       if (rv)
+               return rv;
        if (mddev->pers) {
-               if (!mddev->pers->quiesce)
-                       return -EBUSY;
-               if (mddev->recovery || mddev->sync_thread)
-                       return -EBUSY;
+               if (!mddev->pers->quiesce) {
+                       rv = -EBUSY;
+                       goto out;
+               }
+               if (mddev->recovery || mddev->sync_thread) {
+                       rv = -EBUSY;
+                       goto out;
+               }
        }
 
        if (mddev->bitmap || mddev->bitmap_info.file ||
            mddev->bitmap_info.offset) {
                /* bitmap already configured.  Only option is to clear it */
-               if (strncmp(buf, "none", 4) != 0)
-                       return -EBUSY;
+               if (strncmp(buf, "none", 4) != 0) {
+                       rv = -EBUSY;
+                       goto out;
+               }
                if (mddev->pers) {
                        mddev->pers->quiesce(mddev, 1);
                        bitmap_destroy(mddev);
@@ -2214,21 +2224,25 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                        /* nothing to be done */;
                else if (strncmp(buf, "file:", 5) == 0) {
                        /* Not supported yet */
-                       return -EINVAL;
+                       rv = -EINVAL;
+                       goto out;
                } else {
-                       int rv;
                        if (buf[0] == '+')
                                rv = kstrtoll(buf+1, 10, &offset);
                        else
                                rv = kstrtoll(buf, 10, &offset);
                        if (rv)
-                               return rv;
-                       if (offset == 0)
-                               return -EINVAL;
+                               goto out;
+                       if (offset == 0) {
+                               rv = -EINVAL;
+                               goto out;
+                       }
                        if (mddev->bitmap_info.external == 0 &&
                            mddev->major_version == 0 &&
-                           offset != mddev->bitmap_info.default_offset)
-                               return -EINVAL;
+                           offset != mddev->bitmap_info.default_offset) {
+                               rv = -EINVAL;
+                               goto out;
+                       }
                        mddev->bitmap_info.offset = offset;
                        if (mddev->pers) {
                                struct bitmap *bitmap;
@@ -2245,7 +2259,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                                mddev->pers->quiesce(mddev, 0);
                                if (rv) {
                                        bitmap_destroy(mddev);
-                                       return rv;
+                                       goto out;
                                }
                        }
                }
@@ -2257,6 +2271,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
                md_wakeup_thread(mddev->thread);
        }
+       rv = 0;
+out:
+       mddev_unlock(mddev);
+       if (rv)
+               return rv;
        return len;
 }
 
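location_store() now takes mddev_lock() on entry and funnels every exit through a single unlock at "out", so the bitmap cannot be reconfigured concurrently with other writers. A minimal sketch of that shape, using a plain mutex as a hypothetical stand-in for mddev_lock()/mddev_unlock():

#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_lock);      /* stand-in for mddev_lock() */

static ssize_t example_store(const char *buf, size_t len)
{
        ssize_t rv = 0;

        mutex_lock(&example_lock);

        if (buf[0] == '\0') {           /* every error path jumps to out */
                rv = -EINVAL;
                goto out;
        }

        /* ... reconfigure under the lock ... */

out:
        mutex_unlock(&example_lock);
        return rv ? rv : len;
}
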
index 6571c81465e1867772d042316a42042afb2a702c..8625040bae92fee2da914feeb544c48f2cdce595 100644 (file)
@@ -1879,7 +1879,7 @@ static int __init dm_bufio_init(void)
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);
 
-       dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
+       dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
        if (!dm_bufio_wq)
                return -ENOMEM;
 
index eedba67b0e3ef6bed544b959d8440a5433c6bc75..874295757caa443a16c6d66723148df91e7c459e 100644 (file)
@@ -1453,7 +1453,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
        unsigned i;
        int err;
 
-       cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
+       cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
                           GFP_KERNEL);
        if (!cc->tfms)
                return -ENOMEM;
@@ -1924,6 +1924,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
 
+       /*
+        * Check if bio is too large, split as needed.
+        */
+       if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
+           bio_data_dir(bio) == WRITE)
+               dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
+
        io = dm_per_bio_data(bio, cc->per_bio_data_size);
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
        io->ctx.req = (struct skcipher_request *)(io + 1);
index 4ab68033f9d10e1d54e92fefe2bf2a916ab089fc..49e4d8d4558fb29bc170ccdf9ce07fe56a16a127 100644 (file)
@@ -259,12 +259,12 @@ static int log_one_block(struct log_writes_c *lc,
                goto out;
        sector++;
 
-       bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
+       atomic_inc(&lc->io_blocks);
+       bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
        if (!bio) {
                DMERR("Couldn't alloc log bio");
                goto error;
        }
-       atomic_inc(&lc->io_blocks);
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = lc->logdev->bdev;
@@ -282,7 +282,7 @@ static int log_one_block(struct log_writes_c *lc,
                if (ret != block->vecs[i].bv_len) {
                        atomic_inc(&lc->io_blocks);
                        submit_bio(bio);
-                       bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
+                       bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
                        if (!bio) {
                                DMERR("Couldn't alloc log bio");
                                goto error;
@@ -459,9 +459,9 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       ret = -EINVAL;
        lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
-       if (!lc->log_kthread) {
+       if (IS_ERR(lc->log_kthread)) {
+               ret = PTR_ERR(lc->log_kthread);
                ti->error = "Couldn't alloc kthread";
                dm_put_device(ti, lc->dev);
                dm_put_device(ti, lc->logdev);
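kthread_run() reports failure with an error pointer rather than NULL, so the old !lc->log_kthread test could never trigger; checking IS_ERR() and propagating PTR_ERR() is the correct form. A self-contained userspace sketch that mimics the error-pointer convention (the helpers below are simplified stand-ins for illustration, not the kernel's definitions):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR(): an error code is
 * folded into the top of the address space, so a failed call returns a
 * non-NULL pointer that still encodes -errno. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical thread starter that fails the way kthread_run() does. */
static void *start_worker(void)
{
        return ERR_PTR(-ENOMEM);
}

int main(void)
{
        void *task = start_worker();

        if (!task)
                printf("NULL check: never reached for ERR_PTR values\n");
        if (IS_ERR(task))
                printf("IS_ERR check: caught error %ld\n", PTR_ERR(task));
        return EXIT_SUCCESS;
}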
index 41573f1f626f4dd43dd97d5e6372e38e6987ae5d..34a840d9df7644d74d24bf44e547a9dbd735407b 100644 (file)
@@ -834,8 +834,10 @@ static int join(struct mddev *mddev, int nodes)
                goto err;
        }
        cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
-       if (!cinfo->ack_lockres)
+       if (!cinfo->ack_lockres) {
+               ret = -ENOMEM;
                goto err;
+       }
        /* get sync CR lock on ACK. */
        if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
                pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
@@ -849,8 +851,10 @@ static int join(struct mddev *mddev, int nodes)
        pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
        snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
        cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
-       if (!cinfo->bitmap_lockres)
+       if (!cinfo->bitmap_lockres) {
+               ret = -ENOMEM;
                goto err;
+       }
        if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
                pr_err("Failed to get bitmap lock\n");
                ret = -EINVAL;
@@ -858,8 +862,10 @@ static int join(struct mddev *mddev, int nodes)
        }
 
        cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
-       if (!cinfo->resync_lockres)
+       if (!cinfo->resync_lockres) {
+               ret = -ENOMEM;
                goto err;
+       }
 
        return 0;
 err:
index d646f6e444f0d1921cf6d1df8e276f6acec76736..915e84d631a22111c2256ab2f4676875a2cc61b1 100644 (file)
@@ -1604,11 +1604,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
                        mddev->new_chunk_sectors = mddev->chunk_sectors;
                }
 
-               if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) {
+               if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
                        set_bit(MD_HAS_JOURNAL, &mddev->flags);
-                       if (mddev->recovery_cp == MaxSector)
-                               set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
-               }
        } else if (mddev->pers == NULL) {
                /* Insist of good event counter while assembling, except for
                 * spares (which don't need an event count) */
@@ -5851,6 +5848,9 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
                        working++;
                        if (test_bit(In_sync, &rdev->flags))
                                insync++;
+                       else if (test_bit(Journal, &rdev->flags))
+                               /* TODO: add journal count to md_u.h */
+                               ;
                        else
                                spare++;
                }
@@ -7610,16 +7610,12 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
 
 int md_setup_cluster(struct mddev *mddev, int nodes)
 {
-       int err;
-
-       err = request_module("md-cluster");
-       if (err) {
-               pr_err("md-cluster module not found.\n");
-               return -ENOENT;
-       }
-
+       if (!md_cluster_ops)
+               request_module("md-cluster");
        spin_lock(&pers_lock);
+       /* ensure module won't be unloaded */
        if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+               pr_err("can't find md-cluster module or get its reference.\n");
                spin_unlock(&pers_lock);
                return -ENOENT;
        }
@@ -7862,6 +7858,7 @@ void md_do_sync(struct md_thread *thread)
         */
 
        do {
+               int mddev2_minor = -1;
                mddev->curr_resync = 2;
 
        try_again:
@@ -7891,10 +7888,14 @@ void md_do_sync(struct md_thread *thread)
                                prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
                                if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
                                    mddev2->curr_resync >= mddev->curr_resync) {
-                                       printk(KERN_INFO "md: delaying %s of %s"
-                                              " until %s has finished (they"
-                                              " share one or more physical units)\n",
-                                              desc, mdname(mddev), mdname(mddev2));
+                                       if (mddev2_minor != mddev2->md_minor) {
+                                               mddev2_minor = mddev2->md_minor;
+                                               printk(KERN_INFO "md: delaying %s of %s"
+                                                      " until %s has finished (they"
+                                                      " share one or more physical units)\n",
+                                                      desc, mdname(mddev),
+                                                      mdname(mddev2));
+                                       }
                                        mddev_put(mddev2);
                                        if (signal_pending(current))
                                                flush_signals(current);
@@ -8275,16 +8276,13 @@ no_add:
 static void md_start_sync(struct work_struct *ws)
 {
        struct mddev *mddev = container_of(ws, struct mddev, del_work);
-       int ret = 0;
 
        mddev->sync_thread = md_register_thread(md_do_sync,
                                                mddev,
                                                "resync");
        if (!mddev->sync_thread) {
-               if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
-                       printk(KERN_ERR "%s: could not start resync"
-                              " thread...\n",
-                              mdname(mddev));
+               printk(KERN_ERR "%s: could not start resync thread...\n",
+                      mdname(mddev));
                /* leave the spares where they are, it shouldn't hurt */
                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
index 0e4efcd1079550faba99495bdd6db51badbfea0f..be1a9fca3b2d2ade369359d109d1a53ddf30d077 100644 (file)
@@ -1064,6 +1064,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        int max_sectors;
        int sectors;
 
+       md_write_start(mddev, bio);
+
        /*
         * Register the new request and wait if the reconstruction
         * thread has put up a bar for new requests.
@@ -1445,8 +1447,6 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
                return;
        }
 
-       md_write_start(mddev, bio);
-
        do {
 
                /*
@@ -2465,20 +2465,21 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
 
        while (sect_to_write) {
                struct bio *wbio;
+               sector_t wsector;
                if (sectors > sect_to_write)
                        sectors = sect_to_write;
                /* Write at 'sector' for 'sectors' */
                wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
-               wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
-                                  choose_data_offset(r10_bio, rdev) +
-                                  (sector - r10_bio->sector));
+               wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
+               wbio->bi_iter.bi_sector = wsector +
+                                  choose_data_offset(r10_bio, rdev);
                wbio->bi_bdev = rdev->bdev;
                bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 
                if (submit_bio_wait(wbio) < 0)
                        /* Failure! */
-                       ok = rdev_set_badblocks(rdev, sector,
+                       ok = rdev_set_badblocks(rdev, wsector,
                                                sectors, 0)
                                && ok;
 
index 51f76ddbe265445d1a6c5083cba2fe8715bf5881..1b1ab4a1d132b39f0145b8bc3305484fad5a7091 100644 (file)
@@ -96,7 +96,6 @@ struct r5l_log {
        spinlock_t no_space_stripes_lock;
 
        bool need_cache_flush;
-       bool in_teardown;
 };
 
 /*
@@ -704,31 +703,22 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
 
        mddev = log->rdev->mddev;
        /*
-        * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
-        * wait for this thread to finish. This thread waits for
-        * MD_CHANGE_PENDING clear, which is supposed to be done in
-        * md_check_recovery(). md_check_recovery() tries to get
-        * reconfig_mutex. Since r5l_quiesce already holds the mutex,
-        * md_check_recovery() fails, so the PENDING never get cleared. The
-        * in_teardown check workaround this issue.
+        * Discard could zero data, so before discarding we must make sure
+        * the superblock is updated to the new log tail. Updating the
+        * superblock (either by calling md_update_sb() directly or by
+        * depending on the md thread) must hold the reconfig mutex. On the
+        * other hand, raid5_quiesce() is called with reconfig_mutex held.
+        * The first step of raid5_quiesce() is waiting for all IO to
+        * finish, hence waiting for the reclaim thread, while the reclaim
+        * thread is calling this function and waiting for the reconfig
+        * mutex. So there is a deadlock. We work around this issue with a
+        * trylock.
+        * FIXME: we could miss a discard if we can't take the reconfig mutex
         */
-       if (!log->in_teardown) {
-               set_mask_bits(&mddev->flags, 0,
-                             BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
-               md_wakeup_thread(mddev->thread);
-               wait_event(mddev->sb_wait,
-                       !test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
-                       log->in_teardown);
-               /*
-                * r5l_quiesce could run after in_teardown check and hold
-                * mutex first. Superblock might get updated twice.
-                */
-               if (log->in_teardown)
-                       md_update_sb(mddev, 1);
-       } else {
-               WARN_ON(!mddev_is_locked(mddev));
-               md_update_sb(mddev, 1);
-       }
+       set_mask_bits(&mddev->flags, 0,
+               BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+       if (!mddev_trylock(mddev))
+               return;
+       md_update_sb(mddev, 1);
+       mddev_unlock(mddev);
 
        /* discard IO error really doesn't matter, ignore it */
        if (log->last_checkpoint < end) {
@@ -827,7 +817,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
        if (!log || state == 2)
                return;
        if (state == 0) {
-               log->in_teardown = 0;
                /*
                 * This is a special case for hotadd. In suspend, the array has
                 * no journal. In resume, journal is initialized as well as the
@@ -838,11 +827,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
                log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                        log->rdev->mddev, "reclaim");
        } else if (state == 1) {
-               /*
-                * at this point all stripes are finished, so io_unit is at
-                * least in STRIPE_END state
-                */
-               log->in_teardown = 1;
                /* make sure r5l_write_super_and_discard_space exits */
                mddev = log->rdev->mddev;
                wake_up(&mddev->sb_wait);
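The comment added to r5l_write_super_and_discard_space() describes a lock-order deadlock that the patch breaks with a trylock, accepting that a discard may occasionally be skipped. A small pthread-based sketch of that shape, with invented names and none of the raid5-cache details:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t reconfig_lock = PTHREAD_MUTEX_INITIALIZER;
static bool superblock_dirty = true;

/* Called from a reclaim thread while another thread may be holding
 * reconfig_lock and waiting for reclaim to finish.  Taking the lock
 * unconditionally here would deadlock; trylock lets us skip the update
 * (at the cost of possibly missing one) and retry later. */
static void update_superblock_if_possible(void)
{
        if (pthread_mutex_trylock(&reconfig_lock) != 0) {
                fprintf(stderr, "reconfig lock busy, deferring update\n");
                return;
        }
        if (superblock_dirty) {
                /* ... write the superblock ... */
                superblock_dirty = false;
        }
        pthread_mutex_unlock(&reconfig_lock);
}

int main(void)
{
        update_superblock_if_possible();
        return 0;
}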
index 8912407a4dd0edb251a36e3befe3ff8182598bdf..ee7fc37017007a2d6cf4b440f95063535d928522 100644 (file)
@@ -659,6 +659,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 {
        struct stripe_head *sh;
        int hash = stripe_hash_locks_hash(sector);
+       int inc_empty_inactive_list_flag;
 
        pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 
@@ -703,7 +704,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
                                        atomic_inc(&conf->active_stripes);
                                BUG_ON(list_empty(&sh->lru) &&
                                       !test_bit(STRIPE_EXPANDING, &sh->state));
+                               inc_empty_inactive_list_flag = 0;
+                               if (!list_empty(conf->inactive_list + hash))
+                                       inc_empty_inactive_list_flag = 1;
                                list_del_init(&sh->lru);
+                               if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+                                       atomic_inc(&conf->empty_inactive_list_nr);
                                if (sh->group) {
                                        sh->group->stripes_cnt--;
                                        sh->group = NULL;
@@ -762,6 +768,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
        sector_t head_sector, tmp_sec;
        int hash;
        int dd_idx;
+       int inc_empty_inactive_list_flag;
 
        /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
        tmp_sec = sh->sector;
@@ -779,7 +786,12 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
                                atomic_inc(&conf->active_stripes);
                        BUG_ON(list_empty(&head->lru) &&
                               !test_bit(STRIPE_EXPANDING, &head->state));
+                       inc_empty_inactive_list_flag = 0;
+                       if (!list_empty(conf->inactive_list + hash))
+                               inc_empty_inactive_list_flag = 1;
                        list_del_init(&head->lru);
+                       if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+                               atomic_inc(&conf->empty_inactive_list_nr);
                        if (head->group) {
                                head->group->stripes_cnt--;
                                head->group = NULL;
@@ -993,7 +1005,6 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       bio_reset(bi);
                        bi->bi_bdev = rdev->bdev;
                        bio_set_op_attrs(bi, op, op_flags);
                        bi->bi_end_io = op_is_write(op)
@@ -1045,7 +1056,6 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       bio_reset(rbi);
                        rbi->bi_bdev = rrdev->bdev;
                        bio_set_op_attrs(rbi, op, op_flags);
                        BUG_ON(!op_is_write(op));
@@ -1978,9 +1988,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
        put_cpu();
 }
 
-static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+       int disks)
 {
        struct stripe_head *sh;
+       int i;
 
        sh = kmem_cache_zalloc(sc, gfp);
        if (sh) {
@@ -1989,6 +2001,17 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
                INIT_LIST_HEAD(&sh->batch_list);
                INIT_LIST_HEAD(&sh->lru);
                atomic_set(&sh->count, 1);
+               for (i = 0; i < disks; i++) {
+                       struct r5dev *dev = &sh->dev[i];
+
+                       bio_init(&dev->req);
+                       dev->req.bi_io_vec = &dev->vec;
+                       dev->req.bi_max_vecs = 1;
+
+                       bio_init(&dev->rreq);
+                       dev->rreq.bi_io_vec = &dev->rvec;
+                       dev->rreq.bi_max_vecs = 1;
+               }
        }
        return sh;
 }
@@ -1996,7 +2019,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 {
        struct stripe_head *sh;
 
-       sh = alloc_stripe(conf->slab_cache, gfp);
+       sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size);
        if (!sh)
                return 0;
 
@@ -2167,7 +2190,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        mutex_lock(&conf->cache_size_mutex);
 
        for (i = conf->max_nr_stripes; i; i--) {
-               nsh = alloc_stripe(sc, GFP_KERNEL);
+               nsh = alloc_stripe(sc, GFP_KERNEL, newsize);
                if (!nsh)
                        break;
 
@@ -2299,6 +2322,7 @@ static void raid5_end_read_request(struct bio * bi)
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                bi->bi_error);
        if (i == disks) {
+               bio_reset(bi);
                BUG();
                return;
        }
@@ -2399,6 +2423,7 @@ static void raid5_end_read_request(struct bio * bi)
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
+       bio_reset(bi);
        clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
        raid5_release_stripe(sh);
@@ -2436,6 +2461,7 @@ static void raid5_end_write_request(struct bio *bi)
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                bi->bi_error);
        if (i == disks) {
+               bio_reset(bi);
                BUG();
                return;
        }
@@ -2472,6 +2498,7 @@ static void raid5_end_write_request(struct bio *bi)
        if (sh->batch_head && bi->bi_error && !replacement)
                set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
+       bio_reset(bi);
        if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
                clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
@@ -2485,16 +2512,6 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
        struct r5dev *dev = &sh->dev[i];
 
-       bio_init(&dev->req);
-       dev->req.bi_io_vec = &dev->vec;
-       dev->req.bi_max_vecs = 1;
-       dev->req.bi_private = sh;
-
-       bio_init(&dev->rreq);
-       dev->rreq.bi_io_vec = &dev->rvec;
-       dev->rreq.bi_max_vecs = 1;
-       dev->rreq.bi_private = sh;
-
        dev->flags = 0;
        dev->sector = raid5_compute_blocknr(sh, i, previous);
 }
@@ -4628,7 +4645,9 @@ finish:
        }
 
        if (!bio_list_empty(&s.return_bi)) {
-               if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+               if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
+                               (s.failed <= conf->max_degraded ||
+                                       conf->mddev->external == 0)) {
                        spin_lock_irq(&conf->device_lock);
                        bio_list_merge(&conf->return_bi, &s.return_bi);
                        spin_unlock_irq(&conf->device_lock);
@@ -6620,6 +6639,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        }
 
        conf->min_nr_stripes = NR_STRIPES;
+       if (mddev->reshape_position != MaxSector) {
+               int stripes = max_t(int,
+                       ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
+                       ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
+               conf->min_nr_stripes = max(NR_STRIPES, stripes);
+               if (conf->min_nr_stripes != NR_STRIPES)
+                       printk(KERN_INFO
+                               "md/raid:%s: force stripe size %d for reshape\n",
+                               mdname(mddev), conf->min_nr_stripes);
+       }
        memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
                 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
        atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
@@ -6826,11 +6855,14 @@ static int raid5_run(struct mddev *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
-       if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
-               printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
-                      mdname(mddev));
-               mddev->ro = 1;
-               set_disk_ro(mddev->gendisk, 1);
+       if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+               if (!journal_dev) {
+                       pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
+                              mdname(mddev));
+                       mddev->ro = 1;
+                       set_disk_ro(mddev->gendisk, 1);
+               } else if (mddev->recovery_cp == MaxSector)
+                       set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
        }
 
        conf->min_offset_diff = min_offset_diff;
index 869c83fb3c5d90a751d2e1b1d7c03c6e35fa2517..f00f3e7422657723ad8f69bd90f76554f8938bb6 100644 (file)
@@ -2185,7 +2185,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
        return 0;
 }
 
-static int gpmc_probe_dt_children(struct platform_device *pdev)
+static void gpmc_probe_dt_children(struct platform_device *pdev)
 {
        int ret;
        struct device_node *child;
@@ -2200,11 +2200,11 @@ static int gpmc_probe_dt_children(struct platform_device *pdev)
                else
                        ret = gpmc_probe_generic_child(pdev, child);
 
-               if (ret)
-                       return ret;
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
+                               child->name, ret);
+               }
        }
-
-       return 0;
 }
 #else
 static int gpmc_probe_dt(struct platform_device *pdev)
@@ -2212,9 +2212,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
        return 0;
 }
 
-static int gpmc_probe_dt_children(struct platform_device *pdev)
+static void gpmc_probe_dt_children(struct platform_device *pdev)
 {
-       return 0;
 }
 #endif /* CONFIG_OF */
 
@@ -2369,16 +2368,10 @@ static int gpmc_probe(struct platform_device *pdev)
                goto setup_irq_failed;
        }
 
-       rc = gpmc_probe_dt_children(pdev);
-       if (rc < 0) {
-               dev_err(gpmc->dev, "failed to probe DT children\n");
-               goto dt_children_failed;
-       }
+       gpmc_probe_dt_children(pdev);
 
        return 0;
 
-dt_children_failed:
-       gpmc_free_irq(gpmc);
 setup_irq_failed:
        gpmc_gpio_exit(gpmc);
 gpio_init_failed:
index a216b46677429402168c587c638cc1d710632960..d002528289667732e9032f68aa0dba6624abfd9c 100644 (file)
@@ -345,16 +345,6 @@ config SENSORS_TSL2550
          This driver can also be built as a module.  If so, the module
          will be called tsl2550.
 
-config SENSORS_BH1780
-       tristate "ROHM BH1780GLI ambient light sensor"
-       depends on I2C && SYSFS
-       help
-         If you say yes here you get support for the ROHM BH1780GLI
-         ambient light sensor.
-
-         This driver can also be built as a module.  If so, the module
-         will be called bh1780gli.
-
 config SENSORS_BH1770
          tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
          depends on I2C
index 7410c6d9a34db942afd5624ebf54aadaffe9eb94..fb32516ddfe2e4f8112beee9e831bcc4eb729667 100644 (file)
@@ -19,7 +19,6 @@ obj-$(CONFIG_TIFM_CORE)               += tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)        += tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)          += phantom.o
 obj-$(CONFIG_QCOM_COINCELL)    += qcom-coincell.o
-obj-$(CONFIG_SENSORS_BH1780)   += bh1780gli.o
 obj-$(CONFIG_SENSORS_BH1770)   += bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
 obj-$(CONFIG_SGI_IOC4)         += ioc4.o
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
deleted file mode 100644 (file)
index 7f90ce5..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * bh1780gli.c
- * ROHM Ambient Light Sensor Driver
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Hemanth V <hemanthv@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#define BH1780_REG_CONTROL     0x80
-#define BH1780_REG_PARTID      0x8A
-#define BH1780_REG_MANFID      0x8B
-#define BH1780_REG_DLOW        0x8C
-#define BH1780_REG_DHIGH       0x8D
-
-#define BH1780_REVMASK         (0xf)
-#define BH1780_POWMASK         (0x3)
-#define BH1780_POFF            (0x0)
-#define BH1780_PON             (0x3)
-
-/* power on settling time in ms */
-#define BH1780_PON_DELAY       2
-
-struct bh1780_data {
-       struct i2c_client *client;
-       int power_state;
-       /* lock for sysfs operations */
-       struct mutex lock;
-};
-
-static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
-{
-       int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
-       if (ret < 0)
-               dev_err(&ddata->client->dev,
-                       "i2c_smbus_write_byte_data failed error %d Register (%s)\n",
-                       ret, msg);
-       return ret;
-}
-
-static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg)
-{
-       int ret = i2c_smbus_read_byte_data(ddata->client, reg);
-       if (ret < 0)
-               dev_err(&ddata->client->dev,
-                       "i2c_smbus_read_byte_data failed error %d Register (%s)\n",
-                       ret, msg);
-       return ret;
-}
-
-static ssize_t bh1780_show_lux(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct bh1780_data *ddata = platform_get_drvdata(pdev);
-       int lsb, msb;
-
-       lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
-       if (lsb < 0)
-               return lsb;
-
-       msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH");
-       if (msb < 0)
-               return msb;
-
-       return sprintf(buf, "%d\n", (msb << 8) | lsb);
-}
-
-static ssize_t bh1780_show_power_state(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct bh1780_data *ddata = platform_get_drvdata(pdev);
-       int state;
-
-       state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
-       if (state < 0)
-               return state;
-
-       return sprintf(buf, "%d\n", state & BH1780_POWMASK);
-}
-
-static ssize_t bh1780_store_power_state(struct device *dev,
-                                       struct device_attribute *attr,
-                                       const char *buf, size_t count)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct bh1780_data *ddata = platform_get_drvdata(pdev);
-       unsigned long val;
-       int error;
-
-       error = kstrtoul(buf, 0, &val);
-       if (error)
-               return error;
-
-       if (val < BH1780_POFF || val > BH1780_PON)
-               return -EINVAL;
-
-       mutex_lock(&ddata->lock);
-
-       error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
-       if (error < 0) {
-               mutex_unlock(&ddata->lock);
-               return error;
-       }
-
-       msleep(BH1780_PON_DELAY);
-       ddata->power_state = val;
-       mutex_unlock(&ddata->lock);
-
-       return count;
-}
-
-static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
-
-static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
-               bh1780_show_power_state, bh1780_store_power_state);
-
-static struct attribute *bh1780_attributes[] = {
-       &dev_attr_power_state.attr,
-       &dev_attr_lux.attr,
-       NULL
-};
-
-static const struct attribute_group bh1780_attr_group = {
-       .attrs = bh1780_attributes,
-};
-
-static int bh1780_probe(struct i2c_client *client,
-                                               const struct i2c_device_id *id)
-{
-       int ret;
-       struct bh1780_data *ddata;
-       struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
-
-       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
-               return -EIO;
-
-       ddata = devm_kzalloc(&client->dev, sizeof(struct bh1780_data),
-                            GFP_KERNEL);
-       if (ddata == NULL)
-               return -ENOMEM;
-
-       ddata->client = client;
-       i2c_set_clientdata(client, ddata);
-
-       ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
-       if (ret < 0)
-               return ret;
-
-       dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
-                       (ret & BH1780_REVMASK));
-
-       mutex_init(&ddata->lock);
-
-       return sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
-}
-
-static int bh1780_remove(struct i2c_client *client)
-{
-       sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int bh1780_suspend(struct device *dev)
-{
-       struct bh1780_data *ddata;
-       int state, ret;
-       struct i2c_client *client = to_i2c_client(dev);
-
-       ddata = i2c_get_clientdata(client);
-       state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
-       if (state < 0)
-               return state;
-
-       ddata->power_state = state & BH1780_POWMASK;
-
-       ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
-                               "CONTROL");
-
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
-static int bh1780_resume(struct device *dev)
-{
-       struct bh1780_data *ddata;
-       int state, ret;
-       struct i2c_client *client = to_i2c_client(dev);
-
-       ddata = i2c_get_clientdata(client);
-       state = ddata->power_state;
-       ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
-                               "CONTROL");
-
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
-
-static const struct i2c_device_id bh1780_id[] = {
-       { "bh1780", 0 },
-       { },
-};
-
-MODULE_DEVICE_TABLE(i2c, bh1780_id);
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_bh1780_match[] = {
-       { .compatible = "rohm,bh1780gli", },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, of_bh1780_match);
-#endif
-
-static struct i2c_driver bh1780_driver = {
-       .probe          = bh1780_probe,
-       .remove         = bh1780_remove,
-       .id_table       = bh1780_id,
-       .driver = {
-               .name = "bh1780",
-               .pm     = &bh1780_pm,
-               .of_match_table = of_match_ptr(of_bh1780_match),
-       },
-};
-
-module_i2c_driver(bh1780_driver);
-
-MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
index 7ada5f1b7bb67b808ec824561b0ceb6def317562..3519acebfdab6be8387151d969f55859952c2f09 100644 (file)
@@ -230,6 +230,11 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
        if (phb->bus == NULL)
                return -ENXIO;
 
+       /* Set release hook on root bus */
+       pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
+                                   pcibios_free_controller_deferred,
+                                   (void *) phb);
+
        /* Claim resources. This might need some rework as well depending
         * whether we are doing probe-only or not, like assigning unassigned
         * resources etc...
@@ -256,7 +261,10 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
        afu->phb = NULL;
 
        pci_remove_root_bus(phb->bus);
-       pcibios_free_controller(phb);
+       /*
+        * We don't free phb here - that's handled by
+        * pcibios_free_controller_deferred()
+        */
 }
 
 static bool _cxl_pci_is_vphb_device(struct pci_controller *phb)
index 166b1db3969fbc1f74fed6882bae0995a5768922..3564477b8c2dad4cde23eda8fc8bb3b938d6bc86 100644 (file)
@@ -4,7 +4,7 @@
  */
 #include "lkdtm.h"
 
-void lkdtm_rodata_do_nothing(void)
+void notrace lkdtm_rodata_do_nothing(void)
 {
        /* Does nothing. We just want an architecture agnostic "return". */
 }
index 5525a204db93aadb15a2691ede56f915857845e4..1dd611423d8be4a18de022c48f7f30bca84d8970 100644 (file)
@@ -9,7 +9,15 @@
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 
-static size_t cache_size = 1024;
+/*
+ * Many of the tests here end up using const sizes, but those would
+ * normally be ignored by hardened usercopy, so force the compiler
+ * into choosing the non-const path to make sure we trigger the
+ * hardened usercopy checks by adding "unconst" to all the const copies,
+ * and making sure "cache_size" isn't optimized into a const.
+ */
+static volatile size_t unconst = 0;
+static volatile size_t cache_size = 1024;
 static struct kmem_cache *bad_cache;
 
 static const unsigned char test_text[] = "This is a test.\n";
@@ -67,14 +75,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
        if (to_user) {
                pr_info("attempting good copy_to_user of local stack\n");
                if (copy_to_user((void __user *)user_addr, good_stack,
-                                sizeof(good_stack))) {
+                                unconst + sizeof(good_stack))) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }
 
                pr_info("attempting bad copy_to_user of distant stack\n");
                if (copy_to_user((void __user *)user_addr, bad_stack,
-                                sizeof(good_stack))) {
+                                unconst + sizeof(good_stack))) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
@@ -88,14 +96,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 
                pr_info("attempting good copy_from_user of local stack\n");
                if (copy_from_user(good_stack, (void __user *)user_addr,
-                                  sizeof(good_stack))) {
+                                  unconst + sizeof(good_stack))) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }
 
                pr_info("attempting bad copy_from_user of distant stack\n");
                if (copy_from_user(bad_stack, (void __user *)user_addr,
-                                  sizeof(good_stack))) {
+                                  unconst + sizeof(good_stack))) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
@@ -109,7 +117,7 @@ static void do_usercopy_heap_size(bool to_user)
 {
        unsigned long user_addr;
        unsigned char *one, *two;
-       const size_t size = 1024;
+       size_t size = unconst + 1024;
 
        one = kmalloc(size, GFP_KERNEL);
        two = kmalloc(size, GFP_KERNEL);
@@ -285,13 +293,14 @@ void lkdtm_USERCOPY_KERNEL(void)
 
        pr_info("attempting good copy_to_user from kernel rodata\n");
        if (copy_to_user((void __user *)user_addr, test_text,
-                        sizeof(test_text))) {
+                        unconst + sizeof(test_text))) {
                pr_warn("copy_to_user failed unexpectedly?!\n");
                goto free_user;
        }
 
        pr_info("attempting bad copy_to_user from kernel text\n");
-       if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
+       if (copy_to_user((void __user *)user_addr, vm_mmap,
+                        unconst + PAGE_SIZE)) {
                pr_warn("copy_to_user failed, but lacked Oops\n");
                goto free_user;
        }
index e2fb44cc5c37668765e31f6e2b279cb3ee249229..dc3a854e02d39d1b73f95ff1ed8e203088955cb6 100644 (file)
@@ -1263,8 +1263,14 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
 static bool mei_me_fw_type_sps(struct pci_dev *pdev)
 {
        u32 reg;
-       /* Read ME FW Status check for SPS Firmware */
-       pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+       unsigned int devfn;
+
+       /*
+        * Read ME FW Status register to check for SPS Firmware
+        * The SPS FW is only signaled in pci function 0
+        */
+       devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+       pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
        /* if bits [19:16] = 15, running SPS Firmware */
        return (reg & 0xf0000) == 0xf0000;
index 64e64da6da4439c470832ab32a0a0158d6c2d62a..71cea9b296b2f72c1763d335024ff24379e137b7 100644 (file)
@@ -85,8 +85,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
index f23d65eb070d85eaaef5929c6ee4c14a5d0c5a36..be3c49fa7382f31bde9b671834e74dc9f41c9874 100644 (file)
@@ -1016,14 +1016,16 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 
                /* Only reconfigure if we have a different burst size */
                if (*bp != burst) {
-                       struct dma_slave_config cfg;
-
-                       cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-                       cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-                       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-                       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-                       cfg.src_maxburst = burst;
-                       cfg.dst_maxburst = burst;
+                       struct dma_slave_config cfg = {
+                               .src_addr = host->phys_base +
+                                           OMAP_MMC_REG(host, DATA),
+                               .dst_addr = host->phys_base +
+                                           OMAP_MMC_REG(host, DATA),
+                               .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+                               .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+                               .src_maxburst = burst,
+                               .dst_maxburst = burst,
+                       };
 
                        if (dmaengine_slave_config(c, &cfg))
                                goto use_pio;
index 24ebc9a8de89a072201fba04276fd27e1b68d3e8..5f2f24a7360d87988981372e5ba7eea438d22a78 100644 (file)
@@ -1409,11 +1409,18 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
                                        struct mmc_request *req)
 {
-       struct dma_slave_config cfg;
        struct dma_async_tx_descriptor *tx;
        int ret = 0, i;
        struct mmc_data *data = req->data;
        struct dma_chan *chan;
+       struct dma_slave_config cfg = {
+               .src_addr = host->mapbase + OMAP_HSMMC_DATA,
+               .dst_addr = host->mapbase + OMAP_HSMMC_DATA,
+               .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+               .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+               .src_maxburst = data->blksz / 4,
+               .dst_maxburst = data->blksz / 4,
+       };
 
        /* Sanity check: all the SG entries must be aligned by block size. */
        for (i = 0; i < data->sg_len; i++) {
@@ -1433,13 +1440,6 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
 
        chan = omap_hsmmc_get_dma_chan(host, data);
 
-       cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
-       cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
-       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       cfg.src_maxburst = data->blksz / 4;
-       cfg.dst_maxburst = data->blksz / 4;
-
        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;
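Both MMC hunks above build struct dma_slave_config with a designated initializer instead of assigning members into an uninitialized stack variable; C99 guarantees that every member not named in the initializer is zeroed, so fields the driver doesn't set can no longer carry stack garbage into dmaengine_slave_config(). A tiny standalone demonstration of that guarantee, with a made-up struct:

#include <stdio.h>

/* Stand-in for a config struct that grows new members over time. */
struct fake_slave_config {
        unsigned int src_addr;
        unsigned int dst_addr;
        unsigned int src_maxburst;
        unsigned int dst_maxburst;
        unsigned int newer_field_the_caller_forgot;
};

int main(void)
{
        /* C99 6.7.8: members without a designator are initialized as if
         * they had static storage duration, i.e. to zero. */
        struct fake_slave_config cfg = {
                .src_addr = 0x1000,
                .dst_addr = 0x1000,
                .src_maxburst = 16,
                .dst_maxburst = 16,
        };

        printf("unnamed member = %u (guaranteed 0)\n",
               cfg.newer_field_the_caller_forgot);
        return 0;
}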
index c95ba83366a0f2d95b1b45a70824327069f6b79d..ed92ce729dde1d60b91ce5395e1802816cca448b 100644 (file)
@@ -28,6 +28,7 @@
 
 struct st_mmc_platform_data {
        struct  reset_control *rstc;
+       struct  clk *icnclk;
        void __iomem *top_ioaddr;
 };
 
@@ -353,7 +354,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        struct sdhci_host *host;
        struct st_mmc_platform_data *pdata;
        struct sdhci_pltfm_host *pltfm_host;
-       struct clk *clk;
+       struct clk *clk, *icnclk;
        int ret = 0;
        u16 host_version;
        struct resource *res;
@@ -365,6 +366,11 @@ static int sdhci_st_probe(struct platform_device *pdev)
                return PTR_ERR(clk);
        }
 
+       /* ICN clock isn't compulsory, but use it if it's provided. */
+       icnclk = devm_clk_get(&pdev->dev, "icn");
+       if (IS_ERR(icnclk))
+               icnclk = NULL;
+
        rstc = devm_reset_control_get(&pdev->dev, NULL);
        if (IS_ERR(rstc))
                rstc = NULL;
@@ -389,6 +395,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        }
 
        clk_prepare_enable(clk);
+       clk_prepare_enable(icnclk);
 
        /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -400,6 +407,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        }
 
        pltfm_host->clk = clk;
+       pdata->icnclk = icnclk;
 
        /* Configure the Arasan HC inside the flashSS */
        st_mmcss_cconfig(np, host);
@@ -422,6 +430,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
        return 0;
 
 err_out:
+       clk_disable_unprepare(icnclk);
        clk_disable_unprepare(clk);
 err_of:
        sdhci_pltfm_free(pdev);
@@ -442,6 +451,8 @@ static int sdhci_st_remove(struct platform_device *pdev)
 
        ret = sdhci_pltfm_unregister(pdev);
 
+       clk_disable_unprepare(pdata->icnclk);
+
        if (rstc)
                reset_control_assert(rstc);
 
@@ -462,6 +473,7 @@ static int sdhci_st_suspend(struct device *dev)
        if (pdata->rstc)
                reset_control_assert(pdata->rstc);
 
+       clk_disable_unprepare(pdata->icnclk);
        clk_disable_unprepare(pltfm_host->clk);
 out:
        return ret;
@@ -475,6 +487,7 @@ static int sdhci_st_resume(struct device *dev)
        struct device_node *np = dev->of_node;
 
        clk_prepare_enable(pltfm_host->clk);
+       clk_prepare_enable(pdata->icnclk);
 
        if (pdata->rstc)
                reset_control_deassert(pdata->rstc);
index 217e8da0628caa909aa212e3c2598dc0ca09da68..9599ed6f121382355b660147fb63554d7a405970 100644 (file)
@@ -1341,9 +1341,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                            slave_dev->name);
        }
 
-       /* already enslaved */
-       if (slave_dev->flags & IFF_SLAVE) {
-               netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
+       /* already in-use? */
+       if (netdev_is_rx_handler_busy(slave_dev)) {
+               netdev_err(bond_dev,
+                          "Error: Device is in use and cannot be enslaved\n");
                return -EBUSY;
        }
 
index 463bed8cbe4c09358a01ba1bbdb286f64721db42..dd446e4666990ecaae0d9be7215821d0af7e59d7 100644 (file)
@@ -205,8 +205,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val,        \
 static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
                                                u32 mask)               \
 {                                                                      \
-       intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
        priv->irq##which##_mask &= ~(mask);                             \
+       intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
 }                                                                      \
 static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
                                                u32 mask)               \
index d1d9d3cf9139f66774fb3b1ed203254783d7f3c8..71067906759419221087e847a2ab5b3e9ac1c6f3 100644 (file)
@@ -2656,15 +2656,19 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
                        return ret;
        }
 
+       /* Rate Control: disable ingress rate limiting. */
        if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
            mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
-           mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
            mv88e6xxx_6320_family(chip)) {
-               /* Rate Control: disable ingress rate limiting. */
                ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
                                           PORT_RATE_CONTROL, 0x0001);
                if (ret)
                        return ret;
+       } else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
+               ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+                                          PORT_RATE_CONTROL, 0x0000);
+               if (ret)
+                       return ret;
        }
 
        /* Port Control 1: disable trunking, disable sending
index 6453148d066a7d6dc698a84ba970d90859d52d85..4eb17daefc4fc2243084c6a84181626d90dd4807 100644 (file)
@@ -1545,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
          .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
        { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
          .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
+         .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
        { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
          .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
        { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
index 0959e6824cb635f0262524db9729988986193fb4..1fc2d852249fb2b5f1a72233024093b726431659 100644 (file)
@@ -38,6 +38,7 @@
 #define ALX_DEV_ID_AR8161                              0x1091
 #define ALX_DEV_ID_E2200                               0xe091
 #define ALX_DEV_ID_E2400                               0xe0a1
+#define ALX_DEV_ID_E2500                               0xe0b1
 #define ALX_DEV_ID_AR8162                              0x1090
 #define ALX_DEV_ID_AR8171                              0x10A1
 #define ALX_DEV_ID_AR8172                              0x10A0
index 9a9745c4047c3757c4b1733dae9d5908db44baa1..625235db644f73bdbbc7a79088e6a15c60d27019 100644 (file)
@@ -159,7 +159,7 @@ static int bgmac_probe(struct bcma_device *core)
 
        if (!bgmac_is_bcm4707_family(core)) {
                mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
-               if (!IS_ERR(mii_bus)) {
+               if (IS_ERR(mii_bus)) {
                        err = PTR_ERR(mii_bus);
                        goto err;
                }
index 97e8925116662b105c7c7a57274fa2f5e906865d..fa3386bb14f7317c6a3c64fdf075c379c5cc2643 100644 (file)
@@ -772,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
                (bp->common.bc_ver & 0xff00) >> 8,
                (bp->common.bc_ver & 0xff));
 
+       if (pci_channel_offline(bp->pdev)) {
+               BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
+               return;
+       }
+
        val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
        if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
                BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@@ -9415,10 +9420,16 @@ unload_error:
        /* Release IRQs */
        bnx2x_free_irq(bp);
 
-       /* Reset the chip */
-       rc = bnx2x_reset_hw(bp, reset_code);
-       if (rc)
-               BNX2X_ERR("HW_RESET failed\n");
+       /* Reset the chip, unless the PCI function is offline. If we reach
+        * this point following PCI error handling, the device is really in
+        * a bad state and we're about to remove it, so resetting the chip
+        * is not a good idea.
+        */
+       if (!pci_channel_offline(bp->pdev)) {
+               rc = bnx2x_reset_hw(bp, reset_code);
+               if (rc)
+                       BNX2X_ERR("HW_RESET failed\n");
+       }
 
        /* Report UNLOAD_DONE to MCP */
        bnx2x_send_unload_done(bp, keep_link);
index 2cf79100c9cb2241b8172cd4a8543cd28b73a7c1..228c964e709a2d73dc1491c998cddabda59d6180 100644 (file)
@@ -353,8 +353,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
-                       __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
-                                        push_len - 16);
+                       __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+                                        (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
                                         push_len);
index 659261218d9f67fd2509718dee8fbd290003cb7d..a2551bcd1027007916f91a364833ee8368315185 100644 (file)
@@ -14012,6 +14012,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
        if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
            (!ec->rx_coalesce_usecs) ||
            (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+           (!ec->tx_coalesce_usecs) ||
            (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
            (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
            (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@@ -14022,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
            (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
                return -EINVAL;
 
-       /* No rx interrupts will be generated if both are zero */
-       if ((ec->rx_coalesce_usecs == 0) &&
-           (ec->rx_max_coalesced_frames == 0))
-               return -EINVAL;
-
-       /* No tx interrupts will be generated if both are zero */
-       if ((ec->tx_coalesce_usecs == 0) &&
-           (ec->tx_max_coalesced_frames == 0))
-               return -EINVAL;
-
        /* Only copy relevant parameters, ignore all others. */
        tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
        tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
index 89c0cfa9719f345cd219a096223285a89fbf88fb..d954a97b0b0b011de07ded9e69f46b893517dde1 100644 (file)
@@ -1323,6 +1323,24 @@ dma_error:
        return 0;
 }
 
+static inline int macb_clear_csum(struct sk_buff *skb)
+{
+       /* no change for packets without checksum offloading */
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       /* make sure we can modify the header */
+       if (unlikely(skb_cow_head(skb, 0)))
+               return -1;
+
+       /* initialize checksum field
+        * This is required - at least for Zynq, which otherwise calculates
+        * wrong UDP header checksums for UDP packets with UDP data len <=2
+        */
+       *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
+       return 0;
+}
+
 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        u16 queue_index = skb_get_queue_mapping(skb);
@@ -1362,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
+       if (macb_clear_csum(skb)) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
        /* Map socket buffer for DMA transfer */
        if (!macb_tx_map(bp, queue, skb)) {
                dev_kfree_skb_any(skb);
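macb_clear_csum() zeroes the L4 checksum field in place before handing the frame to the controller, since (per the comment) the Zynq MAC otherwise computes a wrong UDP checksum for very short payloads. A userspace sketch of clearing that field, with the skb's checksum-start/offset bookkeeping reduced to an invented struct:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal stand-in for the skb bookkeeping: the L4 header starts at
 * csum_start and the checksum field lives csum_offset bytes into it. */
struct fake_skb {
        uint8_t data[64];
        unsigned int csum_start;   /* offset of L4 header in data[] */
        unsigned int csum_offset;  /* 6 for UDP, 16 for TCP */
};

static void clear_l4_csum(struct fake_skb *skb)
{
        uint16_t zero = 0;

        /* Zero the checksum field in place; hardware offload fills in
         * the real value later. */
        memcpy(skb->data + skb->csum_start + skb->csum_offset,
               &zero, sizeof(zero));
}

int main(void)
{
        /* 14-byte Ethernet + 20-byte IPv4 header puts UDP at offset 34. */
        struct fake_skb skb = { .csum_start = 34, .csum_offset = 6 };

        memset(skb.data, 0xff, sizeof(skb.data));
        clear_l4_csum(&skb);
        printf("csum bytes: %02x %02x\n", skb.data[40], skb.data[41]);
        return 0;
}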
index 83025bb4737c47c44b2056190b68c0acc5461ed9..e29815d9e6f45723feca790b236bd5365f1b1ca2 100644 (file)
@@ -279,6 +279,7 @@ struct nicvf {
        u8                      sqs_id;
        bool                    sqs_mode;
        bool                    hw_tso;
+       bool                    t88;
 
        /* Receive buffer alloc */
        u32                     rb_page_offset;
index 16ed20357c5c30d2cbb786cb414a6d4544fecf83..85cc782b9060f7f1ff73dd866bdf7713812a660f 100644 (file)
@@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
        int lmac;
        u64 lmac_cfg;
 
-       /* Max value that can be set is 60 */
-       if (size > 60)
-               size = 60;
+       /* There is an issue in HW wherein, while sending GSO sized
+        * pkts as part of TSO, if the pkt len falls below this size
+        * the NIC will zero-pad the packet and also update the IP total
+        * length. Hence set this value to less than the min pkt size of
+        * MAC+IP+TCP headers; BGX will do the padding to transmit a
+        * 64 byte pkt.
+        */
+       if (size > 52)
+               size = 52;
 
        for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
                lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
index afb10e326b4fc46043b4d3f4cddb7d17f1cc341c..fab35a5938987c757f5faa2cbf808d7f28c05922 100644 (file)
 #define   NIC_QSET_SQ_0_7_DOOR                 (0x010838)
 #define   NIC_QSET_SQ_0_7_STATUS               (0x010840)
 #define   NIC_QSET_SQ_0_7_DEBUG                        (0x010848)
-#define   NIC_QSET_SQ_0_7_CNM_CHG              (0x010860)
 #define   NIC_QSET_SQ_0_7_STAT_0_1             (0x010900)
 
 #define   NIC_QSET_RBDR_0_1_CFG                        (0x010C00)
index d2d8ef270142dbd3ec1fed3d019b106e6906cce4..ad4fddb5542160b4512643cde5228f7a9d28b2e0 100644 (file)
@@ -382,7 +382,10 @@ static void nicvf_get_regs(struct net_device *dev,
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
-               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+               /* Padding: this slot was NIC_QSET_SQ_0_7_CNM_CHG, which
+                * produces bus errors when read, so report 0 instead.
+                */
+               p[i++] = 0;
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
                reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
                p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
index a19e73f11d73c84a829d95980688249c511b21c4..3240349615bdac637238188dc649370e9c86fb78 100644 (file)
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
        struct nicvf *nic = netdev_priv(netdev);
        struct snd_queue *sq;
        struct sq_hdr_subdesc *hdr;
+       struct sq_hdr_subdesc *tso_sqe;
 
        sq = &nic->qs->sq[cqe_tx->sq_idx];
 
@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 
        nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
        skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
-       /* For TSO offloaded packets only one SQE will have a valid SKB */
        if (skb) {
+               /* Check for dummy descriptor used for HW TSO offload on 88xx */
+               if (hdr->dont_send) {
+                       /* Get actual TSO descriptors and free them */
+                       tso_sqe =
+                        (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+                       nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+               }
                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
                prefetch(skb);
                dev_consume_skb_any(skb);
                sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
        } else {
-               /* In case of HW TSO, HW sends a CQE for each segment of a TSO
-                * packet instead of a single CQE for the whole TSO packet
-                * transmitted. Each of this CQE points to the same SQE, so
-                * avoid freeing same SQE multiple times.
+               /* In case of SW TSO on 88xx, only the last segment will
+                * have an SKB attached, so just free the SQEs here.
                 */
                if (!nic->hw_tso)
                        nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct net_device *netdev;
        struct nicvf *nic;
        int    err, qcount;
+       u16    sdevid;
 
        err = pci_enable_device(pdev);
        if (err) {
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pass1_silicon(nic->pdev))
                nic->hw_tso = true;
 
+       pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+       if (sdevid == 0xA134)
+               nic->t88 = true;
+
        /* Check if this VF is in QS only mode */
        if (nic->sqs_mode)
                return 0;
index 0ff8e60deccb78a941f73163212a25b23301f871..dda3ea3f3bb66c90c78f1273ed35e7f78635bcb8 100644 (file)
@@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
        return num_edescs + sh->gso_segs;
 }
 
+#define POST_CQE_DESC_COUNT 2
+
 /* Get the number of SQ descriptors needed to xmit this skb */
 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
 {
@@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
                return subdesc_cnt;
        }
 
+       /* Dummy descriptors to get TSO pkt completion notification */
+       if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+               subdesc_cnt += POST_CQE_DESC_COUNT;
+
        if (skb_shinfo(skb)->nr_frags)
                subdesc_cnt += skb_shinfo(skb)->nr_frags;
 
@@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
        struct sq_hdr_subdesc *hdr;
 
        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
-       sq->skbuff[qentry] = (u64)skb;
-
        memset(hdr, 0, SND_QUEUE_DESC_SIZE);
        hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
-       /* Enable notification via CQE after processing SQE */
-       hdr->post_cqe = 1;
-       /* No of subdescriptors following this */
-       hdr->subdesc_cnt = subdesc_cnt;
+
+       if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+               /* post_cqe = 0, to avoid HW posting a CQE for every TSO
+                * segment transmitted on 88xx.
+                */
+               hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+       } else {
+               sq->skbuff[qentry] = (u64)skb;
+               /* Enable notification via CQE after processing SQE */
+               hdr->post_cqe = 1;
+               /* No of subdescriptors following this */
+               hdr->subdesc_cnt = subdesc_cnt;
+       }
        hdr->tot_len = len;
 
        /* Offload checksum calculation to HW */
@@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
        gather->addr = data;
 }
 
+/* Add HDR + IMMEDIATE subdescriptors right after the descriptors of a TSO
+ * packet so that a CQE is posted as a notification for transmission of
+ * the TSO packet.
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+                                           int tso_sqe, struct sk_buff *skb)
+{
+       struct sq_imm_subdesc *imm;
+       struct sq_hdr_subdesc *hdr;
+
+       sq->skbuff[qentry] = (u64)skb;
+
+       hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+       memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+       hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+       /* Enable notification via CQE after processing SQE */
+       hdr->post_cqe = 1;
+       /* There is no packet to transmit here */
+       hdr->dont_send = 1;
+       hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+       hdr->tot_len = 1;
+       /* Actual TSO header SQE index, needed for cleanup */
+       hdr->rsvd2 = tso_sqe;
+
+       qentry = nicvf_get_nxt_sqentry(sq, qentry);
+       imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+       memset(imm, 0, SND_QUEUE_DESC_SIZE);
+       imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+       imm->len = 1;
+}
+
 /* Segment a TSO packet into 'gso_size' segments and append
  * them to SQ for transfer
  */
@@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
 {
        int i, size;
-       int subdesc_cnt;
+       int subdesc_cnt, tso_sqe = 0;
        int sq_num, qentry;
        struct queue_set *qs;
        struct snd_queue *sq;
@@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
        /* Add SQ header subdesc */
        nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
                                 skb, skb->len);
+       tso_sqe = qentry;
 
        /* Add SQ gather subdescs */
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
        }
 
 doorbell:
+       if (nic->t88 && skb_shinfo(skb)->gso_size) {
+               qentry = nicvf_get_nxt_sqentry(sq, qentry);
+               nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+       }
+
        /* make sure all memory stores are done before ringing doorbell */
        smp_wmb();
 
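A worked count for the 88xx marker scheme added above, for a hypothetical TSO skb with a linear header part plus three page fragments, and assuming the base subdescriptor budget (set outside this hunk) covers one HDR plus one gather for the linear data:

        real chain  : 1 HDR (post_cqe = 0) + 1 gather (linear) + 3 gathers (frags) = 5 entries
        marker pair : 1 HDR (dont_send = 1, post_cqe = 1, rsvd2 = real HDR index)
                      + 1 IMMEDIATE = POST_CQE_DESC_COUNT = 2 entries

On the single resulting CQE, nicvf_snd_pkt_handler() frees tso_sqe->subdesc_cnt + 1 = 5 entries for the real chain and hdr->subdesc_cnt + 1 = 2 for the marker pair, exactly the 7 entries the skb consumed.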
index c45de49dc9630d3c211cad693f888e6d50d9079d..c762a8c8c95419e1a7d4fcbb27a1f6b248028b60 100644 (file)
@@ -4335,6 +4335,11 @@ static void cfg_queues(struct adapter *adap)
 #endif
        int ciq_size;
 
+       /* Reduce memory usage in kdump environment, disable all offload.
+        */
+       if (is_kdump_kernel())
+               adap->params.offload = 0;
+
        for_each_port(adap, i)
                n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
 #ifdef CONFIG_CHELSIO_T4_DCB
@@ -4365,11 +4370,6 @@ static void cfg_queues(struct adapter *adap)
        if (q10g > netif_get_num_default_rss_queues())
                q10g = netif_get_num_default_rss_queues();
 
-       /* Reduce memory usage in kdump environment, disable all offload.
-        */
-       if (is_kdump_kernel())
-               adap->params.offload = 0;
-
        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);
 
index d20935dc8399f7edc172d9ced666b7154080d912..4b4f5bc0e2799cdaea3f1be09fc29e07cbb6b559 100644 (file)
@@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 {
        unsigned int size = lstatus & BD_LENGTH_MASK;
        struct page *page = rxb->page;
+       bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
 
        /* Remove the FCS from the packet length */
-       if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+       if (last)
                size -= ETH_FCS_LEN;
 
-       if (likely(first))
+       if (likely(first)) {
                skb_put(skb, size);
-       else
-               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                               rxb->page_offset + RXBUF_ALIGNMENT,
-                               size, GFAR_RXB_TRUESIZE);
+       } else {
+               /* the last fragment's length contains the full frame length */
+               if (last)
+                       size -= skb->len;
+
+               /* in case the last fragment consisted only of the FCS */
+               if (size > 0)
+                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                                       rxb->page_offset + RXBUF_ALIGNMENT,
+                                       size, GFAR_RXB_TRUESIZE);
+       }
 
        /* try reuse page */
        if (unlikely(page_count(page) != 1))
index 373fd094f2f320b5beff6c0194a150c5cc044e0f..6e8a9c8467b9a62f8383b9b692ceabeb9ad1cf42 100644 (file)
@@ -100,7 +100,8 @@ extern const char gfar_driver_version[];
 #define DEFAULT_RX_LFC_THR  16
 #define DEFAULT_LFC_PTVVAL  4
 
-#define GFAR_RXB_SIZE 1536
+/* prevent fragmentation by HW in DSA environments */
+#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
 #define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
                          + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 #define GFAR_RXB_TRUESIZE 2048
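Worked out, the new receive buffer size above is:

        GFAR_RXB_SIZE = roundup(1536 + 8, 64) = roundup(1544, 64) = 1600 bytes

The extra 8 bytes presumably leave room for a per-packet switch tag in DSA setups, and rounding back up to a multiple of 64 keeps the length aligned for the controller, so a maximum-size tagged frame no longer gets split across two buffers by the hardware.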
index ff8b6a468b2487f6db72c3fd37446518371c2def..6ea872287307bd85b13436a42c1d8aacbad05f2d 100644 (file)
@@ -328,9 +328,10 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
 static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
 {
        u32 port;
-       struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
 
        if (ppe_cb->ppe_common_cb) {
+               struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
+
                port = ppe_cb->index;
                dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
        }
index e1370c556a3c3e90aa69e942d927dc78add80bfa..618f18436618945b807c5e08f499dc1a4cd689cf 100644 (file)
@@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
 void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
 {
        struct i40e_client_instance *cdev;
+       int ret = 0;
 
        if (!vsi)
                return;
@@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
                                        "Cannot locate client instance open routine\n");
                                continue;
                        }
-                       cdev->client->ops->open(&cdev->lan_info, cdev->client);
+                       if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+                                      &cdev->state))) {
+                               ret = cdev->client->ops->open(&cdev->lan_info,
+                                                             cdev->client);
+                               if (!ret)
+                                       set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+                                               &cdev->state);
+                       }
                }
        }
        mutex_unlock(&i40e_client_instance_mutex);
@@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
  * i40e_client_add_instance - add a client instance struct to the instance list
  * @pf: pointer to the board struct
  * @client: pointer to a client struct in the client list.
+ * @existing: set to true if an instance for this client already existed
  *
- * Returns cdev ptr on success, NULL on failure
+ * Returns cdev ptr on success (or if one already exists), NULL on failure
  **/
 static
 struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
-                                                     struct i40e_client *client)
+                                                    struct i40e_client *client,
+                                                    bool *existing)
 {
        struct i40e_client_instance *cdev;
        struct netdev_hw_addr *mac = NULL;
@@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
        mutex_lock(&i40e_client_instance_mutex);
        list_for_each_entry(cdev, &i40e_client_instances, list) {
                if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
-                       cdev = NULL;
+                       *existing = true;
                        goto out;
                }
        }
@@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
 {
        struct i40e_client_instance *cdev;
        struct i40e_client *client;
+       bool existing = false;
        int ret = 0;
 
        if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
@@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
                        /* check if L2 VSI is up, if not we are not ready */
                        if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
                                continue;
+               } else {
+                       dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
+                                client->name);
                }
 
                /* Add the client instance to the instance list */
-               cdev = i40e_client_add_instance(pf, client);
+               cdev = i40e_client_add_instance(pf, client, &existing);
                if (!cdev)
                        continue;
 
-               /* Also up the ref_cnt of no. of instances of this client */
-               atomic_inc(&client->ref_cnt);
-               dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
-                        client->name, pf->hw.pf_id,
-                        pf->hw.bus.device, pf->hw.bus.func);
+               if (!existing) {
+                       /* Also up the ref_cnt for no. of instances of this
+                        * client.
+                        */
+                       atomic_inc(&client->ref_cnt);
+                       dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+                                client->name, pf->hw.pf_id,
+                                pf->hw.bus.device, pf->hw.bus.func);
+               }
 
                /* Send an Open request to the client */
                atomic_inc(&cdev->ref_cnt);
@@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
                 pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
 
        /* Since in some cases register may have happened before a device gets
-        * added, we can schedule a subtask to go initiate the clients.
+        * added, we can schedule a subtask to initiate the clients if
+        * they can be launched at probe time.
         */
        pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
        i40e_service_event_schedule(pf);
index c6ac7a61812fbffed95257ec271ed19218b9549a..d0b3a1bb82cac9ec9fbb05abc63d2a7f44edb45b 100644 (file)
@@ -5113,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                                       DCB_CAP_DCBX_VER_IEEE;
 
                        pf->flags |= I40E_FLAG_DCB_CAPABLE;
-                       /* Enable DCB tagging only when more than one TC */
+                       /* Enable DCB tagging only when more than one TC
+                        * is configured; otherwise explicitly disable it.
+                        */
                        if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                                pf->flags |= I40E_FLAG_DCB_ENABLED;
+                       else
+                               pf->flags &= ~I40E_FLAG_DCB_ENABLED;
                        dev_dbg(&pf->pdev->dev,
                                "DCBX offload is supported for this PF.\n");
                }
@@ -5431,7 +5435,6 @@ int i40e_open(struct net_device *netdev)
        wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
 
        udp_tunnel_get_rx_info(netdev);
-       i40e_notify_client_of_netdev_open(vsi);
 
        return 0;
 }
@@ -5717,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        u8 type;
 
        /* Not DCB capable or capability disabled */
-       if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+       if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
                return ret;
 
        /* Ignore if event is not for Nearest Bridge */
@@ -7897,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 #endif
                                       I40E_FLAG_RSS_ENABLED    |
                                       I40E_FLAG_DCB_CAPABLE    |
+                                      I40E_FLAG_DCB_ENABLED    |
                                       I40E_FLAG_SRIOV_ENABLED  |
                                       I40E_FLAG_FD_SB_ENABLED  |
                                       I40E_FLAG_FD_ATR_ENABLED |
@@ -10503,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_CAPABLE    |
+                              I40E_FLAG_DCB_ENABLED    |
                               I40E_FLAG_SRIOV_ENABLED  |
                               I40E_FLAG_VMDQ_ENABLED);
        } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@@ -10526,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                /* Not enough queues for all TCs */
                if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
                    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
-                       pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+                       pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+                                       I40E_FLAG_DCB_ENABLED);
                        dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
                }
                pf->num_lan_qps = max_t(int, pf->rss_size_max,
@@ -10923,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        err = i40e_init_pf_dcb(pf);
        if (err) {
                dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
-               pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+               pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
                /* Continue without DCB enabled */
        }
 #endif /* CONFIG_I40E_DCB */
index b4217f30e89c7bbbc0e3c3a2e6c2252cafa59706..c47b605e86519d1e444212b08dcb96453260524e 100644 (file)
@@ -2958,8 +2958,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        }
 
        /* was that the last pool using this rar? */
-       if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+       if (mpsar_lo == 0 && mpsar_hi == 0 &&
+           rar != 0 && rar != hw->mac.san_mac_rar_index)
                hw->mac.ops.clear_rar(hw, rar);
+
        return 0;
 }
 
index f1609542adf19a75f71cf3517deb578df5b57d1b..d9199151a83e9bbe6e380bea2e724fa341dab100 100644 (file)
@@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats {
        MTK_ETHTOOL_STAT(rx_flow_control_packets),
 };
 
+static const char * const mtk_clks_source_name[] = {
+       "ethif", "esw", "gp1", "gp2"
+};
+
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
 {
        __raw_writel(val, eth->base + reg);
@@ -291,7 +295,7 @@ err_phy:
 static int mtk_mdio_init(struct mtk_eth *eth)
 {
        struct device_node *mii_np;
-       int err;
+       int ret;
 
        mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
        if (!mii_np) {
@@ -300,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        }
 
        if (!of_device_is_available(mii_np)) {
-               err = 0;
+               ret = -ENODEV;
                goto err_put_node;
        }
 
-       eth->mii_bus = mdiobus_alloc();
+       eth->mii_bus = devm_mdiobus_alloc(eth->dev);
        if (!eth->mii_bus) {
-               err = -ENOMEM;
+               ret = -ENOMEM;
                goto err_put_node;
        }
 
@@ -317,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        eth->mii_bus->parent = eth->dev;
 
        snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
-       err = of_mdiobus_register(eth->mii_bus, mii_np);
-       if (err)
-               goto err_free_bus;
-
-       return 0;
-
-err_free_bus:
-       mdiobus_free(eth->mii_bus);
+       ret = of_mdiobus_register(eth->mii_bus, mii_np);
 
 err_put_node:
        of_node_put(mii_np);
-       eth->mii_bus = NULL;
-       return err;
+       return ret;
 }
 
 static void mtk_mdio_cleanup(struct mtk_eth *eth)
@@ -338,8 +334,6 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
                return;
 
        mdiobus_unregister(eth->mii_bus);
-       of_node_put(eth->mii_bus->dev.of_node);
-       mdiobus_free(eth->mii_bus);
 }
 
 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
@@ -588,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        dma_addr_t mapped_addr;
        unsigned int nr_frags;
        int i, n_desc = 1;
-       u32 txd4 = 0;
+       u32 txd4 = 0, fport;
 
        itxd = ring->next_free;
        if (itxd == ring->last_free)
                return -ENOMEM;
 
        /* set the forward port */
-       txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+       fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+       txd4 |= fport;
 
        tx_buf = mtk_desc_to_tx_buf(ring, itxd);
        memset(tx_buf, 0, sizeof(*tx_buf));
@@ -653,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                        WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
                                               TX_DMA_PLEN0(frag_map_size) |
                                               last_frag * TX_DMA_LS0));
-                       WRITE_ONCE(txd->txd4, 0);
+                       WRITE_ONCE(txd->txd4, fport);
 
                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf = mtk_desc_to_tx_buf(ring, txd);
@@ -865,7 +860,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                /* receive data */
                skb = build_skb(data, ring->frag_size);
                if (unlikely(!skb)) {
-                       put_page(virt_to_head_page(new_data));
+                       skb_free_frag(new_data);
                        netdev->stats.rx_dropped++;
                        goto release_desc;
                }
@@ -1506,10 +1501,7 @@ static void mtk_uninit(struct net_device *dev)
        struct mtk_eth *eth = mac->hw;
 
        phy_disconnect(mac->phy_dev);
-       mtk_mdio_cleanup(eth);
        mtk_irq_disable(eth, ~0);
-       free_irq(eth->irq[1], dev);
-       free_irq(eth->irq[2], dev);
 }
 
 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1813,6 +1805,7 @@ static int mtk_probe(struct platform_device *pdev)
        if (!eth)
                return -ENOMEM;
 
+       eth->dev = &pdev->dev;
        eth->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(eth->base))
                return PTR_ERR(eth->base);
@@ -1847,21 +1840,21 @@ static int mtk_probe(struct platform_device *pdev)
                        return -ENXIO;
                }
        }
+       for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+               eth->clks[i] = devm_clk_get(eth->dev,
+                                           mtk_clks_source_name[i]);
+               if (IS_ERR(eth->clks[i])) {
+                       if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+                               return -EPROBE_DEFER;
+                       return -ENODEV;
+               }
+       }
 
-       eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
-       eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
-       eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
-       eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
-       if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
-           IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
-               return -ENODEV;
-
-       clk_prepare_enable(eth->clk_ethif);
-       clk_prepare_enable(eth->clk_esw);
-       clk_prepare_enable(eth->clk_gp1);
-       clk_prepare_enable(eth->clk_gp2);
+       clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+       clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+       clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+       clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
 
-       eth->dev = &pdev->dev;
        eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
        INIT_WORK(&eth->pending_work, mtk_pending_work);
 
@@ -1903,15 +1896,24 @@ err_free_dev:
 static int mtk_remove(struct platform_device *pdev)
 {
        struct mtk_eth *eth = platform_get_drvdata(pdev);
+       int i;
 
-       clk_disable_unprepare(eth->clk_ethif);
-       clk_disable_unprepare(eth->clk_esw);
-       clk_disable_unprepare(eth->clk_gp1);
-       clk_disable_unprepare(eth->clk_gp2);
+       /* stop all devices to make sure that dma is properly shut down */
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->netdev[i])
+                       continue;
+               mtk_stop(eth->netdev[i]);
+       }
+
+       clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+       clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+       clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+       clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
 
        netif_napi_del(&eth->tx_napi);
        netif_napi_del(&eth->rx_napi);
        mtk_cleanup(eth);
+       mtk_mdio_cleanup(eth);
        platform_set_drvdata(pdev, NULL);
 
        return 0;
index f82e3acb947b47ab94b9c684cf0ae9d58c6ee052..6e1ade7a25c55364be9bc05492e3b607b45640c7 100644 (file)
@@ -290,6 +290,17 @@ enum mtk_tx_flags {
        MTK_TX_FLAGS_PAGE0      = 0x02,
 };
 
+/* This enum defines the indexes into the clock array; its order must match
+ * the order of the names in mtk_clks_source_name[].
+ */
+enum mtk_clks_map {
+       MTK_CLK_ETHIF,
+       MTK_CLK_ESW,
+       MTK_CLK_GP1,
+       MTK_CLK_GP2,
+       MTK_CLK_MAX
+};
+
 /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
  *                     by the TX descriptors
  * @skb:               The SKB pointer of the packet being sent
@@ -370,10 +381,7 @@ struct mtk_rx_ring {
  * @scratch_ring:      Newer SoCs need memory for a second HW managed TX ring
  * @phy_scratch_ring:  physical address of scratch_ring
  * @scratch_head:      The scratch memory that scratch_ring points to.
- * @clk_ethif:         The ethif clock
- * @clk_esw:           The switch clock
- * @clk_gp1:           The gmac1 clock
- * @clk_gp2:           The gmac2 clock
+ * @clks:              clock array for all clocks required
  * @mii_bus:           If there is a bus we need to create an instance for it
  * @pending_work:      The workqueue used to reset the dma ring
  */
@@ -400,10 +408,8 @@ struct mtk_eth {
        struct mtk_tx_dma               *scratch_ring;
        dma_addr_t                      phy_scratch_ring;
        void                            *scratch_head;
-       struct clk                      *clk_ethif;
-       struct clk                      *clk_esw;
-       struct clk                      *clk_gp1;
-       struct clk                      *clk_gp2;
+       struct clk                      *clks[MTK_CLK_MAX];
+
        struct mii_bus                  *mii_bus;
        struct work_struct              pending_work;
 };
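The clock rework above indexes eth->clks[] by enum mtk_clks_map while the names come from mtk_clks_source_name[] at the same positions, so the two tables have to stay in lock step. A defensive compile-time check, not part of this patch, could be dropped into mtk_probe():

        /* hypothetical addition: fail the build if the enum and the clock-name
         * table ever drift apart
         */
        BUILD_BUG_ON(ARRAY_SIZE(mtk_clks_source_name) != MTK_CLK_MAX);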
index 99c6bbdff50115cb4fcde86ffdc6dc4d35e43030..b04760a5034b9c84038c0bbb44fe80035bc1e0f7 100644 (file)
@@ -94,7 +94,7 @@ static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
                *cap = true;
                break;
        case DCB_CAP_ATTR_DCBX:
-               *cap = priv->cee_params.dcbx_cap;
+               *cap = priv->dcbx_cap;
                break;
        case DCB_CAP_ATTR_PFC_TCS:
                *cap = 1 <<  mlx4_max_tc(priv->mdev->dev);
@@ -111,14 +111,14 @@ static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       return priv->cee_params.dcb_cfg.pfc_state;
+       return priv->cee_config.pfc_state;
 }
 
 static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       priv->cee_params.dcb_cfg.pfc_state = state;
+       priv->cee_config.pfc_state = state;
 }
 
 static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -126,7 +126,7 @@ static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
+       *setting = priv->cee_config.dcb_pfc[priority];
 }
 
 static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
@@ -134,8 +134,8 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
-       priv->cee_params.dcb_cfg.pfc_state = true;
+       priv->cee_config.dcb_pfc[priority] = setting;
+       priv->cee_config.pfc_state = true;
 }
 
 static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -157,13 +157,11 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
-       int err = 0;
 
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-               return -EINVAL;
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+               return 1;
 
-       if (dcb_cfg->pfc_state) {
+       if (priv->cee_config.pfc_state) {
                int tc;
 
                priv->prof->rx_pause = 0;
@@ -171,7 +169,7 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
                for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
                        u8 tc_mask = 1 << tc;
 
-                       switch (dcb_cfg->tc_config[tc].dcb_pfc) {
+                       switch (priv->cee_config.dcb_pfc[tc]) {
                        case pfc_disabled:
                                priv->prof->tx_ppp &= ~tc_mask;
                                priv->prof->rx_ppp &= ~tc_mask;
@@ -199,15 +197,17 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
                en_dbg(DRV, priv, "Set pfc off\n");
        }
 
-       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-                                   priv->rx_skb_size + ETH_FCS_LEN,
-                                   priv->prof->tx_pause,
-                                   priv->prof->tx_ppp,
-                                   priv->prof->rx_pause,
-                                   priv->prof->rx_ppp);
-       if (err)
+       if (mlx4_SET_PORT_general(mdev->dev, priv->port,
+                                 priv->rx_skb_size + ETH_FCS_LEN,
+                                 priv->prof->tx_pause,
+                                 priv->prof->tx_ppp,
+                                 priv->prof->rx_pause,
+                                 priv->prof->rx_ppp)) {
                en_err(priv, "Failed setting pause params\n");
-       return err;
+               return 1;
+       }
+
+       return 0;
 }
 
 static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
@@ -225,7 +225,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int num_tcs = 0;
 
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 1;
 
        if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
@@ -238,7 +238,10 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
                priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
        }
 
-       return mlx4_en_setup_tc(dev, num_tcs);
+       if (mlx4_en_setup_tc(dev, num_tcs))
+               return 1;
+
+       return 0;
 }
 
 /* On success returns a non-zero 802.1p user priority bitmap
@@ -252,7 +255,7 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
                                .selector = idtype,
                                .protocol = id,
                             };
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 0;
 
        return dcb_getapp(netdev, &app);
@@ -264,7 +267,7 @@ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct dcb_app app;
 
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return -EINVAL;
 
        memset(&app, 0, sizeof(struct dcb_app));
@@ -433,7 +436,7 @@ static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
-       return priv->cee_params.dcbx_cap;
+       return priv->dcbx_cap;
 }
 
 static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -442,7 +445,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
        struct ieee_ets ets = {0};
        struct ieee_pfc pfc = {0};
 
-       if (mode == priv->cee_params.dcbx_cap)
+       if (mode == priv->dcbx_cap)
                return 0;
 
        if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -451,7 +454,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
            !(mode & DCB_CAP_DCBX_HOST))
                goto err;
 
-       priv->cee_params.dcbx_cap = mode;
+       priv->dcbx_cap = mode;
 
        ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
        pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
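On the return-value changes above: the CEE hooks in struct dcbnl_rtnl_ops (setall, setstate) return a u8 status, and by the convention used in existing drivers 0 reports success while any non-zero value reports failure, so returning -EINVAL or a negative err from them was never meaningful. A minimal sketch of the convention, with do_hw_update() standing in as a hypothetical helper for whatever the driver actually programs:

        #include <linux/netdevice.h>

        static int do_hw_update(struct net_device *dev);        /* hypothetical */

        /* sketch only: dcbnl CEE hooks report status as a u8, not an errno */
        static u8 example_dcbnl_set_all(struct net_device *dev)
        {
                if (do_hw_update(dev))
                        return 1;       /* non-zero == failure */
                return 0;               /* success */
        }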
index 4198e9bf89d044733e83596dec346dbd0c6a38a9..fedb829276f4e9835e4b74f97451fcdaeaf78a6a 100644 (file)
@@ -71,10 +71,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
                if (up) {
-                       priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+                       if (priv->dcbx_cap)
+                               priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
                } else {
                        priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
-                       priv->cee_params.dcb_cfg.pfc_state = false;
+                       priv->cee_config.pfc_state = false;
                }
        }
 #endif /* CONFIG_MLX4_EN_DCB */
@@ -3048,9 +3049,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        struct mlx4_en_priv *priv;
        int i;
        int err;
-#ifdef CONFIG_MLX4_EN_DCB
-       struct tc_configuration *tc;
-#endif
 
        dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
                                 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -3117,16 +3115,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
-               priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
-                                           DCB_CAP_DCBX_HOST |
-                                           DCB_CAP_DCBX_VER_IEEE;
+               priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
+                       DCB_CAP_DCBX_VER_IEEE;
                priv->flags |= MLX4_EN_DCB_ENABLED;
-               priv->cee_params.dcb_cfg.pfc_state = false;
+               priv->cee_config.pfc_state = false;
 
-               for (i = 0; i < MLX4_EN_NUM_UP; i++) {
-                       tc = &priv->cee_params.dcb_cfg.tc_config[i];
-                       tc->dcb_pfc = pfc_disabled;
-               }
+               for (i = 0; i < MLX4_EN_NUM_UP; i++)
+                       priv->cee_config.dcb_pfc[i] = pfc_disabled;
 
                if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
                        dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
index 9df87ca0515a2b9d0b523c176c88e44e2ab3fe60..e2509bba3e7c31dd0db45734a8a274f956cccaa2 100644 (file)
@@ -818,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
                                  &inline_ok, &fragptr);
        if (unlikely(!real_size))
-               goto tx_drop;
+               goto tx_drop_count;
 
        /* Align descriptor to TXBB size */
        desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -826,7 +826,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
                if (netif_msg_tx_err(priv))
                        en_warn(priv, "Oversized header or SG list\n");
-               goto tx_drop;
+               goto tx_drop_count;
        }
 
        bf_ok = ring->bf_enabled;
@@ -1071,9 +1071,10 @@ tx_drop_unmap:
                               PCI_DMA_TODEVICE);
        }
 
+tx_drop_count:
+       ring->tx_dropped++;
 tx_drop:
        dev_kfree_skb_any(skb);
-       ring->tx_dropped++;
        return NETDEV_TX_OK;
 }
 
@@ -1106,7 +1107,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
                goto tx_drop;
 
        if (mlx4_en_is_tx_ring_full(ring))
-               goto tx_drop;
+               goto tx_drop_count;
 
        /* fetch ring->cons far ahead before needing it to avoid stall */
        ring_cons = READ_ONCE(ring->cons);
@@ -1176,7 +1177,8 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
 
        return NETDEV_TX_OK;
 
-tx_drop:
+tx_drop_count:
        ring->tx_dropped++;
+tx_drop:
        return NETDEV_TX_BUSY;
 }
index 2c2913dcae980b0a665e54b128554fc168b364b7..9099dbd04951a0e18fc3d33357f1e8d5c25d6909 100644 (file)
@@ -482,20 +482,10 @@ enum dcb_pfc_type {
        pfc_enabled_rx
 };
 
-struct tc_configuration {
-       enum dcb_pfc_type  dcb_pfc;
-};
-
 struct mlx4_en_cee_config {
        bool    pfc_state;
-       struct  tc_configuration tc_config[MLX4_EN_NUM_UP];
+       enum    dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
 };
-
-struct mlx4_en_cee_params {
-       u8 dcbx_cap;
-       struct mlx4_en_cee_config dcb_cfg;
-};
-
 #endif
 
 struct ethtool_flow_id {
@@ -624,7 +614,8 @@ struct mlx4_en_priv {
        struct ieee_ets ets;
        u16 maxrate[IEEE_8021QAZ_MAX_TCS];
        enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
-       struct mlx4_en_cee_params cee_params;
+       struct mlx4_en_cee_config cee_config;
+       u8 dcbx_cap;
 #endif
 #ifdef CONFIG_RFS_ACCEL
        spinlock_t filters_lock;
index 3d2095e5c61c594c167507ade0cd109e4cd4ae6a..c5b2064297a19b0dde2640764acb4216343633f1 100644 (file)
@@ -52,7 +52,7 @@
 
 #define MLX4_FLAG_V_IGNORE_FCS_MASK            0x2
 #define MLX4_IGNORE_FCS_MASK                   0x1
-#define MLNX4_TX_MAX_NUMBER                    8
+#define MLX4_TC_MAX_NUMBER                     8
 
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
 {
@@ -2022,7 +2022,7 @@ int mlx4_max_tc(struct mlx4_dev *dev)
        u8 num_tc = dev->caps.max_tc_eth;
 
        if (!num_tc)
-               num_tc = MLNX4_TX_MAX_NUMBER;
+               num_tc = MLX4_TC_MAX_NUMBER;
 
        return num_tc;
 }
index d6e2a1cae19ae2d6d636f1d306bca8e607106095..c2ec01a22d55ad9c2ff8e1b73bc5696e5edbe15a 100644 (file)
@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
        return cmd->cmd_buf + (idx << cmd->log_stride);
 }
 
-static u8 xor8_buf(void *buf, int len)
+static u8 xor8_buf(void *buf, size_t offset, int len)
 {
        u8 *ptr = buf;
        u8 sum = 0;
        int i;
+       int end = len + offset;
 
-       for (i = 0; i < len; i++)
+       for (i = offset; i < end; i++)
                sum ^= ptr[i];
 
        return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
 
 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
 {
-       if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+       size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+       int xor_len = sizeof(*block) - sizeof(block->data) - 1;
+
+       if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
                return -EINVAL;
 
-       if (xor8_buf(block, sizeof(*block)) != 0xff)
+       if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
                return -EINVAL;
 
        return 0;
 }
 
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
-                          int csum)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block)
 {
-       block->token = token;
-       if (csum) {
-               block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
-                                           sizeof(block->data) - 2);
-               block->sig = ~xor8_buf(block, sizeof(*block) - 1);
-       }
+       int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
+       size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+
+       block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
+       block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
 }
 
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg)
 {
        struct mlx5_cmd_mailbox *next = msg->next;
-
-       while (next) {
-               calc_block_sig(next->buf, token, csum);
+       int size = msg->len;
+       int blen = size - min_t(int, sizeof(msg->first.data), size);
+       int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+               / MLX5_CMD_DATA_BLOCK_SIZE;
+       int i = 0;
+
+       for (i = 0; i < n && next; i++)  {
+               calc_block_sig(next->buf);
                next = next->next;
        }
 }
 
 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 {
-       ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
-       calc_chain_sig(ent->in, ent->token, csum);
-       calc_chain_sig(ent->out, ent->token, csum);
+       ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
+       if (csum) {
+               calc_chain_sig(ent->in);
+               calc_chain_sig(ent->out);
+       }
 }
 
 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
        struct mlx5_cmd_mailbox *next = ent->out->next;
        int err;
        u8 sig;
+       int size = ent->out->len;
+       int blen = size - min_t(int, sizeof(ent->out->first.data), size);
+       int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+               / MLX5_CMD_DATA_BLOCK_SIZE;
+       int i = 0;
 
-       sig = xor8_buf(ent->lay, sizeof(*ent->lay));
+       sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
        if (sig != 0xff)
                return -EINVAL;
 
-       while (next) {
+       for (i = 0; i < n && next; i++) {
                err = verify_block_sig(next->buf);
                if (err)
                        return err;
@@ -656,7 +670,6 @@ static void cmd_work_handler(struct work_struct *work)
                spin_unlock_irqrestore(&cmd->alloc_lock, flags);
        }
 
-       ent->token = alloc_token(cmd);
        cmd->ent_arr[ent->idx] = ent;
        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
@@ -766,7 +779,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                           struct mlx5_cmd_msg *out, void *uout, int uout_size,
                           mlx5_cmd_cbk_t callback,
-                          void *context, int page_queue, u8 *status)
+                          void *context, int page_queue, u8 *status,
+                          u8 token)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
@@ -783,6 +797,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
        if (IS_ERR(ent))
                return PTR_ERR(ent);
 
+       ent->token = token;
+
        if (!callback)
                init_completion(&ent->done);
 
@@ -854,7 +870,8 @@ static const struct file_operations fops = {
        .write  = dbg_write,
 };
 
-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
+                           u8 token)
 {
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
@@ -880,6 +897,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
                memcpy(block->data, from, copy);
                from += copy;
                size -= copy;
+               block->token = token;
                next = next->next;
        }
 
@@ -949,7 +967,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
 }
 
 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
-                                              gfp_t flags, int size)
+                                              gfp_t flags, int size,
+                                              u8 token)
 {
        struct mlx5_cmd_mailbox *tmp, *head = NULL;
        struct mlx5_cmd_prot_block *block;
@@ -978,6 +997,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
                tmp->next = head;
                block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
                block->block_num = cpu_to_be32(n - i - 1);
+               block->token = token;
                head = tmp;
        }
        msg->next = head;
@@ -1352,7 +1372,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
        }
 
        if (IS_ERR(msg))
-               msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
+               msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
 
        return msg;
 }
@@ -1377,6 +1397,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
        int err;
        u8 status = 0;
        u32 drv_synd;
+       u8 token;
 
        if (pci_channel_offline(dev->pdev) ||
            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1395,20 +1416,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                return err;
        }
 
-       err = mlx5_copy_to_msg(inb, in, in_size);
+       token = alloc_token(&dev->cmd);
+
+       err = mlx5_copy_to_msg(inb, in, in_size, token);
        if (err) {
                mlx5_core_warn(dev, "err %d\n", err);
                goto out_in;
        }
 
-       outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
+       outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
        if (IS_ERR(outb)) {
                err = PTR_ERR(outb);
                goto out_in;
        }
 
        err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
-                             pages_queue, &status);
+                             pages_queue, &status, token);
        if (err)
                goto out_out;
 
@@ -1476,7 +1499,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
        INIT_LIST_HEAD(&cmd->cache.med.head);
 
        for (i = 0; i < NUM_LONG_LISTS; i++) {
-               msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
+               msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
                if (IS_ERR(msg)) {
                        err = PTR_ERR(msg);
                        goto ex_err;
@@ -1486,7 +1509,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
        }
 
        for (i = 0; i < NUM_MED_LISTS; i++) {
-               msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
+               msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
                if (IS_ERR(msg)) {
                        err = PTR_ERR(msg);
                        goto ex_err;
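The signature scheme the cmd.c hunks above keep relying on is a simple xor-complement: the sig byte is written as the bitwise complement of the XOR of every other covered byte, so XOR-ing over the whole covered area, signature included, must come out as 0xff (which is exactly what verify_block_sig() checks). The userspace snippet below demonstrates only that property and deliberately uses a plain byte buffer rather than the driver's mailbox layout:

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        /* XOR of buf[offset .. offset + len), mirroring the driver's xor8_buf() */
        static uint8_t xor8(const uint8_t *buf, size_t offset, size_t len)
        {
                uint8_t sum = 0;
                size_t i;

                for (i = offset; i < offset + len; i++)
                        sum ^= buf[i];
                return sum;
        }

        int main(void)
        {
                uint8_t block[16];

                memset(block, 0xab, sizeof(block));
                block[7] = 0x3c;                        /* arbitrary payload byte */
                block[15] = ~xor8(block, 0, 15);        /* sign the block */
                printf("check = 0x%02x\n", xor8(block, 0, 16)); /* prints 0xff */
                return 0;
        }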
index 1b495efa7490d2d1edb9ccc3f93de4f74ad52d7f..bf722aa88cf05b528518572f7c9971dc0cd414b7 100644 (file)
 #define MLX5_MPWRQ_PAGES_PER_WQE               BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 #define MLX5_MPWRQ_STRIDES_PER_PAGE            (MLX5_MPWRQ_NUM_STRIDES >> \
                                                 MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
-                                  BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+
+#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
+       (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+
 #define MLX5_UMR_ALIGN                         (2048)
 #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD      (128)
 
@@ -219,9 +223,8 @@ struct mlx5e_tstamp {
 };
 
 enum {
-       MLX5E_RQ_STATE_POST_WQES_ENABLE,
+       MLX5E_RQ_STATE_FLUSH,
        MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
-       MLX5E_RQ_STATE_FLUSH_TIMEOUT,
        MLX5E_RQ_STATE_AM,
 };
 
@@ -304,6 +307,7 @@ struct mlx5e_rq {
 
        unsigned long          state;
        int                    ix;
+       u32                    mpwqe_mtt_offset;
 
        struct mlx5e_rx_am     am; /* Adaptive Moderation */
 
@@ -365,9 +369,8 @@ struct mlx5e_sq_dma {
 };
 
 enum {
-       MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+       MLX5E_SQ_STATE_FLUSH,
        MLX5E_SQ_STATE_BF_ENABLE,
-       MLX5E_SQ_STATE_TX_TIMEOUT,
 };
 
 struct mlx5e_ico_wqe_info {
@@ -698,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -814,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
                     MLX5E_MAX_NUM_CHANNELS);
 }
 
-static inline int mlx5e_get_mtt_octw(int npages)
-{
-       return ALIGN(npages, 8) / 2;
-}
-
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
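To see what the new MLX5E_VALID_NUM_MTTS() guard protects against, take a hypothetical configuration of 8 channels, 1024 multi-packet WQEs per RQ and 16 pages per WQE (the real per-WQE page count is defined outside this hunk):

        MLX5E_REQUIRED_MTTS(8, 1024) = 8 * 1024 * ALIGN(16, 8) = 131072 MTT entries
        MLX5_MTT_OCTW(131072)        = ALIGN(131072, 8) / 2    = 65536 octwords

65536 exceeds U16_MAX (65535), so the guard fails and the ethtool ring-size path later in this pull rejects the request instead of programming an MTT count that apparently cannot be expressed in the 16-bit octword field.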
index 673043ccd76cfe8d66b2ff1fbbeaf909871fe387..9cce153e10359a305d6d44136890a68b4854221a 100644 (file)
@@ -139,7 +139,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
        struct mlx5e_tir *tir;
        void *in;
        int inlen;
-       int err;
+       int err = 0;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
@@ -151,10 +151,11 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
        list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
                err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
                if (err)
-                       return err;
+                       goto out;
        }
 
+out:
        kvfree(in);
 
-       return 0;
+       return err;
 }
index caa9a3ccc3f38ae4acd8c58db845510784c099b0..762af16ed021b4815779728eae427d1cf593baf9 100644 (file)
@@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
        return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
 }
 
-static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
+static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+                                   struct ieee_ets *ets)
 {
        int bw_sum = 0;
        int i;
 
        /* Validate Priority */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
+               if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
+                       netdev_err(netdev,
+                                  "Failed to validate ETS: priority value greater than max(%d)\n",
+                                   MLX5E_MAX_PRIORITY);
                        return -EINVAL;
+               }
        }
 
        /* Validate Bandwidth Sum */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
-                       if (!ets->tc_tx_bw[i])
+                       if (!ets->tc_tx_bw[i]) {
+                               netdev_err(netdev,
+                                          "Failed to validate ETS: BW 0 is illegal\n");
                                return -EINVAL;
+                       }
 
                        bw_sum += ets->tc_tx_bw[i];
                }
        }
 
-       if (bw_sum != 0 && bw_sum != 100)
+       if (bw_sum != 0 && bw_sum != 100) {
+               netdev_err(netdev,
+                          "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
+       }
        return 0;
 }
 
@@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
 
-       err = mlx5e_dbcnl_validate_ets(ets);
+       err = mlx5e_dbcnl_validate_ets(netdev, ets);
        if (err)
                return err;
 
index 4a3757e60441693e0ffcd9ab9d119ede7e9981c8..7a346bb2ed0064c4db51404842a514af513a7f5b 100644 (file)
@@ -331,7 +331,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
-                                                         pport_per_prio_pfc_stats_desc, 0);
+                                                         pport_per_prio_pfc_stats_desc, i);
                }
        }
 
@@ -352,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                                                                   sq_stats_desc, j);
 }
 
+static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
+                                   int num_wqe)
+{
+       int packets_per_wqe;
+       int stride_size;
+       int num_strides;
+       int wqe_size;
+
+       if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+               return num_wqe;
+
+       stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+       num_strides = 1 << priv->params.mpwqe_log_num_strides;
+       wqe_size = stride_size * num_strides;
+
+       packets_per_wqe = wqe_size /
+                         ALIGN(ETH_DATA_LEN, stride_size);
+       return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
+}
+
+static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
+                                   int num_packets)
+{
+       int packets_per_wqe;
+       int stride_size;
+       int num_strides;
+       int wqe_size;
+       int num_wqes;
+
+       if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+               return num_packets;
+
+       stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+       num_strides = 1 << priv->params.mpwqe_log_num_strides;
+       wqe_size = stride_size * num_strides;
+
+       num_packets = (1 << order_base_2(num_packets));
+
+       packets_per_wqe = wqe_size /
+                         ALIGN(ETH_DATA_LEN, stride_size);
+       num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
+       return 1 << (order_base_2(num_wqes));
+}
+
 static void mlx5e_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *param)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int rq_wq_type = priv->params.rq_wq_type;
 
-       param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
+       param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+                                                        1 << mlx5_max_log_rq_size(rq_wq_type));
        param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
-       param->rx_pending     = 1 << priv->params.log_rq_size;
+       param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+                                                    1 << priv->params.log_rq_size);
        param->tx_pending     = 1 << priv->params.log_sq_size;
 }
 
@@ -370,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
        struct mlx5e_priv *priv = netdev_priv(dev);
        bool was_opened;
        int rq_wq_type = priv->params.rq_wq_type;
+       u32 rx_pending_wqes;
+       u32 min_rq_size;
+       u32 max_rq_size;
        u16 min_rx_wqes;
        u8 log_rq_size;
        u8 log_sq_size;
+       u32 num_mtts;
        int err = 0;
 
        if (param->rx_jumbo_pending) {
@@ -385,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                            __func__);
                return -EINVAL;
        }
-       if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
+
+       min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+                                              1 << mlx5_min_log_rq_size(rq_wq_type));
+       max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+                                              1 << mlx5_max_log_rq_size(rq_wq_type));
+       rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
+                                                  param->rx_pending);
+
+       if (param->rx_pending < min_rq_size) {
                netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
                            __func__, param->rx_pending,
-                           1 << mlx5_min_log_rq_size(rq_wq_type));
+                           min_rq_size);
                return -EINVAL;
        }
-       if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
+       if (param->rx_pending > max_rq_size) {
                netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
                            __func__, param->rx_pending,
-                           1 << mlx5_max_log_rq_size(rq_wq_type));
+                           max_rq_size);
                return -EINVAL;
        }
+
+       num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
+                                      rx_pending_wqes);
+       if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+           !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+               netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+                           __func__, param->rx_pending);
+               return -EINVAL;
+       }
+
        if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
                netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
                            __func__, param->tx_pending,
@@ -410,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                return -EINVAL;
        }
 
-       log_rq_size = order_base_2(param->rx_pending);
+       log_rq_size = order_base_2(rx_pending_wqes);
        log_sq_size = order_base_2(param->tx_pending);
-       min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
+       min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
 
        if (log_rq_size == priv->params.log_rq_size &&
            log_sq_size == priv->params.log_sq_size &&
@@ -454,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev,
        unsigned int count = ch->combined_count;
        bool arfs_enabled;
        bool was_opened;
+       u32 num_mtts;
        int err = 0;
 
        if (!count) {
@@ -472,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev,
                return -EINVAL;
        }
 
+       num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
+       if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+           !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+               netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
+                           __func__, count);
+               return -EINVAL;
+       }
+
        if (priv->params.num_channels == count)
                return 0;
 
@@ -582,9 +659,10 @@ out:
 static void ptys2ethtool_supported_link(unsigned long *supported_modes,
                                        u32 eth_proto_cap)
 {
+       unsigned long proto_cap = eth_proto_cap;
        int proto;
 
-       for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+       for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
                bitmap_or(supported_modes, supported_modes,
                          ptys2ethtool_table[proto].supported,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -593,9 +671,10 @@ static void ptys2ethtool_supported_link(unsigned long *supported_modes,
 static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
                                    u32 eth_proto_cap)
 {
+       unsigned long proto_cap = eth_proto_cap;
        int proto;
 
-       for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+       for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
                bitmap_or(advertising_modes, advertising_modes,
                          ptys2ethtool_table[proto].advertised,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
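For reference, the mlx5e_rx_wqes_to_packets()/mlx5e_packets_to_rx_wqes() helpers added earlier in this file let ethtool ring-size requests be expressed in packets while a striding RQ is actually sized in multi-packet WQEs. A minimal user-space sketch of the same arithmetic, assuming made-up stride parameters rather than the driver defaults:

#include <stdio.h>
#include <stdint.h>

#define ETH_DATA_LEN 1500
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

/* ceil(log2(n)) for n > 1, 0 otherwise; stands in for order_base_2() */
static unsigned int order2(uint32_t n)
{
        unsigned int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

int main(void)
{
        /* assumed example: 2KB strides, 1024 strides per WQE, 64 WQEs */
        uint32_t stride_size = 1 << 11;
        uint32_t num_strides = 1 << 10;
        uint32_t wqe_size = stride_size * num_strides;
        uint32_t packets_per_wqe = wqe_size / ALIGN(ETH_DATA_LEN, stride_size);
        uint32_t num_wqe = 64;

        /* wqes -> packets: what ethtool -g would report */
        uint32_t packets = 1u << (order2(num_wqe * packets_per_wqe) - 1);
        printf("%u WQEs reported as %u packets\n",
               (unsigned)num_wqe, (unsigned)packets);

        /* packets -> wqes: how a user request is rounded internally */
        uint32_t rounded = 1u << order2(packets);
        uint32_t wqes = (rounded + packets_per_wqe - 1) / packets_per_wqe;
        printf("%u packets mapped back to %u WQEs\n",
               (unsigned)packets, (unsigned)(1u << order2(wqes)));
        return 0;
}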
index 870bea37c57cb37d8005b63f1b5b877e47280567..2459c7f3db8d26152724dc7edfd20c12a996ff45 100644 (file)
 #include "eswitch.h"
 #include "vxlan.h"
 
-enum {
-       MLX5_EN_QP_FLUSH_TIMEOUT_MS     = 5000,
-       MLX5_EN_QP_FLUSH_MSLEEP_QUANT   = 20,
-       MLX5_EN_QP_FLUSH_MAX_ITER       = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
-                                         MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
-};
-
 struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param    wq;
@@ -162,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
+                       s->tx_xmit_more         += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        tx_offload_none         += sq_stats->csum_none;
                }
@@ -340,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
+               rq->mpwqe_mtt_offset = c->ix *
+                       MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
+
                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
                rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
@@ -428,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 
        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
-       MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
        MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
@@ -525,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
        return -ETIMEDOUT;
 }
 
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq = &rq->wq;
+       struct mlx5e_rx_wqe *wqe;
+       __be16 wqe_ix_be;
+       u16 wqe_ix;
+
+       /* UMR WQE (if in progress) is always at wq->head */
+       if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+               mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+
+       while (!mlx5_wq_ll_is_empty(wq)) {
+               wqe_ix_be = *wq->tail_next;
+               wqe_ix    = be16_to_cpu(wqe_ix_be);
+               wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+               rq->dealloc_wqe(rq, wqe_ix);
+               mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+                              &wqe->next.next_wqe_index);
+       }
+}
+
 static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
@@ -548,8 +565,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (param->am_enabled)
                set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-       set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-
        sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
        sq->ico_wqe_info[pi].num_wqebbs = 1;
        mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
@@ -566,23 +581,8 @@ err_destroy_rq:
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-       int tout = 0;
-       int err;
-
-       clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+       set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-
-       err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
-       while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
-              tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
-               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-
-       if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
-               set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
-
-       /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
-       napi_synchronize(&rq->channel->napi);
-
        cancel_work_sync(&rq->am.work);
 
        mlx5e_disable_rq(rq);
@@ -821,7 +821,6 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
                goto err_disable_sq;
 
        if (sq->txq) {
-               set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
                netdev_tx_reset_queue(sq->txq);
                netif_tx_start_queue(sq->txq);
        }
@@ -845,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
-       int tout = 0;
-       int err;
+       set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+       /* prevent netif_tx_wake_queue */
+       napi_synchronize(&sq->channel->napi);
 
        if (sq->txq) {
-               clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-               /* prevent netif_tx_wake_queue */
-               napi_synchronize(&sq->channel->napi);
                netif_tx_disable_queue(sq->txq);
 
-               /* ensure hw is notified of all pending wqes */
+               /* last doorbell out, godspeed .. */
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);
-
-               err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
-                                     MLX5_SQC_STATE_ERR, false, 0);
-               if (err)
-                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
-       }
-
-       /* wait till sq is empty, unless a TX timeout occurred on this SQ */
-       while (sq->cc != sq->pc &&
-              !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
-               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-               if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
-                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
        }
 
-       /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
-       napi_synchronize(&sq->channel->napi);
-
-       mlx5e_free_tx_descs(sq);
        mlx5e_disable_sq(sq);
+       mlx5e_free_tx_descs(sq);
        mlx5e_destroy_sq(sq);
 }
 
@@ -1826,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev)
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
 
-       err = mlx5e_set_dev_port_mtu(netdev);
-       if (err)
-               goto err_clear_state_opened_flag;
-
        err = mlx5e_open_channels(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -2573,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
        u16 max_mtu;
        u16 min_mtu;
        int err = 0;
+       bool reset;
 
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
@@ -2588,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 
        mutex_lock(&priv->state_lock);
 
+       reset = !priv->params.lro_en &&
+               (priv->params.rq_wq_type !=
+                MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened)
+       if (was_opened && reset)
                mlx5e_close_locked(netdev);
 
        netdev->mtu = new_mtu;
+       mlx5e_set_dev_port_mtu(netdev);
 
-       if (was_opened)
+       if (was_opened && reset)
                err = mlx5e_open_locked(netdev);
 
        mutex_unlock(&priv->state_lock);
@@ -2794,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
                if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
                        continue;
                sched_work = true;
-               set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+               set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
                netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
                           i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
        }
@@ -3231,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_mkey_seg *mkc;
        int inlen = sizeof(*in);
-       u64 npages =
-               priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+       u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
+                                        BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
        int err;
 
        in = mlx5_vzalloc(inlen);
@@ -3246,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
                     MLX5_PERM_LOCAL_WRITE |
                     MLX5_ACCESS_MODE_MTT;
 
+       npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
+
        mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
        mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
-       mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+       mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
        mkc->log2_page_size = PAGE_SHIFT;
 
        err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
@@ -3385,6 +3370,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        queue_work(priv->wq, &priv->set_rx_mode_work);
 
        if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+               mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
                rep.load = mlx5e_nic_rep_load;
                rep.unload = mlx5e_nic_rep_unload;
                rep.vport = 0;
@@ -3463,6 +3449,8 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 
        mlx5e_init_l2_addr(priv);
 
+       mlx5e_set_dev_port_mtu(netdev);
+
        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
@@ -3501,16 +3489,20 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;
+       u8 mac[ETH_ALEN];
 
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return;
 
+       mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
        for (vport = 1; vport < total_vfs; vport++) {
                struct mlx5_eswitch_rep rep;
 
                rep.load = mlx5e_vport_rep_load;
                rep.unload = mlx5e_vport_rep_unload;
                rep.vport = vport;
+               ether_addr_copy(rep.hw_id, mac);
                mlx5_eswitch_register_vport_rep(esw, &rep);
        }
 }
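Taken together, the hunks above drop the old teardown that polled the queue with msleep() until it drained (or timed out) and replace it with a single FLUSH bit: the datapath tests the bit and stops, napi_synchronize() guarantees nothing is still running, and leftover descriptors are reclaimed only after the queue is disabled. A toy, single-threaded model of that ordering, with invented names and no real NAPI or hardware:

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8

struct toy_queue {
        bool flushing;
        int  cc, pc;                    /* consumer / producer counters */
        int  ring[RING_SIZE];
};

static void toy_post(struct toy_queue *q, int v)
{
        if (q->flushing)                /* datapath checks the flag and bails */
                return;
        q->ring[q->pc++ % RING_SIZE] = v;
}

static void toy_close(struct toy_queue *q)
{
        q->flushing = true;             /* 1) stop new work */
        /* 2) in the driver: napi_synchronize(), then disable the queue in HW */
        while (q->cc != q->pc) {        /* 3) free whatever was left behind */
                printf("reclaiming entry %d\n", q->ring[q->cc % RING_SIZE]);
                q->cc++;
        }
}

int main(void)
{
        struct toy_queue q = { 0 };

        toy_post(&q, 1);
        toy_post(&q, 2);
        toy_close(&q);
        toy_post(&q, 3);                /* ignored: the queue is flushing */
        return 0;
}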
index 1c7d8b8314bf43cc084aad96f5d4515dedfc36f3..134de4a11f1d9e65a4cad91be63638884e1a4706 100644 (file)
@@ -135,17 +135,16 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
 int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_eswitch_rep *rep = priv->ppriv;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       u8 mac[ETH_ALEN];
 
        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
-               mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac);
                attr->u.ppid.id_len = ETH_ALEN;
-               memcpy(&attr->u.ppid.id, &mac, ETH_ALEN);
+               ether_addr_copy(attr->u.ppid.id, rep->hw_id);
                break;
        default:
                return -EOPNOTSUPP;
index 9f2a16a507e04f8cd9861251ab3d0d5973b23e90..e7c969df3dada5d9ebdff8311c565920cebf404b 100644 (file)
@@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
        }
 }
 
-static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
 {
-       return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+       return rq->mpwqe_mtt_offset +
                wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
 }
 
@@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
        struct mlx5_wqe_data_seg      *dseg = &wqe->data;
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
        u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
-       u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+       u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
 
        memset(wqe, 0, sizeof(*wqe));
        cseg->opmod_idx_opcode =
@@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 
        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
        ucseg->klm_octowords =
-               cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+               cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
        ucseg->bsf_octowords =
-               cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+               cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
        ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
 
        dseg->lkey = sq->mkey_be;
@@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
 {
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
-       u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+       u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
        int i;
 
        wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
@@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
 
        clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+
+       if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
+               mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+               return;
+       }
+
        mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
        rq->stats.mpwqe_frag++;
 
@@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        wi->free_wqe(rq, wi);
 }
 
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
-{
-       struct mlx5_wq_ll *wq = &rq->wq;
-       struct mlx5e_rx_wqe *wqe;
-       __be16 wqe_ix_be;
-       u16 wqe_ix;
-
-       while (!mlx5_wq_ll_is_empty(wq)) {
-               wqe_ix_be = *wq->tail_next;
-               wqe_ix    = be16_to_cpu(wqe_ix_be);
-               wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
-               rq->dealloc_wqe(rq, wqe_ix);
-               mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
-                              &wqe->next.next_wqe_index);
-       }
-}
-
 #define RQ_CANNOT_POST(rq) \
-               (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
-                test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+       (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
+        test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
 
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
@@ -648,24 +637,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                                 u32 cqe_bcnt)
 {
-       struct ethhdr   *eth    = (struct ethhdr *)(skb->data);
-       struct iphdr    *ipv4   = (struct iphdr *)(skb->data + ETH_HLEN);
-       struct ipv6hdr  *ipv6   = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+       struct ethhdr   *eth = (struct ethhdr *)(skb->data);
+       struct iphdr    *ipv4;
+       struct ipv6hdr  *ipv6;
        struct tcphdr   *tcp;
+       int network_depth = 0;
+       __be16 proto;
+       u16 tot_len;
 
        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
                       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
 
-       u16 tot_len = cqe_bcnt - ETH_HLEN;
+       skb->mac_len = ETH_HLEN;
+       proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+
+       ipv4 = (struct iphdr *)(skb->data + network_depth);
+       ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
+       tot_len = cqe_bcnt - network_depth;
 
-       if (eth->h_proto == htons(ETH_P_IP)) {
-               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+       if (proto == htons(ETH_P_IP)) {
+               tcp = (struct tcphdr *)(skb->data + network_depth +
                                        sizeof(struct iphdr));
                ipv6 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        } else {
-               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+               tcp = (struct tcphdr *)(skb->data + network_depth +
                                        sizeof(struct ipv6hdr));
                ipv4 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
@@ -916,7 +913,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int work_done = 0;
 
-       if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+       if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
                return 0;
 
        if (cq->decmprs_left)
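The LRO header fixup above stops assuming the IP header sits at a fixed ETH_HLEN offset and instead derives network_depth via __vlan_get_protocol(), so VLAN-tagged packets are handled as well. A toy stand-in for that offset calculation (simplified parsing, not the kernel helper):

#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN    14
#define VLAN_HLEN   4
#define ETH_P_8021Q 0x8100

static int network_depth(const uint8_t *frame)
{
        int depth = ETH_HLEN;
        unsigned int proto = (frame[12] << 8) | frame[13];

        while (proto == ETH_P_8021Q) {  /* skip stacked VLAN tags */
                proto = ((unsigned int)frame[depth + 2] << 8) | frame[depth + 3];
                depth += VLAN_HLEN;
        }
        return depth;
}

int main(void)
{
        uint8_t untagged[64] = { [12] = 0x08, [13] = 0x00 };    /* IPv4 */
        uint8_t tagged[64]   = { [12] = 0x81, [13] = 0x00,      /* 802.1Q */
                                 [16] = 0x08, [17] = 0x00 };    /* inner IPv4 */

        printf("IP header at offset %d (untagged), %d (tagged)\n",
               network_depth(untagged), network_depth(tagged));
        return 0;
}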
index 7b9d8a989b520e8b59af1294efdefd2475864b59..499487ce3b5393495c8794b73fe7c0760678555d 100644 (file)
@@ -70,6 +70,7 @@ struct mlx5e_sw_stats {
        u64 tx_queue_stopped;
        u64 tx_queue_wake;
        u64 tx_queue_dropped;
+       u64 tx_xmit_more;
        u64 rx_wqe_err;
        u64 rx_mpwqe_filler;
        u64 rx_mpwqe_frag;
@@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
@@ -298,6 +300,7 @@ struct mlx5e_sq_stats {
        /* commonly accessed in data path */
        u64 packets;
        u64 bytes;
+       u64 xmit_more;
        u64 tso_packets;
        u64 tso_bytes;
        u64 tso_inner_packets;
@@ -324,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = {
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
 };
 
 #define NUM_SW_COUNTERS                        ARRAY_SIZE(sw_stats_desc)
index dc8b1cb0fdc8562b8564b78a324321bd479f80c4..22cfc4ac1837500cc9fb5a2fa6062d77f49550aa 100644 (file)
@@ -170,7 +170,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);
                addr_type = key->addr_type;
        }
index e073bf59890d33a164f498417ed42acf4cd24228..eb0e72537f10cd559876c5f2710ca143f0fc375f 100644 (file)
@@ -356,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                sq->stats.stopped++;
        }
 
+       sq->stats.xmit_more += skb->xmit_more;
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
                int bf_sz = 0;
 
@@ -394,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        return mlx5e_sq_xmit(sq, skb);
 }
 
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
-{
-       struct mlx5e_tx_wqe_info *wi;
-       struct sk_buff *skb;
-       u16 ci;
-       int i;
-
-       while (sq->cc != sq->pc) {
-               ci = sq->cc & sq->wq.sz_m1;
-               skb = sq->skb[ci];
-               wi = &sq->wqe_info[ci];
-
-               if (!skb) { /* nop */
-                       sq->cc++;
-                       continue;
-               }
-
-               for (i = 0; i < wi->num_dma; i++) {
-                       struct mlx5e_sq_dma *dma =
-                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
-
-                       mlx5e_tx_dma_unmap(sq->pdev, dma);
-               }
-
-               dev_kfree_skb_any(skb);
-               sq->cc += wi->num_wqebbs;
-       }
-}
-
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
        struct mlx5e_sq *sq;
@@ -434,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
        sq = container_of(cq, struct mlx5e_sq, cq);
 
-       if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+       if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
                return false;
 
        npkts = 0;
@@ -512,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
        netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 
        if (netif_tx_queue_stopped(sq->txq) &&
-           mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
-           likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
-                               netif_tx_wake_queue(sq->txq);
-                               sq->stats.wake++;
+           mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+               netif_tx_wake_queue(sq->txq);
+               sq->stats.wake++;
        }
 
        return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }
+
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+       struct mlx5e_tx_wqe_info *wi;
+       struct sk_buff *skb;
+       u16 ci;
+       int i;
+
+       while (sq->cc != sq->pc) {
+               ci = sq->cc & sq->wq.sz_m1;
+               skb = sq->skb[ci];
+               wi = &sq->wqe_info[ci];
+
+               if (!skb) { /* nop */
+                       sq->cc++;
+                       continue;
+               }
+
+               for (i = 0; i < wi->num_dma; i++) {
+                       struct mlx5e_sq_dma *dma =
+                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+                       mlx5e_tx_dma_unmap(sq->pdev, dma);
+               }
+
+               dev_kfree_skb_any(skb);
+               sq->cc += wi->num_wqebbs;
+       }
+}
index 64ae2e800daa3e7cfbe992792bcf2a9d3927126c..9bf33bb692106e14901ca55711fa4f7143f1bcce 100644 (file)
@@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
 
 static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
+       struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
        struct mlx5_wq_cyc *wq;
        struct mlx5_cqe64 *cqe;
-       struct mlx5e_sq *sq;
        u16 sqcc;
 
+       if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+               return;
+
        cqe = mlx5e_get_cqe(cq);
        if (likely(!cqe))
                return;
 
-       sq = container_of(cq, struct mlx5e_sq, cq);
        wq = &sq->wq;
 
        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
index f6d667797ee10e57690aeb5ce0bb588b4bb455da..8b78f156214ef0258d7a5203177edae8160f8e9d 100644 (file)
@@ -1451,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
        esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
 
-       if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+       /* Only VFs need ACLs for VST and spoofchk filtering */
+       if (vport_num && esw->mode == SRIOV_LEGACY) {
                esw_vport_ingress_config(esw, vport);
                esw_vport_egress_config(esw, vport);
        }
@@ -1502,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
         */
        esw_vport_change_handle_locked(vport);
        vport->enabled_events = 0;
-       if (vport_num) {
+       if (vport_num && esw->mode == SRIOV_LEGACY) {
                esw_vport_disable_egress_acl(esw, vport);
                esw_vport_disable_ingress_acl(esw, vport);
        }
@@ -1767,7 +1768,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               vport, err);
 
        mutex_lock(&esw->state_lock);
-       if (evport->enabled)
+       if (evport->enabled && esw->mode == SRIOV_LEGACY)
                err = esw_vport_ingress_config(esw, evport);
        mutex_unlock(&esw->state_lock);
        return err;
@@ -1839,7 +1840,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
        mutex_lock(&esw->state_lock);
        evport->vlan = vlan;
        evport->qos = qos;
-       if (evport->enabled) {
+       if (evport->enabled && esw->mode == SRIOV_LEGACY) {
                err = esw_vport_ingress_config(esw, evport);
                if (err)
                        goto out;
@@ -1868,10 +1869,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
        mutex_lock(&esw->state_lock);
        pschk = evport->spoofchk;
        evport->spoofchk = spoofchk;
-       if (evport->enabled)
+       if (evport->enabled && esw->mode == SRIOV_LEGACY) {
                err = esw_vport_ingress_config(esw, evport);
-       if (err)
-               evport->spoofchk = pschk;
+               if (err)
+                       evport->spoofchk = pschk;
+       }
        mutex_unlock(&esw->state_lock);
 
        return err;
index c0b05603fc31e54a4068d4b4144284ac338d4d4d..a96140971d77a5a9fd63c019135a15354c21b04f 100644 (file)
@@ -174,6 +174,7 @@ struct mlx5_eswitch_rep {
        void                  *priv_data;
        struct list_head       vport_sqs_list;
        bool                   valid;
+       u8                     hw_id[ETH_ALEN];
 };
 
 struct mlx5_esw_offload {
index a357e8eeeed860dd06a2909a94e54f85391ac411..3dc83a9459a41c8da2e921004b0acf791d6f9dff 100644 (file)
@@ -113,7 +113,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = vport;
 
-       flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
+       flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
                                       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                       0, &dest);
        if (IS_ERR(flow_rule))
@@ -535,7 +535,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
        esw_destroy_offloads_fdb_table(esw);
 }
 
-static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
 {
        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
@@ -551,6 +551,22 @@ static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
        return 0;
 }
 
+static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+{
+       switch (mlx5_mode) {
+       case SRIOV_LEGACY:
+               *mode = DEVLINK_ESWITCH_MODE_LEGACY;
+               break;
+       case SRIOV_OFFLOADS:
+               *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 {
        struct mlx5_core_dev *dev;
@@ -566,7 +582,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
        if (cur_mlx5_mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode))
+       if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;
 
        if (cur_mlx5_mode == mlx5_mode)
@@ -592,9 +608,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       *mode = dev->priv.eswitch->mode;
-
-       return 0;
+       return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
 
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
index 75bb8c8645575058324c4807356586055f13da3b..3d6c1f65e5860f832c33413bdf00e95e1986a87b 100644 (file)
@@ -80,7 +80,7 @@
                           LEFTOVERS_NUM_PRIOS)
 
 #define ETHTOOL_PRIO_NUM_LEVELS 1
-#define ETHTOOL_NUM_PRIOS 10
+#define ETHTOOL_NUM_PRIOS 11
 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
 /* Vlan, mac, ttc, aRFS */
 #define KERNEL_NIC_PRIO_NUM_LEVELS 4
index c2877e9de8a11418da64e78cded14ae86daf2c2f..3a9195b4169dc0b1cbbb60b31ac3d2a2ddadcecb 100644 (file)
@@ -126,12 +126,21 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
        for (node = &first->node; node; node = rb_next(node)) {
                struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
                struct mlx5_fc_cache *c = &counter->cache;
+               u64 packets;
+               u64 bytes;
 
                if (counter->id > last_id)
                        break;
 
                mlx5_cmd_fc_bulk_get(dev, b,
-                                    counter->id, &c->packets, &c->bytes);
+                                    counter->id, &packets, &bytes);
+
+               if (c->packets == packets)
+                       continue;
+
+               c->packets = packets;
+               c->bytes = bytes;
+               c->lastuse = jiffies;
        }
 
 out:
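The counter-cache hunk above only refreshes the cached values (and, with them, the lastuse stamp) when the packet count actually changed, so idle flows keep their old timestamp. A self-contained sketch of that rule, using wall-clock time instead of jiffies and invented names:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

struct fc_cache {
        uint64_t packets;
        uint64_t bytes;
        time_t   lastuse;
};

static void fc_cache_update(struct fc_cache *c, uint64_t packets, uint64_t bytes)
{
        if (c->packets == packets)      /* no new traffic: keep old lastuse */
                return;

        c->packets = packets;
        c->bytes   = bytes;
        c->lastuse = time(NULL);
}

int main(void)
{
        struct fc_cache c = { 0 };

        fc_cache_update(&c, 10, 1500);  /* traffic seen: lastuse refreshed */
        fc_cache_update(&c, 10, 1500);  /* identical reading: lastuse kept */
        printf("packets=%llu lastuse=%ld\n",
               (unsigned long long)c.packets, (long)c.lastuse);
        return 0;
}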
index 4f491d43e77d6e5a4e0b510d8018729e75d5aa6b..2385bae92672a0e9c134dcfbd6079c7e7bd9abbf 100644 (file)
@@ -1420,36 +1420,12 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
        dev_info(&pdev->dev, "%s was called\n", __func__);
        mlx5_enter_error_state(dev);
        mlx5_unload_one(dev, priv);
+       pci_save_state(pdev);
        mlx5_pci_disable_device(dev);
        return state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
 }
 
-static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
-{
-       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-       int err = 0;
-
-       dev_info(&pdev->dev, "%s was called\n", __func__);
-
-       err = mlx5_pci_enable_device(dev);
-       if (err) {
-               dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
-                       , __func__, err);
-               return PCI_ERS_RESULT_DISCONNECT;
-       }
-       pci_set_master(pdev);
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-
-       return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
-}
-
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
-       mlx5_pci_err_detected(dev->pdev, 0);
-}
-
 /* wait for the device to show vital signs by waiting
  * for the health counter to start counting.
  */
@@ -1477,21 +1453,44 @@ static int wait_vital(struct pci_dev *pdev)
        return -ETIMEDOUT;
 }
 
-static void mlx5_pci_resume(struct pci_dev *pdev)
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 {
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-       struct mlx5_priv *priv = &dev->priv;
        int err;
 
        dev_info(&pdev->dev, "%s was called\n", __func__);
 
-       pci_save_state(pdev);
-       err = wait_vital(pdev);
+       err = mlx5_pci_enable_device(dev);
        if (err) {
+               dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+                       , __func__, err);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+
+       if (wait_vital(pdev)) {
                dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
-               return;
+               return PCI_ERS_RESULT_DISCONNECT;
        }
 
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+       mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+       struct mlx5_priv *priv = &dev->priv;
+       int err;
+
+       dev_info(&pdev->dev, "%s was called\n", __func__);
+
        err = mlx5_load_one(dev, priv);
        if (err)
                dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
index f33b997f2b61c514a7f7d7017843fbbbe766472c..af371a82c35ba82e284d5b687dcf655e1ae84b50 100644 (file)
@@ -56,6 +56,7 @@
 #define MLXSW_PORT_PHY_BITS_MASK       (MLXSW_PORT_MAX_PHY_PORTS - 1)
 
 #define MLXSW_PORT_CPU_PORT            0x0
+#define MLXSW_PORT_ROUTER_PORT         (MLXSW_PORT_MAX_PHY_PORTS + 2)
 
 #define MLXSW_PORT_DONT_CARE           (MLXSW_PORT_MAX_PORTS)
 
index 1f816890681109e367f8b7a8e5639e35474e2a44..d48873bcbddfc0d0049ecc2f73b5bfcc68862d20 100644 (file)
@@ -56,6 +56,7 @@
 #include <generated/utsrelease.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_mirred.h>
+#include <net/netevent.h>
 
 #include "spectrum.h"
 #include "core.h"
@@ -2105,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
        dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
 
+       err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_swid_set;
+       }
+
        err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -2130,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                goto err_port_system_port_mapping_set;
        }
 
-       err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
-       if (err) {
-               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
-                       mlxsw_sp_port->local_port);
-               goto err_port_swid_set;
-       }
-
        err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@@ -2218,10 +2219,10 @@ err_port_buffers_init:
 err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_by_width_set:
-       mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
 err_port_system_port_mapping_set:
 err_dev_addr_init:
+       mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
        free_percpu(mlxsw_sp_port->pcpu_stats);
 err_alloc_stats:
        kfree(mlxsw_sp_port->untagged_vlans);
@@ -3324,6 +3325,39 @@ static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
        return mlxsw_sp_fid_find(mlxsw_sp, fid);
 }
 
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+       return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+              MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+       return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+                                         bool set)
+{
+       enum mlxsw_flood_table_type table_type;
+       char *sftr_pl;
+       u16 index;
+       int err;
+
+       sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+       if (!sftr_pl)
+               return -ENOMEM;
+
+       table_type = mlxsw_sp_flood_table_type_get(fid);
+       index = mlxsw_sp_flood_table_index_get(fid);
+       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
+                           1, MLXSW_PORT_ROUTER_PORT, set);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+       kfree(sftr_pl);
+       return err;
+}
+
 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
 {
        if (mlxsw_sp_fid_is_vfid(fid))
@@ -3360,10 +3394,14 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
        if (rif == MLXSW_SP_RIF_MAX)
                return -ERANGE;
 
-       err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+       err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
        if (err)
                return err;
 
+       err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+       if (err)
+               goto err_rif_bridge_op;
+
        err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
        if (err)
                goto err_rif_fdb_op;
@@ -3385,6 +3423,8 @@ err_rif_alloc:
        mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
 err_rif_fdb_op:
        mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+err_rif_bridge_op:
+       mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
        return err;
 }
 
@@ -3404,6 +3444,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
 
        mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
 
+       mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
        netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
 }
 
@@ -4500,18 +4542,26 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
        .priority = 10, /* Must be called before FIB notifier block */
 };
 
+static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
+       .notifier_call = mlxsw_sp_router_netevent_event,
+};
+
 static int __init mlxsw_sp_module_init(void)
 {
        int err;
 
        register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
        register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+       register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+
        err = mlxsw_core_driver_register(&mlxsw_sp_driver);
        if (err)
                goto err_core_driver_register;
        return 0;
 
 err_core_driver_register:
+       unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+       unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
        return err;
 }
@@ -4519,6 +4569,7 @@ err_core_driver_register:
 static void __exit mlxsw_sp_module_exit(void)
 {
        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+       unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
 }
index ab3feb81bd432fe0dae79c41c82e2c2e5789ee1a..ac48abebe904f4d328d3fe16ee864e4592c682d6 100644 (file)
@@ -587,6 +587,8 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
                                    struct neighbour *n);
 void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
                                   struct neighbour *n);
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+                                  unsigned long event, void *ptr);
 
 int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
 void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
index 237418a0e6e086f425f3215990d443ac49e17e54..953b214f38d0d9906054c1220ad5a2156e181ecd 100644 (file)
@@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = pool_type;
-       u8 pool = pool_index;
+       u8 pool = pool_get(pool_index);
        u32 max_buff;
        int err;
 
+       if (dir != dir_get(pool_index))
+               return -EINVAL;
+
        err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
                                       threshold, &max_buff);
        if (err)
                return err;
 
-       if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
-               if (pool < MLXSW_SP_SB_POOL_COUNT)
-                       return -EINVAL;
-               pool -= MLXSW_SP_SB_POOL_COUNT;
-       } else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
-               return -EINVAL;
-       }
        return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
                                    0, max_buff, pool);
 }
index 90bb93b037eccf4710d2176c9c9f969fd74a852c..3f5c51da6d3e9d8304c21d69b165ae9dc96bcf55 100644 (file)
@@ -107,6 +107,7 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
 }
 
 struct mlxsw_sp_fib_key {
+       struct net_device *dev;
        unsigned char addr[sizeof(struct in6_addr)];
        unsigned char prefix_len;
 };
@@ -123,7 +124,7 @@ struct mlxsw_sp_fib_entry {
        struct rhash_head ht_node;
        struct mlxsw_sp_fib_key key;
        enum mlxsw_sp_fib_entry_type type;
-       u8 added:1;
+       unsigned int ref_count;
        u16 rif; /* used for action local */
        struct mlxsw_sp_vr *vr;
        struct list_head nexthop_group_node;
@@ -171,13 +172,15 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
 
 static struct mlxsw_sp_fib_entry *
 mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
-                         size_t addr_len, unsigned char prefix_len)
+                         size_t addr_len, unsigned char prefix_len,
+                         struct net_device *dev)
 {
        struct mlxsw_sp_fib_entry *fib_entry;
 
        fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
        if (!fib_entry)
                return NULL;
+       fib_entry->key.dev = dev;
        memcpy(fib_entry->key.addr, addr, addr_len);
        fib_entry->key.prefix_len = prefix_len;
        return fib_entry;
@@ -190,10 +193,13 @@ static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
 
 static struct mlxsw_sp_fib_entry *
 mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
-                         size_t addr_len, unsigned char prefix_len)
+                         size_t addr_len, unsigned char prefix_len,
+                         struct net_device *dev)
 {
-       struct mlxsw_sp_fib_key key = {{ 0 } };
+       struct mlxsw_sp_fib_key key;
 
+       memset(&key, 0, sizeof(key));
+       key.dev = dev;
        memcpy(key.addr, addr, addr_len);
        key.prefix_len = prefix_len;
        return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
@@ -657,7 +663,7 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
                return 0;
        }
 
-       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
        if (WARN_ON(!r))
                return -EINVAL;
 
@@ -938,8 +944,8 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
        mlxsw_sp_port_dev_put(mlxsw_sp_port);
 }
 
-static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
-                                         unsigned long event, void *ptr)
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+                                  unsigned long event, void *ptr)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry;
        struct mlxsw_sp_port *mlxsw_sp_port;
@@ -1009,10 +1015,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
-       .notifier_call = mlxsw_sp_router_netevent_event,
-};
-
 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
 {
        int err;
@@ -1027,10 +1029,6 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
         */
        mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
 
-       err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
-       if (err)
-               goto err_register_netevent_notifier;
-
        /* Create the delayed works for the activity_update */
        INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
                          mlxsw_sp_router_neighs_update_work);
@@ -1039,17 +1037,12 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
        mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
        mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
        return 0;
-
-err_register_netevent_notifier:
-       rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
-       return err;
 }
 
 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
 {
        cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
        cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
-       unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
        rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
 }
 
@@ -1524,7 +1517,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
                return err;
        mlxsw_sp_lpm_init(mlxsw_sp);
        mlxsw_sp_vrs_init(mlxsw_sp);
-       return mlxsw_sp_neigh_init(mlxsw_sp);
+       err = mlxsw_sp_neigh_init(mlxsw_sp);
+       if (err)
+               goto err_neigh_init;
+       return 0;
+
+err_neigh_init:
+       __mlxsw_sp_router_fini(mlxsw_sp);
+       return err;
 }
 
 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1626,11 +1626,8 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_fib_entry *fib_entry)
 {
-       enum mlxsw_reg_ralue_op op;
-
-       op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
-                                MLXSW_REG_RALUE_OP_WRITE_UPDATE;
-       return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+       return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+                                    MLXSW_REG_RALUE_OP_WRITE_WRITE);
 }
 
 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
@@ -1695,34 +1692,93 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
        mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
 }
 
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
-                                const struct switchdev_obj_ipv4_fib *fib4,
-                                struct switchdev_trans *trans)
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
+                      const struct switchdev_obj_ipv4_fib *fib4)
 {
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       struct mlxsw_sp_router_fib4_add_info *info;
        struct mlxsw_sp_fib_entry *fib_entry;
+       struct fib_info *fi = fib4->fi;
        struct mlxsw_sp_vr *vr;
        int err;
 
        vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
                             MLXSW_SP_L3_PROTO_IPV4);
        if (IS_ERR(vr))
-               return PTR_ERR(vr);
+               return ERR_CAST(vr);
 
+       fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+                                             sizeof(fib4->dst),
+                                             fib4->dst_len, fi->fib_dev);
+       if (fib_entry) {
+               /* Already exists, just take a reference */
+               fib_entry->ref_count++;
+               return fib_entry;
+       }
        fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
-                                             sizeof(fib4->dst), fib4->dst_len);
+                                             sizeof(fib4->dst),
+                                             fib4->dst_len, fi->fib_dev);
        if (!fib_entry) {
                err = -ENOMEM;
                goto err_fib_entry_create;
        }
        fib_entry->vr = vr;
+       fib_entry->ref_count = 1;
 
        err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
        if (err)
                goto err_fib4_entry_init;
 
+       return fib_entry;
+
+err_fib4_entry_init:
+       mlxsw_sp_fib_entry_destroy(fib_entry);
+err_fib_entry_create:
+       mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+       return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
+                       const struct switchdev_obj_ipv4_fib *fib4)
+{
+       struct mlxsw_sp_vr *vr;
+
+       vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+       if (!vr)
+               return NULL;
+
+       return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+                                        sizeof(fib4->dst), fib4->dst_len,
+                                        fib4->fi->fib_dev);
+}
+
+void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_fib_entry *fib_entry)
+{
+       struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+       if (--fib_entry->ref_count == 0) {
+               mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+               mlxsw_sp_fib_entry_destroy(fib_entry);
+       }
+       mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+static int
+mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
+                                const struct switchdev_obj_ipv4_fib *fib4,
+                                struct switchdev_trans *trans)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_router_fib4_add_info *info;
+       struct mlxsw_sp_fib_entry *fib_entry;
+       int err;
+
+       fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
+       if (IS_ERR(fib_entry))
+               return PTR_ERR(fib_entry);
+
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                err = -ENOMEM;
@@ -1736,11 +1792,7 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
        return 0;
 
 err_alloc_info:
-       mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-err_fib4_entry_init:
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-err_fib_entry_create:
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
        return err;
 }
 
@@ -1759,11 +1811,14 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
        fib_entry = info->fib_entry;
        kfree(info);
 
+       if (fib_entry->ref_count != 1)
+               return 0;
+
        vr = fib_entry->vr;
-       err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
+       err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
        if (err)
                goto err_fib_entry_insert;
-       err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+       err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
        if (err)
                goto err_fib_entry_add;
        return 0;
@@ -1771,9 +1826,7 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
 err_fib_entry_add:
        mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
 err_fib_entry_insert:
-       mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
        return err;
 }
 
@@ -1793,23 +1846,18 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_fib_entry *fib_entry;
-       struct mlxsw_sp_vr *vr;
 
-       vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
-       if (!vr) {
-               dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
-               return -ENOENT;
-       }
-       fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
-                                             sizeof(fib4->dst), fib4->dst_len);
+       fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
        if (!fib_entry) {
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
                return -ENOENT;
        }
-       mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry);
-       mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
-       mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+       if (fib_entry->ref_count == 1) {
+               mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+               mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
+       }
+
+       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
        return 0;
 }
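
For context on the mlxsw router hunks above: the patch turns FIB4 entry lifetime into a get/find/put scheme with a ref_count, so identical routes share a single entry and only the last put frees it. A minimal user-space sketch of that pattern (the names below are invented for illustration and are not mlxsw symbols):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	char key[32];
	unsigned int ref_count;
	struct entry *next;
};

static struct entry *table;

static struct entry *entry_find(const char *key)
{
	struct entry *e;

	for (e = table; e; e = e->next)
		if (!strcmp(e->key, key))
			return e;
	return NULL;
}

static struct entry *entry_get(const char *key)
{
	struct entry *e = entry_find(key);

	if (e) {
		e->ref_count++;		/* reuse the shared entry */
		return e;
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	strncpy(e->key, key, sizeof(e->key) - 1);
	e->ref_count = 1;
	e->next = table;
	table = e;
	return e;
}

static void entry_put(struct entry *e)
{
	struct entry **p;

	if (--e->ref_count)
		return;			/* still referenced elsewhere */
	for (p = &table; *p; p = &(*p)->next) {
		if (*p == e) {
			*p = e->next;	/* unlink on last put */
			break;
		}
	}
	free(e);
}

int main(void)
{
	struct entry *a = entry_get("198.51.100.0/24");
	struct entry *b = entry_get("198.51.100.0/24");

	printf("shared=%d ref_count=%u\n", a == b, a->ref_count);	/* shared=1 ref_count=2 */
	entry_put(b);
	entry_put(a);	/* last put frees the entry */
	return 0;
}
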
index d1b59cdfacc13824a642f0db6ffcd4c05678d4ef..7b654c517b910f7f22b48ad2c00409e6875d9da1 100644 (file)
@@ -167,8 +167,8 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                    u16 idx_begin, u16 idx_end, bool set,
-                                    bool only_uc)
+                                    u16 idx_begin, u16 idx_end, bool uc_set,
+                                    bool bm_set)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
@@ -187,28 +187,22 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
                return -ENOMEM;
 
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-                           table_type, range, local_port, set);
+                           table_type, range, local_port, uc_set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
        if (err)
                goto buffer_out;
 
-       /* Flooding control allows one to decide whether a given port will
-        * flood unicast traffic for which there is no FDB entry.
-        */
-       if (only_uc)
-               goto buffer_out;
-
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
-                           table_type, range, local_port, set);
+                           table_type, range, local_port, bm_set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
        if (err)
                goto err_flood_bm_set;
-       else
-               goto buffer_out;
+
+       goto buffer_out;
 
 err_flood_bm_set:
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-                           table_type, range, local_port, !set);
+                           table_type, range, local_port, !uc_set);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 buffer_out:
        kfree(sftr_pl);
@@ -257,8 +251,7 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
         * the start of the vFIDs range.
         */
        vfid = mlxsw_sp_fid_to_vfid(fid);
-       return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
-                                        false);
+       return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
 }
 
 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -460,6 +453,9 @@ static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
 {
        struct mlxsw_sp_fid *f;
 
+       if (test_bit(fid, mlxsw_sp_port->active_vlans))
+               return 0;
+
        f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
        if (!f) {
                f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
@@ -517,7 +513,7 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
        }
 
        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
-                                       true, false);
+                                       mlxsw_sp_port->uc_flood, true);
        if (err)
                goto err_port_flood_set;
 
index 88678c172b19f14e7ae00574cab4da76f1edcced..252e4924de0f9f7a7f5c0450e1b11718f6ede089 100644 (file)
@@ -41,7 +41,6 @@
  *          Chris Telfer <chris.telfer@netronome.com>
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 
                nfp_net_set_hash(nn->netdev, skb, rxd);
 
-               /* Pad small frames to minimum */
-               if (skb_put_padto(skb, 60))
-                       break;
-
                /* Stats update */
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->rx_pkts++;
index 7d7933d00b8f15107868dea795de73a6f61d0a07..4c989722096994b431472af20c78ce84be3caf97 100644 (file)
@@ -40,7 +40,6 @@
  *          Brad Petrus <brad.petrus@netronome.com>
  */
 
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
index 37abef016a0a82a41c6b3a8f14c0f13cb7d2a2f2..f7062cb648e1b777342797cd7fc69a767a7488e8 100644 (file)
@@ -38,7 +38,6 @@
  *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -134,7 +133,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        }
 
        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
-       if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+       if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
                dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
                        fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
                err = -EINVAL;
@@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        }
 
        /* Determine stride */
-       if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) ||
-           nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
-           nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
+       if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
                stride = 2;
                tx_bar_no = NFP_NET_Q0_BAR;
                rx_bar_no = NFP_NET_Q1_BAR;
index 4d4ecba0aad9fbce893fc410f38a3b34c5ec9599..8e13ec84c53812e92bbabd88a12a25e12c353a75 100644 (file)
@@ -475,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
        mac[5] = tmp >> 8;
 }
 
-static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
-{
-       if (enable)
-               clk_prepare_enable(pldat->clk);
-       else
-               clk_disable_unprepare(pldat->clk);
-}
-
 static void __lpc_params_setup(struct netdata_local *pldat)
 {
        u32 tmp;
@@ -1056,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev)
        writel(0, LPC_ENET_MAC2(pldat->net_base));
        spin_unlock_irqrestore(&pldat->lock, flags);
 
-       __lpc_eth_clock_enable(pldat, false);
+       clk_disable_unprepare(pldat->clk);
 
        return 0;
 }
@@ -1197,11 +1189,14 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 static int lpc_eth_open(struct net_device *ndev)
 {
        struct netdata_local *pldat = netdev_priv(ndev);
+       int ret;
 
        if (netif_msg_ifup(pldat))
                dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
 
-       __lpc_eth_clock_enable(pldat, true);
+       ret = clk_prepare_enable(pldat->clk);
+       if (ret)
+               return ret;
 
        /* Suspended PHY makes LPC ethernet core block, so resume now */
        phy_resume(ndev->phydev);
@@ -1320,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        }
 
        /* Enable network clock */
-       __lpc_eth_clock_enable(pldat, true);
+       ret = clk_prepare_enable(pldat->clk);
+       if (ret)
+               goto err_out_clk_put;
 
        /* Map IO space */
        pldat->net_base = ioremap(res->start, resource_size(res));
@@ -1454,6 +1451,7 @@ err_out_iounmap:
        iounmap(pldat->net_base);
 err_out_disable_clocks:
        clk_disable_unprepare(pldat->clk);
+err_out_clk_put:
        clk_put(pldat->clk);
 err_out_free_dev:
        free_netdev(ndev);
index 35e53771533fb31ddd7100b4e7931f6aba70b4f1..45ab746765737ae2d9af3d5cb77c5343b9aa0c04 100644 (file)
@@ -561,9 +561,18 @@ struct qed_dev {
 static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
                                        u32 concrete_fid)
 {
+       u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
        u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+       u8 vf_valid = GET_FIELD(concrete_fid,
+                               PXP_CONCRETE_FID_VFVALID);
+       u8 sw_fid;
 
-       return pfid;
+       if (vf_valid)
+               sw_fid = vfid + MAX_NUM_PFS;
+       else
+               sw_fid = pfid;
+
+       return sw_fid;
 }
 
 #define PURE_LB_TC 8
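
The qed_concrete_to_sw_fid() change above maps VF functions past the PF range instead of always returning the PF id. The arithmetic in isolation (the MAX_NUM_PFS value below is a placeholder, not taken from the qed headers):

#include <stdio.h>

#define MAX_NUM_PFS	16	/* illustrative only */

static unsigned int sw_fid(unsigned int pfid, unsigned int vfid, int vf_valid)
{
	/* VFs are placed after all PFs in the software FID space */
	return vf_valid ? vfid + MAX_NUM_PFS : pfid;
}

int main(void)
{
	printf("PF 3        -> sw_fid %u\n", sw_fid(3, 0, 0));	/* 3 */
	printf("VF 5 (PF 3) -> sw_fid %u\n", sw_fid(3, 5, 1));	/* 21 */
	return 0;
}
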
index 226cb08cc055fc21c6ea4ab414dfd798d883db8a..3656d2fd673d28a37d1bfd4bb54c8046b6f45987 100644 (file)
@@ -19,6 +19,7 @@
 #include "qed_dcbx.h"
 #include "qed_hsi.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
 #ifdef CONFIG_DCB
 #include <linux/qed/qed_eth_if.h>
 #endif
@@ -945,6 +946,9 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
        struct qed_ptt *p_ptt;
        int rc;
 
+       if (IS_VF(p_hwfn->cdev))
+               return -EINVAL;
+
        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return -EBUSY;
@@ -984,6 +988,7 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
                if (p_params->pfc.prio[i])
                        pfc_map |= BIT(i);
 
+       *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
        *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
 
        DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
@@ -1058,24 +1063,33 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
 
        for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
                entry = &p_app->app_pri_tbl[i].entry;
+               *entry = 0;
                if (ieee) {
-                       *entry &= ~DCBX_APP_SF_IEEE_MASK;
+                       *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
                        switch (p_params->app_entry[i].sf_ieee) {
                        case QED_DCBX_SF_IEEE_ETHTYPE:
                                *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
                                           DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+                                          DCBX_APP_SF_SHIFT);
                                break;
                        case QED_DCBX_SF_IEEE_TCP_PORT:
                                *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
                                           DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
                                break;
                        case QED_DCBX_SF_IEEE_UDP_PORT:
                                *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
                                           DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
                                break;
                        case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
                                *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
                                           DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
                                break;
                        }
                } else {
@@ -1175,7 +1189,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
                return 0;
        }
 
-       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
        if (!dcbx_info) {
                DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
                return -ENOMEM;
@@ -1212,7 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 {
        struct qed_dcbx_get *dcbx_info;
 
-       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
        if (!dcbx_info) {
                DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
                return NULL;
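
Two recurring fixes in the qed DCBX hunks above: allocate with kzalloc() so fields read back later start zeroed, and clear a packed field's mask before OR-ing in the new value so stale bits cannot leak through. The clear-before-set step on its own (mask and shift values are invented for the example):

#include <stdio.h>
#include <stdint.h>

#define PFC_EN_MASK	0x0000ff00u	/* invented layout for illustration */
#define PFC_EN_SHIFT	8

static void set_pfc_map(uint32_t *reg, uint32_t pfc_map)
{
	*reg &= ~PFC_EN_MASK;			/* drop the stale bitmap first */
	*reg |= (pfc_map << PFC_EN_SHIFT) & PFC_EN_MASK;
}

int main(void)
{
	uint32_t reg = 0x0000aa00;	/* old bitmap 0xaa already present */

	set_pfc_map(&reg, 0x05);
	printf("reg = 0x%08x\n", reg);	/* 0x00000500, not 0x0000af00 */
	return 0;
}
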
index e4bd02e46e577f41534ae176949ede1220728796..9544e4c4135901ee177393c6e2845a18e231fffb 100644 (file)
@@ -722,11 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        txq->tx_db.data.bd_prod =
                cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 
-       if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
+       if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
                qede_update_tx_producer(txq);
 
        if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
                      < (MAX_SKB_FRAGS + 1))) {
+               if (skb->xmit_more)
+                       qede_update_tx_producer(txq);
+
                netif_tx_stop_queue(netdev_txq);
                DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
                           "Stop queue was called\n");
@@ -2517,7 +2520,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
        edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
 #ifdef CONFIG_DCB
-       qede_set_dcbnl_ops(edev->ndev);
+       if (!IS_VF(edev))
+               qede_set_dcbnl_ops(edev->ndev);
 #endif
 
        INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
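
The qede_start_xmit() change above coalesces doorbell writes: the producer index is pushed to hardware only when xmit_more is clear or the queue is being stopped, and it is flushed before stopping the queue if xmit_more is still set so no posted packet is left unannounced. A stand-alone sketch of that batching decision (not qede code; the netif_xmit_stopped() case is omitted):

#include <stdio.h>
#include <stdbool.h>

static unsigned int hw_producer;	/* last value written to the doorbell */
static unsigned int sw_producer;	/* ring entries actually filled */

static void ring_doorbell(void)
{
	hw_producer = sw_producer;
	printf("doorbell: producer=%u\n", hw_producer);
}

static void xmit_one(bool xmit_more, bool ring_nearly_full)
{
	sw_producer++;			/* descriptor posted to the ring */

	if (!xmit_more)
		ring_doorbell();	/* end of the batch: tell hardware */

	if (ring_nearly_full) {
		if (xmit_more)
			ring_doorbell();	/* flush before stopping the queue */
		printf("stop queue at producer=%u\n", sw_producer);
	}
}

int main(void)
{
	xmit_one(true, false);		/* batched, no doorbell */
	xmit_one(true, false);		/* batched, no doorbell */
	xmit_one(false, false);		/* batch ends: one doorbell for three packets */
	xmit_one(true, true);		/* ring almost full: flush, then stop */
	return 0;
}
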
index deae10d7426df0441ad5970f3f425a948c32abb3..5297bf77211cf344c7a8556c2778abc9c1e4a9cd 100644 (file)
@@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
        unsigned int rx_tail = cp->rx_tail;
        int rx;
 
-rx_status_loop:
        rx = 0;
+rx_status_loop:
        cpw16(IntrStatus, cp_rx_intr_mask);
 
        while (rx < budget) {
index 799d58d86e6dcb86fc23c125abbbbc317720e2b7..054e795df90f9de2bea2accd6c9d66eafc01ffb0 100644 (file)
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 
        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
+       [TSU_FWSLC]     = 0x0038,
        [TSU_VTAG0]     = 0x0058,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
+       [TSU_POST1]     = 0x0070,
+       [TSU_POST2]     = 0x0074,
+       [TSU_POST3]     = 0x0078,
+       [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,
 
        [TXNLCR0]       = 0x0080,
@@ -2786,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 {
        if (sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+               sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+                                TSU_FWSLC);    /* Enable POST registers */
                return;
        }
 
index f658fee74f188b564f59842654de7036b82850d5..e00a669e9e0911626044a3cfe377f7c7b2251a0e 100644 (file)
@@ -1517,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
        }
 
 #if BITS_PER_LONG == 64
+       BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
        mask[0] = raw_mask[0];
        mask[1] = raw_mask[1];
 #else
+       BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
        mask[0] = raw_mask[0] & 0xffffffff;
        mask[1] = raw_mask[0] >> 32;
        mask[2] = raw_mask[1] & 0xffffffff;
-       mask[3] = raw_mask[1] >> 32;
 #endif
 }
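
The sfc hunk above is a bounds fix: the mask array only has BITS_TO_LONGS(EF10_STAT_COUNT) words, which the new BUILD_BUG_ON()s pin to 2 on 64-bit and 3 on 32-bit, so the old mask[3] store on 32-bit wrote past the array. The arithmetic behind that, using a made-up count in the same range:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
/* Kernel's BITS_TO_LONGS(): how many unsigned longs hold nbits bits. */
#define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	/* A stat count in the 65..96 range implied by the hunk (the real
	 * EF10 value is not shown here): 2 longs on 64-bit, 3 longs on
	 * 32-bit, so index 3 is never a valid mask word. */
	unsigned int stat_count = 90;

	printf("BITS_PER_LONG=%zu -> mask needs %zu words\n",
	       (size_t)BITS_PER_LONG, (size_t)BITS_TO_LONGS(stat_count));
	return 0;
}
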
 
index 726b80f4590660f2d08b919320deac3bb54a82ea..503a3b6dce917f3fdb043dab106b80832103ce93 100644 (file)
@@ -2275,6 +2275,13 @@ static int smc_drv_probe(struct platform_device *pdev)
        if (pd) {
                memcpy(&lp->cfg, pd, sizeof(lp->cfg));
                lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+
+               if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
+                       dev_err(&pdev->dev,
+                               "at least one of 8-bit or 16-bit access support is required.\n");
+                       ret = -ENXIO;
+                       goto out_free_netdev;
+               }
        }
 
 #if IS_BUILTIN(CONFIG_OF)
index 1a55c7976df0fb873b09509b9dda51d81f386811..ea846546746937d76d68e49da0e8e5847a457c27 100644 (file)
 #include <linux/dmaengine.h>
 #include <linux/smc91x.h>
 
+/*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw_b(x, a, r)                                            \
+       do {                                                            \
+               unsigned int __val16 = (x);                             \
+               unsigned int __reg = (r);                               \
+               SMC_outb(__val16, a, __reg);                            \
+               SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
+       } while (0)
+
+#define SMC_inw_b(a, r)                                                        \
+       ({                                                              \
+               unsigned int __val16;                                   \
+               unsigned int __reg = r;                                 \
+               __val16  = SMC_inb(a, __reg);                           \
+               __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
+               __val16;                                                \
+       })
+
 /*
  * Define your architecture specific bus configuration parameters here.
  */
 #define SMC_IO_SHIFT           (lp->io_shift)
 
 #define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_inw(a, r)          readw((a) + (r))
+#define SMC_inw(a, r)                                                  \
+       ({                                                              \
+               unsigned int __smc_r = r;                               \
+               SMC_16BIT(lp) ? readw((a) + __smc_r) :                  \
+               SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) :                  \
+               ({ BUG(); 0; });                                        \
+       })
+
 #define SMC_inl(a, r)          readl((a) + (r))
 #define SMC_outb(v, a, r)      writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)                                              \
+       do {                                                            \
+               unsigned int __v = v, __smc_r = r;                      \
+               if (SMC_16BIT(lp))                                      \
+                       __SMC_outw(__v, a, __smc_r);                    \
+               else if (SMC_8BIT(lp))                                  \
+                       SMC_outw_b(__v, a, __smc_r);                    \
+               else                                                    \
+                       BUG();                                          \
+       } while (0)
+
 #define SMC_outl(v, a, r)      writel(v, (a) + (r))
+#define SMC_insb(a, r, p, l)   readsb((a) + (r), p, l)
+#define SMC_outsb(a, r, p, l)  writesb((a) + (r), p, l)
 #define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
 #define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
 #define SMC_insl(a, r, p, l)   readsl((a) + (r), p, l)
 #define SMC_IRQ_FLAGS          (-1)    /* from resource */
 
 /* We actually can't write halfwords properly if not word aligned */
-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 {
        if ((machine_is_mainstone() || machine_is_stargate2() ||
             machine_is_pxa_idp()) && reg & 2) {
@@ -416,24 +457,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
 
 #if ! SMC_CAN_USE_16BIT
 
-/*
- * Any 16-bit access is performed with two 8-bit accesses if the hardware
- * can't do it directly. Most registers are 16-bit so those are mandatory.
- */
-#define SMC_outw(x, ioaddr, reg)                                       \
-       do {                                                            \
-               unsigned int __val16 = (x);                             \
-               SMC_outb( __val16, ioaddr, reg );                       \
-               SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
-       } while (0)
-#define SMC_inw(ioaddr, reg)                                           \
-       ({                                                              \
-               unsigned int __val16;                                   \
-               __val16 =  SMC_inb( ioaddr, reg );                      \
-               __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
-               __val16;                                                \
-       })
-
+#define SMC_outw(x, ioaddr, reg)       SMC_outw_b(x, ioaddr, reg)
+#define SMC_inw(ioaddr, reg)           SMC_inw_b(ioaddr, reg)
 #define SMC_insw(a, r, p, l)           BUG()
 #define SMC_outsw(a, r, p, l)          BUG()
 
@@ -445,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
 #endif
 
 #if ! SMC_CAN_USE_8BIT
+#undef SMC_inb
 #define SMC_inb(ioaddr, reg)           ({ BUG(); 0; })
+#undef SMC_outb
 #define SMC_outb(x, ioaddr, reg)       BUG()
 #define SMC_insb(a, r, p, l)           BUG()
 #define SMC_outsb(a, r, p, l)          BUG()
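
The smc91x.h rework above keeps the rule from its comment: when the bus only supports 8-bit cycles, a 16-bit register access is built from two byte accesses, low byte first. The same composition over a plain byte buffer (illustration only; the driver's SMC_IO_SHIFT register spacing is ignored here):

#include <stdio.h>
#include <stdint.h>

/* Compose/split a 16-bit little-endian register value from two 8-bit slots. */
static uint16_t inw_b(const uint8_t *reg)
{
	uint16_t val;

	val  = reg[0];			/* low byte at the base offset */
	val |= (uint16_t)reg[1] << 8;	/* high byte at the next slot */
	return val;
}

static void outw_b(uint8_t *reg, uint16_t val)
{
	reg[0] = val & 0xff;
	reg[1] = val >> 8;
}

int main(void)
{
	uint8_t regs[2];

	outw_b(regs, 0xbeef);
	printf("0x%04x\n", inw_b(regs));	/* 0xbeef */
	return 0;
}
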
index ca3134540d2d5bf33b5faebb8693ff9c8d79421f..4f8910b7db2e1be99d3f50d03d2065f3795b0c64 100644 (file)
@@ -1099,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
                goto err_out_free_bus_2;
        }
 
-       if (smsc911x_mii_probe(dev) < 0) {
-               SMSC_WARN(pdata, probe, "Error registering mii bus");
-               goto err_out_unregister_bus_3;
-       }
-
        return 0;
 
-err_out_unregister_bus_3:
-       mdiobus_unregister(pdata->mii_bus);
 err_out_free_bus_2:
        mdiobus_free(pdata->mii_bus);
 err_out_1:
@@ -1514,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
        smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
 }
 
+static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct smsc911x_data *pdata = netdev_priv(dev);
+       u32 intsts = smsc911x_reg_read(pdata, INT_STS);
+       u32 inten = smsc911x_reg_read(pdata, INT_EN);
+       int serviced = IRQ_NONE;
+       u32 temp;
+
+       if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
+               temp = smsc911x_reg_read(pdata, INT_EN);
+               temp &= (~INT_EN_SW_INT_EN_);
+               smsc911x_reg_write(pdata, INT_EN, temp);
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
+               pdata->software_irq_signal = 1;
+               smp_wmb();
+               serviced = IRQ_HANDLED;
+       }
+
+       if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
+               /* Called when there is a multicast update scheduled and
+                * it is now safe to complete the update */
+               SMSC_TRACE(pdata, intr, "RX Stop interrupt");
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
+               if (pdata->multicast_update_pending)
+                       smsc911x_rx_multicast_update_workaround(pdata);
+               serviced = IRQ_HANDLED;
+       }
+
+       if (intsts & inten & INT_STS_TDFA_) {
+               temp = smsc911x_reg_read(pdata, FIFO_INT);
+               temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+               smsc911x_reg_write(pdata, FIFO_INT, temp);
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
+               netif_wake_queue(dev);
+               serviced = IRQ_HANDLED;
+       }
+
+       if (unlikely(intsts & inten & INT_STS_RXE_)) {
+               SMSC_TRACE(pdata, intr, "RX Error interrupt");
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
+               serviced = IRQ_HANDLED;
+       }
+
+       if (likely(intsts & inten & INT_STS_RSFL_)) {
+               if (likely(napi_schedule_prep(&pdata->napi))) {
+                       /* Disable Rx interrupts */
+                       temp = smsc911x_reg_read(pdata, INT_EN);
+                       temp &= (~INT_EN_RSFL_EN_);
+                       smsc911x_reg_write(pdata, INT_EN, temp);
+                       /* Schedule a NAPI poll */
+                       __napi_schedule(&pdata->napi);
+               } else {
+                       SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
+               }
+               serviced = IRQ_HANDLED;
+       }
+
+       return serviced;
+}
+
 static int smsc911x_open(struct net_device *dev)
 {
        struct smsc911x_data *pdata = netdev_priv(dev);
        unsigned int timeout;
        unsigned int temp;
        unsigned int intcfg;
+       int retval;
+       int irq_flags;
 
-       /* if the phy is not yet registered, retry later*/
+       /* find and start the given phy */
        if (!dev->phydev) {
-               SMSC_WARN(pdata, hw, "phy_dev is NULL");
-               return -EAGAIN;
+               retval = smsc911x_mii_probe(dev);
+               if (retval < 0) {
+                       SMSC_WARN(pdata, probe, "Error starting phy");
+                       goto out;
+               }
        }
 
        /* Reset the LAN911x */
-       if (smsc911x_soft_reset(pdata)) {
+       retval = smsc911x_soft_reset(pdata);
+       if (retval) {
                SMSC_WARN(pdata, hw, "soft reset failed");
-               return -EIO;
+               goto mii_free_out;
        }
 
        smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@@ -1586,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev)
        pdata->software_irq_signal = 0;
        smp_wmb();
 
+       irq_flags = irq_get_trigger_type(dev->irq);
+       retval = request_irq(dev->irq, smsc911x_irqhandler,
+                            irq_flags | IRQF_SHARED, dev->name, dev);
+       if (retval) {
+               SMSC_WARN(pdata, probe,
+                         "Unable to claim requested irq: %d", dev->irq);
+               goto mii_free_out;
+       }
+
        temp = smsc911x_reg_read(pdata, INT_EN);
        temp |= INT_EN_SW_INT_EN_;
        smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1600,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev)
        if (!pdata->software_irq_signal) {
                netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
                            dev->irq);
-               return -ENODEV;
+               retval = -ENODEV;
+               goto irq_stop_out;
        }
        SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
                   dev->irq);
@@ -1646,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev)
 
        netif_start_queue(dev);
        return 0;
+
+irq_stop_out:
+       free_irq(dev->irq, dev);
+mii_free_out:
+       phy_disconnect(dev->phydev);
+       dev->phydev = NULL;
+out:
+       return retval;
 }
 
 /* Entry point for stopping the interface */
@@ -1667,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev)
        dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
        smsc911x_tx_update_txcounters(dev);
 
+       free_irq(dev->irq, dev);
+
        /* Bring the PHY down */
-       if (dev->phydev)
+       if (dev->phydev) {
                phy_stop(dev->phydev);
+               phy_disconnect(dev->phydev);
+               dev->phydev = NULL;
+       }
+       netif_carrier_off(dev);
 
        SMSC_TRACE(pdata, ifdown, "Interface stopped");
        return 0;
@@ -1811,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
        spin_unlock_irqrestore(&pdata->mac_lock, flags);
 }
 
-static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
-{
-       struct net_device *dev = dev_id;
-       struct smsc911x_data *pdata = netdev_priv(dev);
-       u32 intsts = smsc911x_reg_read(pdata, INT_STS);
-       u32 inten = smsc911x_reg_read(pdata, INT_EN);
-       int serviced = IRQ_NONE;
-       u32 temp;
-
-       if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
-               temp = smsc911x_reg_read(pdata, INT_EN);
-               temp &= (~INT_EN_SW_INT_EN_);
-               smsc911x_reg_write(pdata, INT_EN, temp);
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
-               pdata->software_irq_signal = 1;
-               smp_wmb();
-               serviced = IRQ_HANDLED;
-       }
-
-       if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
-               /* Called when there is a multicast update scheduled and
-                * it is now safe to complete the update */
-               SMSC_TRACE(pdata, intr, "RX Stop interrupt");
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
-               if (pdata->multicast_update_pending)
-                       smsc911x_rx_multicast_update_workaround(pdata);
-               serviced = IRQ_HANDLED;
-       }
-
-       if (intsts & inten & INT_STS_TDFA_) {
-               temp = smsc911x_reg_read(pdata, FIFO_INT);
-               temp |= FIFO_INT_TX_AVAIL_LEVEL_;
-               smsc911x_reg_write(pdata, FIFO_INT, temp);
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
-               netif_wake_queue(dev);
-               serviced = IRQ_HANDLED;
-       }
-
-       if (unlikely(intsts & inten & INT_STS_RXE_)) {
-               SMSC_TRACE(pdata, intr, "RX Error interrupt");
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
-               serviced = IRQ_HANDLED;
-       }
-
-       if (likely(intsts & inten & INT_STS_RSFL_)) {
-               if (likely(napi_schedule_prep(&pdata->napi))) {
-                       /* Disable Rx interrupts */
-                       temp = smsc911x_reg_read(pdata, INT_EN);
-                       temp &= (~INT_EN_RSFL_EN_);
-                       smsc911x_reg_write(pdata, INT_EN, temp);
-                       /* Schedule a NAPI poll */
-                       __napi_schedule(&pdata->napi);
-               } else {
-                       SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
-               }
-               serviced = IRQ_HANDLED;
-       }
-
-       return serviced;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void smsc911x_poll_controller(struct net_device *dev)
 {
@@ -2291,16 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
        pdata = netdev_priv(dev);
        BUG_ON(!pdata);
        BUG_ON(!pdata->ioaddr);
-       BUG_ON(!dev->phydev);
+       WARN_ON(dev->phydev);
 
        SMSC_TRACE(pdata, ifdown, "Stopping driver");
 
-       phy_disconnect(dev->phydev);
        mdiobus_unregister(pdata->mii_bus);
        mdiobus_free(pdata->mii_bus);
 
        unregister_netdev(dev);
-       free_irq(dev->irq, dev);
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "smsc911x-memory");
        if (!res)
@@ -2385,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        struct smsc911x_data *pdata;
        struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
        struct resource *res;
-       unsigned int intcfg = 0;
-       int res_size, irq, irq_flags;
+       int res_size, irq;
        int retval;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2425,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 
        pdata = netdev_priv(dev);
        dev->irq = irq;
-       irq_flags = irq_get_trigger_type(irq);
        pdata->ioaddr = ioremap_nocache(res->start, res_size);
 
        pdata->dev = dev;
@@ -2472,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        if (retval < 0)
                goto out_disable_resources;
 
-       /* configure irq polarity and type before connecting isr */
-       if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
-               intcfg |= INT_CFG_IRQ_POL_;
-
-       if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
-               intcfg |= INT_CFG_IRQ_TYPE_;
-
-       smsc911x_reg_write(pdata, INT_CFG, intcfg);
-
-       /* Ensure interrupts are globally disabled before connecting ISR */
-       smsc911x_disable_irq_chip(dev);
+       netif_carrier_off(dev);
 
-       retval = request_irq(dev->irq, smsc911x_irqhandler,
-                            irq_flags | IRQF_SHARED, dev->name, dev);
+       retval = smsc911x_mii_init(pdev, dev);
        if (retval) {
-               SMSC_WARN(pdata, probe,
-                         "Unable to claim requested irq: %d", dev->irq);
+               SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
                goto out_disable_resources;
        }
 
-       netif_carrier_off(dev);
-
        retval = register_netdev(dev);
        if (retval) {
                SMSC_WARN(pdata, probe, "Error %i registering device", retval);
-               goto out_free_irq;
+               goto out_disable_resources;
        } else {
                SMSC_TRACE(pdata, probe,
                           "Network interface: \"%s\"", dev->name);
        }
 
-       retval = smsc911x_mii_init(pdev, dev);
-       if (retval) {
-               SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
-               goto out_unregister_netdev_5;
-       }
-
        spin_lock_irq(&pdata->mac_lock);
 
        /* Check if mac address has been specified when bringing interface up */
@@ -2544,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 
        return 0;
 
-out_unregister_netdev_5:
-       unregister_netdev(dev);
-out_free_irq:
-       free_irq(dev->irq, dev);
 out_disable_resources:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
index 9f159a775af3c030addec825df6880b1a0f80aa7..4490ebaed127e90dcf4477cc16c4b4d3215e5e5f 100644 (file)
@@ -1246,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
        lp->mii_bus->read  = &dwceqos_mdio_read;
        lp->mii_bus->write = &dwceqos_mdio_write;
        lp->mii_bus->priv = lp;
-       lp->mii_bus->parent = &lp->ndev->dev;
+       lp->mii_bus->parent = &lp->pdev->dev;
 
        of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
        snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp)
                DWCEQOS_MMC_CTRL_RSTONRD);
        dwceqos_enable_mmc_interrupt(lp);
 
-       /* Enable Interrupts */
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
-                     DWCEQOS_DMA_CH0_IE_NIE |
-                     DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
-                     DWCEQOS_DMA_CH0_IE_AIE |
-                     DWCEQOS_DMA_CH0_IE_FBEE);
-
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
        dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
 
        dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
@@ -1905,6 +1899,15 @@ static int dwceqos_open(struct net_device *ndev)
        netif_start_queue(ndev);
        tasklet_enable(&lp->tx_bdreclaim_tasklet);
 
+       /* Enable Interrupts -- do this only after we enable NAPI and the
+        * tasklet.
+        */
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+                     DWCEQOS_DMA_CH0_IE_NIE |
+                     DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+                     DWCEQOS_DMA_CH0_IE_AIE |
+                     DWCEQOS_DMA_CH0_IE_FBEE);
+
        return 0;
 }
 
@@ -2850,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
 
        ndev->features = ndev->hw_features;
 
-       netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
-       ret = register_netdev(ndev);
-       if (ret) {
-               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_clk_dis_aper;
-       }
-
        lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
        if (IS_ERR(lp->phy_ref_clk)) {
                dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
                ret = PTR_ERR(lp->phy_ref_clk);
-               goto err_out_unregister_netdev;
+               goto err_out_clk_dis_aper;
        }
 
        ret = clk_prepare_enable(lp->phy_ref_clk);
        if (ret) {
                dev_err(&pdev->dev, "Unable to enable device clock.\n");
-               goto err_out_unregister_netdev;
+               goto err_out_clk_dis_aper;
        }
 
        lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@@ -2877,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
                ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
                if (ret < 0) {
                        dev_err(&pdev->dev, "invalid fixed-link");
-                       goto err_out_unregister_clk_notifier;
+                       goto err_out_clk_dis_phy;
                }
 
                lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@@ -2886,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
        ret = of_get_phy_mode(lp->pdev->dev.of_node);
        if (ret < 0) {
                dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        lp->phy_interface = ret;
@@ -2894,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
        ret = dwceqos_mii_init(lp);
        if (ret) {
                dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        ret = dwceqos_mii_probe(ndev);
        if (ret != 0) {
                netdev_err(ndev, "mii_probe fail.\n");
                ret = -ENXIO;
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2919,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
                        ret);
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
        dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
                 pdev->id, ndev->base_addr, ndev->irq);
@@ -2929,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
                        ndev->irq, ret);
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        if (netif_msg_probe(lp))
                netdev_dbg(ndev, "net_local@%p\n", lp);
 
+       netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+               goto err_out_clk_dis_phy;
+       }
+
        return 0;
 
-err_out_unregister_clk_notifier:
+err_out_clk_dis_phy:
        clk_disable_unprepare(lp->phy_ref_clk);
-err_out_unregister_netdev:
-       unregister_netdev(ndev);
 err_out_clk_dis_aper:
        clk_disable_unprepare(lp->apb_pclk);
 err_out_free_netdev:
index 7452b5f9d02427469e58caf516c02b6de4a44a4e..7108c68f16d3e970713e239c88126efde8b95cea 100644 (file)
@@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
                err = pci_enable_msi(pdev);
                if (err)
-                       pr_err("Can't eneble msi. error is %d\n", err);
+                       pr_err("Can't enable msi. error is %d\n", err);
                else
                        nic->irq_type = IRQ_MSI;
        } else
index 3cee84a24815d7fec3f089b4c4c3d65519b6e1ae..93dc10b10c0901c8974c4990eb87a22d6411e690 100644 (file)
@@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
        mac_address = of_get_mac_address(ofdev->dev.of_node);
 
-       if (mac_address)
+       if (mac_address) {
                /* Set the MAC address. */
                memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
-       else
-               dev_warn(dev, "No MAC address found\n");
+       } else {
+               dev_warn(dev, "No MAC address found, using random\n");
+               eth_hw_addr_random(ndev);
+       }
 
        /* Clear the Tx CSR's in case this is a restart */
        __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
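
The xemaclite hunk above falls back to a random MAC address when the device tree provides none. Roughly what eth_hw_addr_random() boils down to, re-done in user space for illustration (not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Random unicast, locally-administered MAC, as eth_random_addr() produces. */
static void random_mac(unsigned char mac[6])
{
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = rand() & 0xff;
	mac[0] &= 0xfe;	/* clear the multicast bit */
	mac[0] |= 0x02;	/* set the locally-administered bit */
}

int main(void)
{
	unsigned char mac[6];

	srand((unsigned int)time(NULL));
	random_mac(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
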
index 47a64342cc16a50b24a7db69c073b886874a05bf..b4863e4e522b3bb3c927a0f845ca5941edf09de8 100644 (file)
@@ -303,6 +303,7 @@ config MDIO_HISI_FEMAC
 
 config MDIO_XGENE
        tristate "APM X-Gene SoC MDIO bus controller"
+       depends on ARCH_XGENE || COMPILE_TEST
        help
          This module provides a driver for the MDIO busses found in the
          APM X-Gene SoC's.
index 053e87905b944e51f43c33355f235d42d6bcec54..885ac9cbab5a95787239e024bfbf93894add2878 100644 (file)
@@ -964,7 +964,7 @@ static struct phy_driver ksphy_driver[] = {
        .get_strings    = kszphy_get_strings,
        .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
-       .resume         = genphy_resume,
+       .resume         = kszphy_resume,
 }, {
        .phy_id         = PHY_ID_KSZ8873MLL,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
index c5dc2c363f96fc296120c29eb7c700e25f619e15..c6f66832a1a641e387fe093b4e6aa61115e6abfa 100644 (file)
@@ -722,8 +722,10 @@ phy_err:
 int phy_start_interrupts(struct phy_device *phydev)
 {
        atomic_set(&phydev->irq_disable, 0);
-       if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
-                       phydev) < 0) {
+       if (request_irq(phydev->irq, phy_interrupt,
+                               IRQF_SHARED,
+                               "phy_interrupt",
+                               phydev) < 0) {
                pr_warn("%s: Can't get IRQ %d (PHY)\n",
                        phydev->mdio.bus->name, phydev->irq);
                phydev->irq = PHY_POLL;
index cdb19b385d42c878e59bf9bf345dd8bfdfd53eb3..b228bea7931f3c836e9681dfd9376e2b38b8099f 100644 (file)
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/filter.h>
 #include <linux/if_team.h>
 
+static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
+                                     struct sk_buff *skb)
+{
+       if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
+               /* LACPDU packets should go to exact delivery */
+               const unsigned char *dest = eth_hdr(skb)->h_dest;
+
+               if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
+                       return RX_HANDLER_EXACT;
+       }
+       return RX_HANDLER_ANOTHER;
+}
+
 struct lb_priv;
 
 typedef struct team_port *lb_select_tx_port_func_t(struct team *,
@@ -652,6 +666,7 @@ static const struct team_mode_ops lb_mode_ops = {
        .port_enter             = lb_port_enter,
        .port_leave             = lb_port_leave,
        .port_disabled          = lb_port_disabled,
+       .receive                = lb_receive,
        .transmit               = lb_transmit,
 };
 
index 9c8b5bc2b9d8e851c53cc4d311c245654ebe75e2..6f9df375c5d411dc52510b95ec677f79f1996e4e 100644 (file)
@@ -894,11 +894,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
                goto drop;
 
-       if (skb->sk && sk_fullsock(skb->sk)) {
-               sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
-                                 &skb_shinfo(skb)->tx_flags);
-               sw_tx_timestamp(skb);
-       }
+       skb_tx_timestamp(skb);
 
        /* Orphan the skb - required as we might hang on to it
         * for indefinite time.
index 770212baaf05ddb032967730acc6b0ff46ec8f40..528b9c9c4e60bc2e397b3a1c489a5639d986af9a 100644 (file)
@@ -1009,6 +1009,7 @@ static int kaweth_probe(
        struct net_device *netdev;
        const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        int result = 0;
+       int rv = -EIO;
 
        dev_dbg(dev,
                "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
@@ -1029,6 +1030,7 @@ static int kaweth_probe(
        kaweth = netdev_priv(netdev);
        kaweth->dev = udev;
        kaweth->net = netdev;
+       kaweth->intf = intf;
 
        spin_lock_init(&kaweth->device_lock);
        init_waitqueue_head(&kaweth->term_wait);
@@ -1048,6 +1050,10 @@ static int kaweth_probe(
                /* Download the firmware */
                dev_info(dev, "Downloading firmware...\n");
                kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
+               if (!kaweth->firmware_buf) {
+                       rv = -ENOMEM;
+                       goto err_free_netdev;
+               }
                if ((result = kaweth_download_firmware(kaweth,
                                                      "kaweth/new_code.bin",
                                                      100,
@@ -1139,8 +1145,6 @@ err_fw:
 
        dev_dbg(dev, "Initializing net device.\n");
 
-       kaweth->intf = intf;
-
        kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!kaweth->tx_urb)
                goto err_free_netdev;
@@ -1204,7 +1208,7 @@ err_only_tx:
 err_free_netdev:
        free_netdev(netdev);
 
-       return -EIO;
+       return rv;
 }
 
 /****************************************************************
index c68fe495d3f9bc86caff19f69257ef48b93ad49b..4244b9d4418e15e2cce0772ca8a8f3e07da1c64e 100644 (file)
@@ -914,7 +914,9 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 {
        struct Vmxnet3_TxDataDesc *tdd;
 
-       tdd = tq->data_ring.base + tq->tx_ring.next2fill;
+       tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
+                                           tq->tx_ring.next2fill *
+                                           tq->txdata_desc_size);
 
        memcpy(tdd->data, skb->data, ctx->copy_size);
        netdev_dbg(adapter->netdev,
index 74fc03072b878d456881a9664cc5e13fa45d6a10..7dc37a090549f7ff949e7e5c4a8275128689b980 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.9.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.a.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040900
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040a00
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index c0dda6fc09217bb31ab2f750e5d4d51abd6411cf..6e65832051d6df2f40d8fb280b8dd23716ffbcdf 100644 (file)
@@ -2782,14 +2782,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        struct net_device *lowerdev = NULL;
 
        if (conf->flags & VXLAN_F_GPE) {
-               if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
-                       return -EINVAL;
                /* For now, allow GPE only together with COLLECT_METADATA.
                 * This can be relaxed later; in such case, the other side
                 * of the PtP link will have to be provided.
                 */
-               if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
+               if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
+                   !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+                       pr_info("unsupported combination of extensions\n");
                        return -EINVAL;
+               }
 
                vxlan_raw_setup(dev);
        } else {
@@ -2842,6 +2843,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                        dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
                needed_headroom = lowerdev->hard_header_len;
+       } else if (vxlan_addr_multicast(&dst->remote_ip)) {
+               pr_info("multicast destination requires interface to be specified\n");
+               return -EINVAL;
        }
 
        if (conf->mtu) {
@@ -2874,8 +2878,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
                    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
                    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
-                   (vxlan->flags & VXLAN_F_RCV_FLAGS))
-               return -EEXIST;
+                   (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
+                       pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
+                       return -EEXIST;
+               }
        }
 
        dev->ethtool_ops = &vxlan_ethtool_ops;
@@ -2909,7 +2915,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
        struct vxlan_config conf;
-       int err;
 
        memset(&conf, 0, sizeof(conf));
 
@@ -3018,26 +3023,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        if (tb[IFLA_MTU])
                conf.mtu = nla_get_u32(tb[IFLA_MTU]);
 
-       err = vxlan_dev_configure(src_net, dev, &conf);
-       switch (err) {
-       case -ENODEV:
-               pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
-               break;
-
-       case -EPERM:
-               pr_info("IPv6 is disabled via sysctl\n");
-               break;
-
-       case -EEXIST:
-               pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
-               break;
-
-       case -EINVAL:
-               pr_info("unsupported combination of extensions\n");
-               break;
-       }
-
-       return err;
+       return vxlan_dev_configure(src_net, dev, &conf);
 }
 
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
index 78db5d679f1980188c788163d4f54fa283fc83f2..24c8d65bcf34340cfda9ae59f65d6a5ec33f1cc3 100644 (file)
@@ -1525,7 +1525,7 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
-       static struct ieee80211_rx_status rx_status;
+       struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct sk_buff_head amsdu;
        int ret;
 
@@ -1549,11 +1549,11 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
                return ret;
        }
 
-       ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+       ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
        ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
-       ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
-       ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
-       ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
+       ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+       ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+       ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
 
        return 0;
 }
index 9a22c478dd1b1dd28fd5139ede1b7bed4063ec42..07933c51a8508ef8cc7b4eef85309e3e2cdd8991 100644 (file)
@@ -3162,7 +3162,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                pci_hard_reset = ath10k_pci_qca988x_chip_reset;
                break;
        case QCA9887_1_0_DEVICE_ID:
-               dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n");
                hw_rev = ATH10K_HW_QCA9887;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_warm_reset;
index d1d0c06d627cb058df27e90a69bec0209e94c980..14b13f07cd1fa87658f22f8352ff6bc32b3935ac 100644 (file)
@@ -2482,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                return -EINVAL;
        }
 
+       ath9k_gpio_cap_init(ah);
+
        if (AR_SREV_9485(ah) ||
            AR_SREV_9285(ah) ||
            AR_SREV_9330(ah) ||
@@ -2531,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        else
                pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
 
-       ath9k_gpio_cap_init(ah);
-
        if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
                pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
        else
index a394622c9022c09488c59a85b878021c7c7b11a0..7cb65c303f8d95bb435f74d8980bcd94c54be90e 100644 (file)
@@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
        if (!ath_complete_reset(sc, false))
                ah->reset_power_on = false;
 
-       if (ah->led_pin >= 0)
+       if (ah->led_pin >= 0) {
                ath9k_hw_set_gpio(ah, ah->led_pin,
                                  (ah->config.led_active_high) ? 1 : 0);
+               ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+       }
 
        /*
         * Reset key cache to sane defaults (all entries cleared) instead of
@@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
        spin_lock_bh(&sc->sc_pcu_lock);
 
-       if (ah->led_pin >= 0)
+       if (ah->led_pin >= 0) {
                ath9k_hw_set_gpio(ah, ah->led_pin,
                                  (ah->config.led_active_high) ? 0 : 1);
+               ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
+       }
 
        ath_prepare_reset(sc);
 
@@ -1154,6 +1159,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
                bool changed = (iter_data.primary_sta != ctx->primary_sta);
 
                if (iter_data.primary_sta) {
+                       iter_data.primary_beacon_vif = iter_data.primary_sta;
                        iter_data.beacons = true;
                        ath9k_set_assoc_state(sc, iter_data.primary_sta,
                                              changed);
@@ -1563,13 +1569,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;
 
-       if (old_state == IEEE80211_STA_AUTH &&
-           new_state == IEEE80211_STA_ASSOC) {
+       if (old_state == IEEE80211_STA_NOTEXIST &&
+           new_state == IEEE80211_STA_NONE) {
                ret = ath9k_sta_add(hw, vif, sta);
                ath_dbg(common, CONFIG,
                        "Add station: %pM\n", sta->addr);
-       } else if (old_state == IEEE80211_STA_ASSOC &&
-                  new_state == IEEE80211_STA_AUTH) {
+       } else if (old_state == IEEE80211_STA_NONE &&
+                  new_state == IEEE80211_STA_NOTEXIST) {
                ret = ath9k_sta_remove(hw, vif, sta);
                ath_dbg(common, CONFIG,
                        "Remove station: %pM\n", sta->addr);
index 2628d5e12c6424b657b7dd9138c277ef0a9d5d47..b8aec5e5ef93e28dfb70c95557d7388fd9b67bef 100644 (file)
@@ -4527,7 +4527,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
                                (u8 *)&settings->beacon.head[ie_offset],
                                settings->beacon.head_len - ie_offset,
                                WLAN_EID_SSID);
-               if (!ssid_ie)
+               if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
                        return -EINVAL;
 
                memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
@@ -5635,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
                  ifevent->action, ifevent->flags, ifevent->ifidx,
                  ifevent->bsscfgidx);
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        event->action = ifevent->action;
        vif = event->vif;
 
@@ -5643,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
        case BRCMF_E_IF_ADD:
                /* waiting process may have timed out */
                if (!cfg->vif_event.vif) {
-                       mutex_unlock(&event->vif_event_lock);
+                       spin_unlock(&event->vif_event_lock);
                        return -EBADF;
                }
 
@@ -5654,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
                        ifp->ndev->ieee80211_ptr = &vif->wdev;
                        SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
                }
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                wake_up(&event->vif_wq);
                return 0;
 
        case BRCMF_E_IF_DEL:
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                /* event may not be upon user request */
                if (brcmf_cfg80211_vif_event_armed(cfg))
                        wake_up(&event->vif_wq);
                return 0;
 
        case BRCMF_E_IF_CHANGE:
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                wake_up(&event->vif_wq);
                return 0;
 
        default:
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                break;
        }
        return -EINVAL;
@@ -5792,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
 static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
 {
        init_waitqueue_head(&event->vif_wq);
-       mutex_init(&event->vif_event_lock);
+       spin_lock_init(&event->vif_event_lock);
 }
 
 static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
@@ -6691,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
 {
        u8 evt_action;
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        evt_action = event->action;
-       mutex_unlock(&event->vif_event_lock);
+       spin_unlock(&event->vif_event_lock);
        return evt_action == action;
 }
 
@@ -6702,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
 {
        struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        event->vif = vif;
        event->action = 0;
-       mutex_unlock(&event->vif_event_lock);
+       spin_unlock(&event->vif_event_lock);
 }
 
 bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
@@ -6713,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
        struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
        bool armed;
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        armed = event->vif != NULL;
-       mutex_unlock(&event->vif_event_lock);
+       spin_unlock(&event->vif_event_lock);
 
        return armed;
 }
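
The brcmfmac changes above bound the SSID IE length before copying it and convert vif_event_lock from a mutex to a spinlock, a conversion usually made when the lock must be taken from a context that cannot sleep; waiters still sleep on the separate waitqueue. A minimal sketch of that event/lock shape, with hypothetical names:

        struct my_vif_event {
                wait_queue_head_t wq;   /* waiters sleep here, not on the lock */
                spinlock_t lock;        /* protects action/vif; never held across sleep */
                u8 action;
                void *vif;
        };

        static void my_vif_event_init(struct my_vif_event *ev)
        {
                init_waitqueue_head(&ev->wq);
                spin_lock_init(&ev->lock);
        }

        static void my_vif_event_post(struct my_vif_event *ev, u8 action, void *vif)
        {
                spin_lock(&ev->lock);   /* short, non-sleeping critical section */
                ev->action = action;
                ev->vif = vif;
                spin_unlock(&ev->lock);
                wake_up(&ev->wq);       /* wake anyone waiting on the event */
        }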
index 7d77f869b7f13be772fc039b5be3a6452dabef25..8889832c17e0dae310029981d6778fdf08ae08dd 100644 (file)
@@ -227,7 +227,7 @@ struct escan_info {
  */
 struct brcmf_cfg80211_vif_event {
        wait_queue_head_t vif_wq;
-       struct mutex vif_event_lock;
+       spinlock_t vif_event_lock;
        u8 action;
        struct brcmf_cfg80211_vif *vif;
 };
index 8d16f0204985ad3997c00dd9ba479b890d213393..65e8c8766441b718e886e3aa07095ce01fbb8061 100644 (file)
@@ -743,7 +743,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
                 * serious troublesome side effects. The p2p module will clean
                 * up the ifp if needed.
                 */
-               brcmf_p2p_ifp_removed(ifp);
+               brcmf_p2p_ifp_removed(ifp, rtnl_locked);
                kfree(ifp);
        }
 }
index 66f942f7448eabb2297fe34a56d4b2eb22c8c8cd..de19c7c92bc6c095b3b111abfc280fe228e588ac 100644 (file)
@@ -2297,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
        return err;
 }
 
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
 {
        struct brcmf_cfg80211_info *cfg;
        struct brcmf_cfg80211_vif *vif;
@@ -2306,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
        vif = ifp->vif;
        cfg = wdev_to_cfg(&vif->wdev);
        cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
-       rtnl_lock();
+       if (!rtnl_locked)
+               rtnl_lock();
        cfg80211_unregister_wdev(&vif->wdev);
-       rtnl_unlock();
+       if (!rtnl_locked)
+               rtnl_unlock();
        brcmf_free_vif(vif);
 }
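
The core.c and p2p.c hunks above thread an rtnl_locked flag into brcmf_p2p_ifp_removed so the wdev is unregistered without re-taking the RTNL when the caller already holds it; rtnl_lock() is not recursive, so taking it twice on that path would deadlock. The conditional-locking shape, sketched with a hypothetical helper:

        /* Sketch only: unregister a wdev whether or not the caller holds RTNL. */
        static void my_unregister_wdev(struct wireless_dev *wdev, bool rtnl_locked)
        {
                if (!rtnl_locked)
                        rtnl_lock();            /* take RTNL only if not already held */
                cfg80211_unregister_wdev(wdev); /* requires RTNL to be held */
                if (!rtnl_locked)
                        rtnl_unlock();
        }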
 
index a3bd18c2360b5f4a5da48ce0bca549b0eeced334..8ce9447533ef8fa519caab0f985949487346464d 100644 (file)
@@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
 int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
 int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
                       enum brcmf_fil_p2p_if_types if_type);
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp);
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked);
 int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
 void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
 int brcmf_p2p_scan_prep(struct wiphy *wiphy,
index 1abcabb9b6cd78d675c5a0585eb4ee59d98c3b1e..46b52bf705fb438bb40aabbe6fd51698bba982eb 100644 (file)
@@ -960,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
        }
 
        mvm->fw_dbg_conf = conf_id;
-       return ret;
+
+       return 0;
 }
index f7dff7612c9c58e4ffb84f7aaa427245f5c77ad0..e9f1be9da7d41613b7a124111d1af85064625198 100644 (file)
@@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
 {
        u32 trig_vif = le32_to_cpu(trig->vif_type);
 
-       return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
+       return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
+              ieee80211_vif_type_p2p(vif) == trig_vif;
 }
 
 static inline bool
index 6d6064534d590c76f71444bfa7f3b0c9163ad003..5dd77e33661727338aca43e3ad93802ef6db0bc7 100644 (file)
@@ -624,6 +624,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_LOW_PRIORITY_SCAN |
                               NL80211_FEATURE_P2P_GO_OPPPS |
+                              NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
                               NL80211_FEATURE_DYNAMIC_SMPS |
                               NL80211_FEATURE_STATIC_SMPS |
                               NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
index b4fc86d5d7ef1252cde57cfb872c390d698b7d16..6a615bb73042504b0c7912805ce36cb70690b44b 100644 (file)
@@ -467,6 +467,8 @@ struct iwl_mvm_vif {
 static inline struct iwl_mvm_vif *
 iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
 {
+       if (!vif)
+               return NULL;
        return (void *)vif->drv_priv;
 }
 
index dc49c3de1f25db6945942449aaa62580a581e51e..c47d6366875d06c5c0c3ceb2af29f31bd55e5158 100644 (file)
@@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
        do {
                /* Check if AMSDU can accommodate this MSDU */
-               if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
+               if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
+                   adapter->tx_buf_size)
                        break;
 
                skb_src = skb_dequeue(&pra_list->skb_head);
index 458daf9273362a19cc26d6a4f2c9113764f493b8..935866fe5ec2ea7c47d3f192f2b17572470098c1 100644 (file)
@@ -185,8 +185,12 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
                return -ENXIO;
 
        nd_desc = nvdimm_bus->nd_desc;
+       /*
+        * if ndctl does not exist, it's PMEM_LEGACY and
+        * we want to just pretend everything is handled.
+        */
        if (!nd_desc->ndctl)
-               return -ENXIO;
+               return len;
 
        memset(&ars_cap, 0, sizeof(ars_cap));
        ars_cap.address = phys;
index db39d53cdfb9fbd63258a426d4539b63d1f92701..f7d37a62f874b780c94a0bbbf3d2607845c0c5ab 100644 (file)
@@ -30,8 +30,8 @@ config NVME_FABRICS
 
 config NVME_RDMA
        tristate "NVM Express over Fabrics RDMA host driver"
-       depends on INFINIBAND
-       depends on BLK_DEV_NVME
+       depends on INFINIBAND && BLOCK
+       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
index dc996761042ffb61e3fc38d4d92b2d43fcfea530..4eff4917446617f448262846ee6860b36e7f4340 100644 (file)
@@ -47,8 +47,10 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
        mutex_lock(&nvmf_hosts_mutex);
        host = __nvmf_host_find(hostnqn);
-       if (host)
+       if (host) {
+               kref_get(&host->ref);
                goto out_unlock;
+       }
 
        host = kmalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
@@ -56,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
        kref_init(&host->ref);
        memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
-       uuid_le_gen(&host->id);
+       uuid_be_gen(&host->id);
 
        list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -73,9 +75,9 @@ static struct nvmf_host *nvmf_host_default(void)
                return NULL;
 
        kref_init(&host->ref);
-       uuid_le_gen(&host->id);
+       uuid_be_gen(&host->id);
        snprintf(host->nqn, NVMF_NQN_SIZE,
-               "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id);
+               "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
 
        mutex_lock(&nvmf_hosts_mutex);
        list_add_tail(&host->list, &nvmf_hosts);
@@ -363,7 +365,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        cmd.connect.opcode = nvme_fabrics_command;
        cmd.connect.fctype = nvme_fabrics_type_connect;
        cmd.connect.qid = 0;
-       cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+       /*
+        * fabrics spec sets a minimum of depth 32 for admin queue,
+        * so set the queue with this depth always until
+        * justification otherwise.
+        */
+       cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+
        /*
         * Set keep-alive timeout in seconds granularity (ms * 1000)
         * and add a grace period for controller kato enforcement
@@ -375,7 +384,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
        data->cntlid = cpu_to_le16(0xffff);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -434,7 +443,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
        data->cntlid = cpu_to_le16(ctrl->cntlid);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
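
In the nvme-fabrics hunks above, nvmf_host_add now takes a reference when it finds an existing host entry, so every caller ends up owning exactly one kref whichever path it hit; the host ID also moves to big-endian UUIDs and the admin connect always uses the spec-minimum queue depth. The find-or-create-with-kref pattern, sketched with hypothetical types and helpers:

        struct my_host {
                struct kref ref;
                struct list_head list;
                char name[64];
        };

        static LIST_HEAD(my_hosts);
        static DEFINE_MUTEX(my_hosts_mutex);

        /* hypothetical lookup; a plain list walk, omitted for brevity */
        static struct my_host *__my_host_find(const char *name);

        static struct my_host *my_host_add(const char *name)
        {
                struct my_host *host;

                mutex_lock(&my_hosts_mutex);
                host = __my_host_find(name);
                if (host) {
                        kref_get(&host->ref);   /* caller gets its own reference */
                        goto out_unlock;
                }

                host = kzalloc(sizeof(*host), GFP_KERNEL);
                if (!host)
                        goto out_unlock;

                kref_init(&host->ref);          /* refcount starts at 1 for the caller */
                strlcpy(host->name, name, sizeof(host->name));
                list_add_tail(&host->list, &my_hosts);
        out_unlock:
                mutex_unlock(&my_hosts_mutex);
                return host;
        }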
index 89df52c8be978cdf2e18b2c0b18a6211642ff8ad..46e460aee52da54306fd4121e73ab997e6e01f5b 100644 (file)
@@ -34,7 +34,7 @@ struct nvmf_host {
        struct kref             ref;
        struct list_head        list;
        char                    nqn[NVMF_NQN_SIZE];
-       uuid_le                 id;
+       uuid_be                 id;
 };
 
 /**
index 8dcf5a960951805b09d650b2cc243ceaeff6a5bb..60f7eab11865114ae1a13d7a811fe36d862d4029 100644 (file)
@@ -1693,7 +1693,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                nvme_suspend_queue(dev->queues[i]);
 
        if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
-               nvme_suspend_queue(dev->queues[0]);
+               /* A device might become IO incapable very soon during
+                * probe, before the admin queue is configured. Thus,
+                * queue_count can be 0 here.
+                */
+               if (dev->queue_count)
+                       nvme_suspend_queue(dev->queues[0]);
        } else {
                nvme_disable_io_queues(dev);
                nvme_disable_admin_queue(dev, shutdown);
@@ -2112,6 +2117,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+       { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
+               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
        { 0, }
index 8d2875b4c56d8c8bf7da8951512288b327dd6e7a..c2c2c28e6eb59fbd45dfc278354c65626d9b0f95 100644 (file)
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS  1
 
-#define NVME_RDMA_MAX_PAGES_PER_MR     512
-
-#define NVME_RDMA_DEF_RECONNECT_DELAY  20
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -77,7 +73,6 @@ struct nvme_rdma_request {
        u32                     num_sge;
        int                     nents;
        bool                    inline_data;
-       bool                    need_inval;
        struct ib_reg_wr        reg_wr;
        struct ib_cqe           reg_cqe;
        struct nvme_rdma_queue  *queue;
@@ -87,6 +82,8 @@ struct nvme_rdma_request {
 
 enum nvme_rdma_queue_flags {
        NVME_RDMA_Q_CONNECTED = (1 << 0),
+       NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
+       NVME_RDMA_Q_DELETING = (1 << 2),
 };
 
 struct nvme_rdma_queue {
@@ -286,7 +283,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        int ret = 0;
 
-       if (!req->need_inval)
+       if (!req->mr->need_inval)
                goto out;
 
        ib_dereg_mr(req->mr);
@@ -296,9 +293,10 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
        if (IS_ERR(req->mr)) {
                ret = PTR_ERR(req->mr);
                req->mr = NULL;
+               goto out;
        }
 
-       req->need_inval = false;
+       req->mr->need_inval = false;
 
 out:
        return ret;
@@ -485,9 +483,14 @@ out_err:
 
 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 {
-       struct nvme_rdma_device *dev = queue->device;
-       struct ib_device *ibdev = dev->dev;
+       struct nvme_rdma_device *dev;
+       struct ib_device *ibdev;
+
+       if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
+               return;
 
+       dev = queue->device;
+       ibdev = dev->dev;
        rdma_destroy_qp(queue->cm_id);
        ib_free_cq(queue->ib_cq);
 
@@ -538,6 +541,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
                ret = -ENOMEM;
                goto out_destroy_qp;
        }
+       set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
 
        return 0;
 
@@ -557,6 +561,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 
        queue = &ctrl->queues[idx];
        queue->ctrl = ctrl;
+       queue->flags = 0;
        init_completion(&queue->cm_done);
 
        if (idx > 0)
@@ -595,6 +600,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
        return 0;
 
 out_destroy_cm_id:
+       nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
        return ret;
 }
@@ -613,7 +619,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 
 static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
 {
-       if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
+       if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
                return;
        nvme_rdma_stop_queue(queue);
        nvme_rdma_free_queue(queue);
@@ -645,7 +651,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
        int i, ret;
 
        for (i = 1; i < ctrl->queue_count; i++) {
-               ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+               ret = nvme_rdma_init_queue(ctrl, i,
+                                          ctrl->ctrl.opts->queue_size);
                if (ret) {
                        dev_info(ctrl->ctrl.device,
                                "failed to initialize i/o queue: %d\n", ret);
@@ -656,7 +663,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
        return 0;
 
 out_free_queues:
-       for (; i >= 1; i--)
+       for (i--; i >= 1; i--)
                nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
 
        return ret;
@@ -765,8 +772,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 {
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                        struct nvme_rdma_ctrl, err_work);
+       int i;
 
        nvme_stop_keep_alive(&ctrl->ctrl);
+
+       for (i = 0; i < ctrl->queue_count; i++)
+               clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+
        if (ctrl->queue_count > 1)
                nvme_stop_queues(&ctrl->ctrl);
        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -849,7 +861,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
        if (!blk_rq_bytes(rq))
                return;
 
-       if (req->need_inval) {
+       if (req->mr->need_inval) {
                res = nvme_rdma_inv_rkey(queue, req);
                if (res < 0) {
                        dev_err(ctrl->ctrl.device,
@@ -935,7 +947,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
                             IB_ACCESS_REMOTE_READ |
                             IB_ACCESS_REMOTE_WRITE;
 
-       req->need_inval = true;
+       req->mr->need_inval = true;
 
        sg->addr = cpu_to_le64(req->mr->iova);
        put_unaligned_le24(req->mr->length, sg->length);
@@ -958,7 +970,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
        req->num_sge = 1;
        req->inline_data = false;
-       req->need_inval = false;
+       req->mr->need_inval = false;
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1145,7 +1157,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 
        if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
            wc->ex.invalidate_rkey == req->mr->rkey)
-               req->need_inval = false;
+               req->mr->need_inval = false;
 
        blk_mq_complete_request(rq, status);
 
@@ -1278,8 +1290,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 
        priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
        priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
-       priv.hrqsize = cpu_to_le16(queue->queue_size);
-       priv.hsqsize = cpu_to_le16(queue->queue_size);
+       /*
+        * set the admin queue depth to the minimum size
+        * specified by the Fabrics standard.
+        */
+       if (priv.qid == 0) {
+               priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+               priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+       } else {
+               /*
+                * current interpretation of the fabrics spec
+                * is at minimum you make hrqsize sqsize+1, or a
+                * 1's based representation of sqsize.
+                */
+               priv.hrqsize = cpu_to_le16(queue->queue_size);
+               priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+       }
 
        ret = rdma_connect(queue->cm_id, &param);
        if (ret) {
@@ -1295,58 +1321,6 @@ out_destroy_queue_ib:
        return ret;
 }
 
-/**
- * nvme_rdma_device_unplug() - Handle RDMA device unplug
- * @queue:      Queue that owns the cm_id that caught the event
- *
- * DEVICE_REMOVAL event notifies us that the RDMA device is about
- * to unplug so we should take care of destroying our RDMA resources.
- * This event will be generated for each allocated cm_id.
- *
- * In our case, the RDMA resources are managed per controller and not
- * only per queue. So the way we handle this is we trigger an implicit
- * controller deletion upon the first DEVICE_REMOVAL event we see, and
- * hold the event inflight until the controller deletion is completed.
- *
- * One exception that we need to handle is the destruction of the cm_id
- * that caught the event. Since we hold the callout until the controller
- * deletion is completed, we'll deadlock if the controller deletion will
- * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources,
- * then queue the controller deletion which won't destroy this queue and
- * we destroy the cm_id implicitely by returning a non-zero rc to the callout.
- */
-static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
-{
-       struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-       int ret;
-
-       /* Own the controller deletion */
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
-               return 0;
-
-       dev_warn(ctrl->ctrl.device,
-               "Got rdma device removal event, deleting ctrl\n");
-
-       /* Get rid of reconnect work if its running */
-       cancel_delayed_work_sync(&ctrl->reconnect_work);
-
-       /* Disable the queue so ctrl delete won't free it */
-       if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
-               /* Free this queue ourselves */
-               nvme_rdma_stop_queue(queue);
-               nvme_rdma_destroy_queue_ib(queue);
-
-               /* Return non-zero so the cm_id will destroy implicitly */
-               ret = 1;
-       }
-
-       /* Queue controller deletion */
-       queue_work(nvme_rdma_wq, &ctrl->delete_work);
-       flush_work(&ctrl->delete_work);
-       return ret;
-}
-
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *ev)
 {
@@ -1388,8 +1362,8 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                nvme_rdma_error_recovery(queue->ctrl);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
-               /* return 1 means impliciy CM ID destroy */
-               return nvme_rdma_device_unplug(queue);
+               /* device removal is handled via the ib_client API */
+               break;
        default:
                dev_err(queue->ctrl->ctrl.device,
                        "Unexpected RDMA CM event (%d)\n", ev->event);
@@ -1461,7 +1435,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
                flush = true;
        ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-                       req->need_inval ? &req->reg_wr.wr : NULL, flush);
+                       req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
        if (ret) {
                nvme_rdma_unmap_data(queue, rq);
                goto err;
@@ -1690,15 +1664,19 @@ static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
 static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-       int ret;
+       int ret = 0;
 
+       /*
+        * Keep a reference until all work is flushed since
+        * __nvme_rdma_del_ctrl can free the ctrl mem
+        */
+       if (!kref_get_unless_zero(&ctrl->ctrl.kref))
+               return -EBUSY;
        ret = __nvme_rdma_del_ctrl(ctrl);
-       if (ret)
-               return ret;
-
-       flush_work(&ctrl->delete_work);
-
-       return 0;
+       if (!ret)
+               flush_work(&ctrl->delete_work);
+       nvme_put_ctrl(&ctrl->ctrl);
+       return ret;
 }
 
 static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
@@ -1816,7 +1794,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_rdma_mq_ops;
-       ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+       ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1914,7 +1892,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        spin_lock_init(&ctrl->lock);
 
        ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
-       ctrl->ctrl.sqsize = opts->queue_size;
+       ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
 
        ret = -ENOMEM;
@@ -1995,27 +1973,57 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
        .create_ctrl    = nvme_rdma_create_ctrl,
 };
 
+static void nvme_rdma_add_one(struct ib_device *ib_device)
+{
+}
+
+static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+       struct nvme_rdma_ctrl *ctrl;
+
+       /* Delete all controllers using this device */
+       mutex_lock(&nvme_rdma_ctrl_mutex);
+       list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+               if (ctrl->device->dev != ib_device)
+                       continue;
+               dev_info(ctrl->ctrl.device,
+                       "Removing ctrl: NQN \"%s\", addr %pISp\n",
+                       ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+               __nvme_rdma_del_ctrl(ctrl);
+       }
+       mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+       flush_workqueue(nvme_rdma_wq);
+}
+
+static struct ib_client nvme_rdma_ib_client = {
+       .name   = "nvme_rdma",
+       .add = nvme_rdma_add_one,
+       .remove = nvme_rdma_remove_one
+};
+
 static int __init nvme_rdma_init_module(void)
 {
+       int ret;
+
        nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
        if (!nvme_rdma_wq)
                return -ENOMEM;
 
+       ret = ib_register_client(&nvme_rdma_ib_client);
+       if (ret) {
+               destroy_workqueue(nvme_rdma_wq);
+               return ret;
+       }
+
        nvmf_register_transport(&nvme_rdma_transport);
        return 0;
 }
 
 static void __exit nvme_rdma_cleanup_module(void)
 {
-       struct nvme_rdma_ctrl *ctrl;
-
        nvmf_unregister_transport(&nvme_rdma_transport);
-
-       mutex_lock(&nvme_rdma_ctrl_mutex);
-       list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
-               __nvme_rdma_del_ctrl(ctrl);
-       mutex_unlock(&nvme_rdma_ctrl_mutex);
-
+       ib_unregister_client(&nvme_rdma_ib_client);
        destroy_workqueue(nvme_rdma_wq);
 }
 
index a5c31cbeb4815efe58f8b5ff60a009ea97c62d31..3a5b9d0576cb0f71a5bf22a34c905ae8dfc634fb 100644 (file)
@@ -15,8 +15,8 @@ config NVME_TARGET
 
 config NVME_TARGET_LOOP
        tristate "NVMe loopback device support"
-       depends on BLK_DEV_NVME
        depends on NVME_TARGET
+       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
index 7affd40a6b337ba110caf825fcec4ee2dbe1a96b..395e60dad83542ff5300107a68bf73caeedaf6dd 100644 (file)
@@ -556,7 +556,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
-       ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+       ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -620,7 +620,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 
        ret = -ENOMEM;
 
-       ctrl->ctrl.sqsize = opts->queue_size;
+       ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
 
        ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
index b4d648536c3e43316cc2929f8346d4997db01306..1cbe6e053b5b89f2187ae520e9a7fe792dd8cafa 100644 (file)
@@ -978,10 +978,11 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
                container_of(w, struct nvmet_rdma_queue, release_work);
        struct rdma_cm_id *cm_id = queue->cm_id;
        struct nvmet_rdma_device *dev = queue->dev;
+       enum nvmet_rdma_queue_state state = queue->state;
 
        nvmet_rdma_free_queue(queue);
 
-       if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+       if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
                rdma_destroy_id(cm_id);
 
        kref_put(&dev->ref, nvmet_rdma_free_dev);
@@ -1003,10 +1004,10 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
        queue->host_qid = le16_to_cpu(req->qid);
 
        /*
-        * req->hsqsize corresponds to our recv queue size
+        * req->hsqsize corresponds to our recv queue size plus 1
         * req->hrqsize corresponds to our send queue size
         */
-       queue->recv_queue_size = le16_to_cpu(req->hsqsize);
+       queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
        queue->send_queue_size = le16_to_cpu(req->hrqsize);
 
        if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
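
The host- and target-side changes above settle on one reading of the Fabrics spec: sqsize travels on the wire as a 0's-based value, so the host sends hsqsize = queue_size - 1 and the target adds one back when sizing its receive queue, while hrqsize stays 1's-based. A small worked fragment under that assumption (illustrative values only):

        unsigned int queue_size = 128;              /* depth requested by the user */
        unsigned int hsqsize    = queue_size - 1;   /* 127, 0's-based on the wire  */
        unsigned int hrqsize    = queue_size;       /* 128, 1's-based              */
        unsigned int recv_qsize = hsqsize + 1;      /* target recovers 128 entries */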
index 5f4a2e04c8d7cf237535d0cafb0274ba4df904e5..add66236215c66887fb2f9682d72118a9f786849 100644 (file)
@@ -44,6 +44,7 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
        bridge->release_fn = release_fn;
        bridge->release_data = release_data;
 }
+EXPORT_SYMBOL_GPL(pci_set_host_bridge_release);
 
 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
                             struct resource *res)
index 37ff0158e45f1a3aa453e5da757d21a11e1c3267..44e0ff37480b8284e8044cd66db6f143c0de370e 100644 (file)
@@ -3327,9 +3327,9 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
        if (nhi->vendor != PCI_VENDOR_ID_INTEL
                    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
                        nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+                       nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
                        nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
-                   || nhi->subsystem_vendor != 0x2222
-                   || nhi->subsystem_device != 0x1111)
+                   || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
                goto out;
        dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
        device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3343,6 +3343,9 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
                               PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
                               quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+                              PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
+                              quirk_apple_wait_for_thunderbolt);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
                               PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
                               quirk_apple_wait_for_thunderbolt);
index d1ef7acf69307f1377782544bde6011af4e54df2..f9357e09e9b30131d631361a9d0092a1e19980bb 100644 (file)
@@ -40,6 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
        list_del(&dev->bus_list);
        up_write(&pci_bus_sem);
 
+       pci_bridge_d3_device_removed(dev);
        pci_free_resources(dev);
        put_device(&dev->dev);
 }
@@ -96,8 +97,6 @@ static void pci_remove_bus_device(struct pci_dev *dev)
                dev->subordinate = NULL;
        }
 
-       pci_bridge_d3_device_removed(dev);
-
        pci_destroy_dev(dev);
 }
 
index 489ea1098c96170ad532a636bf93a148b1228a97..69b5e811ea2b2c2c02a902434063196f31425892 100644 (file)
@@ -977,7 +977,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 /************************ runtime PM support ***************************/
 
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state);
+static int pcmcia_dev_suspend(struct device *dev);
 static int pcmcia_dev_resume(struct device *dev);
 
 static int runtime_suspend(struct device *dev)
@@ -985,7 +985,7 @@ static int runtime_suspend(struct device *dev)
        int rc;
 
        device_lock(dev);
-       rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
+       rc = pcmcia_dev_suspend(dev);
        device_unlock(dev);
        return rc;
 }
@@ -1135,7 +1135,7 @@ ATTRIBUTE_GROUPS(pcmcia_dev);
 
 /* PM support, also needed for reset */
 
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
+static int pcmcia_dev_suspend(struct device *dev)
 {
        struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
        struct pcmcia_driver *p_drv = NULL;
@@ -1410,6 +1410,9 @@ static struct class_interface pcmcia_bus_interface __refdata = {
        .remove_dev = &pcmcia_bus_remove_socket,
 };
 
+static const struct dev_pm_ops pcmcia_bus_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume)
+};
 
 struct bus_type pcmcia_bus_type = {
        .name = "pcmcia",
@@ -1418,8 +1421,7 @@ struct bus_type pcmcia_bus_type = {
        .dev_groups = pcmcia_dev_groups,
        .probe = pcmcia_device_probe,
        .remove = pcmcia_device_remove,
-       .suspend = pcmcia_dev_suspend,
-       .resume = pcmcia_dev_resume,
+       .pm = &pcmcia_bus_pm_ops,
 };
 
 
index 483f919e0d2e2431636a73cacd14463f845c8d0c..91b5f5724cba6dde6466cb50cfada71875873ad0 100644 (file)
@@ -214,9 +214,8 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
 }
 #endif
 
-void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
 {
-       struct pcmcia_low_level *ops = dev->platform_data;
        /*
         * We have at least one socket, so set MECR:CIT
         * (Card Is There)
@@ -322,7 +321,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
                        goto err1;
        }
 
-       pxa2xx_configure_sockets(&dev->dev);
+       pxa2xx_configure_sockets(&dev->dev, ops);
        dev_set_drvdata(&dev->dev, sinfo);
 
        return 0;
@@ -348,7 +347,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
 
 static int pxa2xx_drv_pcmcia_resume(struct device *dev)
 {
-       pxa2xx_configure_sockets(dev);
+       struct pcmcia_low_level *ops = (struct pcmcia_low_level *)dev->platform_data;
+
+       pxa2xx_configure_sockets(dev, ops);
        return 0;
 }
 
index b609b45469ed71179852378f49354ef1c798d433..e58c7a41541880cd4bf461cd32f995bb27f8aae7 100644 (file)
@@ -1,4 +1,4 @@
 int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
 void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
-void pxa2xx_configure_sockets(struct device *dev);
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops);
 
index 12f0dd0914772ae6032282d8a1cf545f023e1857..2f490930430d46a2f7056aa9b5576266d21a4937 100644 (file)
@@ -134,20 +134,14 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
 
 int pcmcia_badge4_init(struct sa1111_dev *dev)
 {
-       int ret = -ENODEV;
-
-       if (machine_is_badge4()) {
-               printk(KERN_INFO
-                      "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
-                      __func__,
-                      badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
-
-               sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
-               ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
-                               sa11xx_drv_pcmcia_add_one);
-       }
-
-       return ret;
+       printk(KERN_INFO
+              "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
+              __func__,
+              badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
+
+       sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
+       return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
+                                sa11xx_drv_pcmcia_add_one);
 }
 
 static int __init pcmv_setup(char *s)
index a1531feb8460c3041ec55f629dcc26dcfca55d9b..3d95dffcff7a307e6475614d545d7fa3ff556305 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <mach/hardware.h>
 #include <asm/hardware/sa1111.h>
+#include <asm/mach-types.h>
 #include <asm/irq.h>
 
 #include "sa1111_generic.h"
@@ -203,19 +204,30 @@ static int pcmcia_probe(struct sa1111_dev *dev)
        sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
        sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
 
+       ret = -ENODEV;
 #ifdef CONFIG_SA1100_BADGE4
-       pcmcia_badge4_init(dev);
+       if (machine_is_badge4())
+               ret = pcmcia_badge4_init(dev);
 #endif
 #ifdef CONFIG_SA1100_JORNADA720
-       pcmcia_jornada720_init(dev);
+       if (machine_is_jornada720())
+               ret = pcmcia_jornada720_init(dev);
 #endif
 #ifdef CONFIG_ARCH_LUBBOCK
-       pcmcia_lubbock_init(dev);
+       if (machine_is_lubbock())
+               ret = pcmcia_lubbock_init(dev);
 #endif
 #ifdef CONFIG_ASSABET_NEPONSET
-       pcmcia_neponset_init(dev);
+       if (machine_is_assabet())
+               ret = pcmcia_neponset_init(dev);
 #endif
-       return 0;
+
+       if (ret) {
+               release_mem_region(dev->res.start, 512);
+               sa1111_disable_device(dev);
+       }
+
+       return ret;
 }
 
 static int pcmcia_remove(struct sa1111_dev *dev)
index c2c30580c83f6316013595185185b10c078990d2..480a3ede27c8b59b5cb573d30c5cead55d5ca481 100644 (file)
@@ -94,22 +94,17 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
 
 int pcmcia_jornada720_init(struct sa1111_dev *sadev)
 {
-       int ret = -ENODEV;
+       unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
 
-       if (machine_is_jornada720()) {
-               unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
+       /* Fixme: why messing around with SA11x0's GPIO1? */
+       GRER |= 0x00000002;
 
-               GRER |= 0x00000002;
+       /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
+       sa1111_set_io_dir(sadev, pin, 0, 0);
+       sa1111_set_io(sadev, pin, 0);
+       sa1111_set_sleep_io(sadev, pin, 0);
 
-               /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
-               sa1111_set_io_dir(sadev, pin, 0, 0);
-               sa1111_set_io(sadev, pin, 0);
-               sa1111_set_sleep_io(sadev, pin, 0);
-
-               sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
-               ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
-                               sa11xx_drv_pcmcia_add_one);
-       }
-
-       return ret;
+       sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
+       return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
+                                sa11xx_drv_pcmcia_add_one);
 }
index c5caf579045145c07df4c9946141bea45f259075..e741f499c875314d40855190186ee939f0dba2e7 100644 (file)
@@ -210,27 +210,21 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = {
 
 int pcmcia_lubbock_init(struct sa1111_dev *sadev)
 {
-       int ret = -ENODEV;
-
-       if (machine_is_lubbock()) {
-               /*
-                * Set GPIO_A<3:0> to be outputs for the MAX1600,
-                * and switch to standby mode.
-                */
-               sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
-               sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-               sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-
-               /* Set CF Socket 1 power to standby mode. */
-               lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
+       /*
+        * Set GPIO_A<3:0> to be outputs for the MAX1600,
+        * and switch to standby mode.
+        */
+       sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+       sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+       sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
 
-               pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
-               pxa2xx_configure_sockets(&sadev->dev);
-               ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
-                               pxa2xx_drv_pcmcia_add_one);
-       }
+       /* Set CF Socket 1 power to standby mode. */
+       lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
 
-       return ret;
+       pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+       pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops);
+       return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
+                                pxa2xx_drv_pcmcia_add_one);
 }
 
 MODULE_LICENSE("GPL");
index 1d78739c4c07699b44f1479165660c229eaa462b..019c395eb4bf8cd44b4127b0efeed797e48b0ab5 100644 (file)
@@ -110,20 +110,14 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
 
 int pcmcia_neponset_init(struct sa1111_dev *sadev)
 {
-       int ret = -ENODEV;
-
-       if (machine_is_assabet()) {
-               /*
-                * Set GPIO_A<3:0> to be outputs for the MAX1600,
-                * and switch to standby mode.
-                */
-               sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
-               sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-               sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-               sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
-               ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
-                               sa11xx_drv_pcmcia_add_one);
-       }
-
-       return ret;
+       /*
+        * Set GPIO_A<3:0> to be outputs for the MAX1600,
+        * and switch to standby mode.
+        */
+       sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+       sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+       sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+       sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
+       return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
+                                sa11xx_drv_pcmcia_add_one);
 }
index 9f6ec87b9f9e13e7a1ff8f49338ec66849b17b57..48140ac73ed632613dfdb4a682abbe2583d57bc3 100644 (file)
@@ -144,19 +144,19 @@ static int
 sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
 {
        struct soc_pcmcia_timing timing;
-       unsigned int clock = clk_get_rate(skt->clk);
+       unsigned int clock = clk_get_rate(skt->clk) / 1000;
        unsigned long mecr = MECR;
        char *p = buf;
 
        soc_common_pcmcia_get_timing(skt, &timing);
 
-       p+=sprintf(p, "I/O      : %u (%u)\n", timing.io,
+       p+=sprintf(p, "I/O      : %uns (%uns)\n", timing.io,
                   sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr)));
 
-       p+=sprintf(p, "attribute: %u (%u)\n", timing.attr,
+       p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr,
                   sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr)));
 
-       p+=sprintf(p, "common   : %u (%u)\n", timing.mem,
+       p+=sprintf(p, "common   : %uns (%uns)\n", timing.mem,
                   sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr)));
 
        return p - buf;
index eed5e9c05353c2fafb30b4b0cc8745472b44aff8..d5ca760c4eb294101521bddc82260ff90b81e5c7 100644 (file)
@@ -235,7 +235,7 @@ static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
        stat |= skt->cs_state.Vcc ? SS_POWERON : 0;
 
        if (skt->cs_state.flags & SS_IOCARD)
-               stat |= state.bvd1 ? SS_STSCHG : 0;
+               stat |= state.bvd1 ? 0 : SS_STSCHG;
        else {
                if (state.bvd1 == 0)
                        stat |= SS_BATDEAD;
index c494613c1909e16b88f06c0270bee1425239e5f3..f5e1008a223df7e5a1c16d45e9a33b59382e3276 100644 (file)
@@ -925,6 +925,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
                        if (i > 0 && spi != using_spi) {
                                pr_err("PPI/SPI IRQ type mismatch for %s!\n",
                                        dn->name);
+                               of_node_put(dn);
                                kfree(irqs);
                                return -EINVAL;
                        }
@@ -969,7 +970,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
        if (cpumask_weight(&pmu->supported_cpus) == 0) {
                int irq = platform_get_irq(pdev, 0);
 
-               if (irq_is_percpu(irq)) {
+               if (irq >= 0 && irq_is_percpu(irq)) {
                        /* If using PPIs, check the affinity of the partition */
                        int ret;
 
index 18d662610075b3e312bb6e5e8fccb51020a2f13d..8ffc44afdb75baaceab39801b811d3c3314604e6 100644 (file)
@@ -367,7 +367,7 @@ static int brcm_sata_phy_init(struct phy *phy)
                rc = -ENODEV;
        };
 
-       return 0;
+       return rc;
 }
 
 static const struct phy_ops phy_ops = {
index 0a45bc6088aeb14e3da89b1240d95173019e20f9..8c7eb335622ee1781a6b83d155740153b00d4717 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/power_supply.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
+#include <linux/usb/of.h>
 #include <linux/workqueue.h>
 
 #define REG_ISCR                       0x00
@@ -110,6 +111,7 @@ struct sun4i_usb_phy_cfg {
 struct sun4i_usb_phy_data {
        void __iomem *base;
        const struct sun4i_usb_phy_cfg *cfg;
+       enum usb_dr_mode dr_mode;
        struct mutex mutex;
        struct sun4i_usb_phy {
                struct phy *phy;
@@ -120,6 +122,7 @@ struct sun4i_usb_phy_data {
                bool regulator_on;
                int index;
        } phys[MAX_PHYS];
+       int first_phy;
        /* phy0 / otg related variables */
        struct extcon_dev *extcon;
        bool phy0_init;
@@ -285,16 +288,10 @@ static int sun4i_usb_phy_init(struct phy *_phy)
                sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN);
                sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN);
 
-               if (data->id_det_gpio) {
-                       /* OTG mode, force ISCR and cable state updates */
-                       data->id_det = -1;
-                       data->vbus_det = -1;
-                       queue_delayed_work(system_wq, &data->detect, 0);
-               } else {
-                       /* Host only mode */
-                       sun4i_usb_phy0_set_id_detect(_phy, 0);
-                       sun4i_usb_phy0_set_vbus_detect(_phy, 1);
-               }
+               /* Force ISCR and cable state updates */
+               data->id_det = -1;
+               data->vbus_det = -1;
+               queue_delayed_work(system_wq, &data->detect, 0);
        }
 
        return 0;
@@ -319,6 +316,19 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
        return 0;
 }
 
+static int sun4i_usb_phy0_get_id_det(struct sun4i_usb_phy_data *data)
+{
+       switch (data->dr_mode) {
+       case USB_DR_MODE_OTG:
+               return gpiod_get_value_cansleep(data->id_det_gpio);
+       case USB_DR_MODE_HOST:
+               return 0;
+       case USB_DR_MODE_PERIPHERAL:
+       default:
+               return 1;
+       }
+}
+
 static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data)
 {
        if (data->vbus_det_gpio)
@@ -432,7 +442,10 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
        struct phy *phy0 = data->phys[0].phy;
        int id_det, vbus_det, id_notify = 0, vbus_notify = 0;
 
-       id_det = gpiod_get_value_cansleep(data->id_det_gpio);
+       if (phy0 == NULL)
+               return;
+
+       id_det = sun4i_usb_phy0_get_id_det(data);
        vbus_det = sun4i_usb_phy0_get_vbus_det(data);
 
        mutex_lock(&phy0->mutex);
@@ -448,7 +461,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
                 * without vbus detection report vbus low for long enough for
                 * the musb-ip to end the current device session.
                 */
-               if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
+               if (data->dr_mode == USB_DR_MODE_OTG &&
+                   !sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
                        sun4i_usb_phy0_set_vbus_detect(phy0, 0);
                        msleep(200);
                        sun4i_usb_phy0_set_vbus_detect(phy0, 1);
@@ -474,7 +488,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
                 * without vbus detection report vbus low for long enough to
                 * the musb-ip to end the current host session.
                 */
-               if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
+               if (data->dr_mode == USB_DR_MODE_OTG &&
+                   !sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
                        mutex_lock(&phy0->mutex);
                        sun4i_usb_phy0_set_vbus_detect(phy0, 0);
                        msleep(1000);
@@ -519,7 +534,8 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
 {
        struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
 
-       if (args->args[0] >= data->cfg->num_phys)
+       if (args->args[0] < data->first_phy ||
+           args->args[0] >= data->cfg->num_phys)
                return ERR_PTR(-ENODEV);
 
        return data->phys[args->args[0]].phy;
@@ -593,13 +609,17 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
                        return -EPROBE_DEFER;
        }
 
-       /* vbus_det without id_det makes no sense, and is not supported */
-       if (sun4i_usb_phy0_have_vbus_det(data) && !data->id_det_gpio) {
-               dev_err(dev, "usb0_id_det missing or invalid\n");
-               return -ENODEV;
-       }
-
-       if (data->id_det_gpio) {
+       data->dr_mode = of_usb_get_dr_mode_by_phy(np, 0);
+       switch (data->dr_mode) {
+       case USB_DR_MODE_OTG:
+               /* otg without id_det makes no sense, and is not supported */
+               if (!data->id_det_gpio) {
+                       dev_err(dev, "usb0_id_det missing or invalid\n");
+                       return -ENODEV;
+               }
+               /* fall through */
+       case USB_DR_MODE_HOST:
+       case USB_DR_MODE_PERIPHERAL:
                data->extcon = devm_extcon_dev_allocate(dev,
                                                        sun4i_usb_phy0_cable);
                if (IS_ERR(data->extcon))
@@ -610,9 +630,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
                        dev_err(dev, "failed to register extcon: %d\n", ret);
                        return ret;
                }
+               break;
+       default:
+               dev_info(dev, "dr_mode unknown, not registering usb phy0\n");
+               data->first_phy = 1;
        }
 
-       for (i = 0; i < data->cfg->num_phys; i++) {
+       for (i = data->first_phy; i < data->cfg->num_phys; i++) {
                struct sun4i_usb_phy *phy = data->phys + i;
                char name[16];
 
index ac4f31abefe33e45a4b1cdf5636c7c1ba05f716d..28fce4bce638e57a0c97f7c57ee781e2fe153240 100644 (file)
@@ -141,9 +141,9 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev)
                }
 
                phy->hsic_clk = devm_clk_get(dev, "hsic_12M");
-               if (IS_ERR(phy->clk)) {
+               if (IS_ERR(phy->hsic_clk)) {
                        dev_err(dev, "failed to get hsic_12M clock\n");
-                       return PTR_ERR(phy->clk);
+                       return PTR_ERR(phy->hsic_clk);
                }
 
                phy->reset = devm_reset_control_get(dev, "hsic");
index 5749a4eee746da45fdef1d4f39cabab607160408..0fe8fad25e4d1fb4c93f9716a2fa0d300f4e9290 100644 (file)
@@ -1539,12 +1539,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                offset += range->npins;
        }
 
-       /* Mask and clear all interrupts */
-       chv_writel(0, pctrl->regs + CHV_INTMASK);
+       /* Clear all interrupts */
        chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 
        ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
-                                  handle_simple_irq, IRQ_TYPE_NONE);
+                                  handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add IRQ chip\n");
                goto fail;
index 7bad200bd67c5cd4801c4098cac5421d9e02f24f..55375b1b3cc81b20cf6fc83722c89cfaa9ecefc2 100644 (file)
@@ -809,17 +809,17 @@ static const struct pistachio_pin_group pistachio_groups[] = {
                           PADS_FUNCTION_SELECT2, 12, 0x3),
        MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
                           PADS_FUNCTION_SELECT2, 14, 0x3),
-       MFIO_MUX_PIN_GROUP(84, SYS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
+       MFIO_MUX_PIN_GROUP(84, AUDIO_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
                           PADS_FUNCTION_SELECT2, 16, 0x3),
-       MFIO_MUX_PIN_GROUP(85, WIFI_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+       MFIO_MUX_PIN_GROUP(85, RPU_V_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
                           PADS_FUNCTION_SELECT2, 18, 0x3),
-       MFIO_MUX_PIN_GROUP(86, BT_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+       MFIO_MUX_PIN_GROUP(86, RPU_L_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
                           PADS_FUNCTION_SELECT2, 20, 0x3),
-       MFIO_MUX_PIN_GROUP(87, RPU_V_PLL_LOCK, DREQ2, SOCIF_DEBUG,
+       MFIO_MUX_PIN_GROUP(87, SYS_PLL_LOCK, DREQ2, SOCIF_DEBUG,
                           PADS_FUNCTION_SELECT2, 22, 0x3),
-       MFIO_MUX_PIN_GROUP(88, RPU_L_PLL_LOCK, DREQ3, SOCIF_DEBUG,
+       MFIO_MUX_PIN_GROUP(88, WIFI_PLL_LOCK, DREQ3, SOCIF_DEBUG,
                           PADS_FUNCTION_SELECT2, 24, 0x3),
-       MFIO_MUX_PIN_GROUP(89, AUDIO_PLL_LOCK, DREQ4, DREQ5,
+       MFIO_MUX_PIN_GROUP(89, BT_PLL_LOCK, DREQ4, DREQ5,
                           PADS_FUNCTION_SELECT2, 26, 0x3),
        PIN_GROUP(TCK, "tck"),
        PIN_GROUP(TRSTN, "trstn"),
index ce483b03a2631f18b25569cde3b60352f9eda53f..f9d661e5c14abb56c94493f29d0f028ba9b1b078 100644 (file)
@@ -485,12 +485,12 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* RTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* RTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)),  /* PG_EINT8 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* CTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* CTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)),  /* PG_EINT9 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
index 3040abe6f73a1ad1b40991821f26de1c1db711d5..3131cac2b76ffcc830948fbff94ed4ad57669c84 100644 (file)
@@ -407,12 +407,12 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* RTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* RTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)),  /* PG_EINT8 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* CTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* CTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)),  /* PG_EINT9 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
index f99b183d5296f7981c9cfbf3bb25d5f5c236e46f..374a8028fec772b406aef640fdc062fc5dd7d9b8 100644 (file)
@@ -1,6 +1,8 @@
 /*
  * Generic driver for the OLPC Embedded Controller.
  *
+ * Author: Andres Salomon <dilinger@queued.net>
+ *
  * Copyright (C) 2011-2012 One Laptop per Child Foundation.
  *
  * Licensed under the GPL v2 or later.
@@ -12,7 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/list.h>
 #include <linux/olpc-ec.h>
 #include <asm/olpc.h>
@@ -326,8 +328,4 @@ static int __init olpc_ec_init_module(void)
 {
        return platform_driver_register(&olpc_ec_plat_driver);
 }
-
 arch_initcall(olpc_ec_init_module);
-
-MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
-MODULE_LICENSE("GPL");
index 63b371d6ee55b46f03b0f6a9cfedc35f417ffc33..91ae58510d92698c63252a099a865c01a9b23c2c 100644 (file)
@@ -1,6 +1,8 @@
 /* Moorestown PMIC GPIO (access through IPC) driver
  * Copyright (c) 2008 - 2009, Intel Corporation.
  *
+ * Author: Alek Du <alek.du@intel.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -21,7 +23,6 @@
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -322,9 +323,4 @@ static int __init platform_pmic_gpio_init(void)
 {
        return platform_driver_register(&platform_pmic_gpio_driver);
 }
-
 subsys_initcall(platform_pmic_gpio_init);
-
-MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
-MODULE_DESCRIPTION("Intel Moorestown PMIC GPIO driver");
-MODULE_LICENSE("GPL v2");
index 32f0f014a06735c5ea51cf4f5185c5fbc7bdae60..9d19b9a62011b376be541b247336d455952bb42b 100644 (file)
@@ -1161,7 +1161,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
                } else if (ibw_start < (ib_win->rstart + ib_win->size) &&
                           (ibw_start + ibw_size) > ib_win->rstart) {
                        /* Return error if address translation involved */
-                       if (direct && ib_win->xlat) {
+                       if (!direct || ib_win->xlat) {
                                ret = -EFAULT;
                                break;
                        }
index b2daa6641417caecedbd43fde480ef8159cd3652..c9ff2619971166f0f7b853242fca65e0323c1381 100644 (file)
@@ -2,7 +2,7 @@
  * max14577.c - Regulator driver for the Maxim 14577/77836
  *
  * Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -331,7 +331,7 @@ static void __exit max14577_regulator_exit(void)
 }
 module_exit(max14577_regulator_exit);
 
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:max14577-regulator");
index de730fd3f8a5df6cf7786be8f8b64c52b3786820..cfbb9512e48623429899cbe0785332b0a1171734 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2013-2015 Samsung Electronics
  * Jonghwa Lee <jonghwa3.lee@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski.k@gmail.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -314,5 +314,5 @@ module_exit(max77693_pmic_cleanup);
 
 MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
 MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_LICENSE("GPL");
index 5022fa8d10c6bc399970c26b2392fcea5dd54385..8ed46a9a55c8469e38c0b14f7c5c8c95fb4fe569 100644 (file)
@@ -178,20 +178,21 @@ static const struct regulator_desc pma8084_hfsmps = {
 static const struct regulator_desc pma8084_ftsmps = {
        .linear_ranges = (struct regulator_linear_range[]) {
                REGULATOR_LINEAR_RANGE(350000,  0, 184, 5000),
-               REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+               REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
        },
        .n_linear_ranges = 2,
-       .n_voltages = 340,
+       .n_voltages = 262,
        .ops = &rpm_smps_ldo_ops,
 };
 
 static const struct regulator_desc pma8084_pldo = {
        .linear_ranges = (struct regulator_linear_range[]) {
-               REGULATOR_LINEAR_RANGE(750000,  0,  30, 25000),
-               REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+               REGULATOR_LINEAR_RANGE( 750000,  0,  63, 12500),
+               REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+               REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
        },
-       .n_linear_ranges = 2,
-       .n_voltages = 100,
+       .n_linear_ranges = 3,
+       .n_voltages = 164,
        .ops = &rpm_smps_ldo_ops,
 };
 
@@ -221,29 +222,30 @@ static const struct regulator_desc pm8x41_hfsmps = {
 static const struct regulator_desc pm8841_ftsmps = {
        .linear_ranges = (struct regulator_linear_range[]) {
                REGULATOR_LINEAR_RANGE(350000,  0, 184, 5000),
-               REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+               REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
        },
        .n_linear_ranges = 2,
-       .n_voltages = 340,
+       .n_voltages = 262,
        .ops = &rpm_smps_ldo_ops,
 };
 
 static const struct regulator_desc pm8941_boost = {
        .linear_ranges = (struct regulator_linear_range[]) {
-               REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000),
+               REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
        },
        .n_linear_ranges = 1,
-       .n_voltages = 16,
+       .n_voltages = 31,
        .ops = &rpm_smps_ldo_ops,
 };
 
 static const struct regulator_desc pm8941_pldo = {
        .linear_ranges = (struct regulator_linear_range[]) {
-               REGULATOR_LINEAR_RANGE( 750000,  0,  30, 25000),
-               REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+               REGULATOR_LINEAR_RANGE( 750000,  0,  63, 12500),
+               REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+               REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
        },
-       .n_linear_ranges = 2,
-       .n_voltages = 100,
+       .n_linear_ranges = 3,
+       .n_voltages = 164,
        .ops = &rpm_smps_ldo_ops,
 };
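For reference, a hedged sketch of how the corrected pldo ranges above translate a selector into microvolts, assuming the usual REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) semantics; the helper name is illustrative and not part of the driver. The three ranges cover selectors 0-63, 64-126 and 127-163, which is where n_voltages = 164 comes from.

#include <linux/errno.h>

/* Illustrative only: selector-to-voltage mapping for the pldo table above. */
static int example_pldo_sel_to_uV(unsigned int sel)
{
	if (sel <= 63)
		return 750000 + sel * 12500;            /* 0.75 V .. 1.5375 V */
	if (sel <= 126)
		return 1550000 + (sel - 64) * 25000;    /* 1.55 V .. 3.1 V    */
	if (sel <= 163)
		return 3100000 + (sel - 127) * 50000;   /* 3.1 V  .. 4.9 V    */
	return -EINVAL;                                 /* n_voltages == 164  */
}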
 
index 83458f7a28248045bdca6f1b5603fac7ce6a84f2..6dc96c8dfe75c229e522db72b587bd1af5852686 100644 (file)
@@ -361,8 +361,9 @@ static const char * const snstext[] = {
 
 /* Get sense key string or NULL if not available */
 const char *
-scsi_sense_key_string(unsigned char key) {
-       if (key <= 0xE)
+scsi_sense_key_string(unsigned char key)
+{
+       if (key < ARRAY_SIZE(snstext))
                return snstext[key];
        return NULL;
 }
index eaccd651ccda0d239af91ebfb6dfdbd97ac340e3..2464569253350b61fef59ed0583e8b4cd1a25944 100644 (file)
@@ -246,6 +246,10 @@ static struct {
        {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
        {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
        {"SONY", "TSL", NULL, BLIST_FORCELUN},          /* DDS3 & DDS4 autoloaders */
index 3f0ff072184b648ad383831413cc80213e5f88fc..60b651bfaa01e91456a1c294704cb2193305aec8 100644 (file)
@@ -340,22 +340,6 @@ static int do_sas_phy_delete(struct device *dev, void *data)
        return 0;
 }
 
-/**
- * is_sas_attached - check if device is SAS attached
- * @sdev: scsi device to check
- *
- * returns true if the device is SAS attached
- */
-int is_sas_attached(struct scsi_device *sdev)
-{
-       struct Scsi_Host *shost = sdev->host;
-
-       return shost->transportt->host_attrs.ac.class ==
-               &sas_host_class.class;
-}
-EXPORT_SYMBOL(is_sas_attached);
-
-
 /**
  * sas_remove_children  -  tear down a devices SAS data structures
  * @dev:       device belonging to the sas object
index 0e8601aa877a83e35dc1305ffa452a1c0c4eeffa..8c9a35c91705e42fcbc07e3721d0522f96d496dc 100644 (file)
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 
        ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-       if (is_sas_attached(sdev))
+       if (scsi_is_sas_rphy(&sdev->sdev_gendev))
                efd.addr = sas_get_address(sdev);
 
        if (efd.addr) {
index e3da1a2fdb6664597cc7e639e5a8d61da5a13fd1..2a9da2e0ea6baf7448be18120191ec7a4041aaa4 100644 (file)
@@ -962,7 +962,7 @@ static void wd719x_pci_remove(struct pci_dev *pdev)
        scsi_host_put(sh);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
+static const struct pci_device_id wd719x_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
        {}
 };
index 823cbc92d1e75447b1487158ac097e1de3d50470..7a37090dabbea55308b555d03f19dd56c4c3c15c 100644 (file)
@@ -720,8 +720,6 @@ static int img_spfi_remove(struct platform_device *pdev)
                clk_disable_unprepare(spfi->sys_clk);
        }
 
-       spi_master_put(master);
-
        return 0;
 }
 
index 0be89e052428fe7185f56409cd9ac8282d29b036..899d7a8f0889eaf39388ad458b28a1abbeeba671 100644 (file)
@@ -685,7 +685,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
        mtk_spi_reset(mdata);
-       spi_master_put(master);
 
        return 0;
 }
index f3df522db93bac9fec31dc5f27ebcdddda57c87a..58d2d48e16a530869528288d50f3a6581c3ea311 100644 (file)
@@ -214,6 +214,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
                return PTR_ERR(ssp->clk);
 
        memset(&pi, 0, sizeof(pi));
+       pi.fwnode = dev->dev.fwnode;
        pi.parent = &dev->dev;
        pi.name = "pxa2xx-spi";
        pi.id = ssp->port_id;
index c338ef1136f6c6052b72b9394f74ef89b58273a5..7f1555621f8ec262459a0ebd65ffb680b24fbae8 100644 (file)
@@ -1030,7 +1030,6 @@ static int spi_qup_remove(struct platform_device *pdev)
 
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       spi_master_put(master);
 
        return 0;
 }
index 0f83ad1d5a5858dd6ff808773fe359479a54b796..1de3a772eb7d23a8a90b9b77704033ac4945e364 100644 (file)
@@ -262,6 +262,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 
        for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
                brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+               /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+               if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
+                       continue;
                if (brps <= 32) /* max of brdv is 32 */
                        break;
        }
index 51ad42fad567913339ab8d9deaf531b779865a87..200ca228d8851980d5e755eed5a82c6df4c6dc9b 100644 (file)
@@ -960,7 +960,7 @@ static int spi_transfer_one_message(struct spi_master *master,
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
-       unsigned long ms = 1;
+       unsigned long long ms = 1;
        struct spi_statistics *statm = &master->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
 
@@ -991,9 +991,13 @@ static int spi_transfer_one_message(struct spi_master *master,
 
                        if (ret > 0) {
                                ret = 0;
-                               ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+                               ms = 8LL * 1000LL * xfer->len;
+                               do_div(ms, xfer->speed_hz);
                                ms += ms + 100; /* some tolerance */
 
+                               if (ms > UINT_MAX)
+                                       ms = UINT_MAX;
+
                                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                                 msecs_to_jiffies(ms));
                        }
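The hunk above widens ms to 64 bits so the per-transfer timeout no longer overflows for large transfers at low speed_hz. A minimal sketch of the same computation follows; the helper name is illustrative, and in the driver the result feeds msecs_to_jiffies() and wait_for_completion_timeout().

#include <asm/div64.h>
#include <linux/kernel.h>

/* Illustrative only: overflow-safe per-transfer timeout in milliseconds. */
static unsigned int example_xfer_timeout_ms(unsigned int len, u32 speed_hz)
{
	unsigned long long ms;

	ms = 8LL * 1000LL * len;	/* bits to transfer, scaled to ms */
	do_div(ms, speed_hz);		/* 64-bit divide, safe on 32-bit  */
	ms += ms + 100;			/* double it plus some tolerance  */

	if (ms > UINT_MAX)		/* msecs_to_jiffies() takes an unsigned int */
		ms = UINT_MAX;

	return ms;
}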
@@ -1159,6 +1163,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
                if (ret < 0) {
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
+                       mutex_unlock(&master->io_mutex);
                        return;
                }
        }
@@ -1174,6 +1179,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
                        if (master->auto_runtime_pm)
                                pm_runtime_put(master->dev.parent);
+                       mutex_unlock(&master->io_mutex);
                        return;
                }
        }
index d7dd1e55e347be5f3037d33ba81e343c18e13521..9f525ff7290c69b764503a232fcd9d7a7de338b3 100644 (file)
@@ -196,6 +196,7 @@ static int pci1760_pwm_ns_to_div(unsigned int flags, unsigned int ns)
                break;
        case CMDF_ROUND_DOWN:
                divisor = ns / PCI1760_PWM_TIMEBASE;
+               break;
        default:
                return -EINVAL;
        }
index 4ab186669f0c27d503a3e33b6e3bdd5fb0e12c7d..ec5b9a23494d4b28538f29e2efddcb199485c502 100644 (file)
 
 #define N_CHANS 8
 
-enum waveform_state_bits {
-       WAVEFORM_AI_RUNNING,
-       WAVEFORM_AO_RUNNING
-};
-
 /* Data unique to this driver */
 struct waveform_private {
        struct timer_list ai_timer;     /* timer for AI commands */
@@ -68,7 +63,6 @@ struct waveform_private {
        unsigned int wf_amplitude;      /* waveform amplitude in microvolts */
        unsigned int wf_period;         /* waveform period in microseconds */
        unsigned int wf_current;        /* current time in waveform period */
-       unsigned long state_bits;
        unsigned int ai_scan_period;    /* AI scan period in usec */
        unsigned int ai_convert_period; /* AI conversion period in usec */
        struct timer_list ao_timer;     /* timer for AO commands */
@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg)
        unsigned int nsamples;
        unsigned int time_increment;
 
-       /* check command is still active */
-       if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
-               return;
-
        now = ktime_to_us(ktime_get());
        nsamples = comedi_nsamples_left(s, UINT_MAX);
 
@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
         */
        devpriv->ai_timer.expires =
                jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
-
-       /* mark command as active */
-       smp_mb__before_atomic();
-       set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
        add_timer(&devpriv->ai_timer);
        return 0;
 }
@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev,
 {
        struct waveform_private *devpriv = dev->private;
 
-       /* mark command as no longer active */
-       clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
-       /* cannot call del_timer_sync() as may be called from timer routine */
-       del_timer(&devpriv->ai_timer);
+       if (in_softirq()) {
+               /* Assume we were called from the timer routine itself. */
+               del_timer(&devpriv->ai_timer);
+       } else {
+               del_timer_sync(&devpriv->ai_timer);
+       }
        return 0;
 }
 
@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg)
        u64 scans_since;
        unsigned int scans_avail = 0;
 
-       /* check command is still active */
-       if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits))
-               return;
-
        /* determine number of scan periods since last time */
        now = ktime_to_us(ktime_get());
        scans_since = now - devpriv->ao_last_scan_time;
@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
        devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
        devpriv->ao_timer.expires =
                jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
-
-       /* mark command as active */
-       smp_mb__before_atomic();
-       set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
        add_timer(&devpriv->ao_timer);
 
        return 1;
@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev,
        struct waveform_private *devpriv = dev->private;
 
        s->async->inttrig = NULL;
-       /* mark command as no longer active */
-       clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
-       /* cannot call del_timer_sync() as may be called from timer routine */
-       del_timer(&devpriv->ao_timer);
+       if (in_softirq()) {
+               /* Assume we were called from the timer routine itself. */
+               del_timer(&devpriv->ao_timer);
+       } else {
+               del_timer_sync(&devpriv->ao_timer);
+       }
        return 0;
 }
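A minimal sketch of the cancel pattern the hunks above switch to, assuming del_timer_sync() must never be called from the timer handler itself; the helper name is illustrative and not part of the driver.

#include <linux/preempt.h>
#include <linux/timer.h>

/* Illustrative only: pick the safe way to stop a timer. */
static void example_cancel_timer(struct timer_list *timer)
{
	if (in_softirq()) {
		/* assume we were called from the timer routine itself */
		del_timer(timer);
	} else {
		/* safe to wait for a running handler to finish */
		del_timer_sync(timer);
	}
}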
 
index 65daef0c00d5209c61e2a59d20338ed2afe7c198..0f4eb954aa80fcbef0f7e4f5a9be4a4e46dd03f6 100644 (file)
@@ -634,7 +634,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
        const struct daq200_boardtype *board;
        int i;
 
-       if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
+       if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
                return NULL;
 
        for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
index 904f637797b6300de12acc98404d9b9d18ca3a43..8bbd938143408a70b1b364f606f4df6768ab6f00 100644 (file)
@@ -588,8 +588,8 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
        s = &dev->subdevices[0];
        s->type         = COMEDI_SUBD_AI;
        s->subdev_flags = SDF_READABLE |
-                         (it->options[2] == 1) ? SDF_DIFF :
-                         (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND;
+                         ((it->options[2] == 1) ? SDF_DIFF :
+                          (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND);
        s->n_chan       = (it->options[2] == 1) ? 8 : 16;
        s->maxdata      = 0x0fff;
        s->range_table  = board->is_pgh ? &dt2811_pgh_ai_ranges
index 8dabb19519a5ac6475e13dcf30c058842312c11a..0f97d7b611d720b7fd774ab102adfd3a607494fa 100644 (file)
@@ -2772,7 +2772,15 @@ static int ni_ao_inttrig(struct comedi_device *dev,
        int i;
        static const int timeout = 1000;
 
-       if (trig_num != cmd->start_arg)
+       /*
+        * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+        * For backwards compatibility, also allow trig_num == 0 when
+        * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+        * in that case, the internal trigger is being used as a pre-trigger
+        * before the external trigger.
+        */
+       if (!(trig_num == cmd->start_arg ||
+             (trig_num == 0 && cmd->start_src != TRIG_INT)))
                return -EINVAL;
 
        /*
@@ -5480,7 +5488,7 @@ static int ni_E_init(struct comedi_device *dev,
                s->maxdata      = (devpriv->is_m_series) ? 0xffffffff
                                                         : 0x00ffffff;
                s->insn_read    = ni_tio_insn_read;
-               s->insn_write   = ni_tio_insn_read;
+               s->insn_write   = ni_tio_insn_write;
                s->insn_config  = ni_tio_insn_config;
 #ifdef PCIDMA
                if (dev->irq && devpriv->mite) {
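A hedged sketch of the trigger acceptance rule spelled out in the new comment above; the predicate name is illustrative, and TRIG_INT is assumed to come from the comedi headers.

#include <linux/types.h>

/* Illustrative only: accept an exact match, or trig_num 0 used as a
 * pre-trigger when the command's start source is not TRIG_INT.
 */
static bool example_ao_trigger_ok(unsigned int trig_num,
				  unsigned int start_arg,
				  unsigned int start_src)
{
	return trig_num == start_arg ||
	       (trig_num == 0 && start_src != TRIG_INT);
}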
index 170ac980abcb84f1c4079b99187d6a18262ca1a4..24c348d2f5bb03f7fd7b3f1c8e8ec5c8d4b6e54b 100644 (file)
@@ -419,6 +419,7 @@ static ssize_t ad5933_store(struct device *dev,
        mutex_lock(&indio_dev->mlock);
        switch ((u32)this_attr->address) {
        case AD5933_OUT_RANGE:
+               ret = -EINVAL;
                for (i = 0; i < 4; i++)
                        if (val == st->range_avail[i]) {
                                st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
@@ -426,7 +427,6 @@ static ssize_t ad5933_store(struct device *dev,
                                ret = ad5933_cmd(st, 0);
                                break;
                        }
-               ret = -EINVAL;
                break;
        case AD5933_IN_PGA_GAIN:
                if (sysfs_streq(buf, "1")) {
index 3664bfd0178b34b09c1791cfd1190f6b9c035ea2..2c4dc69731e8586e9cd0ae7f2afe8675715479e7 100644 (file)
@@ -388,6 +388,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
        struct inode *inode = NULL;
        __u64 bits = 0;
        int rc = 0;
+       struct dentry *alias;
 
        /* NB 1 request reference will be taken away by ll_intent_lock()
         * when I return
@@ -412,26 +413,12 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
                 */
        }
 
-       /* Only hash *de if it is unhashed (new dentry).
-        * Atoimc_open may passing hashed dentries for open.
-        */
-       if (d_unhashed(*de)) {
-               struct dentry *alias;
-
-               alias = ll_splice_alias(inode, *de);
-               if (IS_ERR(alias)) {
-                       rc = PTR_ERR(alias);
-                       goto out;
-               }
-               *de = alias;
-       } else if (!it_disposition(it, DISP_LOOKUP_NEG)  &&
-                  !it_disposition(it, DISP_OPEN_CREATE)) {
-               /* With DISP_OPEN_CREATE dentry will be
-                * instantiated in ll_create_it.
-                */
-               LASSERT(!d_inode(*de));
-               d_instantiate(*de, inode);
+       alias = ll_splice_alias(inode, *de);
+       if (IS_ERR(alias)) {
+               rc = PTR_ERR(alias);
+               goto out;
        }
+       *de = alias;
 
        if (!it_disposition(it, DISP_LOOKUP_NEG)) {
                /* we have lookup look - unhide dentry */
@@ -587,6 +574,24 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
               dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
               *opened);
 
+       /* Only negative dentries enter here */
+       LASSERT(!d_inode(dentry));
+
+       if (!d_in_lookup(dentry)) {
+               /* A valid negative dentry that just passed revalidation,
+                * there's little point to try and open it server-side,
+                * even though there's a minuscule chance it might succeed.
+                * Either way it's a valid race to just return -ENOENT here.
+                */
+               if (!(open_flags & O_CREAT))
+                       return -ENOENT;
+
+               /* Otherwise we just unhash it to be rehashed afresh via
+                * lookup if necessary
+                */
+               d_drop(dentry);
+       }
+
        it = kzalloc(sizeof(*it), GFP_NOFS);
        if (!it)
                return -ENOMEM;
index 0b1760cba6e35a77fc391f44f9cfc7c263b9e254..78f524fcd2142dd486d20aba5fe75f461cc5f466 100644 (file)
@@ -3363,7 +3363,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
                if (!hif_workqueue) {
                        netdev_err(vif->ndev, "Failed to create workqueue\n");
                        result = -ENOMEM;
-                       goto _fail_mq_;
+                       goto _fail_;
                }
 
                setup_timer(&periodic_rssi, GetPeriodicRSSI,
@@ -3391,7 +3391,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
 
        clients_count++;
 
-_fail_mq_:
        destroy_workqueue(hif_workqueue);
 _fail_:
        return result;
index 3a66255f14fc77a7376eedee063452c0b45c06be..32215110d597b8889023d7a2d7408c0ad94bcdf7 100644 (file)
@@ -648,7 +648,7 @@ void wilc1000_wlan_deinit(struct net_device *dev)
                        mutex_unlock(&wl->hif_cs);
                }
                if (&wl->txq_event)
-                       wait_for_completion(&wl->txq_event);
+                       complete(&wl->txq_event);
 
                wlan_deinitialize_threads(dev);
                deinit_irq(dev);
index 9092600a1794ea97e10c6c834e465836c76c64b5..2c2e8aca8305d3f77f15b9d637b544dbd7c4ac83 100644 (file)
@@ -1191,7 +1191,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
        struct wilc_priv *priv;
        struct wilc_vif *vif;
        u32 i = 0;
-       u32 associatedsta = 0;
+       u32 associatedsta = ~0;
        u32 inactive_time = 0;
        priv = wiphy_priv(wiphy);
        vif = netdev_priv(dev);
@@ -1204,7 +1204,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
                        }
                }
 
-               if (associatedsta == -1) {
+               if (associatedsta == ~0) {
                        netdev_err(dev, "sta required is not associated\n");
                        return -ENOENT;
                }
index 71a339271fa5fe9d796d86be9077ebbc13d4f0dc..5f817923f374f5f08412bda03876c831da68b697 100644 (file)
@@ -504,6 +504,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                if (IS_ERR(priv->zone)) {
                        dev_err(dev, "can't register thermal zone\n");
                        ret = PTR_ERR(priv->zone);
+                       priv->zone = NULL;
                        goto error_unregister;
                }
 
index 9c15344b657acebb240e8555af47e572314acd5c..a8c20413dbda9711e1828a064f8a940900442e31 100644 (file)
@@ -648,6 +648,12 @@ static struct pci_device_id nhi_ids[] = {
                .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
+       {
+               .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+               .vendor = PCI_VENDOR_ID_INTEL,
+               .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
+               .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+       },
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL,
index 1e116f53d6dddb254c465ebd5362d2c6162066a1..9840fdecb73b5226f36ae0b61c4761f23d4dfbff 100644 (file)
@@ -372,7 +372,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
 
        if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
            sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
-           sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE)
+           sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE &&
+           sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE &&
+           sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE)
                tb_sw_warn(sw, "unsupported switch device id %#x\n",
                           sw->config.device_id);
 
index 122e0e4029fee9e42f4b1ba37f0439728f0d54a4..1a16feac9a36070715bbe26974661b823e3c08ab 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/serial_reg.h>
 #include <linux/dmaengine.h>
 
-#include "../serial_mctrl_gpio.h"
-
 struct uart_8250_dma {
        int (*tx_dma)(struct uart_8250_port *p);
        int (*rx_dma)(struct uart_8250_port *p);
@@ -133,43 +131,12 @@ void serial8250_em485_destroy(struct uart_8250_port *p);
 
 static inline void serial8250_out_MCR(struct uart_8250_port *up, int value)
 {
-       int mctrl_gpio = 0;
-
        serial_out(up, UART_MCR, value);
-
-       if (value & UART_MCR_RTS)
-               mctrl_gpio |= TIOCM_RTS;
-       if (value & UART_MCR_DTR)
-               mctrl_gpio |= TIOCM_DTR;
-
-       mctrl_gpio_set(up->gpios, mctrl_gpio);
 }
 
 static inline int serial8250_in_MCR(struct uart_8250_port *up)
 {
-       int mctrl, mctrl_gpio = 0;
-
-       mctrl = serial_in(up, UART_MCR);
-
-       /* save current MCR values */
-       if (mctrl & UART_MCR_RTS)
-               mctrl_gpio |= TIOCM_RTS;
-       if (mctrl & UART_MCR_DTR)
-               mctrl_gpio |= TIOCM_DTR;
-
-       mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio);
-
-       if (mctrl_gpio & TIOCM_RTS)
-               mctrl |= UART_MCR_RTS;
-       else
-               mctrl &= ~UART_MCR_RTS;
-
-       if (mctrl_gpio & TIOCM_DTR)
-               mctrl |= UART_MCR_DTR;
-       else
-               mctrl &= ~UART_MCR_DTR;
-
-       return mctrl;
+       return serial_in(up, UART_MCR);
 }
 
 #if defined(__alpha__) && !defined(CONFIG_PCI)
index 13ad5c3d2e681893aeaa0d675d17c9f04f06b967..dcf43f66404f12d2bd290a30adfe494dd056162e 100644 (file)
@@ -974,8 +974,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 
        uart = serial8250_find_match_or_unused(&up->port);
        if (uart && uart->port.type != PORT_8250_CIR) {
-               struct mctrl_gpios *gpios;
-
                if (uart->port.dev)
                        uart_remove_one_port(&serial8250_reg, &uart->port);
 
@@ -1013,13 +1011,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
                if (up->port.flags & UPF_FIXED_TYPE)
                        uart->port.type = up->port.type;
 
-               gpios = mctrl_gpio_init(&uart->port, 0);
-               if (IS_ERR(gpios)) {
-                       if (PTR_ERR(gpios) != -ENOSYS)
-                               return PTR_ERR(gpios);
-               } else
-                       uart->gpios = gpios;
-
                serial8250_set_defaults(uart);
 
                /* Possibly override default I/O functions.  */
index 737b4b3957b0bc2470cde5574eed1e5f7230c90a..0facc789fe7d4133e1c5e32d0a471db53bb3baa5 100644 (file)
@@ -31,7 +31,7 @@
 #define IO_ADDR2 0x60
 #define LDN 0x7
 
-#define IRQ_MODE       0x70
+#define FINTEK_IRQ_MODE        0x70
 #define IRQ_SHARE      BIT(4)
 #define IRQ_MODE_MASK  (BIT(6) | BIT(5))
 #define IRQ_LEVEL_LOW  0
@@ -195,7 +195,7 @@ static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
        outb(LDN, pdata->base_port + ADDR_PORT);
        outb(pdata->index, pdata->base_port + DATA_PORT);
 
-       outb(IRQ_MODE, pdata->base_port + ADDR_PORT);
+       outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT);
        tmp = inb(pdata->base_port + DATA_PORT);
 
        tmp &= ~IRQ_MODE_MASK;
index 339de9cd086612c60d17ef3c581b59df34a0022a..20c5db2f426406a50fc41ef90f2e268deac18f72 100644 (file)
@@ -168,6 +168,9 @@ static void mid8250_set_termios(struct uart_port *p,
        unsigned long w = BIT(24) - 1;
        unsigned long mul, div;
 
+       /* Gracefully handle the B0 case: fall back to B9600 */
+       fuart = fuart ? fuart : 9600 * 16;
+
        if (mid->board->freq < fuart) {
                /* Find prescaler value that satisfies Fuart < Fref */
                if (mid->board->freq > baud)
index e14982f36a04dff107b11f1f5e75b2b7a587390a..61ad6c3b20a02b82e6aa95b46fc6d467edc86334 100644 (file)
@@ -134,21 +134,18 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
 
        serial8250_do_set_mctrl(port, mctrl);
 
-       if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
-                                               UART_GPIO_RTS))) {
-               /*
-                * Turn off autoRTS if RTS is lowered and restore autoRTS
-                * setting if RTS is raised
-                */
-               lcr = serial_in(up, UART_LCR);
-               serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-               if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
-                       priv->efr |= UART_EFR_RTS;
-               else
-                       priv->efr &= ~UART_EFR_RTS;
-               serial_out(up, UART_EFR, priv->efr);
-               serial_out(up, UART_LCR, lcr);
-       }
+       /*
+        * Turn off autoRTS if RTS is lowered and restore autoRTS setting
+        * if RTS is raised
+        */
+       lcr = serial_in(up, UART_LCR);
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+       if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+               priv->efr |= UART_EFR_RTS;
+       else
+               priv->efr &= ~UART_EFR_RTS;
+       serial_out(up, UART_EFR, priv->efr);
+       serial_out(up, UART_LCR, lcr);
 }
 
 /*
@@ -449,9 +446,7 @@ static void omap_8250_set_termios(struct uart_port *port,
        priv->efr = 0;
        up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
 
-       if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW
-               && IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
-                                                       UART_GPIO_RTS))) {
+       if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
                /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
                up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
                priv->efr |= UART_EFR_CTS;
index 20ebaea5c414d6d70f371aa9d367cd40327fa909..bc51b32b2774e961627d7778c43d8ddf591249b4 100644 (file)
@@ -1950,6 +1950,43 @@ pci_wch_ch38x_setup(struct serial_private *priv,
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7954       0x7954
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7958       0x7958
 
+#define PCI_VENDOR_ID_ACCESIO                  0x494f
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB    0x1051
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S     0x1053
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB    0x105C
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S     0x105E
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB  0x1091
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2   0x1093
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB  0x1099
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4   0x109B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB   0x10D1
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM    0x10D3
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB   0x10DA
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM    0x10DC
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1   0x1108
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2   0x1110
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2   0x1111
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4   0x1118
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4   0x1119
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S      0x1152
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S      0x115A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2    0x1190
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2   0x1191
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4    0x1198
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4   0x1199
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM     0x11D0
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4    0x105A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4    0x105B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8    0x106A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8    0x106B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4    0x1098
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8    0x10A9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM     0x10D9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM     0x10E9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM     0x11D8
+
+
+
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584        0x1584
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588        0x1588
@@ -5112,6 +5149,108 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0,
                0, pbn_pericom_PI7C9X7958 },
+       /*
+        * ACCES I/O Products quad
+        */
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
        /*
         * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
         */
index 7481b95c6d846770b10f77a64e8231c7b2ad4e8e..bdfa659b9606a93360fd6e978a6b9c89d9a3e251 100644 (file)
@@ -1618,8 +1618,6 @@ static void serial8250_disable_ms(struct uart_port *port)
        if (up->bugs & UART_BUG_NOMSR)
                return;
 
-       mctrl_gpio_disable_ms(up->gpios);
-
        up->ier &= ~UART_IER_MSI;
        serial_port_out(port, UART_IER, up->ier);
 }
@@ -1632,8 +1630,6 @@ static void serial8250_enable_ms(struct uart_port *port)
        if (up->bugs & UART_BUG_NOMSR)
                return;
 
-       mctrl_gpio_enable_ms(up->gpios);
-
        up->ier |= UART_IER_MSI;
 
        serial8250_rpm_get(up);
@@ -1917,8 +1913,7 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port)
                ret |= TIOCM_DSR;
        if (status & UART_MSR_CTS)
                ret |= TIOCM_CTS;
-
-       return mctrl_gpio_get(up->gpios, &ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl);
 
index c9ec839a5ddf447a6b7b3fe3158ad4104e3024c2..7c6f7afca5ddeed7b25fa6272246fc539bf31b04 100644 (file)
@@ -6,7 +6,6 @@
 config SERIAL_8250
        tristate "8250/16550 and compatible serial support"
        select SERIAL_CORE
-       select SERIAL_MCTRL_GPIO if GPIOLIB
        ---help---
          This selects whether you want to include the driver for the standard
          serial ports.  The standard answer is Y.  People who might say N
index 065f5d97aa6708ebb3239d7778bb185d19412e5b..b93356834bb5660bb451f712c7ef6f13018bc5d3 100644 (file)
@@ -949,6 +949,15 @@ static int isr_setup_status_phase(struct ci_hdrc *ci)
        int retval;
        struct ci_hw_ep *hwep;
 
+       /*
+        * Unexpected USB controller behavior, caused by bad signal integrity
+        * or ground reference problems, can lead to isr_setup_status_phase
+        * being called with ci->status equal to NULL.
+        * If this situation occurs, you should review your USB hardware design.
+        */
+       if (WARN_ON_ONCE(!ci->status))
+               return -EPIPE;
+
        hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
        ci->status->context = ci;
        ci->status->complete = isr_setup_status_complete;
@@ -1596,8 +1605,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
 {
        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
 
-       /* Data+ pullup controlled by OTG state machine in OTG fsm mode */
-       if (ci_otg_is_fsm_mode(ci))
+       /*
+        * Data+ pullup controlled by OTG state machine in OTG fsm mode;
+        * and don't touch Data+ in host mode for dual role config.
+        */
+       if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
                return 0;
 
        pm_runtime_get_sync(&ci->gadget.dev);
index 051163189810d4c2a96b28dbb9f758f9ab102f14..a2d90aca779fa16fb07e1327f33a41129c719e63 100644 (file)
@@ -187,7 +187,7 @@ static const unsigned short high_speed_maxpacket_maxes[4] = {
        [USB_ENDPOINT_XFER_CONTROL] = 64,
        [USB_ENDPOINT_XFER_ISOC] = 1024,
        [USB_ENDPOINT_XFER_BULK] = 512,
-       [USB_ENDPOINT_XFER_INT] = 1023,
+       [USB_ENDPOINT_XFER_INT] = 1024,
 };
 static const unsigned short super_speed_maxpacket_maxes[4] = {
        [USB_ENDPOINT_XFER_CONTROL] = 512,
@@ -240,8 +240,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
        memcpy(&endpoint->desc, d, n);
        INIT_LIST_HEAD(&endpoint->urb_list);
 
-       /* Fix up bInterval values outside the legal range. Use 32 ms if no
-        * proper value can be guessed. */
+       /*
+        * Fix up bInterval values outside the legal range.
+        * Use 10 or 8 ms if no proper value can be guessed.
+        */
        i = 0;          /* i = min, j = max, n = default */
        j = 255;
        if (usb_endpoint_xfer_int(d)) {
@@ -250,13 +252,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                case USB_SPEED_SUPER_PLUS:
                case USB_SPEED_SUPER:
                case USB_SPEED_HIGH:
-                       /* Many device manufacturers are using full-speed
+                       /*
+                        * Many device manufacturers are using full-speed
                         * bInterval values in high-speed interrupt endpoint
-                        * descriptors. Try to fix those and fall back to a
-                        * 32 ms default value otherwise. */
+                        * descriptors. Try to fix those and fall back to an
+                        * 8-ms default value otherwise.
+                        */
                        n = fls(d->bInterval*8);
                        if (n == 0)
-                               n = 9;  /* 32 ms = 2^(9-1) uframes */
+                               n = 7;  /* 8 ms = 2^(7-1) uframes */
                        j = 16;
 
                        /*
@@ -271,10 +275,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                        }
                        break;
                default:                /* USB_SPEED_FULL or _LOW */
-                       /* For low-speed, 10 ms is the official minimum.
+                       /*
+                        * For low-speed, 10 ms is the official minimum.
                         * But some "overclocked" devices might want faster
-                        * polling so we'll allow it. */
-                       n = 32;
+                        * polling so we'll allow it.
+                        */
+                       n = 10;
                        break;
                }
        } else if (usb_endpoint_xfer_isoc(d)) {
@@ -282,10 +288,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                j = 16;
                switch (to_usb_device(ddev)->speed) {
                case USB_SPEED_HIGH:
-                       n = 9;          /* 32 ms = 2^(9-1) uframes */
+                       n = 7;          /* 8 ms = 2^(7-1) uframes */
                        break;
                default:                /* USB_SPEED_FULL */
-                       n = 6;          /* 32 ms = 2^(6-1) frames */
+                       n = 4;          /* 8 ms = 2^(4-1) frames */
                        break;
                }
        }
index e6a6d67c87058039cb39e23683cde1af49c3686a..09c8d9ca61aea24d7b3d6669baacd0b43b7f04f6 100644 (file)
@@ -1709,11 +1709,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        as->urb->start_frame = uurb->start_frame;
        as->urb->number_of_packets = number_of_packets;
        as->urb->stream_id = stream_id;
-       if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
-                       ps->dev->speed == USB_SPEED_HIGH)
-               as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
-       else
-               as->urb->interval = ep->desc.bInterval;
+
+       if (ep->desc.bInterval) {
+               if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
+                               ps->dev->speed == USB_SPEED_HIGH ||
+                               ps->dev->speed >= USB_SPEED_SUPER)
+                       as->urb->interval = 1 <<
+                                       min(15, ep->desc.bInterval - 1);
+               else
+                       as->urb->interval = ep->desc.bInterval;
+       }
+
        as->urb->context = as;
        as->urb->complete = async_completed;
        for (totlen = u = 0; u < number_of_packets; u++) {
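A minimal sketch of the interval selection the hunk above introduces, assuming bInterval == 0 leaves the URB interval untouched; the helper name is illustrative.

#include <linux/kernel.h>
#include <linux/usb.h>

/* Illustrative only: exponent encoding for high speed, SuperSpeed and ISO,
 * direct frame count otherwise; bInterval == 0 keeps the default.
 */
static void example_set_interval(struct urb *urb,
				 const struct usb_endpoint_descriptor *d,
				 enum usb_device_speed speed, bool is_iso)
{
	if (!d->bInterval)
		return;

	if (is_iso || speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER)
		urb->interval = 1 << min(15, d->bInterval - 1);
	else
		urb->interval = d->bInterval;
}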
index 9fae0291cd69f8c3314f547eeb9d0557af517e49..d64551243789a2a1cd2ea045df62d8407a8fbde1 100644 (file)
@@ -868,6 +868,7 @@ struct dwc2_hsotg {
        void *priv;
        int     irq;
        struct clk *clk;
+       struct reset_control *reset;
 
        unsigned int queuing_high_bandwidth:1;
        unsigned int srp_success:1;
index fc6f5251de5d1e891db129e77f782766ea4b6ef9..530959a8a6d13952b18ab1f87f6e6d47f7aaeb49 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_data/s3c-hsotg.h>
+#include <linux/reset.h>
 
 #include <linux/usb/of.h>
 
@@ -337,6 +338,24 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
 {
        int i, ret;
 
+       hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
+       if (IS_ERR(hsotg->reset)) {
+               ret = PTR_ERR(hsotg->reset);
+               switch (ret) {
+               case -ENOENT:
+               case -ENOTSUPP:
+                       hsotg->reset = NULL;
+                       break;
+               default:
+                       dev_err(hsotg->dev, "error getting reset control %d\n",
+                               ret);
+                       return ret;
+               }
+       }
+
+       if (hsotg->reset)
+               reset_control_deassert(hsotg->reset);
+
        /* Set default UTMI width */
        hsotg->phyif = GUSBCFG_PHYIF16;
 
@@ -434,6 +453,9 @@ static int dwc2_driver_remove(struct platform_device *dev)
        if (hsotg->ll_hw_enabled)
                dwc2_lowlevel_hw_disable(hsotg);
 
+       if (hsotg->reset)
+               reset_control_assert(hsotg->reset);
+
        return 0;
 }
 
index 946643157b78988a5c45655d9afe55a91d960b3d..35d092456bec1da828ea41476e421d47809380a5 100644 (file)
@@ -1192,6 +1192,7 @@ static int dwc3_runtime_resume(struct device *dev)
        }
 
        pm_runtime_mark_last_busy(dev);
+       pm_runtime_put(dev);
 
        return 0;
 }
index 22dfc3dd6a13037e81561bba94b7c65256787f7f..33ab2a203c1bdc4a9d42d7bed9869ad83d2b4cb8 100644 (file)
@@ -192,7 +192,7 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
        int ret;
 
        ret = sprintf(str, "ep%d%s: ", epnum >> 1,
-                       (epnum & 1) ? "in" : "in");
+                       (epnum & 1) ? "in" : "out");
        if (ret < 0)
                return "UNKNOWN";
 
index 2eb84d6c24a6903fe57039e59a4321d7bb7ac6b3..6df0f5dad9a4cdecd4c1841c6a277de167e1960e 100644 (file)
@@ -243,6 +243,15 @@ static int dwc3_pci_runtime_suspend(struct device *dev)
        return -EBUSY;
 }
 
+static int dwc3_pci_runtime_resume(struct device *dev)
+{
+       struct platform_device *dwc3 = dev_get_drvdata(dev);
+
+       return pm_runtime_get(&dwc3->dev);
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
 static int dwc3_pci_pm_dummy(struct device *dev)
 {
        /*
@@ -255,11 +264,11 @@ static int dwc3_pci_pm_dummy(struct device *dev)
         */
        return 0;
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
-       SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_pm_dummy,
+       SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
                NULL)
 };
 
index 1f5597ef945d409282cbeb4caf6c981ea8b1cade..122e64df2f4dc173123ddbfe7ef934d8630851a2 100644 (file)
@@ -1433,7 +1433,7 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
 
 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
 {
-       unsigned long           timeout;
+       int                     retries;
 
        int                     ret;
        u32                     reg;
@@ -1484,9 +1484,9 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
        }
 
        /* poll until Link State changes to ON */
-       timeout = jiffies + msecs_to_jiffies(100);
+       retries = 20000;
 
-       while (!time_after(jiffies, timeout)) {
+       while (retries--) {
                reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 
                /* in HS, means ON */
index d58bfc32be9ec7a20f52de5beb53b784a9475829..007ec6e4a5d42cf391060b3e99b36b8bf3ff2848 100644 (file)
@@ -341,11 +341,15 @@ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
 {
        struct sk_buff  *skb2 = NULL;
        struct usb_ep   *in = port->in_ep;
-       int             padlen = 0;
-       u16             len = skb->len;
+       int             headroom, tailroom, padlen = 0;
+       u16             len;
 
-       int headroom = skb_headroom(skb);
-       int tailroom = skb_tailroom(skb);
+       if (!skb)
+               return NULL;
+
+       len = skb->len;
+       headroom = skb_headroom(skb);
+       tailroom = skb_tailroom(skb);
 
        /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
         * stick two bytes of zero-length EEM packet on the end.
index c8005823b1905389710d2839249766939eff10b1..16562e46112179b419a353a6508b6629c7d10d7e 100644 (file)
@@ -374,6 +374,9 @@ static struct sk_buff *rndis_add_header(struct gether *port,
 {
        struct sk_buff *skb2;
 
+       if (!skb)
+               return NULL;
+
        skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
        rndis_add_hdr(skb2);
 
index 6ded6345cd09bde695c2cb83fb9a79a6a88de9af..e0cd1e4c88927cfc29e12f050ec2a4eb8555136b 100644 (file)
@@ -375,10 +375,15 @@ __acquires(&port->port_lock)
 */
 {
        struct list_head        *pool = &port->write_pool;
-       struct usb_ep           *in = port->port_usb->in;
+       struct usb_ep           *in;
        int                     status = 0;
        bool                    do_tty_wake = false;
 
+       if (!port->port_usb)
+               return status;
+
+       in = port->port_usb->in;
+
        while (!port->write_busy && !list_empty(pool)) {
                struct usb_request      *req;
                int                     len;
index 934f83881c3074250d9117df691463cddb61ca0c..40c04bb25f2f9f2eee7c4b326e8585e6de3e4fb9 100644 (file)
@@ -827,7 +827,7 @@ void usb_gadget_unmap_request_by_dev(struct device *dev,
                return;
 
        if (req->num_mapped_sgs) {
-               dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
+               dma_unmap_sg(dev, req->sg, req->num_sgs,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
                req->num_mapped_sgs = 0;
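The one-line change above pairs dma_unmap_sg() with the nents originally handed to dma_map_sg() (num_sgs) rather than the mapped count it returned. A hedged sketch of that pairing, with illustrative helper names.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/usb/gadget.h>

/* Illustrative only: map and unmap must use the same original nents. */
static int example_map_request(struct device *dev, struct usb_request *req,
			       enum dma_data_direction dir)
{
	int mapped = dma_map_sg(dev, req->sg, req->num_sgs, dir);

	if (mapped == 0)
		return -EFAULT;
	req->num_mapped_sgs = mapped;	/* may be smaller than num_sgs */
	return 0;
}

static void example_unmap_request(struct device *dev, struct usb_request *req,
				  enum dma_data_direction dir)
{
	dma_unmap_sg(dev, req->sg, req->num_sgs, dir);	/* original nents */
	req->num_mapped_sgs = 0;
}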
index cf8819a5c5b263610eae75cf2066a6f1471b417e..8bb011ea78f7df483f23a0325fc082f36fb8b900 100644 (file)
@@ -1878,11 +1878,8 @@ static int qe_get_frame(struct usb_gadget *gadget)
 
        tmp = in_be16(&udc->usb_param->frame_n);
        if (tmp & 0x8000)
-               tmp = tmp & 0x07ff;
-       else
-               tmp = -EINVAL;
-
-       return (int)tmp;
+               return tmp & 0x07ff;
+       return -EINVAL;
 }
 
 static int fsl_qe_start(struct usb_gadget *gadget,
index 93a3bec81df78006f1ba3871aece717fe69e05b3..fb8fc34827aba12f5132b3232f0ffca3c24e89a8 100644 (file)
 
 /* DRD_CON */
 #define DRD_CON_PERI_CON       BIT(24)
+#define DRD_CON_VBOUT          BIT(0)
 
 /* USB_INT_ENA_1 and USB_INT_STA_1 */
 #define USB_INT_1_B3_PLLWKUP   BIT(31)
@@ -363,6 +364,7 @@ static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
 {
        /* FIXME: How to change host / peripheral mode as well? */
        usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+       usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
 
        usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
        usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
index fd9fd12e486178d50d37fee53a328a8a3530f062..797137e26549b566474d99e1d3e6efe277aa4648 100644 (file)
@@ -850,6 +850,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        spin_lock_irqsave(&xhci->lock, flags);
 
        ep->stop_cmds_pending--;
+       if (xhci->xhc_state & XHCI_STATE_REMOVING) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               return;
+       }
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Stop EP timer ran, but another timer marked "
@@ -903,7 +907,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Calling usb_hc_died()");
-       usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+       usb_hc_died(xhci_to_hcd(xhci));
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "xHCI host controller is dead.");
 }
index 886526b5fcddbfcae620b2587f1c1a1125a4933c..73cfa13fc0dc6aef1cbb283ba8462faea7540a29 100644 (file)
@@ -87,7 +87,7 @@ config USB_MUSB_DA8XX
 config USB_MUSB_TUSB6010
        tristate "TUSB6010"
        depends on HAS_IOMEM
-       depends on ARCH_OMAP2PLUS || COMPILE_TEST
+       depends on (ARCH_OMAP2PLUS || COMPILE_TEST) && !BLACKFIN
        depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
 
 config USB_MUSB_OMAP2PLUS
index 192248f974ec58c88851441e7569d82a4c867f14..fe08e776fec3e874aaa06abf6c7f301f019d2672 100644 (file)
@@ -290,6 +290,7 @@ int musb_hub_control(
        u32             temp;
        int             retval = 0;
        unsigned long   flags;
+       bool            start_musb = false;
 
        spin_lock_irqsave(&musb->lock, flags);
 
@@ -390,7 +391,7 @@ int musb_hub_control(
                         * logic relating to VBUS power-up.
                         */
                        if (!hcd->self.is_b_host && musb_has_gadget(musb))
-                               musb_start(musb);
+                               start_musb = true;
                        break;
                case USB_PORT_FEAT_RESET:
                        musb_port_reset(musb, true);
@@ -451,5 +452,9 @@ error:
                retval = -EPIPE;
        }
        spin_unlock_irqrestore(&musb->lock, flags);
+
+       if (start_musb)
+               musb_start(musb);
+
        return retval;
 }
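
The change above only records the decision under musb->lock (start_musb = true) and calls musb_start() after spin_unlock_irqrestore(), apparently so the start path never runs with the spinlock held. A generic sketch of the "decide under the lock, act after dropping it" idiom; my_dev, my_control() and my_slow_start() are invented:

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_dev {
        spinlock_t lock;        /* assumed initialised with spin_lock_init() */
        int state;
};

static void my_slow_start(struct my_dev *dev)
{
        /* may sleep or take dev->lock again; must run unlocked */
}

static int my_control(struct my_dev *dev)
{
        unsigned long flags;
        bool start = false;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->state == 0) {
                dev->state = 1;
                start = true;           /* remember the decision only */
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        if (start)
                my_slow_start(dev);     /* safe: no spinlock held here */
        return 0;
}
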
index 980c9dee09eb3b64ed3b0676090b2d86d2928845..427efb5eebae9b386c2437d0da82477462cd184f 100644 (file)
@@ -144,14 +144,18 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
 int usb_gen_phy_init(struct usb_phy *phy)
 {
        struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
+       int ret;
 
        if (!IS_ERR(nop->vcc)) {
                if (regulator_enable(nop->vcc))
                        dev_err(phy->dev, "Failed to enable power\n");
        }
 
-       if (!IS_ERR(nop->clk))
-               clk_prepare_enable(nop->clk);
+       if (!IS_ERR(nop->clk)) {
+               ret = clk_prepare_enable(nop->clk);
+               if (ret)
+                       return ret;
+       }
 
        nop_reset(nop);
 
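
clk_prepare_enable() can fail, so the hunk above propagates its return value instead of silently carrying on with a disabled clock. A minimal sketch of the check; my_phy and my_phy_init() are invented names:

#include <linux/clk.h>
#include <linux/err.h>

struct my_phy {
        struct clk *clk;        /* may hold an ERR_PTR if the clock is optional */
};

static int my_phy_init(struct my_phy *phy)
{
        int ret;

        if (!IS_ERR(phy->clk)) {
                ret = clk_prepare_enable(phy->clk);
                if (ret)
                        return ret;     /* do not pretend init succeeded */
        }

        /* ... rest of the init sequence ... */
        return 0;
}
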
index d4be5d5948960cd32353d3007be4373d914be114..28965ef4f824a0117e94fc8c271b347858d0a5e6 100644 (file)
@@ -282,9 +282,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
        if (usbhs_mod_is_host(priv))
                usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
 
-       usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
+       /*
+        * The driver should not clear the xxxSTS after the line of
+        * "call irq callback functions" because each "if" statement is
+        * possible to call the callback function for avoiding any side effects.
+        */
+       if (irq_state.intsts0 & BRDY)
+               usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
        usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
-       usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
+       if (irq_state.intsts0 & BEMP)
+               usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
 
        /*
         * call irq callback functions
index 92bc83b92d10d3a02fc74cbda259ecec6c409d21..c4c64740a3e72ebdfd0f74a190e2d44f4ec10b47 100644 (file)
@@ -1076,7 +1076,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 
        gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
        dev_info(dev, "%stransceiver found\n",
-                gpriv->transceiver ? "" : "no ");
+                !IS_ERR(gpriv->transceiver) ? "" : "no ");
 
        /*
         * CAUTION
index 5608af4a369d47f1f63402a22173e2971a51fdaf..de9992b492b08ba447fb1f594e0a4f12f1d3c9a7 100644 (file)
@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
 
        if (urb->transfer_buffer == NULL) {
                urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
-                                              GFP_KERNEL);
+                                              GFP_ATOMIC);
                if (!urb->transfer_buffer)
                        goto exit;
        }
index ed378fb232e7cf7b703deca78e14c75143511100..57426d703a098dd2d45d67de11acc0ccb5b2f3a5 100644 (file)
@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
        }
 
        if (urb->transfer_buffer == NULL) {
-               urb->transfer_buffer =
-                   kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
+               urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+                                              GFP_ATOMIC);
                if (!urb->transfer_buffer)
                        goto exit;
        }
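
Both mos7720_write() and mos7840_write() above switch the transfer-buffer allocation from GFP_KERNEL to GFP_ATOMIC, evidently because the tty write path can be reached from a context that must not sleep. A tiny sketch of the flag choice; alloc_tx_buffer() is an invented helper:

#include <linux/slab.h>

static unsigned char *alloc_tx_buffer(size_t len, bool atomic_context)
{
        /*
         * GFP_KERNEL may sleep to reclaim memory; GFP_ATOMIC never sleeps
         * but fails more readily, so callers must handle a NULL return.
         */
        return kmalloc(len, atomic_context ? GFP_ATOMIC : GFP_KERNEL);
}
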
index bc472584a229db589f1fb54784df385cbfc737c0..9894e341c6ac93953a119c224729881a00682ef9 100644 (file)
@@ -525,6 +525,12 @@ static void option_instat_callback(struct urb *urb);
 #define VIATELECOM_VENDOR_ID                   0x15eb
 #define VIATELECOM_PRODUCT_CDS7                        0x0001
 
+/* WeTelecom products */
+#define WETELECOM_VENDOR_ID                    0x22de
+#define WETELECOM_PRODUCT_WMD200               0x6801
+#define WETELECOM_PRODUCT_6802                 0x6802
+#define WETELECOM_PRODUCT_WMD300               0x6803
+
 struct option_blacklist_info {
        /* bitmask of interface numbers blacklisted for send_setup */
        const unsigned long sendsetup;
@@ -1991,6 +1997,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index a204782ae530e1de8f7982f869a94f9b80ce9dad..e98b6e57b703dbd84c3b618fe56dc50eff3fd7ec 100644 (file)
@@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS);
 /* Infineon Flashloader driver */
 #define FLASHLOADER_IDS()              \
        { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
-       { USB_DEVICE(0x8087, 0x0716) }
+       { USB_DEVICE(0x8087, 0x0716) }, \
+       { USB_DEVICE(0x8087, 0x0801) }
 DEVICE(flashloader, FLASHLOADER_IDS);
 
 /* Google Serial USB SubClass */
index e383ecdaca594ce0786c321af35ee59b1807007e..ed9c9eeedfe5f83fd1b7b3ca475280643d8758f1 100644 (file)
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
  * making all of the arch DMA ops work on the vring device itself
  * is a mess.  For now, we use the parent device for DMA ops.
  */
-struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 {
        return vq->vq.vdev->dev.parent;
 }
index fb8e45b88cd4ecf0aa4b3c485051d7f437ba7bd8..4fe81d1c60f962b53392a6b4b0d047f129509c87 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,7 +239,12 @@ static struct dentry *aio_mount(struct file_system_type *fs_type,
        static const struct dentry_operations ops = {
                .d_dname        = simple_dname,
        };
-       return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
+       struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
+                                          AIO_RING_MAGIC);
+
+       if (!IS_ERR(root))
+               root->d_sb->s_iflags |= SB_I_NOEXEC;
+       return root;
 }
 
 /* aio_setup
index 7f6aff3f72ebacb3ba6f71136abeaa2047da62ab..e5495f37c6ed50cc7b391b24227c637880532afc 100644 (file)
@@ -853,6 +853,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
                current->flags |= PF_RANDOMIZE;
 
        setup_new_exec(bprm);
+       install_exec_creds(bprm);
 
        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
@@ -1044,7 +1045,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
                goto out;
 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
 
-       install_exec_creds(bprm);
        retval = create_elf_tables(bprm, &loc->elf_ex,
                          load_addr, interp_load_addr);
        if (retval < 0)
index eff3993c77b333f875ade96769e6f6dad5b47143..33fe0355110552f93b1560476101de1fb4a86ae7 100644 (file)
@@ -427,6 +427,7 @@ struct btrfs_space_info {
        struct list_head ro_bgs;
        struct list_head priority_tickets;
        struct list_head tickets;
+       u64 tickets_id;
 
        struct rw_semaphore groups_sem;
        /* for block groups in our same type */
index 0450dc4105339bc0cf14cb1e861ee43d83e27ba6..38c2df84cabd0cf2116239d1cf08d1d18709890f 100644 (file)
@@ -4901,11 +4901,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
        u64 expected;
        u64 to_reclaim = 0;
 
-       to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
-       if (can_overcommit(root, space_info, to_reclaim,
-                          BTRFS_RESERVE_FLUSH_ALL))
-               return 0;
-
        list_for_each_entry(ticket, &space_info->tickets, list)
                to_reclaim += ticket->bytes;
        list_for_each_entry(ticket, &space_info->priority_tickets, list)
@@ -4913,6 +4908,11 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
        if (to_reclaim)
                return to_reclaim;
 
+       to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
+       if (can_overcommit(root, space_info, to_reclaim,
+                          BTRFS_RESERVE_FLUSH_ALL))
+               return 0;
+
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_pinned + space_info->bytes_readonly +
               space_info->bytes_may_use;
@@ -4966,12 +4966,12 @@ static void wake_all_tickets(struct list_head *head)
  */
 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
 {
-       struct reserve_ticket *last_ticket = NULL;
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        u64 to_reclaim;
        int flush_state;
        int commit_cycles = 0;
+       u64 last_tickets_id;
 
        fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
@@ -4984,8 +4984,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
                spin_unlock(&space_info->lock);
                return;
        }
-       last_ticket = list_first_entry(&space_info->tickets,
-                                      struct reserve_ticket, list);
+       last_tickets_id = space_info->tickets_id;
        spin_unlock(&space_info->lock);
 
        flush_state = FLUSH_DELAYED_ITEMS_NR;
@@ -5005,10 +5004,10 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
                                                              space_info);
                ticket = list_first_entry(&space_info->tickets,
                                          struct reserve_ticket, list);
-               if (last_ticket == ticket) {
+               if (last_tickets_id == space_info->tickets_id) {
                        flush_state++;
                } else {
-                       last_ticket = ticket;
+                       last_tickets_id = space_info->tickets_id;
                        flush_state = FLUSH_DELAYED_ITEMS_NR;
                        if (commit_cycles)
                                commit_cycles--;
@@ -5384,6 +5383,7 @@ again:
                        list_del_init(&ticket->list);
                        num_bytes -= ticket->bytes;
                        ticket->bytes = 0;
+                       space_info->tickets_id++;
                        wake_up(&ticket->wait);
                } else {
                        ticket->bytes -= num_bytes;
@@ -5426,6 +5426,7 @@ again:
                        num_bytes -= ticket->bytes;
                        space_info->bytes_may_use += ticket->bytes;
                        ticket->bytes = 0;
+                       space_info->tickets_id++;
                        wake_up(&ticket->wait);
                } else {
                        trace_btrfs_space_reservation(fs_info, "space_info",
@@ -8216,6 +8217,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 {
        int ret;
        struct btrfs_block_group_cache *block_group;
+       struct btrfs_space_info *space_info;
 
        /*
         * Mixed block groups will exclude before processing the log so we only
@@ -8231,9 +8233,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
        if (!block_group)
                return -EINVAL;
 
-       ret = btrfs_add_reserved_bytes(block_group, ins->offset,
-                                      ins->offset, 0);
-       BUG_ON(ret); /* logic error */
+       space_info = block_group->space_info;
+       spin_lock(&space_info->lock);
+       spin_lock(&block_group->lock);
+       space_info->bytes_reserved += ins->offset;
+       block_group->reserved += ins->offset;
+       spin_unlock(&block_group->lock);
+       spin_unlock(&space_info->lock);
+
        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
                                         0, owner, offset, ins, 1);
        btrfs_put_block_group(block_group);
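
The btrfs changes above stop comparing the first reserve_ticket pointer across flush iterations and instead bump a per-space_info tickets_id whenever a ticket is satisfied, presumably because a freed ticket can be reallocated at the same address and make the pointer comparison report false "no progress". A standalone illustration of the generation-counter idea; queue, retire_head() and made_progress() are invented names:

#include <stdbool.h>
#include <stdint.h>

struct queue {
        uint64_t generation;    /* bumped whenever the head entry is retired */
        /* ... list of waiting entries ... */
};

/* Retire the head waiter: the counter, not its address, records progress. */
static void retire_head(struct queue *q)
{
        /* free or wake the head entry here */
        q->generation++;
}

/* Has anything been retired since the caller last looked? */
static bool made_progress(const struct queue *q, uint64_t last_seen)
{
        return q->generation != last_seen;
}
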
index 8a2c2a07987b2e02b6023428b4b931940baffc03..c0c13dc6fe1286673982dfa78d9930cb219927dd 100644 (file)
@@ -4200,9 +4200,11 @@ restart:
                err = PTR_ERR(trans);
                goto out_free;
        }
-       err = qgroup_fix_relocated_data_extents(trans, rc);
-       if (err < 0) {
-               btrfs_abort_transaction(trans, err);
+       ret = qgroup_fix_relocated_data_extents(trans, rc);
+       if (ret < 0) {
+               btrfs_abort_transaction(trans, ret);
+               if (!err)
+                       err = ret;
                goto out_free;
        }
        btrfs_commit_transaction(trans, rc->extent_root);
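
The relocation hunk above keeps the first recorded err and only adopts the new failure when no earlier error exists, so the original cause is what gets returned. The same convention as a one-line helper (keep_first_error() is an invented name):

/* Keep the first recorded error; later failures must not overwrite it. */
static inline int keep_first_error(int err, int ret)
{
        return err ? err : ret;
}

Call sites would then read err = keep_first_error(err, ret); after each step that can fail.
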
index efe129fe26788c1078d737a2bc238373c2efcbac..a87675ffd02b33a9766eb229eea2b3b4200b783e 100644 (file)
@@ -4268,10 +4268,12 @@ static int process_all_refs(struct send_ctx *sctx,
        }
        btrfs_release_path(path);
 
+       /*
+        * We don't actually care about pending_move as we are simply
+        * re-creating this inode and will be rename'ing it into place once we
+        * rename the parent directory.
+        */
        ret = process_recorded_refs(sctx, &pending_move);
-       /* Only applicable to an incremental send. */
-       ASSERT(pending_move == 0);
-
 out:
        btrfs_free_path(path);
        return ret;
index e935035ac03435295c6bfcf9fd58c2b4978e65c8..ef9c55bc79074534e826fd30c9e72a0d5036af84 100644 (file)
@@ -2867,6 +2867,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
        if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
                blk_finish_plug(&plug);
+               list_del_init(&root_log_ctx.list);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = root_log_ctx.log_ret;
                goto out;
index c64a0b794d491b39d3dd94ab9c17951a5a0fc2d8..df4b3e6fa56398d248d51e0b6215a1773dacc1ec 100644 (file)
@@ -597,7 +597,7 @@ static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
        if (is_hash_order(new_pos)) {
                /* no need to reset last_name for a forward seek when
                 * dentries are sorted in hash order */
-       } else if (fi->frag |= fpos_frag(new_pos)) {
+       } else if (fi->frag != fpos_frag(new_pos)) {
                return true;
        }
        rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
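
The ceph fix above replaces fi->frag |= fpos_frag(new_pos) with the intended inequality test: the compound assignment both corrupts fi->frag and is then evaluated as a truth value, and compilers typically do not flag compound assignments in conditions the way -Wparentheses flags a plain '='. A standalone reproduction of the slip:

#include <stdio.h>

int main(void)
{
        unsigned frag = 0x10, new_frag = 0x01;

        if (frag |= new_frag)           /* BUG: modifies frag, then tests it */
                printf("taken, frag is now 0x%x\n", frag);

        frag = 0x10;
        if (frag != new_frag)           /* intended: a pure comparison */
                printf("taken, frag still 0x%x\n", frag);

        return 0;
}
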
index 6bbec5e784cd493c7d88cec2a345ced6eb7bff61..14ae4b8e1a3ce8370b282503c91a1ff428d61f5b 100644 (file)
@@ -609,6 +609,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
        char *s, *p;
        char sep;
 
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+               return dget(sb->s_root);
+
        full_path = cifs_build_path_to_root(vol, cifs_sb,
                                            cifs_sb_master_tcon(cifs_sb));
        if (full_path == NULL)
@@ -686,26 +689,22 @@ cifs_do_mount(struct file_system_type *fs_type,
        cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
        if (cifs_sb->mountdata == NULL) {
                root = ERR_PTR(-ENOMEM);
-               goto out_cifs_sb;
+               goto out_free;
        }
 
-       if (volume_info->prepath) {
-               cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
-               if (cifs_sb->prepath == NULL) {
-                       root = ERR_PTR(-ENOMEM);
-                       goto out_cifs_sb;
-               }
+       rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
+       if (rc) {
+               root = ERR_PTR(rc);
+               goto out_free;
        }
 
-       cifs_setup_cifs_sb(volume_info, cifs_sb);
-
        rc = cifs_mount(cifs_sb, volume_info);
        if (rc) {
                if (!(flags & MS_SILENT))
                        cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
                                 rc);
                root = ERR_PTR(rc);
-               goto out_mountdata;
+               goto out_free;
        }
 
        mnt_data.vol = volume_info;
@@ -735,11 +734,7 @@ cifs_do_mount(struct file_system_type *fs_type,
                sb->s_flags |= MS_ACTIVE;
        }
 
-       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
-               root = dget(sb->s_root);
-       else
-               root = cifs_get_root(volume_info, sb);
-
+       root = cifs_get_root(volume_info, sb);
        if (IS_ERR(root))
                goto out_super;
 
@@ -752,9 +747,9 @@ out:
        cifs_cleanup_volume_info(volume_info);
        return root;
 
-out_mountdata:
+out_free:
+       kfree(cifs_sb->prepath);
        kfree(cifs_sb->mountdata);
-out_cifs_sb:
        kfree(cifs_sb);
 out_nls:
        unload_nls(volume_info->local_nls);
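
The cifs_do_mount() rework above funnels every failure through a single out_free label that frees prepath, mountdata and the cifs_sb itself; since kfree(NULL) is a no-op, one label can cover every partially-constructed state. A compact sketch of the idiom; struct ctx, ctx_create() and its fields are invented:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

struct ctx {
        char *prepath;
        char *mountdata;
};

static struct ctx *ctx_create(const char *data, const char *prepath)
{
        struct ctx *ctx;
        int rc = -ENOMEM;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        ctx->mountdata = kstrdup(data, GFP_KERNEL);
        if (!ctx->mountdata)
                goto out_free;

        if (prepath) {
                ctx->prepath = kstrdup(prepath, GFP_KERNEL);
                if (!ctx->prepath)
                        goto out_free;
        }
        return ctx;

out_free:
        /* kfree(NULL) is safe, so one label handles every failure point. */
        kfree(ctx->prepath);
        kfree(ctx->mountdata);
        kfree(ctx);
        return ERR_PTR(rc);
}
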
index 1243bd326591a626028727841f818cf0ce3f7615..95dab43646f040011011a7b1c4bce4446c80891b 100644 (file)
@@ -184,7 +184,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
                                 unsigned int to_read);
 extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
                                      struct page *page, unsigned int to_read);
-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
                               struct cifs_sb_info *cifs_sb);
 extern int cifs_match_super(struct super_block *, void *);
 extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
index 7ae03283bd61c128c6c804805c30c6c25da79f31..2e4f4bad8b1e9bf75cc76074914b8c2c8f63a3ff 100644 (file)
@@ -2781,6 +2781,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
        return 1;
 }
 
+static int
+match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+{
+       struct cifs_sb_info *old = CIFS_SB(sb);
+       struct cifs_sb_info *new = mnt_data->cifs_sb;
+
+       if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
+               if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
+                       return 0;
+               /* The prepath should be null terminated strings */
+               if (strcmp(new->prepath, old->prepath))
+                       return 0;
+
+               return 1;
+       }
+       return 0;
+}
+
 int
 cifs_match_super(struct super_block *sb, void *data)
 {
@@ -2808,7 +2826,8 @@ cifs_match_super(struct super_block *sb, void *data)
 
        if (!match_server(tcp_srv, volume_info) ||
            !match_session(ses, volume_info) ||
-           !match_tcon(tcon, volume_info->UNC)) {
+           !match_tcon(tcon, volume_info->UNC) ||
+           !match_prepath(sb, mnt_data)) {
                rc = 0;
                goto out;
        }
@@ -3222,7 +3241,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
        }
 }
 
-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
                        struct cifs_sb_info *cifs_sb)
 {
        INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
@@ -3316,6 +3335,14 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 
        if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
                cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
+
+       if (pvolume_info->prepath) {
+               cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
+               if (cifs_sb->prepath == NULL)
+                       return -ENOMEM;
+       }
+
+       return 0;
 }
 
 static void
index 0f9961eede1e74021c43c4943b2bb419ed09a198..ed115acb5dee04bd15726ed2b9f368f6d13315cc 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/random.h>
 #include <linux/string.h>
 #include <linux/fscrypto.h>
+#include <linux/mount.h>
 
 static int inode_has_encryption_context(struct inode *inode)
 {
@@ -92,26 +93,42 @@ static int create_encryption_context_from_policy(struct inode *inode,
        return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
 }
 
-int fscrypt_process_policy(struct inode *inode,
+int fscrypt_process_policy(struct file *filp,
                                const struct fscrypt_policy *policy)
 {
+       struct inode *inode = file_inode(filp);
+       int ret;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
        if (policy->version != 0)
                return -EINVAL;
 
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
        if (!inode_has_encryption_context(inode)) {
-               if (!inode->i_sb->s_cop->empty_dir)
-                       return -EOPNOTSUPP;
-               if (!inode->i_sb->s_cop->empty_dir(inode))
-                       return -ENOTEMPTY;
-               return create_encryption_context_from_policy(inode, policy);
+               if (!S_ISDIR(inode->i_mode))
+                       ret = -EINVAL;
+               else if (!inode->i_sb->s_cop->empty_dir)
+                       ret = -EOPNOTSUPP;
+               else if (!inode->i_sb->s_cop->empty_dir(inode))
+                       ret = -ENOTEMPTY;
+               else
+                       ret = create_encryption_context_from_policy(inode,
+                                                                   policy);
+       } else if (!is_encryption_context_consistent_with_policy(inode,
+                                                                policy)) {
+               printk(KERN_WARNING
+                      "%s: Policy inconsistent with encryption context\n",
+                      __func__);
+               ret = -EINVAL;
        }
 
-       if (is_encryption_context_consistent_with_policy(inode, policy))
-               return 0;
-
-       printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
-              __func__);
-       return -EINVAL;
+       mnt_drop_write_file(filp);
+       return ret;
 }
 EXPORT_SYMBOL(fscrypt_process_policy);
 
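
The fscrypt change above pulls the ownership check and the mnt_want_write_file()/mnt_drop_write_file() pairing into fscrypt_process_policy() and routes every branch through a single ret, so the write reference is dropped exactly once on every path. A skeletal sketch of that shape; my_set_policy() is an invented handler:

#include <linux/fs.h>
#include <linux/mount.h>

static int my_set_policy(struct file *filp)
{
        struct inode *inode = file_inode(filp);
        int ret;

        if (!inode_owner_or_capable(inode))
                return -EACCES;

        ret = mnt_want_write_file(filp);
        if (ret)
                return ret;

        /* ... validate and apply the policy, setting ret on failure ... */

        mnt_drop_write_file(filp);      /* always paired with the want above */
        return ret;
}
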
index d116453b0276634fabb32ee96df9cf96ce9cb25a..79a5941c2474622d7888c8e8c3bc925eabdb77e1 100644 (file)
@@ -585,7 +585,8 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
  */
 void *devpts_get_priv(struct dentry *dentry)
 {
-       WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+       if (dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC)
+               return NULL;
        return dentry->d_fsdata;
 }
 
index 3131747199e160f5feca6c322a2b4190201108bf..c6ea25a190f88b09354e93cb7245586af2dbc87a 100644 (file)
@@ -5466,8 +5466,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
                                                      sbi->s_want_extra_isize,
                                                      iloc, handle);
                        if (ret) {
-                               ext4_set_inode_state(inode,
-                                                    EXT4_STATE_NO_EXPAND);
                                if (mnt_count !=
                                        le16_to_cpu(sbi->s_es->s_mnt_count)) {
                                        ext4_warning(inode->i_sb,
index 10686fd67fb425880cacb94183291698f00ed702..1bb7df5e45369896a0b71de051b1364492b6a813 100644 (file)
@@ -776,7 +776,7 @@ resizefs_out:
                                   (struct fscrypt_policy __user *)arg,
                                   sizeof(policy)))
                        return -EFAULT;
-               return fscrypt_process_policy(inode, &policy);
+               return fscrypt_process_policy(filp, &policy);
 #else
                return -EOPNOTSUPP;
 #endif
index 1c593aa0218eee5cec9e2dac0adf8aaa97056d8b..3ec8708989ca016c3dd9e7e2d9bd479a1cd9beaf 100644 (file)
@@ -2211,6 +2211,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
 
 /* Called at mount-time, super-block is locked */
 static int ext4_check_descriptors(struct super_block *sb,
+                                 ext4_fsblk_t sb_block,
                                  ext4_group_t *first_not_zeroed)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -2241,6 +2242,11 @@ static int ext4_check_descriptors(struct super_block *sb,
                        grp = i;
 
                block_bitmap = ext4_block_bitmap(sb, gdp);
+               if (block_bitmap == sb_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Block bitmap for group %u overlaps "
+                                "superblock", i);
+               }
                if (block_bitmap < first_block || block_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Block bitmap for group %u not in group "
@@ -2248,6 +2254,11 @@ static int ext4_check_descriptors(struct super_block *sb,
                        return 0;
                }
                inode_bitmap = ext4_inode_bitmap(sb, gdp);
+               if (inode_bitmap == sb_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode bitmap for group %u overlaps "
+                                "superblock", i);
+               }
                if (inode_bitmap < first_block || inode_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Inode bitmap for group %u not in group "
@@ -2255,6 +2266,11 @@ static int ext4_check_descriptors(struct super_block *sb,
                        return 0;
                }
                inode_table = ext4_inode_table(sb, gdp);
+               if (inode_table == sb_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode table for group %u overlaps "
+                                "superblock", i);
+               }
                if (inode_table < first_block ||
                    inode_table + sbi->s_itb_per_group - 1 > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -3757,7 +3773,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
-       if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
+       if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
                ret = -EFSCORRUPTED;
                goto failed_mount2;
index 39e9cfb1b3715c2fcf2b81c0e03c2183d58c9100..2eb935ca5d9effac8eef6a2284d33acc62ae509e 100644 (file)
@@ -1353,15 +1353,19 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
        size_t min_offs, free;
        int total_ino;
        void *base, *start, *end;
-       int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
+       int error = 0, tried_min_extra_isize = 0;
        int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
+       int isize_diff; /* How much do we need to grow i_extra_isize */
 
        down_write(&EXT4_I(inode)->xattr_sem);
+       /*
+        * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
+        */
+       ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
 retry:
-       if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
-               up_write(&EXT4_I(inode)->xattr_sem);
-               return 0;
-       }
+       isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
+       if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+               goto out;
 
        header = IHDR(inode, raw_inode);
        entry = IFIRST(header);
@@ -1382,7 +1386,7 @@ retry:
                goto cleanup;
 
        free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
-       if (free >= new_extra_isize) {
+       if (free >= isize_diff) {
                entry = IFIRST(header);
                ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
                                - new_extra_isize, (void *)raw_inode +
@@ -1390,8 +1394,7 @@ retry:
                                (void *)header, total_ino,
                                inode->i_sb->s_blocksize);
                EXT4_I(inode)->i_extra_isize = new_extra_isize;
-               error = 0;
-               goto cleanup;
+               goto out;
        }
 
        /*
@@ -1414,7 +1417,7 @@ retry:
                end = bh->b_data + bh->b_size;
                min_offs = end - base;
                free = ext4_xattr_free_space(first, &min_offs, base, NULL);
-               if (free < new_extra_isize) {
+               if (free < isize_diff) {
                        if (!tried_min_extra_isize && s_min_extra_isize) {
                                tried_min_extra_isize++;
                                new_extra_isize = s_min_extra_isize;
@@ -1428,7 +1431,7 @@ retry:
                free = inode->i_sb->s_blocksize;
        }
 
-       while (new_extra_isize > 0) {
+       while (isize_diff > 0) {
                size_t offs, size, entry_size;
                struct ext4_xattr_entry *small_entry = NULL;
                struct ext4_xattr_info i = {
@@ -1459,7 +1462,7 @@ retry:
                        EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
                                        EXT4_XATTR_LEN(last->e_name_len);
                        if (total_size <= free && total_size < min_total_size) {
-                               if (total_size < new_extra_isize) {
+                               if (total_size < isize_diff) {
                                        small_entry = last;
                                } else {
                                        entry = last;
@@ -1514,22 +1517,22 @@ retry:
                error = ext4_xattr_ibody_set(handle, inode, &i, is);
                if (error)
                        goto cleanup;
+               total_ino -= entry_size;
 
                entry = IFIRST(header);
-               if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
-                       shift_bytes = new_extra_isize;
+               if (entry_size + EXT4_XATTR_SIZE(size) >= isize_diff)
+                       shift_bytes = isize_diff;
                else
-                       shift_bytes = entry_size + size;
+                       shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
                /* Adjust the offsets and shift the remaining entries ahead */
-               ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
-                       shift_bytes, (void *)raw_inode +
-                       EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
-                       (void *)header, total_ino - entry_size,
-                       inode->i_sb->s_blocksize);
+               ext4_xattr_shift_entries(entry, -shift_bytes,
+                       (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+                       EXT4_I(inode)->i_extra_isize + shift_bytes,
+                       (void *)header, total_ino, inode->i_sb->s_blocksize);
 
-               extra_isize += shift_bytes;
-               new_extra_isize -= shift_bytes;
-               EXT4_I(inode)->i_extra_isize = extra_isize;
+               isize_diff -= shift_bytes;
+               EXT4_I(inode)->i_extra_isize += shift_bytes;
+               header = IHDR(inode, raw_inode);
 
                i.name = b_entry_name;
                i.value = buffer;
@@ -1551,6 +1554,8 @@ retry:
                kfree(bs);
        }
        brelse(bh);
+out:
+       ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
        up_write(&EXT4_I(inode)->xattr_sem);
        return 0;
 
@@ -1562,6 +1567,10 @@ cleanup:
        kfree(is);
        kfree(bs);
        brelse(bh);
+       /*
+        * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
+        * size expansion failed.
+        */
        up_write(&EXT4_I(inode)->xattr_sem);
        return error;
 }
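
Two things change in the xattr hunk above: the remaining growth is tracked once as isize_diff instead of repeatedly repurposing new_extra_isize, and EXT4_STATE_NO_EXPAND is set up front (and cleared only on success) so that marking the inode dirty from inside the expansion cannot recurse back into it. A generic sketch of that re-entrancy guard; struct object and do_expand() are invented:

#include <stdbool.h>

struct object {
        bool expanding;         /* set while the expansion is in progress */
        int extra;
};

static int do_expand(struct object *obj, int wanted)
{
        int diff = wanted - obj->extra;     /* compute the delta once */
        int err = 0;

        if (diff <= 0)
                return 0;

        obj->expanding = true;              /* nested callers see this and bail */
        /* ... shift entries, grow obj->extra by diff, set err on failure ... */
        if (err)
                return err;                 /* deliberately leave the flag set */

        obj->expanding = false;
        return 0;
}
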
index 69dd3e6566e02edd05a4ef8054913df1326bd29a..a92e783fa057070a2d46ece41089bae6e3a26fe4 100644 (file)
@@ -24,6 +24,7 @@
 #define EXT4_XATTR_INDEX_SYSTEM                        7
 #define EXT4_XATTR_INDEX_RICHACL               8
 #define EXT4_XATTR_INDEX_ENCRYPTION            9
+#define EXT4_XATTR_INDEX_HURD                  10 /* Reserved for Hurd */
 
 struct ext4_xattr_header {
        __le32  h_magic;        /* magic number for identification */
index 47abb96098e49d71fcb082b39752be4f89882199..28f4f4cbb8d84d0b07c22e3952d09b9eb1e1bf8c 100644 (file)
@@ -1757,21 +1757,14 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
 {
        struct fscrypt_policy policy;
        struct inode *inode = file_inode(filp);
-       int ret;
 
        if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
                                                        sizeof(policy)))
                return -EFAULT;
 
-       ret = mnt_want_write_file(filp);
-       if (ret)
-               return ret;
-
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-       ret = fscrypt_process_policy(inode, &policy);
 
-       mnt_drop_write_file(filp);
-       return ret;
+       return fscrypt_process_policy(filp, &policy);
 }
 
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
index f394aff59c363a34c43eea0eec32293e21570986..3988b43c2f5ac8d87533db4de75395e5c4d1c286 100644 (file)
@@ -530,13 +530,13 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
        req->out.args[0].size = count;
 }
 
-static void fuse_release_user_pages(struct fuse_req *req, int write)
+static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
 {
        unsigned i;
 
        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
-               if (write)
+               if (should_dirty)
                        set_page_dirty_lock(page);
                put_page(page);
        }
@@ -1320,6 +1320,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                       loff_t *ppos, int flags)
 {
        int write = flags & FUSE_DIO_WRITE;
+       bool should_dirty = !write && iter_is_iovec(iter);
        int cuse = flags & FUSE_DIO_CUSE;
        struct file *file = io->file;
        struct inode *inode = file->f_mapping->host;
@@ -1363,7 +1364,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        nres = fuse_send_read(req, io, pos, nbytes, owner);
 
                if (!io->async)
-                       fuse_release_user_pages(req, !write);
+                       fuse_release_user_pages(req, should_dirty);
                if (req->out.h.error) {
                        err = req->out.h.error;
                        break;
index 0f56deb24ce6547c1aaa13e6a731779c09047077..c415668c86d4cc727460ae0f0eb5a6d1f5840f61 100644 (file)
@@ -568,7 +568,7 @@ static int ioctl_fsthaw(struct file *filp)
        return thaw_super(sb);
 }
 
-static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
+static int ioctl_file_dedupe_range(struct file *file, void __user *arg)
 {
        struct file_dedupe_range __user *argp = arg;
        struct file_dedupe_range *same = NULL;
@@ -582,6 +582,10 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
        }
 
        size = offsetof(struct file_dedupe_range __user, info[count]);
+       if (size > PAGE_SIZE) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        same = memdup_user(argp, size);
        if (IS_ERR(same)) {
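
Because offsetof(struct file_dedupe_range, info[count]) is derived from a user-supplied count, the hunk above caps the allocation at PAGE_SIZE before memdup_user() runs, turning an oversized request into -ENOMEM rather than an arbitrarily large kernel allocation. A hedged sketch of bounding a user-controlled length; struct request, struct item and copy_request() are invented, and PAGE_SIZE is just one plausible limit:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct item {
        u64 value;
};

struct request {
        u32 count;
        struct item items[];    /* 'count' entries follow */
};

static struct request *copy_request(const void __user *argp, u32 count)
{
        size_t size = offsetof(struct request, items[count]);

        /* Refuse absurd counts before allocating anything. */
        if (size > PAGE_SIZE)
                return ERR_PTR(-ENOMEM);

        return memdup_user(argp, size); /* kmalloc + copy_from_user */
}
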
index 0342254646e358407ca35520c307529cc454a3c4..706270f21b35cd23e18a5057bfec9b2995b77bba 100644 (file)
@@ -428,9 +428,12 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
                break;
        }
 
+       if (iomap->flags & IOMAP_F_MERGED)
+               flags |= FIEMAP_EXTENT_MERGED;
+
        return fiemap_fill_next_extent(fi, iomap->offset,
                        iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0,
-                       iomap->length, flags | FIEMAP_EXTENT_MERGED);
+                       iomap->length, flags);
 
 }
 
index e1574008adc9e4607e9deb2c1898f5d25be528a9..2bcb86e6e6ca0988cbe4337c970247869db34a24 100644 (file)
@@ -840,21 +840,35 @@ repeat:
        mutex_lock(&kernfs_mutex);
 
        list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
+               struct kernfs_node *parent;
                struct inode *inode;
-               struct dentry *dentry;
 
+               /*
+                * We want fsnotify_modify() on @kn but as the
+                * modifications aren't originating from userland don't
+                * have the matching @file available.  Look up the inodes
+                * and generate the events manually.
+                */
                inode = ilookup(info->sb, kn->ino);
                if (!inode)
                        continue;
 
-               dentry = d_find_any_alias(inode);
-               if (dentry) {
-                       fsnotify_parent(NULL, dentry, FS_MODIFY);
-                       fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
-                                NULL, 0);
-                       dput(dentry);
+               parent = kernfs_get_parent(kn);
+               if (parent) {
+                       struct inode *p_inode;
+
+                       p_inode = ilookup(info->sb, parent->ino);
+                       if (p_inode) {
+                               fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
+                                        inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
+                               iput(p_inode);
+                       }
+
+                       kernfs_put(parent);
                }
 
+               fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
+                        kn->name, 0);
                iput(inode);
        }
 
index f55a4e7560470d7a1ef8ab790beb6db59232b35f..217847679f0eac675492ac049cbc0a42a6f75066 100644 (file)
@@ -346,7 +346,7 @@ static void bl_write_cleanup(struct work_struct *work)
                        PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
 
                ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
-                                       (end - start) >> SECTOR_SHIFT);
+                                       (end - start) >> SECTOR_SHIFT, end);
        }
 
        pnfs_ld_write_done(hdr);
index 18e6fd0b9506e931a62acd3f91303574433a1113..efc007f007427c84bf0e56961b51f07073b78865 100644 (file)
@@ -141,6 +141,7 @@ struct pnfs_block_layout {
        struct rb_root          bl_ext_ro;
        spinlock_t              bl_ext_lock;   /* Protects list manipulation */
        bool                    bl_scsi_layout;
+       u64                     bl_lwb;
 };
 
 static inline struct pnfs_block_layout *
@@ -182,7 +183,7 @@ int ext_tree_insert(struct pnfs_block_layout *bl,
 int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start,
                sector_t end);
 int ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
-               sector_t len);
+               sector_t len, u64 lwb);
 bool ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect,
                struct pnfs_block_extent *ret, bool rw);
 int ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg);
index 992bcb19c11e744bed4c390e133ea3ebe7cf0916..c85fbfd2d0d99e7cd5e3cc902ebd4b75b8e1b02c 100644 (file)
@@ -402,7 +402,7 @@ ext_tree_split(struct rb_root *root, struct pnfs_block_extent *be,
 
 int
 ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
-               sector_t len)
+               sector_t len, u64 lwb)
 {
        struct rb_root *root = &bl->bl_ext_rw;
        sector_t end = start + len;
@@ -471,6 +471,8 @@ ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
                }
        }
 out:
+       if (bl->bl_lwb < lwb)
+               bl->bl_lwb = lwb;
        spin_unlock(&bl->bl_ext_lock);
 
        __ext_put_deviceids(&tmp);
@@ -518,7 +520,7 @@ static __be32 *encode_scsi_range(struct pnfs_block_extent *be, __be32 *p)
 }
 
 static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
-               size_t buffer_size, size_t *count)
+               size_t buffer_size, size_t *count, __u64 *lastbyte)
 {
        struct pnfs_block_extent *be;
        int ret = 0;
@@ -542,6 +544,8 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
                        p = encode_block_extent(be, p);
                be->be_tag = EXTENT_COMMITTING;
        }
+       *lastbyte = bl->bl_lwb - 1;
+       bl->bl_lwb = 0;
        spin_unlock(&bl->bl_ext_lock);
 
        return ret;
@@ -564,7 +568,7 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
        arg->layoutupdate_pages = &arg->layoutupdate_page;
 
 retry:
-       ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count);
+       ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten);
        if (unlikely(ret)) {
                ext_tree_free_commitdata(arg, buffer_size);
 
index a7f2e6e3330525c1069c524f8b56960e780f7f4b..52a28311e2a4b2d72063cf36ac9e4296481211b7 100644 (file)
@@ -275,6 +275,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
 err_socks:
        svc_rpcb_cleanup(serv, net);
 err_bind:
+       nn->cb_users[minorversion]--;
        dprintk("NFS: Couldn't create callback socket: err = %d; "
                        "net = %p\n", ret, net);
        return ret;
index c92a75e066a6f75828079f422c2546ea73a90415..f953ef6b2f2e7eafcfb7fb9015972f12f214811e 100644 (file)
@@ -454,11 +454,8 @@ static bool referring_call_exists(struct nfs_client *clp,
                                ((u32 *)&rclist->rcl_sessionid.data)[3],
                                ref->rc_sequenceid, ref->rc_slotid);
 
-                       spin_lock(&tbl->slot_tbl_lock);
-                       status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
-                                 tbl->slots[ref->rc_slotid].seq_nr ==
-                                       ref->rc_sequenceid);
-                       spin_unlock(&tbl->slot_tbl_lock);
+                       status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
+                                       ref->rc_sequenceid, HZ >> 1) < 0;
                        if (status)
                                goto out;
                }
@@ -487,7 +484,6 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
                goto out;
 
        tbl = &clp->cl_session->bc_slot_table;
-       slot = tbl->slots + args->csa_slotid;
 
        /* Set up res before grabbing the spinlock */
        memcpy(&res->csr_sessionid, &args->csa_sessionid,
index 003ebce4bbc49fa0e5508816027ae65798119e4f..1e106780a23752fa9b49ed74baa9bd01c500c912 100644 (file)
@@ -426,7 +426,7 @@ EXPORT_SYMBOL_GPL(nfs_mark_client_ready);
  * Initialise the timeout values for a connection
  */
 void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
-                                   unsigned int timeo, unsigned int retrans)
+                                   int timeo, int retrans)
 {
        to->to_initval = timeo * HZ / 10;
        to->to_retries = retrans;
@@ -434,9 +434,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
        switch (proto) {
        case XPRT_TRANSPORT_TCP:
        case XPRT_TRANSPORT_RDMA:
-               if (to->to_retries == 0)
+               if (retrans == NFS_UNSPEC_RETRANS)
                        to->to_retries = NFS_DEF_TCP_RETRANS;
-               if (to->to_initval == 0)
+               if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
                        to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
                if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
                        to->to_initval = NFS_MAX_TCP_TIMEOUT;
@@ -449,9 +449,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
                to->to_exponential = 0;
                break;
        case XPRT_TRANSPORT_UDP:
-               if (to->to_retries == 0)
+               if (retrans == NFS_UNSPEC_RETRANS)
                        to->to_retries = NFS_DEF_UDP_RETRANS;
-               if (!to->to_initval)
+               if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
                        to->to_initval = NFS_DEF_UDP_TIMEO * HZ / 10;
                if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
                        to->to_initval = NFS_MAX_UDP_TIMEOUT;
index 7d620970f2e1addfbd447dc7b4a0bd1233bc9ec6..ca699ddc11c10e2e012f200d37213c90739af286 100644 (file)
@@ -657,7 +657,10 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        if (result <= 0)
                goto out;
 
-       written = generic_write_sync(iocb, result);
+       result = generic_write_sync(iocb, result);
+       if (result < 0)
+               goto out;
+       written = result;
        iocb->ki_pos += written;
 
        /* Return error values */
index e6206eaf2bdf34e4aa11d23dbf68b6e7c0c4684e..51b51369704c56c00732b10ff0c31edfd0d9c19e 100644 (file)
@@ -37,6 +37,7 @@ ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
        if (ffl) {
                INIT_LIST_HEAD(&ffl->error_list);
                INIT_LIST_HEAD(&ffl->mirrors);
+               ffl->last_report_time = ktime_get();
                return &ffl->generic_hdr;
        } else
                return NULL;
@@ -640,19 +641,18 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 {
        static const ktime_t notime = {0};
        s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
+       struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
 
        nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
        if (ktime_equal(mirror->start_time, notime))
                mirror->start_time = now;
-       if (ktime_equal(mirror->last_report_time, notime))
-               mirror->last_report_time = now;
        if (mirror->report_interval != 0)
                report_interval = (s64)mirror->report_interval * 1000LL;
        else if (layoutstats_timer != 0)
                report_interval = (s64)layoutstats_timer * 1000LL;
-       if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
+       if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
                        report_interval) {
-               mirror->last_report_time = now;
+               ffl->last_report_time = now;
                return true;
        }
 
@@ -806,11 +806,14 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
 {
        struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
        struct nfs4_pnfs_ds *ds;
+       bool fail_return = false;
        int idx;
 
        /* mirrors are sorted by efficiency */
        for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
-               ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
+               if (idx+1 == fls->mirror_array_cnt)
+                       fail_return = true;
+               ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
                if (ds) {
                        *best_idx = idx;
                        return ds;
@@ -859,6 +862,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
        struct nfs4_pnfs_ds *ds;
        int ds_idx;
 
+retry:
        /* Use full layout for now */
        if (!pgio->pg_lseg)
                ff_layout_pg_get_read(pgio, req, false);
@@ -871,10 +875,13 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 
        ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
        if (!ds) {
-               if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
-                       goto out_pnfs;
-               else
+               if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
                        goto out_mds;
+               pnfs_put_lseg(pgio->pg_lseg);
+               pgio->pg_lseg = NULL;
+               /* Sleep for 1 second before retrying */
+               ssleep(1);
+               goto retry;
        }
 
        mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
@@ -890,12 +897,6 @@ out_mds:
        pnfs_put_lseg(pgio->pg_lseg);
        pgio->pg_lseg = NULL;
        nfs_pageio_reset_read_mds(pgio);
-       return;
-
-out_pnfs:
-       pnfs_set_lo_fail(pgio->pg_lseg);
-       pnfs_put_lseg(pgio->pg_lseg);
-       pgio->pg_lseg = NULL;
 }
 
 static void
@@ -909,6 +910,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
        int i;
        int status;
 
+retry:
        if (!pgio->pg_lseg) {
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   req->wb_context,
@@ -940,10 +942,13 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
        for (i = 0; i < pgio->pg_mirror_count; i++) {
                ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
                if (!ds) {
-                       if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
-                               goto out_pnfs;
-                       else
+                       if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
                                goto out_mds;
+                       pnfs_put_lseg(pgio->pg_lseg);
+                       pgio->pg_lseg = NULL;
+                       /* Sleep for 1 second before retrying */
+                       ssleep(1);
+                       goto retry;
                }
                pgm = &pgio->pg_mirrors[i];
                mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
@@ -956,12 +961,6 @@ out_mds:
        pnfs_put_lseg(pgio->pg_lseg);
        pgio->pg_lseg = NULL;
        nfs_pageio_reset_write_mds(pgio);
-       return;
-
-out_pnfs:
-       pnfs_set_lo_fail(pgio->pg_lseg);
-       pnfs_put_lseg(pgio->pg_lseg);
-       pgio->pg_lseg = NULL;
 }
 
 static unsigned int
index 1bcdb15d0c41a74d139ce7db378c3e19844052a0..3ee0c9fcea7632269edbf51f3e5bd1bf80bb868d 100644 (file)
@@ -84,7 +84,6 @@ struct nfs4_ff_layout_mirror {
        struct nfs4_ff_layoutstat       read_stat;
        struct nfs4_ff_layoutstat       write_stat;
        ktime_t                         start_time;
-       ktime_t                         last_report_time;
        u32                             report_interval;
 };
 
@@ -101,6 +100,7 @@ struct nfs4_flexfile_layout {
        struct pnfs_ds_commit_info commit_info;
        struct list_head        mirrors;
        struct list_head        error_list; /* nfs4_ff_layout_ds_err */
+       ktime_t                 last_report_time; /* Layoutstat report times */
 };
 
 static inline struct nfs4_flexfile_layout *
index 0aa36be71fceaaf3ec532af442358b5ca80828ab..f7a3f6b05369a21d7e1190625e5871c55d1aeeb2 100644 (file)
@@ -17,8 +17,8 @@
 
 #define NFSDBG_FACILITY                NFSDBG_PNFS_LD
 
-static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
-static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
+static unsigned int dataserver_retrans;
 
 void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
 {
@@ -379,7 +379,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 
        devid = &mirror->mirror_ds->id_node;
        if (ff_layout_test_devid_unavailable(devid))
-               goto out;
+               goto out_fail;
 
        ds = mirror->mirror_ds->ds;
        /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
@@ -405,15 +405,16 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
                        mirror->mirror_ds->ds_versions[0].rsize = max_payload;
                if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
                        mirror->mirror_ds->ds_versions[0].wsize = max_payload;
-       } else {
-               ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
-                                        mirror, lseg->pls_range.offset,
-                                        lseg->pls_range.length, NFS4ERR_NXIO,
-                                        OP_ILLEGAL, GFP_NOIO);
-               if (fail_return || !ff_layout_has_available_ds(lseg))
-                       pnfs_error_mark_layout_for_return(ino, lseg);
-               ds = NULL;
+               goto out;
        }
+       ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+                                mirror, lseg->pls_range.offset,
+                                lseg->pls_range.length, NFS4ERR_NXIO,
+                                OP_ILLEGAL, GFP_NOIO);
+out_fail:
+       if (fail_return || !ff_layout_has_available_ds(lseg))
+               pnfs_error_mark_layout_for_return(ino, lseg);
+       ds = NULL;
 out:
        return ds;
 }
index 7ce5e023c3c3cd36056d1272cc16c9ebb1d1198a..74935a19e4bfc678767dcc7fde3fcef16d7872e2 100644 (file)
@@ -58,6 +58,9 @@ struct nfs_clone_mount {
  */
 #define NFS_UNSPEC_PORT                (-1)
 
+#define NFS_UNSPEC_RETRANS     (UINT_MAX)
+#define NFS_UNSPEC_TIMEO       (UINT_MAX)
+
 /*
  * Maximum number of pages that readdir can use for creating
  * a vmapped array of pages.
@@ -156,7 +159,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
 int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
 void nfs_server_insert_lists(struct nfs_server *);
 void nfs_server_remove_lists(struct nfs_server *);
-void nfs_init_timeout_values(struct rpc_timeout *, int, unsigned int, unsigned int);
+void nfs_init_timeout_values(struct rpc_timeout *to, int proto, int timeo, int retrans);
 int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
                rpc_authflavor_t);
 struct nfs_server *nfs_alloc_server(void);
index 6f47527348042dc83f5b97f48870eed6d1afcf41..64b43b4ad9dd8841cbfc41e86c046f89e7c4c46d 100644 (file)
@@ -318,10 +318,22 @@ static void
 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
 {
        struct nfs42_layoutstat_data *data = calldata;
-       struct nfs_server *server = NFS_SERVER(data->args.inode);
+       struct inode *inode = data->inode;
+       struct nfs_server *server = NFS_SERVER(inode);
+       struct pnfs_layout_hdr *lo;
 
+       spin_lock(&inode->i_lock);
+       lo = NFS_I(inode)->layout;
+       if (!pnfs_layout_is_valid(lo)) {
+               spin_unlock(&inode->i_lock);
+               rpc_exit(task, 0);
+               return;
+       }
+       nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
+       spin_unlock(&inode->i_lock);
        nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
                             &data->res.seq_res, task);
+
 }
 
 static void
@@ -341,11 +353,11 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
-       case -NFS4ERR_OLD_STATEID:
        case -NFS4ERR_BAD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
-               if (lo && nfs4_stateid_match(&data->args.stateid,
+               if (pnfs_layout_is_valid(lo) &&
+                   nfs4_stateid_match(&data->args.stateid,
                                             &lo->plh_stateid)) {
                        LIST_HEAD(head);
 
@@ -359,11 +371,23 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
                } else
                        spin_unlock(&inode->i_lock);
                break;
+       case -NFS4ERR_OLD_STATEID:
+               spin_lock(&inode->i_lock);
+               lo = NFS_I(inode)->layout;
+               if (pnfs_layout_is_valid(lo) &&
+                   nfs4_stateid_match_other(&data->args.stateid,
+                                       &lo->plh_stateid)) {
+                       /* Do we need to delay before resending? */
+                       if (!nfs4_stateid_is_newer(&lo->plh_stateid,
+                                               &data->args.stateid))
+                               rpc_delay(task, HZ);
+                       rpc_restart_call_prepare(task);
+               }
+               spin_unlock(&inode->i_lock);
+               break;
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
-       default:
-               break;
        }
 
        dprintk("%s server returns %d\n", __func__, task->tk_status);
index 8d7d08d4f95f17e09d53bda53da364ef02ea05b8..cd3b7cfdde16ae0e16cc17fbc9389ffc60eb511a 100644 (file)
@@ -817,6 +817,11 @@ static int nfs4_set_client(struct nfs_server *server,
                goto error;
        }
 
+       if (server->nfs_client == clp) {
+               error = -ELOOP;
+               goto error;
+       }
+
        /*
         * Query for the lease time on clientid setup or renewal
         *
index 1949bbd806ebd4381ec54ea06ea97fa2f5619bed..a9dec32ba9ba787f95693a8e7f266292376b0132 100644 (file)
@@ -634,15 +634,11 @@ out_sleep:
 }
 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
 
-static int nfs40_sequence_done(struct rpc_task *task,
-                              struct nfs4_sequence_res *res)
+static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
 {
        struct nfs4_slot *slot = res->sr_slot;
        struct nfs4_slot_table *tbl;
 
-       if (slot == NULL)
-               goto out;
-
        tbl = slot->table;
        spin_lock(&tbl->slot_tbl_lock);
        if (!nfs41_wake_and_assign_slot(tbl, slot))
@@ -650,7 +646,13 @@ static int nfs40_sequence_done(struct rpc_task *task,
        spin_unlock(&tbl->slot_tbl_lock);
 
        res->sr_slot = NULL;
-out:
+}
+
+static int nfs40_sequence_done(struct rpc_task *task,
+                              struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot != NULL)
+               nfs40_sequence_free_slot(res);
        return 1;
 }
 
@@ -666,6 +668,11 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
        tbl = slot->table;
        session = tbl->session;
 
+       /* Bump the slot sequence number */
+       if (slot->seq_done)
+               slot->seq_nr++;
+       slot->seq_done = 0;
+
        spin_lock(&tbl->slot_tbl_lock);
        /* Be nice to the server: try to ensure that the last transmitted
         * value for highest_user_slotid <= target_highest_slotid
@@ -686,9 +693,12 @@ out_unlock:
        res->sr_slot = NULL;
        if (send_new_highest_used_slotid)
                nfs41_notify_server(session->clp);
+       if (waitqueue_active(&tbl->slot_waitq))
+               wake_up_all(&tbl->slot_waitq);
 }
 
-int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+static int nfs41_sequence_process(struct rpc_task *task,
+               struct nfs4_sequence_res *res)
 {
        struct nfs4_session *session;
        struct nfs4_slot *slot = res->sr_slot;
@@ -714,7 +724,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
        switch (res->sr_status) {
        case 0:
                /* Update the slot's sequence and clientid lease timer */
-               ++slot->seq_nr;
+               slot->seq_done = 1;
                clp = session->clp;
                do_renew_lease(clp, res->sr_timestamp);
                /* Check sequence flags */
@@ -769,16 +779,16 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
                goto retry_nowait;
        default:
                /* Just update the slot sequence no. */
-               ++slot->seq_nr;
+               slot->seq_done = 1;
        }
 out:
        /* The session may be reset by one of the error handlers. */
        dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
-       nfs41_sequence_free_slot(res);
 out_noaction:
        return ret;
 retry_nowait:
        if (rpc_restart_call_prepare(task)) {
+               nfs41_sequence_free_slot(res);
                task->tk_status = 0;
                ret = 0;
        }
@@ -789,8 +799,37 @@ out_retry:
        rpc_delay(task, NFS4_POLL_RETRY_MAX);
        return 0;
 }
+
+int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+       if (!nfs41_sequence_process(task, res))
+               return 0;
+       if (res->sr_slot != NULL)
+               nfs41_sequence_free_slot(res);
+       return 1;
+
+}
 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
 
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot == NULL)
+               return 1;
+       if (res->sr_slot->table->session != NULL)
+               return nfs41_sequence_process(task, res);
+       return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot != NULL) {
+               if (res->sr_slot->table->session != NULL)
+                       nfs41_sequence_free_slot(res);
+               else
+                       nfs40_sequence_free_slot(res);
+       }
+}
+
 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 {
        if (res->sr_slot == NULL)
@@ -920,6 +959,17 @@ static int nfs4_setup_sequence(const struct nfs_server *server,
                                    args, res, task);
 }
 
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+       return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot != NULL)
+               nfs40_sequence_free_slot(res);
+}
+
 int nfs4_sequence_done(struct rpc_task *task,
                       struct nfs4_sequence_res *res)
 {
@@ -1197,6 +1247,7 @@ static void nfs4_opendata_free(struct kref *kref)
        struct super_block *sb = p->dentry->d_sb;
 
        nfs_free_seqid(p->o_arg.seqid);
+       nfs4_sequence_free_slot(&p->o_res.seq_res);
        if (p->state != NULL)
                nfs4_put_open_state(p->state);
        nfs4_put_state_owner(p->owner);
@@ -1656,9 +1707,14 @@ err:
 static struct nfs4_state *
 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
 {
+       struct nfs4_state *ret;
+
        if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
-               return _nfs4_opendata_reclaim_to_nfs4_state(data);
-       return _nfs4_opendata_to_nfs4_state(data);
+               ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
+       else
+               ret = _nfs4_opendata_to_nfs4_state(data);
+       nfs4_sequence_free_slot(&data->o_res.seq_res);
+       return ret;
 }
 
 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
@@ -2056,7 +2112,7 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
 
        data->rpc_status = task->tk_status;
 
-       if (!nfs4_sequence_done(task, &data->o_res.seq_res))
+       if (!nfs4_sequence_process(task, &data->o_res.seq_res))
                return;
 
        if (task->tk_status == 0) {
@@ -7514,12 +7570,20 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
        status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
        trace_nfs4_create_session(clp, status);
 
+       switch (status) {
+       case -NFS4ERR_STALE_CLIENTID:
+       case -NFS4ERR_DELAY:
+       case -ETIMEDOUT:
+       case -EACCES:
+       case -EAGAIN:
+               goto out;
+       }
+
+       clp->cl_seqid++;
        if (!status) {
                /* Verify the session's negotiated channel_attrs values */
                status = nfs4_verify_channel_attrs(&args, &res);
                /* Increment the clientid slot sequence id */
-               if (clp->cl_seqid == res.seqid)
-                       clp->cl_seqid++;
                if (status)
                        goto out;
                nfs4_update_session(session, &res);
@@ -7864,7 +7928,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
        struct nfs4_layoutget *lgp = calldata;
 
        dprintk("--> %s\n", __func__);
-       nfs41_sequence_done(task, &lgp->res.seq_res);
+       nfs41_sequence_process(task, &lgp->res.seq_res);
        dprintk("<-- %s\n", __func__);
 }
 
@@ -8080,6 +8144,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
        /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
        if (status == 0 && lgp->res.layoutp->len)
                lseg = pnfs_layout_process(lgp);
+       nfs4_sequence_free_slot(&lgp->res.seq_res);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
        if (status)
@@ -8106,7 +8171,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
 
        dprintk("--> %s\n", __func__);
 
-       if (!nfs41_sequence_done(task, &lrp->res.seq_res))
+       if (!nfs41_sequence_process(task, &lrp->res.seq_res))
                return;
 
        server = NFS_SERVER(lrp->args.inode);
@@ -8118,6 +8183,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
        case -NFS4ERR_DELAY:
                if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
                        break;
+               nfs4_sequence_free_slot(&lrp->res.seq_res);
                rpc_restart_call_prepare(task);
                return;
        }
@@ -8132,12 +8198,16 @@ static void nfs4_layoutreturn_release(void *calldata)
 
        dprintk("--> %s\n", __func__);
        spin_lock(&lo->plh_inode->i_lock);
-       pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range,
-                       be32_to_cpu(lrp->args.stateid.seqid));
-       if (lrp->res.lrs_present && pnfs_layout_is_valid(lo))
+       if (lrp->res.lrs_present) {
+               pnfs_mark_matching_lsegs_invalid(lo, &freeme,
+                               &lrp->args.range,
+                               be32_to_cpu(lrp->args.stateid.seqid));
                pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+       } else
+               pnfs_mark_layout_stateid_invalid(lo, &freeme);
        pnfs_clear_layoutreturn_waitbit(lo);
        spin_unlock(&lo->plh_inode->i_lock);
+       nfs4_sequence_free_slot(&lrp->res.seq_res);
        pnfs_free_lseg_list(&freeme);
        pnfs_put_layout_hdr(lrp->args.layout);
        nfs_iput_and_deactive(lrp->inode);
index 332d06e64fa910fcfa54ca3fc304e9869f05bc6f..b62973045a3e048016f1af48f761fdb152f3908d 100644 (file)
@@ -28,6 +28,7 @@ static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
        tbl->highest_used_slotid = NFS4_NO_SLOT;
        spin_lock_init(&tbl->slot_tbl_lock);
        rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
+       init_waitqueue_head(&tbl->slot_waitq);
        init_completion(&tbl->complete);
 }
 
@@ -172,6 +173,58 @@ struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
        return ERR_PTR(-E2BIG);
 }
 
+static int nfs4_slot_get_seqid(struct nfs4_slot_table  *tbl, u32 slotid,
+               u32 *seq_nr)
+       __must_hold(&tbl->slot_tbl_lock)
+{
+       struct nfs4_slot *slot;
+
+       slot = nfs4_lookup_slot(tbl, slotid);
+       if (IS_ERR(slot))
+               return PTR_ERR(slot);
+       *seq_nr = slot->seq_nr;
+       return 0;
+}
+
+/*
+ * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
+ *
+ * Given a slot table, slot id and sequence number, determine if the
+ * RPC call in question is still in flight. This function is mainly
+ * intended for use by the callback channel.
+ */
+static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
+               u32 slotid, u32 seq_nr)
+{
+       u32 cur_seq;
+       bool ret = false;
+
+       spin_lock(&tbl->slot_tbl_lock);
+       if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
+           cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
+               ret = true;
+       spin_unlock(&tbl->slot_tbl_lock);
+       return ret;
+}
+
+/*
+ * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
+ *
+ * Given a slot table, slot id and sequence number, wait until the
+ * corresponding RPC call completes. This function is mainly
+ * intended for use by the callback channel.
+ */
+int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+               u32 slotid, u32 seq_nr,
+               unsigned long timeout)
+{
+       if (wait_event_timeout(tbl->slot_waitq,
+                       !nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
+                       timeout) == 0)
+               return -ETIMEDOUT;
+       return 0;
+}
+
 /*
  * nfs4_alloc_slot - efficiently look for a free slot
  *
index 5b51298d1d03765684dd9ea79599169c01c71aaa..f703b755351bd8ad2217fb18009e93232f2ccc61 100644 (file)
@@ -21,7 +21,8 @@ struct nfs4_slot {
        unsigned long           generation;
        u32                     slot_nr;
        u32                     seq_nr;
-       unsigned int            interrupted : 1;
+       unsigned int            interrupted : 1,
+                               seq_done : 1;
 };
 
 /* Sessions */
@@ -36,6 +37,7 @@ struct nfs4_slot_table {
        unsigned long   used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
        spinlock_t      slot_tbl_lock;
        struct rpc_wait_queue   slot_tbl_waitq; /* allocators may wait here */
+       wait_queue_head_t       slot_waitq;     /* Completion wait on slot */
        u32             max_slots;              /* # slots in table */
        u32             max_slotid;             /* Max allowed slotid value */
        u32             highest_used_slotid;    /* sent to server on each SEQ.
@@ -78,6 +80,9 @@ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
 extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid);
+extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+               u32 slotid, u32 seq_nr,
+               unsigned long timeout);
 extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
index 70806cae0d36bf71ca704d5d1ab7bc128d1455e9..2c93a85eda51c9d6ebcde892d1e7ca60a1f159b3 100644 (file)
@@ -365,7 +365,8 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        atomic_dec(&lo->plh_refcount);
        if (list_empty(&lo->plh_segs)) {
-               set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+               if (atomic_read(&lo->plh_outstanding) == 0)
+                       set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
        }
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
@@ -768,17 +769,32 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
        pnfs_destroy_layouts_byclid(clp, false);
 }
 
+static void
+pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
+{
+       lo->plh_return_iomode = 0;
+       lo->plh_return_seq = 0;
+       clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+}
+
 /* update lo->plh_stateid with new if is more recent */
 void
 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
 {
        u32 oldseq, newseq, new_barrier = 0;
-       bool invalid = !pnfs_layout_is_valid(lo);
 
        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);
-       if (invalid || pnfs_seqid_is_newer(newseq, oldseq)) {
+
+       if (!pnfs_layout_is_valid(lo)) {
+               nfs4_stateid_copy(&lo->plh_stateid, new);
+               lo->plh_barrier = newseq;
+               pnfs_clear_layoutreturn_info(lo);
+               clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+               return;
+       }
+       if (pnfs_seqid_is_newer(newseq, oldseq)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                /*
                 * Because of wraparound, we want to keep the barrier
@@ -790,7 +806,7 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                new_barrier = be32_to_cpu(new->seqid);
        else if (new_barrier == 0)
                return;
-       if (invalid || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
+       if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
                lo->plh_barrier = new_barrier;
 }
 
@@ -886,19 +902,14 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
        rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 }
 
-static void
-pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
-{
-       lo->plh_return_iomode = 0;
-       lo->plh_return_seq = 0;
-       clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-}
-
 static bool
 pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
                nfs4_stateid *stateid,
                enum pnfs_iomode *iomode)
 {
+       /* Serialise LAYOUTGET/LAYOUTRETURN */
+       if (atomic_read(&lo->plh_outstanding) != 0)
+               return false;
        if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
                return false;
        pnfs_get_layout_hdr(lo);
@@ -1555,6 +1566,7 @@ pnfs_update_layout(struct inode *ino,
        }
 
 lookup_again:
+       nfs4_client_recover_expired_lease(clp);
        first = false;
        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1797,16 +1809,11 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
                 */
                pnfs_mark_layout_stateid_invalid(lo, &free_me);
 
-               nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
-               lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
+               pnfs_set_layout_stateid(lo, &res->stateid, true);
        }
 
        pnfs_get_lseg(lseg);
        pnfs_layout_insert_lseg(lo, lseg, &free_me);
-       if (!pnfs_layout_is_valid(lo)) {
-               pnfs_clear_layoutreturn_info(lo);
-               clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-       }
 
 
        if (res->return_on_close)
@@ -2510,7 +2517,6 @@ pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
 
        data->args.fh = NFS_FH(inode);
        data->args.inode = inode;
-       nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
        status = ld->prepare_layoutstats(&data->args);
        if (status)
                goto out_free;
index 18d446e1a82bbb5b558fca8deb3869849f91e58c..d39601381adf56fe21cd531d09b77f4960cf5060 100644 (file)
@@ -923,6 +923,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data) {
+               data->timeo             = NFS_UNSPEC_TIMEO;
+               data->retrans           = NFS_UNSPEC_RETRANS;
                data->acregmin          = NFS_DEF_ACREGMIN;
                data->acregmax          = NFS_DEF_ACREGMAX;
                data->acdirmin          = NFS_DEF_ACDIRMIN;
@@ -1189,6 +1191,19 @@ static int nfs_get_option_ul(substring_t args[], unsigned long *option)
        return rc;
 }
 
+static int nfs_get_option_ul_bound(substring_t args[], unsigned long *option,
+               unsigned long l_bound, unsigned long u_bound)
+{
+       int ret;
+
+       ret = nfs_get_option_ul(args, option);
+       if (ret != 0)
+               return ret;
+       if (*option < l_bound || *option > u_bound)
+               return -ERANGE;
+       return 0;
+}
+
 /*
  * Error-check and convert a string of mount options from user space into
  * a data structure.  The whole mount string is processed; bad options are
@@ -1352,12 +1367,12 @@ static int nfs_parse_mount_options(char *raw,
                        mnt->bsize = option;
                        break;
                case Opt_timeo:
-                       if (nfs_get_option_ul(args, &option) || option == 0)
+                       if (nfs_get_option_ul_bound(args, &option, 1, INT_MAX))
                                goto out_invalid_value;
                        mnt->timeo = option;
                        break;
                case Opt_retrans:
-                       if (nfs_get_option_ul(args, &option) || option == 0)
+                       if (nfs_get_option_ul_bound(args, &option, 0, INT_MAX))
                                goto out_invalid_value;
                        mnt->retrans = option;
                        break;
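nfs_get_option_ul_bound() above turns out-of-range values into a hard -ERANGE failure, so timeo= is now confined to [1, INT_MAX] and retrans= to [0, INT_MAX] instead of only rejecting zero. A small userspace model of the same parse-then-bound check (the parser name is made up; only the bounds come from the patch):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse an unsigned long and enforce [l_bound, u_bound], as the new helper does. */
static int parse_ul_bound(const char *s, unsigned long *out,
                          unsigned long l_bound, unsigned long u_bound)
{
        char *end;

        errno = 0;
        *out = strtoul(s, &end, 10);
        if (errno || end == s || *end != '\0')
                return -EINVAL;
        if (*out < l_bound || *out > u_bound)
                return -ERANGE;
        return 0;
}

int main(void)
{
        unsigned long v;

        printf("timeo=0   -> %d\n", parse_ul_bound("0", &v, 1, INT_MAX));  /* -ERANGE */
        printf("retrans=0 -> %d\n", parse_ul_bound("0", &v, 0, INT_MAX));  /* 0, ok   */
        return 0;
}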
index 54e5d6681786780812c9a5adddc27dd782b40bbb..43fdc2765aea65b85a20b0f28b844a3ba1b40d4e 100644 (file)
@@ -80,6 +80,8 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
        }
 
        for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+               if (ovl_is_private_xattr(name))
+                       continue;
 retry:
                size = vfs_getxattr(old, name, value, value_size);
                if (size == -ERANGE)
index 12bcd07b9e32c516f89605a7247da493d3ccedad..1560fdc09a5fabf12ed678a3e87e78bb3e43cce2 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/xattr.h>
 #include <linux/security.h>
 #include <linux/cred.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
 #include "overlayfs.h"
 
 void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
@@ -186,6 +188,9 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
        struct dentry *newdentry;
        int err;
 
+       if (!hardlink && !IS_POSIXACL(udir))
+               stat->mode &= ~current_umask();
+
        inode_lock_nested(udir, I_MUTEX_PARENT);
        newdentry = lookup_one_len(dentry->d_name.name, upperdir,
                                   dentry->d_name.len);
@@ -335,6 +340,32 @@ out_free:
        return ret;
 }
 
+static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
+                            const struct posix_acl *acl)
+{
+       void *buffer;
+       size_t size;
+       int err;
+
+       if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl)
+               return 0;
+
+       size = posix_acl_to_xattr(NULL, acl, NULL, 0);
+       buffer = kmalloc(size, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       size = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+       err = size;
+       if (err < 0)
+               goto out_free;
+
+       err = vfs_setxattr(upperdentry, name, buffer, size, XATTR_CREATE);
+out_free:
+       kfree(buffer);
+       return err;
+}
+
 static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                                    struct kstat *stat, const char *link,
                                    struct dentry *hardlink)
@@ -346,10 +377,18 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        struct dentry *upper;
        struct dentry *newdentry;
        int err;
+       struct posix_acl *acl, *default_acl;
 
        if (WARN_ON(!workdir))
                return -EROFS;
 
+       if (!hardlink) {
+               err = posix_acl_create(dentry->d_parent->d_inode,
+                                      &stat->mode, &default_acl, &acl);
+               if (err)
+                       return err;
+       }
+
        err = ovl_lock_rename_workdir(workdir, upperdir);
        if (err)
                goto out;
@@ -384,6 +423,17 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                if (err)
                        goto out_cleanup;
        }
+       if (!hardlink) {
+               err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_ACCESS,
+                                       acl);
+               if (err)
+                       goto out_cleanup;
+
+               err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_DEFAULT,
+                                       default_acl);
+               if (err)
+                       goto out_cleanup;
+       }
 
        if (!hardlink && S_ISDIR(stat->mode)) {
                err = ovl_set_opaque(newdentry);
@@ -410,6 +460,10 @@ out_dput:
 out_unlock:
        unlock_rename(workdir, upperdir);
 out:
+       if (!hardlink) {
+               posix_acl_release(acl);
+               posix_acl_release(default_acl);
+       }
        return err;
 
 out_cleanup:
@@ -950,9 +1004,9 @@ const struct inode_operations ovl_dir_inode_operations = {
        .permission     = ovl_permission,
        .getattr        = ovl_dir_getattr,
        .setxattr       = generic_setxattr,
-       .getxattr       = ovl_getxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = ovl_listxattr,
-       .removexattr    = ovl_removexattr,
+       .removexattr    = generic_removexattr,
        .get_acl        = ovl_get_acl,
        .update_time    = ovl_update_time,
 };
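ovl_set_upper_acl() above uses the usual two-pass sizing idiom: call posix_acl_to_xattr() once with a NULL buffer to learn the size, allocate, then serialize for real. A generic userspace sketch of that idiom (the encoder below is invented and only mimics the calling convention; it is not posix_acl_to_xattr()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented encoder: reports the needed size when buf is NULL, writes "a,b,c" otherwise. */
static size_t encode(const char *const *items, int n, char *buf, size_t size)
{
        size_t need = 0;

        for (int i = 0; i < n; i++)
                need += strlen(items[i]) + 1;           /* item plus ',' or final '\0' */
        if (buf && size >= need) {
                char *p = buf;
                for (int i = 0; i < n; i++)
                        p += sprintf(p, "%s%s", items[i], i + 1 < n ? "," : "");
        }
        return need;
}

int main(void)
{
        const char *items[] = { "user::rw-", "group::r--", "other::r--" };
        size_t size = encode(items, 3, NULL, 0);        /* pass 1: size only   */
        char *buf = malloc(size);

        if (!buf)
                return 1;
        encode(items, 3, buf, size);                    /* pass 2: real encode */
        printf("%zu bytes: %s\n", size, buf);
        free(buf);
        return 0;
}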
index 1b885c156028d3a4209cf51b6898006c28ac0ee6..c75625c1efa36fafdd7d077b86f874cd6a16b7e7 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
+#include <linux/posix_acl.h>
 #include "overlayfs.h"
 
 static int ovl_copy_up_truncate(struct dentry *dentry)
@@ -191,32 +192,44 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
        return err;
 }
 
-static bool ovl_is_private_xattr(const char *name)
+bool ovl_is_private_xattr(const char *name)
 {
-#define OVL_XATTR_PRE_NAME OVL_XATTR_PREFIX "."
-       return strncmp(name, OVL_XATTR_PRE_NAME,
-                      sizeof(OVL_XATTR_PRE_NAME) - 1) == 0;
+       return strncmp(name, OVL_XATTR_PREFIX,
+                      sizeof(OVL_XATTR_PREFIX) - 1) == 0;
 }
 
-int ovl_setxattr(struct dentry *dentry, struct inode *inode,
-                const char *name, const void *value,
-                size_t size, int flags)
+int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
+                 size_t size, int flags)
 {
        int err;
-       struct dentry *upperdentry;
+       struct path realpath;
+       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
        const struct cred *old_cred;
 
        err = ovl_want_write(dentry);
        if (err)
                goto out;
 
+       if (!value && !OVL_TYPE_UPPER(type)) {
+               err = vfs_getxattr(realpath.dentry, name, NULL, 0);
+               if (err < 0)
+                       goto out_drop_write;
+       }
+
        err = ovl_copy_up(dentry);
        if (err)
                goto out_drop_write;
 
-       upperdentry = ovl_dentry_upper(dentry);
+       if (!OVL_TYPE_UPPER(type))
+               ovl_path_upper(dentry, &realpath);
+
        old_cred = ovl_override_creds(dentry->d_sb);
-       err = vfs_setxattr(upperdentry, name, value, size, flags);
+       if (value)
+               err = vfs_setxattr(realpath.dentry, name, value, size, flags);
+       else {
+               WARN_ON(flags != XATTR_REPLACE);
+               err = vfs_removexattr(realpath.dentry, name);
+       }
        revert_creds(old_cred);
 
 out_drop_write:
@@ -225,16 +238,13 @@ out:
        return err;
 }
 
-ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
-                    const char *name, void *value, size_t size)
+int ovl_xattr_get(struct dentry *dentry, const char *name,
+                 void *value, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
        ssize_t res;
        const struct cred *old_cred;
 
-       if (ovl_is_private_xattr(name))
-               return -ENODATA;
-
        old_cred = ovl_override_creds(dentry->d_sb);
        res = vfs_getxattr(realdentry, name, value, size);
        revert_creds(old_cred);
@@ -245,7 +255,8 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
        ssize_t res;
-       int off;
+       size_t len;
+       char *s;
        const struct cred *old_cred;
 
        old_cred = ovl_override_creds(dentry->d_sb);
@@ -255,73 +266,39 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
                return res;
 
        /* filter out private xattrs */
-       for (off = 0; off < res;) {
-               char *s = list + off;
-               size_t slen = strlen(s) + 1;
+       for (s = list, len = res; len;) {
+               size_t slen = strnlen(s, len) + 1;
 
-               BUG_ON(off + slen > res);
+               /* underlying fs providing us with a broken xattr list? */
+               if (WARN_ON(slen > len))
+                       return -EIO;
 
+               len -= slen;
                if (ovl_is_private_xattr(s)) {
                        res -= slen;
-                       memmove(s, s + slen, res - off);
+                       memmove(s, s + slen, len);
                } else {
-                       off += slen;
+                       s += slen;
                }
        }
 
        return res;
 }
 
-int ovl_removexattr(struct dentry *dentry, const char *name)
-{
-       int err;
-       struct path realpath;
-       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
-       const struct cred *old_cred;
-
-       err = ovl_want_write(dentry);
-       if (err)
-               goto out;
-
-       err = -ENODATA;
-       if (ovl_is_private_xattr(name))
-               goto out_drop_write;
-
-       if (!OVL_TYPE_UPPER(type)) {
-               err = vfs_getxattr(realpath.dentry, name, NULL, 0);
-               if (err < 0)
-                       goto out_drop_write;
-
-               err = ovl_copy_up(dentry);
-               if (err)
-                       goto out_drop_write;
-
-               ovl_path_upper(dentry, &realpath);
-       }
-
-       old_cred = ovl_override_creds(dentry->d_sb);
-       err = vfs_removexattr(realpath.dentry, name);
-       revert_creds(old_cred);
-out_drop_write:
-       ovl_drop_write(dentry);
-out:
-       return err;
-}
-
 struct posix_acl *ovl_get_acl(struct inode *inode, int type)
 {
        struct inode *realinode = ovl_inode_real(inode, NULL);
        const struct cred *old_cred;
        struct posix_acl *acl;
 
-       if (!IS_POSIXACL(realinode))
+       if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
                return NULL;
 
        if (!realinode->i_op->get_acl)
                return NULL;
 
        old_cred = ovl_override_creds(inode->i_sb);
-       acl = realinode->i_op->get_acl(realinode, type);
+       acl = get_acl(realinode, type);
        revert_creds(old_cred);
 
        return acl;
@@ -391,9 +368,9 @@ static const struct inode_operations ovl_file_inode_operations = {
        .permission     = ovl_permission,
        .getattr        = ovl_getattr,
        .setxattr       = generic_setxattr,
-       .getxattr       = ovl_getxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = ovl_listxattr,
-       .removexattr    = ovl_removexattr,
+       .removexattr    = generic_removexattr,
        .get_acl        = ovl_get_acl,
        .update_time    = ovl_update_time,
 };
@@ -404,9 +381,9 @@ static const struct inode_operations ovl_symlink_inode_operations = {
        .readlink       = ovl_readlink,
        .getattr        = ovl_getattr,
        .setxattr       = generic_setxattr,
-       .getxattr       = ovl_getxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = ovl_listxattr,
-       .removexattr    = ovl_removexattr,
+       .removexattr    = generic_removexattr,
        .update_time    = ovl_update_time,
 };
 
@@ -415,6 +392,9 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode)
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_flags |= S_NOCMTIME;
+#ifdef CONFIG_FS_POSIX_ACL
+       inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
+#endif
 
        mode &= S_IFMT;
        switch (mode) {
index e4f5c9536bfeaf1346ee908dc8081ee42f726fc1..5813ccff8cd9f47d4a92c9a1c422fa984e3d5976 100644 (file)
@@ -24,8 +24,8 @@ enum ovl_path_type {
        (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
 
 
-#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay"
-#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX ".opaque"
+#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay."
+#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
 
 #define OVL_ISUPPER_MASK 1UL
 
@@ -179,20 +179,21 @@ int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
 void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
 void ovl_cache_free(struct list_head *list);
 int ovl_check_d_type_supported(struct path *realpath);
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+                        struct dentry *dentry, int level);
 
 /* inode.c */
 int ovl_setattr(struct dentry *dentry, struct iattr *attr);
 int ovl_permission(struct inode *inode, int mask);
-int ovl_setxattr(struct dentry *dentry, struct inode *inode,
-                const char *name, const void *value,
-                size_t size, int flags);
-ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
-                    const char *name, void *value, size_t size);
+int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
+                 size_t size, int flags);
+int ovl_xattr_get(struct dentry *dentry, const char *name,
+                 void *value, size_t size);
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
-int ovl_removexattr(struct dentry *dentry, const char *name);
 struct posix_acl *ovl_get_acl(struct inode *inode, int type);
 int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
 int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
+bool ovl_is_private_xattr(const char *name);
 
 struct inode *ovl_new_inode(struct super_block *sb, umode_t mode);
 struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode);
index cf37fc76fc9fc02f00caf5a64bd632f20942a528..f241b4ee3d8a50b1c03ae4b1e1848b772fd26af1 100644 (file)
@@ -248,7 +248,7 @@ static inline int ovl_dir_read(struct path *realpath,
                        err = rdd->err;
        } while (!err && rdd->count);
 
-       if (!err && rdd->first_maybe_whiteout)
+       if (!err && rdd->first_maybe_whiteout && rdd->dentry)
                err = ovl_check_whiteouts(realpath->dentry, rdd);
 
        fput(realfile);
@@ -606,3 +606,64 @@ int ovl_check_d_type_supported(struct path *realpath)
 
        return rdd.d_type_supported;
 }
+
+static void ovl_workdir_cleanup_recurse(struct path *path, int level)
+{
+       int err;
+       struct inode *dir = path->dentry->d_inode;
+       LIST_HEAD(list);
+       struct ovl_cache_entry *p;
+       struct ovl_readdir_data rdd = {
+               .ctx.actor = ovl_fill_merge,
+               .dentry = NULL,
+               .list = &list,
+               .root = RB_ROOT,
+               .is_lowest = false,
+       };
+
+       err = ovl_dir_read(path, &rdd);
+       if (err)
+               goto out;
+
+       inode_lock_nested(dir, I_MUTEX_PARENT);
+       list_for_each_entry(p, &list, l_node) {
+               struct dentry *dentry;
+
+               if (p->name[0] == '.') {
+                       if (p->len == 1)
+                               continue;
+                       if (p->len == 2 && p->name[1] == '.')
+                               continue;
+               }
+               dentry = lookup_one_len(p->name, path->dentry, p->len);
+               if (IS_ERR(dentry))
+                       continue;
+               if (dentry->d_inode)
+                       ovl_workdir_cleanup(dir, path->mnt, dentry, level);
+               dput(dentry);
+       }
+       inode_unlock(dir);
+out:
+       ovl_cache_free(&list);
+}
+
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+                        struct dentry *dentry, int level)
+{
+       int err;
+
+       if (!d_is_dir(dentry) || level > 1) {
+               ovl_cleanup(dir, dentry);
+               return;
+       }
+
+       err = ovl_do_rmdir(dir, dentry);
+       if (err) {
+               struct path path = { .mnt = mnt, .dentry = dentry };
+
+               inode_unlock(dir);
+               ovl_workdir_cleanup_recurse(&path, level + 1);
+               inode_lock_nested(dir, I_MUTEX_PARENT);
+               ovl_cleanup(dir, dentry);
+       }
+}
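ovl_workdir_cleanup() above tries a plain rmdir first and only walks into the directory, one extra level at most, when that fails because it is not empty; that is how a stale work directory from a previous mount gets emptied at mount time. A rough userspace model of the try-rmdir-then-recurse shape, with no locking and with hypothetical names:

#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static void cleanup_entry(const char *path, int level);

/* Remove every child of 'path', each at the given recursion level. */
static void cleanup_children(const char *path, int level)
{
        DIR *d = opendir(path);
        struct dirent *de;
        char child[4096];

        if (!d)
                return;
        while ((de = readdir(d)) != NULL) {
                if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, ".."))
                        continue;
                snprintf(child, sizeof(child), "%s/%s", path, de->d_name);
                cleanup_entry(child, level);
        }
        closedir(d);
}

static void cleanup_entry(const char *path, int level)
{
        struct stat st;

        if (lstat(path, &st) != 0)
                return;
        if (!S_ISDIR(st.st_mode) || level > 1) {
                /* Plain entries, and anything deeper than one level, are removed directly. */
                if (S_ISDIR(st.st_mode))
                        rmdir(path);
                else
                        unlink(path);
                return;
        }
        /* Cheap path first; recurse a single level only if the directory is not empty. */
        if (rmdir(path) != 0) {
                cleanup_children(path, level + 1);
                rmdir(path);
        }
}

int main(void)
{
        cleanup_entry("stale-workdir", 0);      /* hypothetical leftover work directory */
        return 0;
}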
index 4036132842b534934020dd0640b721d81320cee3..e2a94a26767ba5df543540c8da39e85977aa371c 100644 (file)
@@ -814,6 +814,10 @@ retry:
                struct kstat stat = {
                        .mode = S_IFDIR | 0,
                };
+               struct iattr attr = {
+                       .ia_valid = ATTR_MODE,
+                       .ia_mode = stat.mode,
+               };
 
                if (work->d_inode) {
                        err = -EEXIST;
@@ -821,7 +825,7 @@ retry:
                                goto out_dput;
 
                        retried = true;
-                       ovl_cleanup(dir, work);
+                       ovl_workdir_cleanup(dir, mnt, work, 0);
                        dput(work);
                        goto retry;
                }
@@ -829,6 +833,21 @@ retry:
                err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
                if (err)
                        goto out_dput;
+
+               err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
+               if (err && err != -ENODATA && err != -EOPNOTSUPP)
+                       goto out_dput;
+
+               err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
+               if (err && err != -ENODATA && err != -EOPNOTSUPP)
+                       goto out_dput;
+
+               /* Clear any inherited mode bits */
+               inode_lock(work->d_inode);
+               err = notify_change(work, &attr, NULL);
+               inode_unlock(work->d_inode);
+               if (err)
+                       goto out_dput;
        }
 out_unlock:
        inode_unlock(dir);
@@ -967,10 +986,19 @@ static unsigned int ovl_split_lowerdirs(char *str)
        return ctr;
 }
 
-static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
-                                  struct dentry *dentry, struct inode *inode,
-                                  const char *name, const void *value,
-                                  size_t size, int flags)
+static int __maybe_unused
+ovl_posix_acl_xattr_get(const struct xattr_handler *handler,
+                       struct dentry *dentry, struct inode *inode,
+                       const char *name, void *buffer, size_t size)
+{
+       return ovl_xattr_get(dentry, handler->name, buffer, size);
+}
+
+static int __maybe_unused
+ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
+                       struct dentry *dentry, struct inode *inode,
+                       const char *name, const void *value,
+                       size_t size, int flags)
 {
        struct dentry *workdir = ovl_workdir(dentry);
        struct inode *realinode = ovl_inode_real(inode, NULL);
@@ -998,19 +1026,22 @@ static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
 
        posix_acl_release(acl);
 
-       return ovl_setxattr(dentry, inode, handler->name, value, size, flags);
+       err = ovl_xattr_set(dentry, handler->name, value, size, flags);
+       if (!err)
+               ovl_copyattr(ovl_inode_real(inode, NULL), inode);
+
+       return err;
 
 out_acl_release:
        posix_acl_release(acl);
        return err;
 }
 
-static int ovl_other_xattr_set(const struct xattr_handler *handler,
-                              struct dentry *dentry, struct inode *inode,
-                              const char *name, const void *value,
-                              size_t size, int flags)
+static int ovl_own_xattr_get(const struct xattr_handler *handler,
+                            struct dentry *dentry, struct inode *inode,
+                            const char *name, void *buffer, size_t size)
 {
-       return ovl_setxattr(dentry, inode, name, value, size, flags);
+       return -EPERM;
 }
 
 static int ovl_own_xattr_set(const struct xattr_handler *handler,
@@ -1021,42 +1052,59 @@ static int ovl_own_xattr_set(const struct xattr_handler *handler,
        return -EPERM;
 }
 
-static const struct xattr_handler ovl_posix_acl_access_xattr_handler = {
+static int ovl_other_xattr_get(const struct xattr_handler *handler,
+                              struct dentry *dentry, struct inode *inode,
+                              const char *name, void *buffer, size_t size)
+{
+       return ovl_xattr_get(dentry, name, buffer, size);
+}
+
+static int ovl_other_xattr_set(const struct xattr_handler *handler,
+                              struct dentry *dentry, struct inode *inode,
+                              const char *name, const void *value,
+                              size_t size, int flags)
+{
+       return ovl_xattr_set(dentry, name, value, size, flags);
+}
+
+static const struct xattr_handler __maybe_unused
+ovl_posix_acl_access_xattr_handler = {
        .name = XATTR_NAME_POSIX_ACL_ACCESS,
        .flags = ACL_TYPE_ACCESS,
+       .get = ovl_posix_acl_xattr_get,
        .set = ovl_posix_acl_xattr_set,
 };
 
-static const struct xattr_handler ovl_posix_acl_default_xattr_handler = {
+static const struct xattr_handler __maybe_unused
+ovl_posix_acl_default_xattr_handler = {
        .name = XATTR_NAME_POSIX_ACL_DEFAULT,
        .flags = ACL_TYPE_DEFAULT,
+       .get = ovl_posix_acl_xattr_get,
        .set = ovl_posix_acl_xattr_set,
 };
 
 static const struct xattr_handler ovl_own_xattr_handler = {
        .prefix = OVL_XATTR_PREFIX,
+       .get = ovl_own_xattr_get,
        .set = ovl_own_xattr_set,
 };
 
 static const struct xattr_handler ovl_other_xattr_handler = {
        .prefix = "", /* catch all */
+       .get = ovl_other_xattr_get,
        .set = ovl_other_xattr_set,
 };
 
 static const struct xattr_handler *ovl_xattr_handlers[] = {
+#ifdef CONFIG_FS_POSIX_ACL
        &ovl_posix_acl_access_xattr_handler,
        &ovl_posix_acl_default_xattr_handler,
+#endif
        &ovl_own_xattr_handler,
        &ovl_other_xattr_handler,
        NULL
 };
 
-static const struct xattr_handler *ovl_xattr_noacl_handlers[] = {
-       &ovl_own_xattr_handler,
-       &ovl_other_xattr_handler,
-       NULL,
-};
-
 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct path upperpath = { NULL, NULL };
@@ -1132,7 +1180,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        err = -EINVAL;
        stacklen = ovl_split_lowerdirs(lowertmp);
        if (stacklen > OVL_MAX_STACK) {
-               pr_err("overlayfs: too many lower directries, limit is %d\n",
+               pr_err("overlayfs: too many lower directories, limit is %d\n",
                       OVL_MAX_STACK);
                goto out_free_lowertmp;
        } else if (!ufs->config.upperdir && stacklen == 1) {
@@ -1269,10 +1317,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic = OVERLAYFS_SUPER_MAGIC;
        sb->s_op = &ovl_super_operations;
-       if (IS_ENABLED(CONFIG_FS_POSIX_ACL))
-               sb->s_xattr = ovl_xattr_handlers;
-       else
-               sb->s_xattr = ovl_xattr_noacl_handlers;
+       sb->s_xattr = ovl_xattr_handlers;
        sb->s_root = root_dentry;
        sb->s_fs_info = ufs;
        sb->s_flags |= MS_POSIXACL;
index 54e270262979b6f331a31b132171c0b74b92b920..ac0df4dde823866b54480c7062d5188887009365 100644 (file)
@@ -1556,18 +1556,13 @@ static const struct file_operations proc_pid_set_comm_operations = {
 static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
 {
        struct task_struct *task;
-       struct mm_struct *mm;
        struct file *exe_file;
 
        task = get_proc_task(d_inode(dentry));
        if (!task)
                return -ENOENT;
-       mm = get_task_mm(task);
+       exe_file = get_task_exe_file(task);
        put_task_struct(task);
-       if (!mm)
-               return -ENOENT;
-       exe_file = get_mm_exe_file(mm);
-       mmput(mm);
        if (exe_file) {
                *exe_path = exe_file->f_path;
                path_get(&exe_file->f_path);
index 187d84ef9de9d00cff139ffaa61a524af6b752c2..f6fa99eca5158f36d3fe38a23baac5b7c54ca6bb 100644 (file)
@@ -581,6 +581,8 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                mss->anonymous_thp += HPAGE_PMD_SIZE;
        else if (PageSwapBacked(page))
                mss->shmem_thp += HPAGE_PMD_SIZE;
+       else if (is_zone_device_page(page))
+               /* pass */;
        else
                VM_BUG_ON_PAGE(1, page);
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
index f35523d4fa3a6d657260e7b04d5273db2dcd53b1..b803213d1307e9137c3bfe5e04ee4ac5bd396cce 100644 (file)
@@ -114,9 +114,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
         * If buf != of->prealloc_buf, we don't know how
         * large it is, so cannot safely pass it to ->show
         */
-       if (pos || WARN_ON_ONCE(buf != of->prealloc_buf))
+       if (WARN_ON_ONCE(buf != of->prealloc_buf))
                return 0;
        len = ops->show(kobj, of->kn->priv, buf);
+       if (pos) {
+               if (len <= pos)
+                       return 0;
+               len -= pos;
+               memmove(buf, buf + pos, len);
+       }
        return min(count, len);
 }
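With the change above, a sysfs read at a non-zero offset regenerates the whole attribute into the preallocated buffer and then shifts the requested tail to the front, instead of returning 0 whenever pos was non-zero. A userspace model of that shift-and-clamp step (the attribute text stands in for ->show() and is invented):

#include <stdio.h>
#include <string.h>

/* Regenerate the full attribute text, then serve only the part at 'pos'. */
static size_t read_at(char *buf, size_t bufsz, size_t count, size_t pos)
{
        size_t len = (size_t)snprintf(buf, bufsz, "enabled 1\nlevel 42\n");

        if (pos) {
                if (len <= pos)
                        return 0;
                len -= pos;
                memmove(buf, buf + pos, len);
        }
        return count < len ? count : len;
}

int main(void)
{
        char buf[64];
        size_t n = read_at(buf, sizeof(buf), sizeof(buf), 10);

        printf("%zu bytes: %.*s", n, (int)n, buf);
        return 0;
}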
 
index 3dd8f1d5449877ed45aa0fb391c1e9048c5c1893..05b5243d89f6ffcb4d77e71455ef34ec88165ebe 100644 (file)
@@ -2278,6 +2278,8 @@ xfs_alloc_log_agf(
                offsetof(xfs_agf_t, agf_btreeblks),
                offsetof(xfs_agf_t, agf_uuid),
                offsetof(xfs_agf_t, agf_rmap_blocks),
+               /* needed so that we don't log the whole rest of the structure: */
+               offsetof(xfs_agf_t, agf_spare64),
                sizeof(xfs_agf_t)
        };
 
index b5c213a051cde3f703227f10b73e367470a4df43..08569792fe2096b9f1ae8720cc8f5118e07d8c01 100644 (file)
@@ -1814,6 +1814,10 @@ xfs_btree_lookup(
 
        XFS_BTREE_STATS_INC(cur, lookup);
 
+       /* No such thing as a zero-level tree. */
+       if (cur->bc_nlevels == 0)
+               return -EFSCORRUPTED;
+
        block = NULL;
        keyno = 0;
 
@@ -4554,15 +4558,22 @@ xfs_btree_simple_query_range(
        if (error)
                goto out;
 
+       /* Nothing?  See if there's anything to the right. */
+       if (!stat) {
+               error = xfs_btree_increment(cur, 0, &stat);
+               if (error)
+                       goto out;
+       }
+
        while (stat) {
                /* Find the record. */
                error = xfs_btree_get_rec(cur, &recp, &stat);
                if (error || !stat)
                        break;
-               cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
 
                /* Skip if high_key(rec) < low_key. */
                if (firstrec) {
+                       cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
                        firstrec = false;
                        diff = cur->bc_ops->diff_two_keys(cur, low_key,
                                        &rec_key);
@@ -4571,6 +4582,7 @@ xfs_btree_simple_query_range(
                }
 
                /* Stop if high_key < low_key(rec). */
+               cur->bc_ops->init_key_from_rec(&rec_key, recp);
                diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
                if (diff > 0)
                        break;
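The reordered key comparisons make the range walk skip a record only when its high key is below the query's low key, and stop only when its low key is above the query's high key, so records that merely overlap the query range are kept. A toy model of that overlap filter over sorted integer intervals (not the xfs btree code):

#include <stdio.h>

struct rec { int lo, hi; };   /* a record keyed by an interval */

/* Visit every record whose [lo, hi] overlaps the query [qlo, qhi]. */
static void query_range(const struct rec *recs, int n, int qlo, int qhi)
{
        for (int i = 0; i < n; i++) {
                if (recs[i].hi < qlo)        /* entirely left of the query: skip  */
                        continue;
                if (recs[i].lo > qhi)        /* entirely right of the query: stop */
                        break;
                printf("overlap: [%d,%d]\n", recs[i].lo, recs[i].hi);
        }
}

int main(void)
{
        const struct rec recs[] = { {0, 4}, {5, 9}, {10, 14}, {20, 24} };

        query_range(recs, 4, 7, 12);   /* visits [5,9] and [10,14] only */
        return 0;
}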
index 054a2032fdb392bce4fb6d3a936bc519dfe4596c..c221d0ecd52ed1413a6851dc40b67131c867f028 100644 (file)
@@ -194,7 +194,7 @@ xfs_defer_trans_abort(
        /* Abort intent items. */
        list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
                trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
-               if (dfp->dfp_committed)
+               if (!dfp->dfp_done)
                        dfp->dfp_type->abort_intent(dfp->dfp_intent);
        }
 
@@ -290,7 +290,6 @@ xfs_defer_finish(
        struct xfs_defer_pending        *dfp;
        struct list_head                *li;
        struct list_head                *n;
-       void                            *done_item = NULL;
        void                            *state;
        int                             error = 0;
        void                            (*cleanup_fn)(struct xfs_trans *, void *, int);
@@ -309,19 +308,11 @@ xfs_defer_finish(
                if (error)
                        goto out;
 
-               /* Mark all pending intents as committed. */
-               list_for_each_entry_reverse(dfp, &dop->dop_pending, dfp_list) {
-                       if (dfp->dfp_committed)
-                               break;
-                       trace_xfs_defer_pending_commit((*tp)->t_mountp, dfp);
-                       dfp->dfp_committed = true;
-               }
-
                /* Log an intent-done item for the first pending item. */
                dfp = list_first_entry(&dop->dop_pending,
                                struct xfs_defer_pending, dfp_list);
                trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
-               done_item = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
+               dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
                                dfp->dfp_count);
                cleanup_fn = dfp->dfp_type->finish_cleanup;
 
@@ -331,7 +322,7 @@ xfs_defer_finish(
                        list_del(li);
                        dfp->dfp_count--;
                        error = dfp->dfp_type->finish_item(*tp, dop, li,
-                                       done_item, &state);
+                                       dfp->dfp_done, &state);
                        if (error) {
                                /*
                                 * Clean up after ourselves and jump out.
@@ -428,8 +419,8 @@ xfs_defer_add(
                dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
                                KM_SLEEP | KM_NOFS);
                dfp->dfp_type = defer_op_types[type];
-               dfp->dfp_committed = false;
                dfp->dfp_intent = NULL;
+               dfp->dfp_done = NULL;
                dfp->dfp_count = 0;
                INIT_LIST_HEAD(&dfp->dfp_work);
                list_add_tail(&dfp->dfp_list, &dop->dop_intake);
index cc3981c482968bb0d16276402637bf16d769fdf4..e96533d178cfceb6afead75e874cb69909883dbb 100644 (file)
@@ -30,8 +30,8 @@ struct xfs_defer_op_type;
 struct xfs_defer_pending {
        const struct xfs_defer_op_type  *dfp_type;      /* function pointers */
        struct list_head                dfp_list;       /* pending items */
-       bool                            dfp_committed;  /* committed trans? */
        void                            *dfp_intent;    /* log intent item */
+       void                            *dfp_done;      /* log done item */
        struct list_head                dfp_work;       /* work items */
        unsigned int                    dfp_count;      /* # extent items */
 };
index e6a8bea0f7bad219cdb98a97f2844fef293e6c92..270fb5cf4fa11eb1ed1ce2170a35b6af1754e460 100644 (file)
@@ -674,7 +674,8 @@ typedef struct xfs_agf {
 #define        XFS_AGF_BTREEBLKS       0x00000800
 #define        XFS_AGF_UUID            0x00001000
 #define        XFS_AGF_RMAP_BLOCKS     0x00002000
-#define        XFS_AGF_NUM_BITS        14
+#define        XFS_AGF_SPARE64         0x00004000
+#define        XFS_AGF_NUM_BITS        15
 #define        XFS_AGF_ALL_BITS        ((1 << XFS_AGF_NUM_BITS) - 1)
 
 #define XFS_AGF_FLAGS \
@@ -691,7 +692,8 @@ typedef struct xfs_agf {
        { XFS_AGF_LONGEST,      "LONGEST" }, \
        { XFS_AGF_BTREEBLKS,    "BTREEBLKS" }, \
        { XFS_AGF_UUID,         "UUID" }, \
-       { XFS_AGF_RMAP_BLOCKS,  "RMAP_BLOCKS" }
+       { XFS_AGF_RMAP_BLOCKS,  "RMAP_BLOCKS" }, \
+       { XFS_AGF_SPARE64,      "SPARE64" }
 
 /* disk block (xfs_daddr_t) in the AG */
 #define XFS_AGF_DADDR(mp)      ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
index 0e3d4f5ec33c6f945b30b5ad47c9f39c46139ac0..4aecc5fefe9656e7e1f3812fc52c80cfc7e8eb9b 100644 (file)
@@ -583,7 +583,8 @@ xfs_sb_verify(
         * Only check the in progress field for the primary superblock as
         * mkfs.xfs doesn't clear it from secondary superblocks.
         */
-       return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+       return xfs_mount_validate_sb(mp, &sb,
+                                    bp->b_maps[0].bm_bn == XFS_SB_DADDR,
                                     check_version);
 }
 
index 607cc29bba21eb504f6ac01a331656d62849d5b9..b5b9bffe352074806910a064ae41a941c97678c8 100644 (file)
@@ -1611,7 +1611,7 @@ xfs_wait_buftarg(
         */
        while (percpu_counter_sum(&btp->bt_io_count))
                delay(100);
-       drain_workqueue(btp->bt_mount->m_buf_workqueue);
+       flush_workqueue(btp->bt_mount->m_buf_workqueue);
 
        /* loop until there is nothing left on the lru list. */
        while (list_lru_count(&btp->bt_lru)) {
index 24ef83ef04de2be5c8fd1e77ab62fa3b59106376..fd6be45b3a1e01c81bfe179d67818bafc9f6ec92 100644 (file)
@@ -1574,9 +1574,16 @@ xfs_fs_fill_super(
                }
        }
 
-       if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+       if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+               if (mp->m_sb.sb_rblocks) {
+                       xfs_alert(mp,
+       "EXPERIMENTAL reverse mapping btree not compatible with realtime device!");
+                       error = -EINVAL;
+                       goto out_filestream_unmount;
+               }
                xfs_alert(mp,
        "EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
+       }
 
        error = xfs_mountfs(mp);
        if (error)
index 7e88bec3f3596c03ba7f5acd711bb72db620d51c..d303a665dba9df637026e36709a962e261792429 100644 (file)
@@ -2295,7 +2295,7 @@ DECLARE_EVENT_CLASS(xfs_defer_pending_class,
                __entry->dev = mp ? mp->m_super->s_dev : 0;
                __entry->type = dfp->dfp_type->type;
                __entry->intent = dfp->dfp_intent;
-               __entry->committed = dfp->dfp_committed;
+               __entry->committed = dfp->dfp_done != NULL;
                __entry->nr = dfp->dfp_count;
        ),
        TP_printk("dev %d:%d optype %d intent %p committed %d nr %d\n",
index 1bfa602958f2a2f7beb16fab8f98263d198c652b..6df9b0749671b26801cbc8bec79f21f8cab34437 100644 (file)
@@ -72,6 +72,7 @@ struct exception_table_entry
 /* Returns 0 if exception not found and fixup otherwise.  */
 extern unsigned long search_exception_table(unsigned long);
 
+
 /*
  * architectures with an MMU should override these two
  */
@@ -230,14 +231,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
        might_fault();                                          \
        access_ok(VERIFY_READ, __p, sizeof(*ptr)) ?             \
                __get_user((x), (__typeof__(*(ptr)) *)__p) :    \
-               -EFAULT;                                        \
+               ((x) = (__typeof__(*(ptr)))0, -EFAULT);         \
 })
 
 #ifndef __get_user_fn
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
-       size = __copy_from_user(x, ptr, size);
-       return size ? -EFAULT : size;
+       size_t n = __copy_from_user(x, ptr, size);
+       if (unlikely(n)) {
+               memset(x + (size - n), 0, n);
+               return -EFAULT;
+       }
+       return 0;
 }
 
 #define __get_user_fn(sz, u, k)        __get_user_fn(sz, u, k)
@@ -257,11 +262,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
 static inline long copy_from_user(void *to,
                const void __user * from, unsigned long n)
 {
+       unsigned long res = n;
        might_fault();
-       if (access_ok(VERIFY_READ, from, n))
-               return __copy_from_user(to, from, n);
-       else
-               return n;
+       if (likely(access_ok(VERIFY_READ, from, n)))
+               res = __copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
 static inline long copy_to_user(void __user *to,
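
The uaccess.h hunks above harden the asm-generic fallbacks: on a faulting user access, __get_user() now zeroes the destination and copy_from_user() zeroes the uncopied tail, instead of leaving stale kernel stack contents behind. A minimal, hypothetical caller (demo_read_flag() is not part of this series) shows what the zeroing buys when a return value goes unchecked:

#include <linux/uaccess.h>

static int demo_read_flag(int __user *uptr)
{
        int flag;

        if (get_user(flag, uptr))
                flag = 0;       /* redundant now: get_user() already zeroed it on -EFAULT */

        return flag;            /* never leaks leftover stack data after a fault */
}
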
index 4d8452c2384b6c23fbffa50352886e2b4ba04075..c5eaf2f80a4c4c76c9cd6029e8e67e0d8d0449c2 100644 (file)
@@ -1056,7 +1056,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
        return NULL;
 }
 
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
        static const void * __acpi_table_##name[]                       \
                __attribute__((unused))                                 \
                 = { (void *) table_id,                                 \
index 8dbc8929a6a0051a2840a3badb580b7bb3d8adc4..573c5a18908fd53970fefea291805c500fd1d7f9 100644 (file)
 #define __compiler_offsetof(a, b)                                      \
        __builtin_offsetof(a, b)
 
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+#if GCC_VERSION >= 40100
 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
 #endif
 
index 436aa4e42221beb16edebd1d5e1bfb3a4fa6af26..668569844d37cef4d51c6c7652d5f34ded2a66d6 100644 (file)
@@ -527,13 +527,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * object's lifetime is managed by something other than RCU.  That
  * "something other" might be reference counting or simple immortality.
  *
- * The seemingly unused size_t variable is to validate @p is indeed a pointer
- * type by making sure it can be dereferenced.
+ * The seemingly unused variable ___typecheck_p validates that @p is
+ * indeed a pointer type by using a pointer to typeof(*p) as the type.
+ * Taking a pointer to typeof(*p) again is needed in case p is void *.
  */
 #define lockless_dereference(p) \
 ({ \
        typeof(p) _________p1 = READ_ONCE(p); \
-       size_t __maybe_unused __size_of_ptr = sizeof(*(p)); \
+       typeof(*(p)) *___typecheck_p __maybe_unused; \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
 })
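
The replacement type check can be illustrated with a short, hedged GNU C snippet (not from the patch): a declaration of type typeof(*(p)) * only compiles when p can be dereferenced, i.e. when p really is a pointer, and it keeps working when p is a void pointer, which is the case the new comment calls out.

void *vp;
int  *ip;
int   not_a_ptr;

typeof(*(vp)) *check_vp;        /* ok: expands to "void *"              */
typeof(*(ip)) *check_ip;        /* ok: expands to "int *"               */
/* typeof(*(not_a_ptr)) *check_np;      would fail to compile           */
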
index 242bf530edfcfe5d720c0818dcb84c0b967b03ff..34bd80512a0c154a94abb11821ae2ec391663c18 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __CPUHOTPLUG_H
 #define __CPUHOTPLUG_H
 
+#include <linux/types.h>
+
 enum cpuhp_state {
        CPUHP_OFFLINE,
        CPUHP_CREATE_THREADS,
index 7f5a5822538539848450f7d55c94ceee50d6c0bc..0148a3046b4864733200d76a3a45ff26e38c1523 100644 (file)
@@ -118,6 +118,15 @@ typedef struct {
        u32 imagesize;
 } efi_capsule_header_t;
 
+struct efi_boot_memmap {
+       efi_memory_desc_t       **map;
+       unsigned long           *map_size;
+       unsigned long           *desc_size;
+       u32                     *desc_ver;
+       unsigned long           *key_ptr;
+       unsigned long           *buff_size;
+};
+
 /*
  * EFI capsule flags
  */
@@ -946,7 +955,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
 /* Iterate through an efi_memory_map */
 #define for_each_efi_memory_desc_in_map(m, md)                            \
        for ((md) = (m)->map;                                              \
-            ((void *)(md) + (m)->desc_size) <= (m)->map_end;              \
+            (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end;      \
             (md) = (void *)(md) + (m)->desc_size)
 
 /**
@@ -1371,11 +1380,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
                          efi_loaded_image_t *image, int *cmd_line_len);
 
 efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
-                               efi_memory_desc_t **map,
-                               unsigned long *map_size,
-                               unsigned long *desc_size,
-                               u32 *desc_ver,
-                               unsigned long *key_ptr);
+                               struct efi_boot_memmap *map);
 
 efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                           unsigned long size, unsigned long align,
@@ -1457,4 +1462,14 @@ extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
        arch_efi_call_virt_teardown();                                  \
 })
 
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+       efi_system_table_t *sys_table_arg,
+       struct efi_boot_memmap *map,
+       void *priv);
+
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
+                                   void *handle,
+                                   struct efi_boot_memmap *map,
+                                   void *priv,
+                                   efi_exit_boot_map_processing priv_func);
 #endif /* _LINUX_EFI_H */
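
A hedged sketch of a boot-stub caller under the new interface follows; priv_data, update_maps() and demo_exit() are illustrative names, and only the struct efi_boot_memmap layout and the efi_exit_boot_services() prototype are taken from the declarations above. The callback presumably runs once the final memory map has been fetched, so a caller can update anything derived from it before boot services go away.

struct priv_data {
        unsigned long dummy;
};

static efi_status_t update_maps(efi_system_table_t *sys_table,
                                struct efi_boot_memmap *map, void *priv)
{
        /* recompute anything derived from the (possibly changed) memory map */
        return EFI_SUCCESS;
}

static efi_status_t demo_exit(efi_system_table_t *sys_table, void *handle)
{
        efi_memory_desc_t *memory_map;
        unsigned long map_size, desc_size, key, buff_size;
        u32 desc_ver;
        struct priv_data my_priv = { 0 };
        struct efi_boot_memmap map = {
                .map       = &memory_map,
                .map_size  = &map_size,
                .desc_size = &desc_size,
                .desc_ver  = &desc_ver,
                .key_ptr   = &key,
                .buff_size = &buff_size,
        };

        return efi_exit_boot_services(sys_table, handle, &map, &my_priv,
                                      update_maps);
}
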
index 8cc719a637286d7262386d21c6ed40453b36a1e5..2ac6fa5f471251b3edccc5eb80a6bf5c9d7e34fc 100644 (file)
@@ -49,8 +49,6 @@ struct fence_cb;
  * @timestamp: Timestamp when the fence was signaled.
  * @status: Optional, only valid if < 0, must be set before calling
  * fence_signal, indicates that the fence has completed with an error.
- * @child_list: list of children fences
- * @active_list: list of active fences
  *
  * the flags member must be manipulated and read using the appropriate
  * atomic ops (bit_*), so taking the spinlock will not be needed most
index 3523bf62f32869f6c936f93386fa04db5f6edbb9..901e25d495ccfb71d8fc91b158a19cfe4dd9fccf 100644 (file)
@@ -574,6 +574,7 @@ static inline void mapping_allow_writable(struct address_space *mapping)
 
 struct posix_acl;
 #define ACL_NOT_CACHED ((void *)(-1))
+#define ACL_DONT_CACHE ((void *)(-3))
 
 static inline struct posix_acl *
 uncached_acl_sentinel(struct task_struct *task)
index cfa6cde25f8e8a3a594fd30bfd8e42de1360826c..76cff18bb0328dda26a424da5b36eb0385272846 100644 (file)
@@ -274,8 +274,7 @@ extern void fscrypt_restore_control_page(struct page *);
 extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
                                                unsigned int);
 /* policy.c */
-extern int fscrypt_process_policy(struct inode *,
-                                       const struct fscrypt_policy *);
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
 extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
 extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
 extern int fscrypt_inherit_context(struct inode *, struct inode *,
@@ -345,7 +344,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
 }
 
 /* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct inode *i,
+static inline int fscrypt_notsupp_process_policy(struct file *f,
                                const struct fscrypt_policy *p)
 {
        return -EOPNOTSUPP;
index 5198f8ed08a464f8da3294efe814f79e2978a009..c97eab67558f6c33877217d7f0a22df6e2bbdcca 100644 (file)
@@ -62,7 +62,7 @@ void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
                                  const char *name,
                                  struct config_item_type *type)
 {
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
        config_group_init_type_name(&t->group, name, type);
 #endif
 }
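
The small change matters because #ifdef CONFIG_CONFIGFS_FS is only true when configfs is built in; with CONFIG_CONFIGFS_FS=m the preprocessor symbol is CONFIG_CONFIGFS_FS_MODULE and the group initialization was silently compiled out. IS_ENABLED() covers both cases. A generic illustration, with CONFIG_FOO as a placeholder symbol:

/*
 * CONFIG_FOO=y : CONFIG_FOO defined         -> #ifdef true,  IS_ENABLED() == 1
 * CONFIG_FOO=m : CONFIG_FOO_MODULE defined  -> #ifdef false, IS_ENABLED() == 1
 * CONFIG_FOO=n : neither defined            -> #ifdef false, IS_ENABLED() == 0
 */
#if IS_ENABLED(CONFIG_FOO)
        /* built for both the built-in and the modular configuration */
#endif
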
index 3267df4610125600f7e441a53a7a0fe583d2ddc1..3d70ece1031377c41933712782268eaabaee7fe5 100644 (file)
@@ -18,6 +18,11 @@ struct vm_fault;
 #define IOMAP_MAPPED   0x03    /* blocks allocated @blkno */
 #define IOMAP_UNWRITTEN        0x04    /* blocks allocated @blkno in unwritten state */
 
+/*
+ * Flags for iomap mappings:
+ */
+#define IOMAP_F_MERGED 0x01    /* contains multiple blocks/extents */
+
 /*
  * Magic value for blkno:
  */
@@ -27,7 +32,8 @@ struct iomap {
        sector_t                blkno;  /* 1st sector of mapping, 512b units */
        loff_t                  offset; /* file offset of mapping, bytes */
        u64                     length; /* length of mapping, bytes */
-       int                     type;   /* type of mapping */
+       u16                     type;   /* type of mapping */
+       u16                     flags;  /* flags for mapping */
        struct block_device     *bdev;  /* block device for I/O */
 };
 
index b52424eaa0ed38e932d174f6a950a488d823399b..0ac26c892fe25c11030415ec48d5580b3d3fefa2 100644 (file)
@@ -945,6 +945,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
 static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
 #endif
 
+/*
+ * The irqsave variants are for usage in non interrupt code. Do not use
+ * them in irq_chip callbacks. Use irq_gc_lock() instead.
+ */
+#define irq_gc_lock_irqsave(gc, flags) \
+       raw_spin_lock_irqsave(&(gc)->lock, flags)
+
+#define irq_gc_unlock_irqrestore(gc, flags)    \
+       raw_spin_unlock_irqrestore(&(gc)->lock, flags)
+
 static inline void irq_reg_writel(struct irq_chip_generic *gc,
                                  u32 val, int reg_offset)
 {
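
A brief, hypothetical usage sketch for the new helpers (demo_gc_shutdown() is not from the patch): code running outside irq_chip callbacks, for example a suspend path, has to disable interrupts itself around the generic-chip lock, which is exactly what the irqsave variants provide.

static void demo_gc_shutdown(struct irq_chip_generic *gc)
{
        unsigned long flags;

        irq_gc_lock_irqsave(gc, flags);
        /* ... read or mask the chip's registers consistently ... */
        irq_gc_unlock_irqrestore(gc, flags);
}
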
index 4429d255c8ab6c7524436d2ee36fdcea57304447..5e5b2969d93167a8337823dcc74b15ce53870307 100644 (file)
@@ -195,6 +195,7 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
 }
 
 extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+extern void mpol_put_task_policy(struct task_struct *);
 
 #else
 
@@ -297,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
        return -1; /* no node preference */
 }
 
+static inline void mpol_put_task_policy(struct task_struct *task)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif
diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h
new file mode 100644 (file)
index 0000000..304985e
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers.
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H
+#define __LINUX_MFD_DA8XX_CFGCHIP_H
+
+#include <linux/bitops.h>
+
+/* register offset (32-bit registers) */
+#define CFGCHIP(n)                             ((n) * 4)
+
+/* CFGCHIP0 (PLL0/EDMA3_0) register bits */
+#define CFGCHIP0_PLL_MASTER_LOCK               BIT(4)
+#define CFGCHIP0_EDMA30TC1DBS(n)               ((n) << 2)
+#define CFGCHIP0_EDMA30TC1DBS_MASK             CFGCHIP0_EDMA30TC1DBS(0x3)
+#define CFGCHIP0_EDMA30TC1DBS_16               CFGCHIP0_EDMA30TC1DBS(0x0)
+#define CFGCHIP0_EDMA30TC1DBS_32               CFGCHIP0_EDMA30TC1DBS(0x1)
+#define CFGCHIP0_EDMA30TC1DBS_64               CFGCHIP0_EDMA30TC1DBS(0x2)
+#define CFGCHIP0_EDMA30TC0DBS(n)               ((n) << 0)
+#define CFGCHIP0_EDMA30TC0DBS_MASK             CFGCHIP0_EDMA30TC0DBS(0x3)
+#define CFGCHIP0_EDMA30TC0DBS_16               CFGCHIP0_EDMA30TC0DBS(0x0)
+#define CFGCHIP0_EDMA30TC0DBS_32               CFGCHIP0_EDMA30TC0DBS(0x1)
+#define CFGCHIP0_EDMA30TC0DBS_64               CFGCHIP0_EDMA30TC0DBS(0x2)
+
+/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */
+#define CFGCHIP1_CAP2SRC(n)                    ((n) << 27)
+#define CFGCHIP1_CAP2SRC_MASK                  CFGCHIP1_CAP2SRC(0x1f)
+#define CFGCHIP1_CAP2SRC_ECAP_PIN              CFGCHIP1_CAP2SRC(0x0)
+#define CFGCHIP1_CAP2SRC_MCASP0_TX             CFGCHIP1_CAP2SRC(0x1)
+#define CFGCHIP1_CAP2SRC_MCASP0_RX             CFGCHIP1_CAP2SRC(0x2)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD  CFGCHIP1_CAP2SRC(0x7)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX            CFGCHIP1_CAP2SRC(0x8)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_TX            CFGCHIP1_CAP2SRC(0x9)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC          CFGCHIP1_CAP2SRC(0xa)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD  CFGCHIP1_CAP2SRC(0xb)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX            CFGCHIP1_CAP2SRC(0xc)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_TX            CFGCHIP1_CAP2SRC(0xd)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC          CFGCHIP1_CAP2SRC(0xe)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD  CFGCHIP1_CAP2SRC(0xf)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX            CFGCHIP1_CAP2SRC(0x10)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_TX            CFGCHIP1_CAP2SRC(0x11)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC          CFGCHIP1_CAP2SRC(0x12)
+#define CFGCHIP1_CAP1SRC(n)                    ((n) << 22)
+#define CFGCHIP1_CAP1SRC_MASK                  CFGCHIP1_CAP1SRC(0x1f)
+#define CFGCHIP1_CAP1SRC_ECAP_PIN              CFGCHIP1_CAP1SRC(0x0)
+#define CFGCHIP1_CAP1SRC_MCASP0_TX             CFGCHIP1_CAP1SRC(0x1)
+#define CFGCHIP1_CAP1SRC_MCASP0_RX             CFGCHIP1_CAP1SRC(0x2)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD  CFGCHIP1_CAP1SRC(0x7)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX            CFGCHIP1_CAP1SRC(0x8)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_TX            CFGCHIP1_CAP1SRC(0x9)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC          CFGCHIP1_CAP1SRC(0xa)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD  CFGCHIP1_CAP1SRC(0xb)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX            CFGCHIP1_CAP1SRC(0xc)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_TX            CFGCHIP1_CAP1SRC(0xd)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC          CFGCHIP1_CAP1SRC(0xe)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD  CFGCHIP1_CAP1SRC(0xf)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX            CFGCHIP1_CAP1SRC(0x10)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_TX            CFGCHIP1_CAP1SRC(0x11)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC          CFGCHIP1_CAP1SRC(0x12)
+#define CFGCHIP1_CAP0SRC(n)                    ((n) << 17)
+#define CFGCHIP1_CAP0SRC_MASK                  CFGCHIP1_CAP0SRC(0x1f)
+#define CFGCHIP1_CAP0SRC_ECAP_PIN              CFGCHIP1_CAP0SRC(0x0)
+#define CFGCHIP1_CAP0SRC_MCASP0_TX             CFGCHIP1_CAP0SRC(0x1)
+#define CFGCHIP1_CAP0SRC_MCASP0_RX             CFGCHIP1_CAP0SRC(0x2)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD  CFGCHIP1_CAP0SRC(0x7)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX            CFGCHIP1_CAP0SRC(0x8)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_TX            CFGCHIP1_CAP0SRC(0x9)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC          CFGCHIP1_CAP0SRC(0xa)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD  CFGCHIP1_CAP0SRC(0xb)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX            CFGCHIP1_CAP0SRC(0xc)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_TX            CFGCHIP1_CAP0SRC(0xd)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC          CFGCHIP1_CAP0SRC(0xe)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD  CFGCHIP1_CAP0SRC(0xf)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX            CFGCHIP1_CAP0SRC(0x10)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_TX            CFGCHIP1_CAP0SRC(0x11)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC          CFGCHIP1_CAP0SRC(0x12)
+#define CFGCHIP1_HPIBYTEAD                     BIT(16)
+#define CFGCHIP1_HPIENA                                BIT(15)
+#define CFGCHIP0_EDMA31TC0DBS(n)               ((n) << 13)
+#define CFGCHIP0_EDMA31TC0DBS_MASK             CFGCHIP0_EDMA31TC0DBS(0x3)
+#define CFGCHIP0_EDMA31TC0DBS_16               CFGCHIP0_EDMA31TC0DBS(0x0)
+#define CFGCHIP0_EDMA31TC0DBS_32               CFGCHIP0_EDMA31TC0DBS(0x1)
+#define CFGCHIP0_EDMA31TC0DBS_64               CFGCHIP0_EDMA31TC0DBS(0x2)
+#define CFGCHIP1_TBCLKSYNC                     BIT(12)
+#define CFGCHIP1_AMUTESEL0(n)                  ((n) << 0)
+#define CFGCHIP1_AMUTESEL0_MASK                        CFGCHIP1_AMUTESEL0(0xf)
+#define CFGCHIP1_AMUTESEL0_LOW                 CFGCHIP1_AMUTESEL0(0x0)
+#define CFGCHIP1_AMUTESEL0_BANK_0              CFGCHIP1_AMUTESEL0(0x1)
+#define CFGCHIP1_AMUTESEL0_BANK_1              CFGCHIP1_AMUTESEL0(0x2)
+#define CFGCHIP1_AMUTESEL0_BANK_2              CFGCHIP1_AMUTESEL0(0x3)
+#define CFGCHIP1_AMUTESEL0_BANK_3              CFGCHIP1_AMUTESEL0(0x4)
+#define CFGCHIP1_AMUTESEL0_BANK_4              CFGCHIP1_AMUTESEL0(0x5)
+#define CFGCHIP1_AMUTESEL0_BANK_5              CFGCHIP1_AMUTESEL0(0x6)
+#define CFGCHIP1_AMUTESEL0_BANK_6              CFGCHIP1_AMUTESEL0(0x7)
+#define CFGCHIP1_AMUTESEL0_BANK_7              CFGCHIP1_AMUTESEL0(0x8)
+
+/* CFGCHIP2 (USB PHY) register bits */
+#define CFGCHIP2_PHYCLKGD                      BIT(17)
+#define CFGCHIP2_VBUSSENSE                     BIT(16)
+#define CFGCHIP2_RESET                         BIT(15)
+#define CFGCHIP2_OTGMODE(n)                    ((n) << 13)
+#define CFGCHIP2_OTGMODE_MASK                  CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_OTGMODE_NO_OVERRIDE           CFGCHIP2_OTGMODE(0x0)
+#define CFGCHIP2_OTGMODE_FORCE_HOST            CFGCHIP2_OTGMODE(0x1)
+#define CFGCHIP2_OTGMODE_FORCE_DEVICE          CFGCHIP2_OTGMODE(0x2)
+#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW   CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_USB1PHYCLKMUX                 BIT(12)
+#define CFGCHIP2_USB2PHYCLKMUX                 BIT(11)
+#define CFGCHIP2_PHYPWRDN                      BIT(10)
+#define CFGCHIP2_OTGPWRDN                      BIT(9)
+#define CFGCHIP2_DATPOL                                BIT(8)
+#define CFGCHIP2_USB1SUSPENDM                  BIT(7)
+#define CFGCHIP2_PHY_PLLON                     BIT(6)
+#define CFGCHIP2_SESENDEN                      BIT(5)
+#define CFGCHIP2_VBDTCTEN                      BIT(4)
+#define CFGCHIP2_REFFREQ(n)                    ((n) << 0)
+#define CFGCHIP2_REFFREQ_MASK                  CFGCHIP2_REFFREQ(0xf)
+#define CFGCHIP2_REFFREQ_12MHZ                 CFGCHIP2_REFFREQ(0x1)
+#define CFGCHIP2_REFFREQ_24MHZ                 CFGCHIP2_REFFREQ(0x2)
+#define CFGCHIP2_REFFREQ_48MHZ                 CFGCHIP2_REFFREQ(0x3)
+#define CFGCHIP2_REFFREQ_19_2MHZ               CFGCHIP2_REFFREQ(0x4)
+#define CFGCHIP2_REFFREQ_38_4MHZ               CFGCHIP2_REFFREQ(0x5)
+#define CFGCHIP2_REFFREQ_13MHZ                 CFGCHIP2_REFFREQ(0x6)
+#define CFGCHIP2_REFFREQ_26MHZ                 CFGCHIP2_REFFREQ(0x7)
+#define CFGCHIP2_REFFREQ_20MHZ                 CFGCHIP2_REFFREQ(0x8)
+#define CFGCHIP2_REFFREQ_40MHZ                 CFGCHIP2_REFFREQ(0x9)
+
+/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */
+#define CFGCHIP3_RMII_SEL                      BIT(8)
+#define CFGCHIP3_UPP_TX_CLKSRC                 BIT(6)
+#define CFGCHIP3_PLL1_MASTER_LOCK              BIT(5)
+#define CFGCHIP3_ASYNC3_CLKSRC                 BIT(4)
+#define CFGCHIP3_PRUEVTSEL                     BIT(3)
+#define CFGCHIP3_DIV45PENA                     BIT(2)
+#define CFGCHIP3_EMA_CLKSRC                    BIT(1)
+
+/* CFGCHIP4 (McASP0 AMUNTEIN) register bits */
+#define CFGCHIP4_AMUTECLR0                     BIT(0)
+
+#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */
index 2567a87872b0afbf0a5db0716871aea88626b3d7..7f55b8b410328f764bbed1949711d588406fe29c 100644 (file)
 /*
  * time in us for processing a single channel, calculated as follows:
  *
- * num cycles = open delay + (sample delay + conv time) * averaging
+ * max num cycles = open delay + (sample delay + conv time) * averaging
  *
- * num cycles: 152 + (1 + 13) * 16 = 376
+ * max num cycles: 262143 + (255 + 13) * 16 = 266431
  *
  * clock frequency: 26MHz / 8 = 3.25MHz
  * clock period: 1 / 3.25MHz = 308ns
  *
- * processing time: 376 * 308ns = 116us
+ * max processing time: 266431 * 308ns = 83ms(approx)
  */
-#define IDLE_TIMEOUT 116 /* microsec */
+#define IDLE_TIMEOUT 83 /* milliseconds */
 
 #define TSCADC_CELLS           2
 
index 21bc4557b67ad9b7aec4f81b4fd02ef9714ac7eb..d1f9a581aca876978a32e18f73841c5cdc24058c 100644 (file)
@@ -6710,9 +6710,10 @@ struct mlx5_ifc_pude_reg_bits {
 };
 
 struct mlx5_ifc_ptys_reg_bits {
-       u8         an_disable_cap[0x1];
+       u8         reserved_at_0[0x1];
        u8         an_disable_admin[0x1];
-       u8         reserved_at_2[0x6];
+       u8         an_disable_cap[0x1];
+       u8         reserved_at_3[0x5];
        u8         local_port[0x8];
        u8         reserved_at_10[0xd];
        u8         proto_mask[0x3];
index 08ed53eeedd5fd203a961310367ef8a206c76696..ef815b9cd42696bc70db1e9f35e39a9c295afacd 100644 (file)
@@ -2014,6 +2014,7 @@ extern void mm_drop_all_locks(struct mm_struct *mm);
 
 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 extern struct file *get_mm_exe_file(struct mm_struct *mm);
+extern struct file *get_task_exe_file(struct task_struct *task);
 
 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
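
A hedged usage sketch for the helper declared above (demo_log_exe() is hypothetical): unlike dereferencing task->mm->exe_file under RCU, get_task_exe_file() returns a real reference that the caller may hold across sleeping operations and must drop with fput(), as the audit change further down does.

static void demo_log_exe(struct task_struct *task)
{
        struct file *exe = get_task_exe_file(task);

        if (!exe)
                return;         /* kernel thread or mm without an exe file */

        pr_info("task %d exe %pD\n", task_pid_nr(task), exe);
        fput(exe);
}
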
index d572b78b65e14709d5fb8d8a60ee9d71147d0d81..7f2ae99e5daf39406fa5b166f6a1a75c464d1fcd 100644 (file)
@@ -828,9 +828,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  */
 #define zone_idx(zone)         ((zone) - (zone)->zone_pgdat->node_zones)
 
-static inline int populated_zone(struct zone *zone)
+/*
+ * Returns true if a zone has pages managed by the buddy allocator.
+ * All the reclaim decisions have to use this function rather than
+ * populated_zone(). If the whole zone is reserved then we can easily
+ * end up with populated_zone() && !managed_zone().
+ */
+static inline bool managed_zone(struct zone *zone)
+{
+       return zone->managed_pages;
+}
+
+/* Returns true if a zone has memory */
+static inline bool populated_zone(struct zone *zone)
 {
-       return (!!zone->present_pages);
+       return zone->present_pages;
 }
 
 extern int movable_zone;
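
A hedged sketch of the intended call pattern (demo_scan_node() is illustrative only): reclaim-side loops are meant to test managed_zone() so that a zone whose present pages are entirely reserved, and therefore never handed to the buddy allocator, gets skipped.

static void demo_scan_node(pg_data_t *pgdat)
{
        struct zone *zone;
        int i;

        for (i = 0; i < MAX_NR_ZONES; i++) {
                zone = &pgdat->node_zones[i];
                if (!managed_zone(zone))
                        continue;
                /* zone has buddy-managed pages: safe to base reclaim decisions on it */
        }
}
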
index 3a788bf0affdcd80a1282c5e4f4a5dc59e56c8ca..e8d79d4ebcfe4468c62340379ac7b2583e7fb724 100644 (file)
@@ -3267,6 +3267,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
        napi->skb = NULL;
 }
 
+bool netdev_is_rx_handler_busy(struct net_device *dev);
 int netdev_rx_handler_register(struct net_device *dev,
                               rx_handler_func_t *rx_handler,
                               void *rx_handler_data);
index 80ca889b164e3eab5b42c7249d264f55650e44bf..664da00486257519acadedde60b5de1571a8d540 100644 (file)
@@ -15,6 +15,6 @@ struct nf_acct;
 struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
 void nfnl_acct_put(struct nf_acct *acct);
 void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
-extern int nfnl_acct_overquota(const struct sk_buff *skb,
-                             struct nf_acct *nfacct);
+int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
+                       struct nf_acct *nfacct);
 #endif /* _NFNL_ACCT_H */
index d8b37bab2887e75ed0c7f3cc58a57f2594109179..7676557ce357d682c3c47f0599e66bdd8a42225f 100644 (file)
@@ -794,7 +794,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-       uuid_le         hostid;
+       uuid_be         hostid;
        __le16          cntlid;
        char            resv4[238];
        char            subsysnqn[NVMF_NQN_FIELD_LEN];
index fbc1fa625c3ec306ed610c2c8b1b1a50f505e2a5..0ab8359656696d6e5d35a89fd1b22c7dc08145b4 100644 (file)
@@ -682,15 +682,6 @@ struct pci_driver {
 
 #define        to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
 
-/**
- * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
- * @_table: device table name
- *
- * This macro is deprecated and should not be used in new code.
- */
-#define DEFINE_PCI_DEVICE_TABLE(_table) \
-       const struct pci_device_id _table[]
-
 /**
  * PCI_DEVICE - macro used to describe a specific pci device
  * @vend: the 16 bit PCI Vendor ID
index 923266cd294a33c8d98cbe274df0991f6647dafa..48ec7651989b093fc015e44b59657c7000e11012 100644 (file)
@@ -111,7 +111,6 @@ struct uart_8250_port {
                                                 *   if no_console_suspend
                                                 */
        unsigned char           probe;
-       struct mctrl_gpios      *gpios;
 #define UART_PROBE_RSA (1 << 0)
 
        /*
index 76199b75d5845d21cbcfb67786dfec757e30a446..e302c447e057a98b0ba47b5bc73acd816b9dc7f7 100644 (file)
@@ -1,6 +1,16 @@
 #ifndef __SMC91X_H__
 #define __SMC91X_H__
 
+/*
+ * These bits define which access sizes a platform can support, rather
+ * than the maximal access size.  So, if your platform can do 16-bit
+ * and 32-bit accesses to the SMC91x device, but not 8-bit, set both
+ * SMC91X_USE_16BIT and SMC91X_USE_32BIT.
+ *
+ * The SMC91x driver requires at least one of SMC91X_USE_8BIT or
+ * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
+ * an invalid configuration.
+ */
 #define SMC91X_USE_8BIT (1 << 0)
 #define SMC91X_USE_16BIT (1 << 1)
 #define SMC91X_USE_32BIT (1 << 2)
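
A hedged board-code example of how the bits are meant to be combined, assuming the usual struct smc91x_platdata platform data: a bus that supports 16-bit and 32-bit cycles but not 8-bit ones advertises exactly those two sizes.

static struct smc91x_platdata demo_smc91x_pdata = {
        .flags = SMC91X_USE_16BIT | SMC91X_USE_32BIT,
};
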
index cbd8990e2e77e6ffa49f45611eb6710980fddd23..2b5b10eed74ff52fc5b11fe911be7d84b08eda77 100644 (file)
@@ -118,10 +118,11 @@ static inline int arch_within_stack_frames(const void * const stack,
 extern void __check_object_size(const void *ptr, unsigned long n,
                                        bool to_user);
 
-static inline void check_object_size(const void *ptr, unsigned long n,
-                                    bool to_user)
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+                                             bool to_user)
 {
-       __check_object_size(ptr, n, to_user);
+       if (!__builtin_constant_p(n))
+               __check_object_size(ptr, n, to_user);
 }
 #else
 static inline void check_object_size(const void *ptr, unsigned long n,
index 1b5d1cd796e2b753bdd1ff586427a35c033a3ae9..75b4aaf31a9da419e9415f621cc930b02e871047 100644 (file)
@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
+#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
index 9b4c418bebd84ae0a7debfbcc697ee92b136ec99..fd60eccb59a67969eba53416a376a3c912d62d81 100644 (file)
@@ -52,7 +52,7 @@ struct unix_sock {
        struct sock             sk;
        struct unix_address     *addr;
        struct path             path;
-       struct mutex            readlock;
+       struct mutex            iolock, bindlock;
        struct sock             *peer;
        struct list_head        link;
        atomic_long_t           inflight;
index 9c23f4d33e06c1ad1dab246b822d5dcc76c854ac..beb7610d64e936b02d959d97cdf5a85fa5b29132 100644 (file)
@@ -1102,6 +1102,7 @@ struct station_info {
        struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
 };
 
+#if IS_ENABLED(CONFIG_CFG80211)
 /**
  * cfg80211_get_station - retrieve information about a given station
  * @dev: the device where the station is supposed to be connected to
@@ -1114,6 +1115,14 @@ struct station_info {
  */
 int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
                         struct station_info *sinfo);
+#else
+static inline int cfg80211_get_station(struct net_device *dev,
+                                      const u8 *mac_addr,
+                                      struct station_info *sinfo)
+{
+       return -ENOENT;
+}
+#endif
 
 /**
  * enum monitor_flags - monitor flags
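
A hedged sketch of a caller that now builds and degrades gracefully when CONFIG_CFG80211 is disabled (demo_peer_signal() is hypothetical and assumes the usual station_info::signal field):

static int demo_peer_signal(struct net_device *dev, const u8 *mac, s8 *signal)
{
        struct station_info sinfo = {};
        int err;

        err = cfg80211_get_station(dev, mac, &sinfo);
        if (err)
                return err;     /* -ENOENT from the stub when cfg80211 is off */

        *signal = sinfo.signal;
        return 0;
}
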
index 4079fc18ffe4643522178b394ed6d1b54d5a6093..7d4a72e75f334d300f9d55cd53bdd1d1e82c08f9 100644 (file)
@@ -111,6 +111,7 @@ struct fib_info {
        unsigned char           fib_scope;
        unsigned char           fib_type;
        __be32                  fib_prefsrc;
+       u32                     fib_tb_id;
        u32                     fib_priority;
        u32                     *fib_metrics;
 #define fib_mtu fib_metrics[RTAX_MTU-1]
@@ -319,7 +320,7 @@ void fib_flush_external(struct net *net);
 /* Exported by fib_semantics.c */
 int ip_fib_check_default(__be32 gw, struct net_device *dev);
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
-int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_down_addr(struct net_device *dev, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
 
 extern u32 fib_multipath_secret __read_mostly;
index d27588c8dbd9f2a01f65cec2c8587f33a7eba1ce..1139cde0fdc590c08de507a66eee1b6de82eea1b 100644 (file)
@@ -36,4 +36,8 @@ void nft_meta_set_eval(const struct nft_expr *expr,
 void nft_meta_set_destroy(const struct nft_ctx *ctx,
                          const struct nft_expr *expr);
 
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+                         const struct nft_expr *expr,
+                         const struct nft_data **data);
+
 #endif
index 60fa1530006b45bc229dbf46bfe93ff690bc7b42..02e28c529b29bcb77d24d950af6873d66995ff31 100644 (file)
@@ -8,6 +8,10 @@ struct nft_reject {
 
 extern const struct nla_policy nft_reject_policy[];
 
+int nft_reject_validate(const struct nft_ctx *ctx,
+                       const struct nft_expr *expr,
+                       const struct nft_data **data);
+
 int nft_reject_init(const struct nft_ctx *ctx,
                    const struct nft_expr *expr,
                    const struct nlattr * const tb[]);
index c00e7d51bb18d77f47ef9fb016ef432f5db4f7be..7717302cab91e1bb18cc42c3f2d4b51f98f3c148 100644 (file)
@@ -1523,6 +1523,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
 {
        if (sk->sk_send_head == skb_unlinked)
                sk->sk_send_head = NULL;
+       if (tcp_sk(sk)->highest_sack == skb_unlinked)
+               tcp_sk(sk)->highest_sack = NULL;
 }
 
 static inline void tcp_init_send_head(struct sock *sk)
index 13c0b2ba1b6c0942ef2c0dd07d77a78719438d1e..73d870918939e082295a90bd9a092814dd97131b 100644 (file)
@@ -11,12 +11,12 @@ struct sas_rphy;
 struct request;
 
 #if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS)
-static inline int is_sas_attached(struct scsi_device *sdev)
+static inline int scsi_is_sas_rphy(const struct device *sdev)
 {
        return 0;
 }
 #else
-extern int is_sas_attached(struct scsi_device *sdev);
+extern int scsi_is_sas_rphy(const struct device *);
 #endif
 
 static inline int sas_protocol_ata(enum sas_protocol proto)
@@ -202,7 +202,6 @@ extern int sas_rphy_add(struct sas_rphy *);
 extern void sas_rphy_remove(struct sas_rphy *);
 extern void sas_rphy_delete(struct sas_rphy *);
 extern void sas_rphy_unlink(struct sas_rphy *);
-extern int scsi_is_sas_rphy(const struct device *);
 
 struct sas_port *sas_port_alloc(struct device *, int);
 struct sas_port *sas_port_alloc_num(struct device *);
index 9c9c6ad55f1487b9b4cead653fc996930603b259..5cd4d4d2dd1d226ba6e5b024a697e71dfde9ac7d 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/atmapi.h>
 #include <linux/atmioc.h>
+#include <linux/time.h>
 
 #define ZATM_GETPOOL   _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
                                                /* get pool statistics */
index 163e8adac2d664515a36341778209be057ab012d..4bd1f55d63773287e66644b48cea7fceae8f3a53 100644 (file)
@@ -16,7 +16,8 @@
 #define _UAPI__LINUX_IF_PPPOL2TP_H
 
 #include <linux/types.h>
-
+#include <linux/in.h>
+#include <linux/in6.h>
 
 /* Structure used to connect() the socket to a particular tunnel UDP
  * socket over IPv4.
index e128769331b5a7c6be4a10cf44c262be68a65772..d37bbb17a007c108f093ac517e4f0d567f3920de 100644 (file)
 #include <asm/byteorder.h>
 
 #include <linux/socket.h>
+#include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/if_pppol2tp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
 
 /* For user-space programs to pick up these definitions
  * which they wouldn't get otherwise without defining __KERNEL__
index 1046f5515174907f0779dac4add7cf0f14a51a68..777b6cdb1b7be001812f51b6fe9f9522af85d0ca 100644 (file)
@@ -2,6 +2,9 @@
 #define _UAPI_IF_TUNNEL_H_
 
 #include <linux/types.h>
+#include <linux/if.h>
+#include <linux/ip.h>
+#include <linux/in6.h>
 #include <asm/byteorder.h>
 
 
index 3d48014cdd71d0d120693594e50a4b1cbc3155cc..30f031db12f60c9c5f94a2194b09663f1b20e552 100644 (file)
@@ -1,11 +1,13 @@
 #ifndef _IPX_H_
 #define _IPX_H_
+#include <linux/libc-compat.h> /* for compatibility with glibc netipx/ipx.h */
 #include <linux/types.h>
 #include <linux/sockios.h>
 #include <linux/socket.h>
 #define IPX_NODE_LEN   6
 #define IPX_MTU                576
 
+#if __UAPI_DEF_SOCKADDR_IPX
 struct sockaddr_ipx {
        __kernel_sa_family_t sipx_family;
        __be16          sipx_port;
@@ -14,6 +16,7 @@ struct sockaddr_ipx {
        __u8            sipx_type;
        unsigned char   sipx_zero;      /* 16 byte fill */
 };
+#endif /* __UAPI_DEF_SOCKADDR_IPX */
 
 /*
  * So we can fit the extra info for SIOCSIFADDR into the address nicely
@@ -23,12 +26,15 @@ struct sockaddr_ipx {
 #define IPX_DLTITF     0
 #define IPX_CRTITF     1
 
+#if __UAPI_DEF_IPX_ROUTE_DEFINITION
 struct ipx_route_definition {
        __be32        ipx_network;
        __be32        ipx_router_network;
        unsigned char ipx_router_node[IPX_NODE_LEN];
 };
+#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */
 
+#if __UAPI_DEF_IPX_INTERFACE_DEFINITION
 struct ipx_interface_definition {
        __be32        ipx_network;
        unsigned char ipx_device[16];
@@ -45,16 +51,20 @@ struct ipx_interface_definition {
 #define IPX_INTERNAL           2
        unsigned char ipx_node[IPX_NODE_LEN];
 };
-       
+#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */
+
+#if __UAPI_DEF_IPX_CONFIG_DATA
 struct ipx_config_data {
        unsigned char   ipxcfg_auto_select_primary;
        unsigned char   ipxcfg_auto_create_interfaces;
 };
+#endif /* __UAPI_DEF_IPX_CONFIG_DATA */
 
 /*
  * OLD Route Definition for backward compatibility.
  */
 
+#if __UAPI_DEF_IPX_ROUTE_DEF
 struct ipx_route_def {
        __be32          ipx_network;
        __be32          ipx_router_network;
@@ -67,6 +77,7 @@ struct ipx_route_def {
 #define IPX_RT_BLUEBOOK                2
 #define IPX_RT_ROUTED          1
 };
+#endif /* __UAPI_DEF_IPX_ROUTE_DEF */
 
 #define SIOCAIPXITFCRT         (SIOCPROTOPRIVATE)
 #define SIOCAIPXPRISLT         (SIOCPROTOPRIVATE + 1)
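
Together with the libc-compat.h defaults added in the next hunk, these guards let userspace that has already pulled in the glibc definitions include the kernel UAPI header without redefinition errors. A hedged userspace example follows; the glibc header has to come first, since it is its __NETIPX_IPX_H guard that switches the kernel copies off.

#include <sys/socket.h>         /* AF_IPX */
#include <netipx/ipx.h>         /* glibc definitions, defines __NETIPX_IPX_H */
#include <linux/ipx.h>          /* kernel UAPI; duplicate structs compiled out */

int main(void)
{
        struct sockaddr_ipx sipx = { .sipx_family = AF_IPX };

        return sipx.sipx_port ? 1 : 0;
}
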
index e4f048ee7043d196a0948c9dfc520b78082c6e54..44b8a6bd5fe1128e8c18061e14b9a63486d26e0d 100644 (file)
 
 #endif /* _NETINET_IN_H */
 
+/* Coordinate with glibc netipx/ipx.h header. */
+#if defined(__NETIPX_IPX_H)
+
+#define __UAPI_DEF_SOCKADDR_IPX                        0
+#define __UAPI_DEF_IPX_ROUTE_DEFINITION                0
+#define __UAPI_DEF_IPX_INTERFACE_DEFINITION    0
+#define __UAPI_DEF_IPX_CONFIG_DATA             0
+#define __UAPI_DEF_IPX_ROUTE_DEF               0
+
+#else /* defined(__NETIPX_IPX_H) */
+
+#define __UAPI_DEF_SOCKADDR_IPX                        1
+#define __UAPI_DEF_IPX_ROUTE_DEFINITION                1
+#define __UAPI_DEF_IPX_INTERFACE_DEFINITION    1
+#define __UAPI_DEF_IPX_CONFIG_DATA             1
+#define __UAPI_DEF_IPX_ROUTE_DEF               1
+
+#endif /* defined(__NETIPX_IPX_H) */
+
 /* Definitions for xattr.h */
 #if defined(_SYS_XATTR_H)
 #define __UAPI_DEF_XATTR               0
 #define __UAPI_DEF_IN6_PKTINFO         1
 #define __UAPI_DEF_IP6_MTUINFO         1
 
+/* Definitions for ipx.h */
+#define __UAPI_DEF_SOCKADDR_IPX                        1
+#define __UAPI_DEF_IPX_ROUTE_DEFINITION                1
+#define __UAPI_DEF_IPX_INTERFACE_DEFINITION    1
+#define __UAPI_DEF_IPX_CONFIG_DATA             1
+#define __UAPI_DEF_IPX_ROUTE_DEF               1
+
 /* Definitions for xattr.h */
 #define __UAPI_DEF_XATTR               1
 
index d95a3018f6a1e7018f2b8774c4513387a270b3fd..54c3b4f4aceb4f7cdf5f23319f5c707751e3e7a7 100644 (file)
@@ -583,7 +583,7 @@ enum ovs_userspace_attr {
 #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
 
 struct ovs_action_trunc {
-       uint32_t max_len; /* Max packet size in bytes. */
+       __u32 max_len; /* Max packet size in bytes. */
 };
 
 /**
@@ -632,8 +632,8 @@ enum ovs_hash_alg {
  * @hash_basis: basis used for computing hash.
  */
 struct ovs_action_hash {
-       uint32_t  hash_alg;     /* One of ovs_hash_alg. */
-       uint32_t  hash_basis;
+       __u32  hash_alg;     /* One of ovs_hash_alg. */
+       __u32  hash_basis;
 };
 
 /**
index d6709eb70970eb764bd14af1d1874fe5144b9053..0d302a87f21b58ac711aaf5720cca12640bfc0ef 100644 (file)
@@ -19,6 +19,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/file.h>
 #include <linux/kernel.h>
 #include <linux/audit.h>
 #include <linux/kthread.h>
@@ -544,10 +545,11 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
        unsigned long ino;
        dev_t dev;
 
-       rcu_read_lock();
-       exe_file = rcu_dereference(tsk->mm->exe_file);
+       exe_file = get_task_exe_file(tsk);
+       if (!exe_file)
+               return 0;
        ino = exe_file->f_inode->i_ino;
        dev = exe_file->f_inode->i_sb->s_dev;
-       rcu_read_unlock();
+       fput(exe_file);
        return audit_mark_compare(mark, ino, dev);
 }
index c2de56ab0fce8b2193a2414afc2d4eb743510c74..7fa0c4ae6394f028fa09694b219314dd3d7d8731 100644 (file)
@@ -1,4 +1,12 @@
+# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
 CONFIG_KERNEL_XZ=y
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_KERNEL_LZ4 is not set
 CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_SLAB is not set
+# CONFIG_SLUB is not set
 CONFIG_SLOB=y
index c7fd2778ed50edc52b7c0e5f1f651dcb141c9495..c27e53326befe9f33ffc605a75a48cf8e9ae6624 100644 (file)
@@ -2069,6 +2069,20 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
        mutex_unlock(&cpuset_mutex);
 }
 
+/*
+ * Make sure the new task conform to the current state of its parent,
+ * which could have been changed by cpuset just after it inherits the
+ * state from the parent and before it sits on the cgroup's task list.
+ */
+void cpuset_fork(struct task_struct *task)
+{
+       if (task_css_is_root(task, cpuset_cgrp_id))
+               return;
+
+       set_cpus_allowed_ptr(task, &current->cpus_allowed);
+       task->mems_allowed = current->mems_allowed;
+}
+
 struct cgroup_subsys cpuset_cgrp_subsys = {
        .css_alloc      = cpuset_css_alloc,
        .css_online     = cpuset_css_online,
@@ -2079,6 +2093,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
        .attach         = cpuset_attach,
        .post_attach    = cpuset_post_attach,
        .bind           = cpuset_bind,
+       .fork           = cpuset_fork,
        .legacy_cftypes = files,
        .early_init     = true,
 };
index 3cfabdf7b942908d78f071a5bedab9f7bcb6fde7..a54f2c2cdb20735f67e48738c02ef2d85afa5019 100644 (file)
@@ -2496,11 +2496,11 @@ static int __perf_event_stop(void *info)
        return 0;
 }
 
-static int perf_event_restart(struct perf_event *event)
+static int perf_event_stop(struct perf_event *event, int restart)
 {
        struct stop_event_data sd = {
                .event          = event,
-               .restart        = 1,
+               .restart        = restart,
        };
        int ret = 0;
 
@@ -3549,10 +3549,18 @@ static int perf_event_read(struct perf_event *event, bool group)
                        .group = group,
                        .ret = 0,
                };
-               ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
-               /* The event must have been read from an online CPU: */
-               WARN_ON_ONCE(ret);
-               ret = ret ? : data.ret;
+               /*
+                * Purposely ignore the smp_call_function_single() return
+                * value.
+                *
+                * If event->oncpu isn't a valid CPU it means the event got
+                * scheduled out and that will have updated the event count.
+                *
+                * Therefore, either way, we'll have an up-to-date event count
+                * after this.
+                */
+               (void)smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
+               ret = data.ret;
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                struct perf_event_context *ctx = event->ctx;
                unsigned long flags;
@@ -4837,6 +4845,19 @@ static void ring_buffer_attach(struct perf_event *event,
                spin_unlock_irqrestore(&rb->event_lock, flags);
        }
 
+       /*
+        * Avoid racing with perf_mmap_close(AUX): stop the event
+        * before swizzling the event::rb pointer; if it's getting
+        * unmapped, its aux_mmap_count will be 0 and it won't
+        * restart. See the comment in __perf_pmu_output_stop().
+        *
+        * Data will inevitably be lost when set_output is done in
+        * mid-air, but then again, whoever does it like this is
+        * not in for the data anyway.
+        */
+       if (has_aux(event))
+               perf_event_stop(event, 0);
+
        rcu_assign_pointer(event->rb, rb);
 
        if (old_rb) {
@@ -6112,7 +6133,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
        raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
        if (restart)
-               perf_event_restart(event);
+               perf_event_stop(event, 1);
 }
 
 void perf_event_exec(void)
@@ -6156,7 +6177,13 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 
        /*
         * In case of inheritance, it will be the parent that links to the
-        * ring-buffer, but it will be the child that's actually using it:
+        * ring-buffer, but it will be the child that's actually using it.
+        *
+        * We are using event::rb to determine if the event should be stopped,
+        * however this may race with ring_buffer_attach() (through set_output),
+        * which will make us skip the event that actually needs to be stopped.
+        * So ring_buffer_attach() has to stop an aux event before re-assigning
+        * its rb pointer.
         */
        if (rcu_dereference(parent->rb) == rb)
                ro->err = __perf_event_stop(&sd);
@@ -6670,7 +6697,7 @@ static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
        raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
        if (restart)
-               perf_event_restart(event);
+               perf_event_stop(event, 1);
 }
 
 /*
@@ -7859,7 +7886,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        mmput(mm);
 
 restart:
-       perf_event_restart(event);
+       perf_event_stop(event, 1);
 }
 
 /*
index ae9b90dc9a5a66c74134d4464665ee05c125ea2b..257fa460b846032744e2da500d489d0995678571 100644 (file)
@@ -330,15 +330,22 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
        if (!rb)
                return NULL;
 
-       if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
+       if (!rb_has_aux(rb))
                goto err;
 
        /*
-        * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
-        * the aux buffer is in perf_mmap_close(), about to get freed.
+        * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
+        * about to get freed, so we leave immediately.
+        *
+        * Checking rb::aux_mmap_count and rb::refcount has to be done in
+        * the same order, see perf_mmap_close. Otherwise we end up freeing
+        * aux pages in this path, which is a bug, because in_atomic().
         */
        if (!atomic_read(&rb->aux_mmap_count))
-               goto err_put;
+               goto err;
+
+       if (!atomic_inc_not_zero(&rb->aux_refcount))
+               goto err;
 
        /*
         * Nesting is not supported for AUX area, make sure nested
index 2f974ae042a677a90c7d34a78b66793cbe8584c5..091a78be3b09d5669d9c10b98f6300e4171d2413 100644 (file)
@@ -848,12 +848,7 @@ void do_exit(long code)
        TASKS_RCU(preempt_enable());
        exit_notify(tsk, group_dead);
        proc_exit_connector(tsk);
-#ifdef CONFIG_NUMA
-       task_lock(tsk);
-       mpol_put(tsk->mempolicy);
-       tsk->mempolicy = NULL;
-       task_unlock(tsk);
-#endif
+       mpol_put_task_policy(tsk);
 #ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
index 52e725d4a866b4ac30f16b4db075b9b197e4e53d..beb31725f7e2746fb17cdd305c4193ab2c70e551 100644 (file)
@@ -798,6 +798,29 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
 }
 EXPORT_SYMBOL(get_mm_exe_file);
 
+/**
+ * get_task_exe_file - acquire a reference to the task's executable file
+ *
+ * Returns %NULL if task's mm (if any) has no associated executable file or
+ * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
+ * User must release file via fput().
+ */
+struct file *get_task_exe_file(struct task_struct *task)
+{
+       struct file *exe_file = NULL;
+       struct mm_struct *mm;
+
+       task_lock(task);
+       mm = task->mm;
+       if (mm) {
+               if (!(task->flags & PF_KTHREAD))
+                       exe_file = get_mm_exe_file(mm);
+       }
+       task_unlock(task);
+       return exe_file;
+}
+EXPORT_SYMBOL(get_task_exe_file);
+
 /**
  * get_task_mm - acquire a reference to the task's mm
  *
@@ -913,14 +936,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
        deactivate_mm(tsk, mm);
 
        /*
-        * If we're exiting normally, clear a user-space tid field if
-        * requested.  We leave this alone when dying by signal, to leave
-        * the value intact in a core dump, and to save the unnecessary
-        * trouble, say, a killed vfork parent shouldn't touch this mm.
-        * Userland only wants this done for a sys_exit.
+        * Signal userspace if we're not exiting with a core dump
+        * because we want to leave the value intact for debugging
+        * purposes.
         */
        if (tsk->clear_child_tid) {
-               if (!(tsk->flags & PF_SIGNALED) &&
+               if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
                    atomic_read(&mm->mm_users) > 1) {
                        /*
                         * We don't check the error code - if userspace has
@@ -1404,7 +1425,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->real_start_time = ktime_get_boot_ns();
        p->io_context = NULL;
        p->audit_context = NULL;
-       threadgroup_change_begin(current);
        cgroup_fork(p);
 #ifdef CONFIG_NUMA
        p->mempolicy = mpol_dup(p->mempolicy);
@@ -1556,6 +1576,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        INIT_LIST_HEAD(&p->thread_group);
        p->task_works = NULL;
 
+       threadgroup_change_begin(current);
        /*
         * Ensure that the cgroup subsystem policies allow the new process to be
         * forked. It should be noted the the new process's css_set can be changed
@@ -1656,6 +1677,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 bad_fork_cancel_cgroup:
        cgroup_cancel_fork(p);
 bad_fork_free_pid:
+       threadgroup_change_end(current);
        if (pid != &init_struct_pid)
                free_pid(pid);
 bad_fork_cleanup_thread:
@@ -1688,7 +1710,6 @@ bad_fork_cleanup_policy:
        mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
 #endif
-       threadgroup_change_end(current);
        delayacct_tsk_free(p);
 bad_fork_cleanup_count:
        atomic_dec(&p->cred->user->processes);
index 503bc2d348e59aa2f866a4ad5b79d913419e862b..037c321c56188f9fbb35193072f08052d6e4771f 100644 (file)
@@ -887,7 +887,10 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
        return 0;
 out:
        vfree(pi->sechdrs);
+       pi->sechdrs = NULL;
+
        vfree(pi->purgatory_buf);
+       pi->purgatory_buf = NULL;
        return ret;
 }
 
index 251d16b4cb41e67111ff2f1f783bf39cdea13822..b501e390bb34403c11d6ae09c31ea9fcb769ea8e 100644 (file)
@@ -247,6 +247,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
        arch_remove_memory(align_start, align_size);
+       untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
                        "%s: failed to free all reserved pages\n", __func__);
@@ -282,6 +283,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
        resource_size_t key, align_start, align_size, align_end;
+       pgprot_t pgprot = PAGE_KERNEL;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
        int error, nid, is_ram;
@@ -351,6 +353,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        if (nid < 0)
                nid = numa_mem_id();
 
+       error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
+                       align_size);
+       if (error)
+               goto err_pfn_remap;
+
        error = arch_add_memory(nid, align_start, align_size, true);
        if (error)
                goto err_add_memory;
@@ -371,6 +378,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        return __va(res->start);
 
  err_add_memory:
+       untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+ err_pfn_remap:
  err_radix:
        pgmap_radix_release(res);
        devres_free(page_map);
index 97b0df71303ef7bbecf28d1479c3e198885b6199..168ff442ebde71cefea74ce66a2bf8e86c6e3ddc 100644 (file)
@@ -482,7 +482,16 @@ void pm_qos_update_request(struct pm_qos_request *req,
                return;
        }
 
-       cancel_delayed_work_sync(&req->work);
+       /*
+        * This function may be called very early during boot, for example,
+        * from of_clk_init(), where irq needs to stay disabled.
+        * cancel_delayed_work_sync() assumes that irq is enabled on
+        * invocation and re-enables it on return.  Avoid calling it until
+        * workqueue is initialized.
+        */
+       if (keventd_up())
+               cancel_delayed_work_sync(&req->work);
+
        __pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
index b69eb8a2876fc2ec7ba1af9e5cbc000737cd47fd..16bab471c7e23d8099eebca1c897ccb6a09f6bdf 100644 (file)
@@ -99,26 +99,32 @@ again:
        return add;
 }
 
-/*
- * printk one line from the temporary buffer from @start index until
- * and including the @end index.
- */
-static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
+static void printk_nmi_flush_line(const char *text, int len)
 {
-       const char *buf = s->buffer + start;
-
        /*
         * The buffers are flushed in NMI only on panic.  The messages must
         * go only into the ring buffer at this stage.  Consoles will get
         * explicitly called later when a crashdump is not generated.
         */
        if (in_nmi())
-               printk_deferred("%.*s", (end - start) + 1, buf);
+               printk_deferred("%.*s", len, text);
        else
-               printk("%.*s", (end - start) + 1, buf);
+               printk("%.*s", len, text);
 
 }
 
+/*
+ * printk one line from the temporary buffer from @start index until
+ * and including the @end index.
+ */
+static void printk_nmi_flush_seq_line(struct nmi_seq_buf *s,
+                                       int start, int end)
+{
+       const char *buf = s->buffer + start;
+
+       printk_nmi_flush_line(buf, (end - start) + 1);
+}
+
 /*
  * Flush data from the associated per_CPU buffer. The function
  * can be called either via IRQ work or independently.
@@ -150,9 +156,11 @@ more:
         * the buffer an unexpected way. If we printed something then
         * @len must only increase.
         */
-       if (i && i >= len)
-               pr_err("printk_nmi_flush: internal error: i=%d >= len=%zu\n",
-                      i, len);
+       if (i && i >= len) {
+               const char *msg = "printk_nmi_flush: internal error\n";
+
+               printk_nmi_flush_line(msg, strlen(msg));
+       }
 
        if (!len)
                goto out; /* Someone else has already flushed the buffer. */
@@ -166,14 +174,14 @@ more:
        /* Print line by line. */
        for (; i < size; i++) {
                if (s->buffer[i] == '\n') {
-                       print_nmi_seq_line(s, last_i, i);
+                       printk_nmi_flush_seq_line(s, last_i, i);
                        last_i = i + 1;
                }
        }
        /* Check if there was a partial line. */
        if (last_i < size) {
-               print_nmi_seq_line(s, last_i, size - 1);
-               pr_cont("\n");
+               printk_nmi_flush_seq_line(s, last_i, size - 1);
+               printk_nmi_flush_line("\n", strlen("\n"));
        }
 
        /*
index 2a906f20fba7c4ba63d89fe87ac3527607be453a..44817c640e99b13533221058066b03af010abb47 100644 (file)
@@ -2016,6 +2016,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
+       /*
+        * Ensure we load p->on_rq _after_ p->state, otherwise it would
+        * be possible to, falsely, observe p->on_rq == 0 and get stuck
+        * in smp_cond_load_acquire() below.
+        *
+        * sched_ttwu_pending()                 try_to_wake_up()
+        *   [S] p->on_rq = 1;                  [L] P->state
+        *       UNLOCK rq->lock  -----.
+        *                              \
+        *                               +---   RMB
+        * schedule()                   /
+        *       LOCK rq->lock    -----'
+        *       UNLOCK rq->lock
+        *
+        * [task p]
+        *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
+        *
+        * Pairs with the UNLOCK+LOCK on rq->lock from the
+        * last wakeup of our task and the schedule that got our task
+        * current.
+        */
+       smp_rmb();
        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto stat;
 
index ef6c6c3f9d8a862b0355443ce794ff0babf83489..0db7c8a2afe2fb531fe390d78ff9bb435c992077 100644 (file)
@@ -605,12 +605,16 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                ptrace_event(PTRACE_EVENT_SECCOMP, data);
                /*
                 * The delivery of a fatal signal during event
-                * notification may silently skip tracer notification.
-                * Terminating the task now avoids executing a system
-                * call that may not be intended.
+                * notification may silently skip tracer notification,
+                * which could leave us with a potentially unmodified
+                * syscall that the tracer would have liked to have
+                * changed. Since the process is about to die, we just
+                * force the syscall to be skipped and let the signal
+                * kill the process and correctly handle any tracer exit
+                * notifications.
                 */
                if (fatal_signal_pending(current))
-                       do_exit(SIGSYS);
+                       goto skip;
                /* Check if the tracer forced the syscall to be skipped. */
                this_syscall = syscall_get_nr(current, task_pt_regs(current));
                if (this_syscall < 0)
index 204fdc86863d71368ef61faacdb0d5458e78039f..2ec7c00228f3c981568b1dc911b4a91a83148d91 100644 (file)
@@ -908,10 +908,11 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
        ktime_t now, expires;
        int cpu = smp_processor_id();
 
+       now = tick_nohz_start_idle(ts);
+
        if (can_stop_idle_tick(cpu, ts)) {
                int was_stopped = ts->tick_stopped;
 
-               now = tick_nohz_start_idle(ts);
                ts->idle_calls++;
 
                expires = tick_nohz_stop_sched_tick(ts, now, cpu);
index 2307d7c89dac972fae526627ce21ecdd087369de..2e2cca5092318faf62d10ed96a3b7d0a8b9cc129 100644 (file)
@@ -1686,24 +1686,6 @@ config LATENCYTOP
          Enable this option if you want to use the LatencyTOP tool
          to find out which userspace is blocking on what kernel operations.
 
-config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-       bool
-
-config DEBUG_STRICT_USER_COPY_CHECKS
-       bool "Strict user copy size checks"
-       depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-       depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
-       help
-         Enabling this option turns a certain set of sanity checks for user
-         copy operations into compile time failures.
-
-         The copy_from_user() etc checks are there to help test if there
-         are sufficient security checks on the length argument of
-         the copy operation, by having gcc prove that the argument is
-         within bounds.
-
-         If unsure, say N.
-
 source kernel/trace/Kconfig
 
 menu "Runtime Testing"
index cfa68eb269e4b6628221e436f88c3ff96e9c62d9..5dc77a8ec297ec478c003e894af206956a39fb4c 100644 (file)
@@ -24,7 +24,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
         earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
 
-obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_HAS_DMA) += dma-noop.o
index 9e8c7386b3a03e8d418dd09847c9c7f295e06e77..7e3138cfc8c9c72858534b10ed58e4ef4c5e72f7 100644 (file)
@@ -290,26 +290,6 @@ done:
        return wanted - bytes;
 }
 
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-       if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-               return fault_in_pages_readable(buf, bytes);
-       }
-       return 0;
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
 /*
  * Fault in one or more iovecs of the given iov_iter, to a maximum length of
  * bytes.  For each iovec, fault in each page that constitutes the iovec.
@@ -317,7 +297,7 @@ EXPORT_SYMBOL(iov_iter_fault_in_readable);
  * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
  * because it is an invalid address).
  */
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 {
        size_t skip = i->iov_offset;
        const struct iovec *iov;
@@ -334,7 +314,7 @@ int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
        }
        return 0;
 }
-EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
 
 void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
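After this rename, iov_iter_fault_in_readable() is the helper that prefaults every iovec up to the requested length. A rough sketch of the typical caller shape in a buffered-write loop (the surrounding code is hypothetical), faulting user pages in before a section that copies with page faults disabled:

	while (iov_iter_count(i)) {
		size_t bytes = min_t(size_t, iov_iter_count(i), PAGE_SIZE);

		if (iov_iter_fault_in_readable(i, bytes)) {
			status = -EFAULT;   /* user buffer not accessible */
			break;
		}
		/* ... lock the page, copy with page faults disabled, dirty it ... */
	}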
index 5ba520b544d73ff1a62705584d1971aeee6ec2ba..56054e541a0f92fb69d1de4502c4230d8b4f6c3d 100644 (file)
@@ -77,17 +77,18 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
        size = min_t(unsigned int, size, tbl->size >> 1);
 
        if (sizeof(spinlock_t) != 0) {
+               tbl->locks = NULL;
 #ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE &&
                    gfp == GFP_KERNEL)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
-               else
 #endif
                if (gfp != GFP_KERNEL)
                        gfp |= __GFP_NOWARN | __GFP_NORETRY;
 
-               tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-                                          gfp);
+               if (!tbl->locks)
+                       tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+                                                  gfp);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
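The hunk above fixes a dangling-else problem: with CONFIG_NUMA, a successful vmalloc() of the lock array was immediately overwritten (and leaked) by the unconditional kmalloc_array(). The fix makes kmalloc_array() a fallback that only runs while the pointer is still NULL. The general shape, with hypothetical names:

	locks = NULL;
	if (array_is_large && may_sleep)       /* prefer vmalloc for big arrays */
		locks = vmalloc(n * sizeof(*locks));
	if (!locks)                            /* fall back only if still unset */
		locks = kmalloc_array(n, sizeof(*locks), gfp);
	if (!locks)
		return -ENOMEM;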
index 66c5fc8351e84d703373a3572d6afaea84c410da..cac20c5fb304fb09a5803de381c4fd6b0c1b367d 100644 (file)
@@ -143,7 +143,7 @@ static int __init
 test_hash_init(void)
 {
        char buf[SIZE+1];
-       u32 string_or = 0, hash_or[2][33] = { 0 };
+       u32 string_or = 0, hash_or[2][33] = { { 0, } };
        unsigned tests = 0;
        unsigned long long h64 = 0;
        int i, j;
@@ -219,21 +219,27 @@ test_hash_init(void)
        }
 
        /* Issue notices about skipped tests. */
-#ifndef HAVE_ARCH__HASH_32
-       pr_info("__hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH__HASH_32 != 1
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1
        pr_info("__hash_32() is arch-specific; not compared to generic.");
 #endif
-#ifndef HAVE_ARCH_HASH_32
-       pr_info("hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_32 != 1
+#else
+       pr_info("__hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_32
+#if HAVE_ARCH_HASH_32 != 1
        pr_info("hash_32() is arch-specific; not compared to generic.");
 #endif
-#ifndef HAVE_ARCH_HASH_64
-       pr_info("hash_64() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_64 != 1
+#else
+       pr_info("hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_64
+#if HAVE_ARCH_HASH_64 != 1
        pr_info("hash_64() is arch-specific; not compared to generic.");
 #endif
+#else
+       pr_info("hash_64() has no arch implementation to test.");
+#endif
 
        pr_notice("%u tests passed.", tests);
 
diff --git a/lib/usercopy.c b/lib/usercopy.c
deleted file mode 100644 (file)
index 4f5b1dd..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <linux/export.h>
-#include <linux/bug.h>
-#include <linux/uaccess.h>
-
-void copy_from_user_overflow(void)
-{
-       WARN(1, "Buffer overflow detected!\n");
-}
-EXPORT_SYMBOL(copy_from_user_overflow);
index 2db2112aa31ed5506b4aef572d8e8c70477a8aa7..a6abd76baa725d56eb1e9adeb8ee1b6fca86cb80 100644 (file)
@@ -1078,7 +1078,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                goto out;
 
        page = pmd_page(*pmd);
-       VM_BUG_ON_PAGE(!PageHead(page), page);
+       VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_TOUCH)
                touch_pmd(vma, addr, pmd);
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
@@ -1116,7 +1116,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        }
 skip_mlock:
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-       VM_BUG_ON_PAGE(!PageCompound(page), page);
+       VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_GET)
                get_page(page);
 
index d8c4e38fb5f4be1d9748dc77f214c8a285374f8d..2da72a5b6ecc04f87bd9168fc31d613360e40f2e 100644 (file)
@@ -2336,6 +2336,23 @@ out:
        return ret;
 }
 
+/*
+ * Drop the (possibly final) reference to task->mempolicy.  It needs to be
+ * dropped after task->mempolicy is set to NULL so that any allocation done as
+ * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
+ * policy.
+ */
+void mpol_put_task_policy(struct task_struct *task)
+{
+       struct mempolicy *pol;
+
+       task_lock(task);
+       pol = task->mempolicy;
+       task->mempolicy = NULL;
+       task_unlock(task);
+       mpol_put(pol);
+}
+
 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 {
        pr_debug("deleting %lx-l%lx\n", n->start, n->end);
index 3fbe73a6fe4b6869dcd44a45de43928d4c08fe0c..a2214c64ed3cd04dceaed7a579f593852e458df1 100644 (file)
@@ -3137,54 +3137,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        return NULL;
 }
 
-static inline bool
-should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
-                    enum compact_result compact_result,
-                    enum compact_priority *compact_priority,
-                    int compaction_retries)
-{
-       int max_retries = MAX_COMPACT_RETRIES;
-
-       if (!order)
-               return false;
-
-       /*
-        * compaction considers all the zone as desperately out of memory
-        * so it doesn't really make much sense to retry except when the
-        * failure could be caused by insufficient priority
-        */
-       if (compaction_failed(compact_result)) {
-               if (*compact_priority > MIN_COMPACT_PRIORITY) {
-                       (*compact_priority)--;
-                       return true;
-               }
-               return false;
-       }
-
-       /*
-        * make sure the compaction wasn't deferred or didn't bail out early
-        * due to locks contention before we declare that we should give up.
-        * But do not retry if the given zonelist is not suitable for
-        * compaction.
-        */
-       if (compaction_withdrawn(compact_result))
-               return compaction_zonelist_suitable(ac, order, alloc_flags);
-
-       /*
-        * !costly requests are much more important than __GFP_REPEAT
-        * costly ones because they are de facto nofail and invoke OOM
-        * killer to move on while costly can fail and users are ready
-        * to cope with that. 1/4 retries is rather arbitrary but we
-        * would need much more detailed feedback from compaction to
-        * make a better decision.
-        */
-       if (order > PAGE_ALLOC_COSTLY_ORDER)
-               max_retries /= 4;
-       if (compaction_retries <= max_retries)
-               return true;
-
-       return false;
-}
 #else
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
@@ -3195,6 +3147,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        return NULL;
 }
 
+#endif /* CONFIG_COMPACTION */
+
 static inline bool
 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
                     enum compact_result compact_result,
@@ -3221,7 +3175,6 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
        }
        return false;
 }
-#endif /* CONFIG_COMPACTION */
 
 /* Perform direct synchronous page reclaim */
 static int
@@ -4407,7 +4360,7 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
        do {
                zone_type--;
                zone = pgdat->node_zones + zone_type;
-               if (populated_zone(zone)) {
+               if (managed_zone(zone)) {
                        zoneref_set_zone(zone,
                                &zonelist->_zonerefs[nr_zones++]);
                        check_highest_zone(zone_type);
@@ -4645,7 +4598,7 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
                for (j = 0; j < nr_nodes; j++) {
                        node = node_order[j];
                        z = &NODE_DATA(node)->node_zones[zone_type];
-                       if (populated_zone(z)) {
+                       if (managed_zone(z)) {
                                zoneref_set_zone(z,
                                        &zonelist->_zonerefs[pos++]);
                                check_highest_zone(zone_type);
index a3cc3052f830a01d414c7ff90c375988561863e1..089328f2b9209ea3526c6caed032b95adad85d0b 100644 (file)
@@ -134,30 +134,15 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
        return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-                                           bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+                                         struct page *page, bool to_user)
 {
-       struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
        const void *end = ptr + n - 1;
+       struct page *endpage;
        bool is_reserved, is_cma;
 
-       /*
-        * Some architectures (arm64) return true for virt_addr_valid() on
-        * vmalloced addresses. Work around this by checking for vmalloc
-        * first.
-        */
-       if (is_vmalloc_addr(ptr))
-               return NULL;
-
-       if (!virt_addr_valid(ptr))
-               return NULL;
-
-       page = virt_to_head_page(ptr);
-
-       /* Check slab allocator for flags and size. */
-       if (PageSlab(page))
-               return __check_heap_object(ptr, n, page);
-
        /*
         * Sometimes the kernel data regions are not marked Reserved (see
         * check below). And sometimes [_sdata,_edata) does not cover
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
                   ((unsigned long)end & (unsigned long)PAGE_MASK)))
                return NULL;
 
-       /* Allow if start and end are inside the same compound page. */
+       /* Allow if fully inside the same compound (__GFP_COMP) page. */
        endpage = virt_to_head_page(end);
        if (likely(endpage == page))
                return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
        is_reserved = PageReserved(page);
        is_cma = is_migrate_cma_page(page);
        if (!is_reserved && !is_cma)
-               goto reject;
+               return "<spans multiple pages>";
 
        for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
                page = virt_to_head_page(ptr);
                if (is_reserved && !PageReserved(page))
-                       goto reject;
+                       return "<spans Reserved and non-Reserved pages>";
                if (is_cma && !is_migrate_cma_page(page))
-                       goto reject;
+                       return "<spans CMA and non-CMA pages>";
        }
+#endif
 
        return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+                                           bool to_user)
+{
+       struct page *page;
+
+       /*
+        * Some architectures (arm64) return true for virt_addr_valid() on
+        * vmalloced addresses. Work around this by checking for vmalloc
+        * first.
+        */
+       if (is_vmalloc_addr(ptr))
+               return NULL;
+
+       if (!virt_addr_valid(ptr))
+               return NULL;
+
+       page = virt_to_head_page(ptr);
+
+       /* Check slab allocator for flags and size. */
+       if (PageSlab(page))
+               return __check_heap_object(ptr, n, page);
 
-reject:
-       return "<spans multiple pages>";
+       /* Verify object does not incorrectly span multiple pages. */
+       return check_page_span(ptr, n, page, to_user);
 }
 
 /*
index 374d95d0417856b096d902d40ff7cc29d4021b2e..b1e12a1ea9cfcc6ece1690305fc2d44bfdcf5b37 100644 (file)
@@ -1665,7 +1665,7 @@ static bool inactive_reclaimable_pages(struct lruvec *lruvec,
 
        for (zid = sc->reclaim_idx; zid >= 0; zid--) {
                zone = &pgdat->node_zones[zid];
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
@@ -2036,7 +2036,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
                struct zone *zone = &pgdat->node_zones[zid];
                unsigned long inactive_zone, active_zone;
 
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                inactive_zone = zone_page_state(zone,
@@ -2171,7 +2171,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 
                for (z = 0; z < MAX_NR_ZONES; z++) {
                        struct zone *zone = &pgdat->node_zones[z];
-                       if (!populated_zone(zone))
+                       if (!managed_zone(zone))
                                continue;
 
                        total_high_wmark += high_wmark_pages(zone);
@@ -2510,7 +2510,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
        /* If compaction would go ahead or the allocation would succeed, stop */
        for (z = 0; z <= sc->reclaim_idx; z++) {
                struct zone *zone = &pgdat->node_zones[z];
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
@@ -2840,7 +2840,7 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
 
        for (i = 0; i <= ZONE_NORMAL; i++) {
                zone = &pgdat->node_zones[i];
-               if (!populated_zone(zone) ||
+               if (!managed_zone(zone) ||
                    pgdat_reclaimable_pages(pgdat) == 0)
                        continue;
 
@@ -3141,7 +3141,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
        for (i = 0; i <= classzone_idx; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                if (!zone_balanced(zone, order, classzone_idx))
@@ -3169,7 +3169,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
        sc->nr_to_reclaim = 0;
        for (z = 0; z <= sc->reclaim_idx; z++) {
                zone = pgdat->node_zones + z;
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
@@ -3242,7 +3242,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
                if (buffer_heads_over_limit) {
                        for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
                                zone = pgdat->node_zones + i;
-                               if (!populated_zone(zone))
+                               if (!managed_zone(zone))
                                        continue;
 
                                sc.reclaim_idx = i;
@@ -3262,7 +3262,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
                 */
                for (i = classzone_idx; i >= 0; i--) {
                        zone = pgdat->node_zones + i;
-                       if (!populated_zone(zone))
+                       if (!managed_zone(zone))
                                continue;
 
                        if (zone_balanced(zone, sc.order, classzone_idx))
@@ -3508,7 +3508,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        pg_data_t *pgdat;
        int z;
 
-       if (!populated_zone(zone))
+       if (!managed_zone(zone))
                return;
 
        if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
@@ -3522,7 +3522,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        /* Only wake kswapd if all zones are unbalanced */
        for (z = 0; z <= classzone_idx; z++) {
                zone = pgdat->node_zones + z;
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                if (zone_balanced(zone, order, classzone_idx))
index ece45e0683fd85af7d5b9dcffdc8b92d52a20460..0b5f729d08d2b41abcfcdb1d7322d3c1ce01fa89 100644 (file)
@@ -250,7 +250,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
        skb_free_datagram(sk, skb);
 
-       if (msg->msg_flags & MSG_TRUNC)
+       if (flags & MSG_TRUNC)
                copied = skblen;
 
        return err ? : copied;
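The distinction behind this fix: MSG_TRUNC in the recvmsg() flags argument is a caller request ("report the real datagram length even if my buffer is smaller"), whereas MSG_TRUNC in msg->msg_flags is kernel output indicating that truncation occurred; the code was testing the latter where it meant the former. A hypothetical user-space caller of the request form (<sys/socket.h> assumed):

	char buf[64];
	/* returns the full datagram length without consuming the datagram */
	ssize_t real_len = recv(fd, buf, sizeof(buf), MSG_PEEK | MSG_TRUNC);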
index c045b3c54768e478f49f600fa8ffdae4a59456fc..b0e23dfc5c3402654f2586b4e29dac9966c1c841 100644 (file)
@@ -262,6 +262,8 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                break;
        }
 
+       kfree_skb(hdev->req_skb);
+       hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;
 
        BT_DBG("%s end: err %d", hdev->name, err);
index 6ef8a01a9ad44e90f37267cd2313a75de003e601..96f04b7b9556d020a64402179eecdf79f547e162 100644 (file)
@@ -1091,7 +1091,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 
        skb_free_datagram(sk, skb);
 
-       if (msg->msg_flags & MSG_TRUNC)
+       if (flags & MSG_TRUNC)
                copied = skblen;
 
        return err ? : copied;
index 54ceb1f2cc9a2a6026dec5bda6d9006191dcf053..d4cad29b033fc6d8601913013f291ab287648928 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/crc16.h>
+#include <linux/filter.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -5835,6 +5836,9 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
                if (chan->sdu)
                        break;
 
+               if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
+                       break;
+
                chan->sdu_len = get_unaligned_le16(skb->data);
                skb_pull(skb, L2CAP_SDULEN_SIZE);
 
@@ -6610,6 +6614,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
                goto drop;
        }
 
+       if ((chan->mode == L2CAP_MODE_ERTM ||
+            chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
+               goto drop;
+
        if (!control->sframe) {
                int err;
 
index 1842141baedb691c6bfb35b79090ed42223f1db4..a8ba752732c9859b060a3a91887fe1c9d5da0a54 100644 (file)
@@ -1019,7 +1019,7 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
                goto done;
 
        if (pi->rx_busy_skb) {
-               if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+               if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
                        pi->rx_busy_skb = NULL;
                else
                        goto done;
@@ -1270,7 +1270,17 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
                goto done;
        }
 
-       err = sock_queue_rcv_skb(sk, skb);
+       if (chan->mode != L2CAP_MODE_ERTM &&
+           chan->mode != L2CAP_MODE_STREAMING) {
+               /* Even if no filter is attached, we could potentially
+                * get errors from security modules, etc.
+                */
+               err = sk_filter(sk, skb);
+               if (err)
+                       goto done;
+       }
+
+       err = __sock_queue_rcv_skb(sk, skb);
 
        /* For ERTM, handle one skb that doesn't fit into the recv
         * buffer.  This is important to do because the data frames
index 8e486203d133a7f9158f94fda6dbd03ace32c52b..abe11f085479c62d5ac969d8d2d13aa5a6f762db 100644 (file)
@@ -80,13 +80,10 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
 
        BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
 
-       if (dev->flags & IFF_NOARP)
+       if ((dev->flags & IFF_NOARP) ||
+           !pskb_may_pull(skb, arp_hdr_len(dev)))
                return;
 
-       if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
-               dev->stats.tx_dropped++;
-               return;
-       }
        parp = arp_hdr(skb);
 
        if (parp->ar_pro != htons(ETH_P_IP) ||
index a5423a1eec057a3b777e43a449dfff46c1f496fa..c5fea9393946f64af336873645db82d09599d442 100644 (file)
@@ -1138,7 +1138,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                } else {
                        err = br_ip6_multicast_add_group(br, port,
                                                         &grec->grec_mca, vid);
-                       if (!err)
+                       if (err)
                                break;
                }
        }
index cceac5bb658f2245bad77981afb7b8109d2ad0f2..0833c251aef7963dc471e4385282c491b2fb2340 100644 (file)
@@ -368,6 +368,8 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
 
        match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
        if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+               if (!IS_ERR(match))
+                       module_put(match->me);
                request_module("ebt_%s", m->u.name);
                match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
        }
index 4b901d9f2e7cffe217881d4c5dc40d805ba87b9e..ad47a921b70152f7a802284a03ce690b03390d1c 100644 (file)
@@ -86,6 +86,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
        .init           = nft_meta_set_init,
        .destroy        = nft_meta_set_destroy,
        .dump           = nft_meta_set_dump,
+       .validate       = nft_meta_set_validate,
 };
 
 static const struct nft_expr_ops *
index dd6ce598de897363bbb7eea252a22cbc2e846dc4..ea6312057a7136806fc0c17d018c8909d4d130ba 100644 (file)
@@ -3974,6 +3974,22 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
        return skb;
 }
 
+/**
+ *     netdev_is_rx_handler_busy - check if receive handler is registered
+ *     @dev: device to check
+ *
+ *     Check if a receive handler is already registered for a given device.
+ *     Return true if there is one.
+ *
+ *     The caller must hold the rtnl_mutex.
+ */
+bool netdev_is_rx_handler_busy(struct net_device *dev)
+{
+       ASSERT_RTNL();
+       return dev && rtnl_dereference(dev->rx_handler);
+}
+EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
+
 /**
  *     netdev_rx_handler_register - register receive handler
  *     @dev: device to register a handler for
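A sketch of how a hypothetical caller might use the new helper to probe for a conflicting handler before registering its own (my_rx_handler and my_priv are illustrative; rtnl_lock() must be held, as the kerneldoc states):

	int err;

	rtnl_lock();
	if (netdev_is_rx_handler_busy(dev))
		err = -EBUSY;
	else
		err = netdev_rx_handler_register(dev, my_rx_handler, my_priv);
	rtnl_unlock();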
index 61ad43f61c5edbffa48f4983d8bf1a7472a66cdc..52742a02814fddc6da9dd23c90af50c363b93b11 100644 (file)
@@ -680,11 +680,13 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
 void __skb_get_hash(struct sk_buff *skb)
 {
        struct flow_keys keys;
+       u32 hash;
 
        __flow_hash_secret_init();
 
-       __skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
-                         flow_keys_have_l4(&keys));
+       hash = ___skb_get_hash(skb, &keys, hashrnd);
+
+       __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
 }
 EXPORT_SYMBOL(__skb_get_hash);
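The change above looks cosmetic but is not: C leaves the evaluation order of function arguments unspecified, so in the old call flow_keys_have_l4(&keys) could be evaluated before ___skb_get_hash() had populated keys. Computing the hash into a local removes that hazard. In general terms (hypothetical names):

	/* unsafe: inspect() may run before fill() populates k */
	consume(fill(&k), inspect(&k));

	/* safe: the local variable sequences fill() before inspect() */
	u32 v = fill(&k);
	consume(v, inspect(&k));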
 
index 415e117967c775f805b43cd634bdf9a55f064e2f..062a67ca9a212f6e8a966e5419c4a11867e601d9 100644 (file)
@@ -2232,7 +2232,7 @@ static struct devinet_sysctl_table {
 };
 
 static int __devinet_sysctl_register(struct net *net, char *dev_name,
-                                       struct ipv4_devconf *p)
+                                    int ifindex, struct ipv4_devconf *p)
 {
        int i;
        struct devinet_sysctl_table *t;
@@ -2255,6 +2255,8 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
                goto free;
 
        p->sysctl = t;
+
+       inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
        return 0;
 
 free:
@@ -2286,7 +2288,7 @@ static int devinet_sysctl_register(struct in_device *idev)
        if (err)
                return err;
        err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
-                                       &idev->cnf);
+                                       idev->dev->ifindex, &idev->cnf);
        if (err)
                neigh_sysctl_unregister(idev->arp_parms);
        return err;
@@ -2347,11 +2349,12 @@ static __net_init int devinet_init_net(struct net *net)
        }
 
 #ifdef CONFIG_SYSCTL
-       err = __devinet_sysctl_register(net, "all", all);
+       err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
        if (err < 0)
                goto err_reg_all;
 
-       err = __devinet_sysctl_register(net, "default", dflt);
+       err = __devinet_sysctl_register(net, "default",
+                                       NETCONFA_IFINDEX_DEFAULT, dflt);
        if (err < 0)
                goto err_reg_dflt;
 
index ef2ebeb89d0fb0cb9cb5da4aea13183097baec8c..1b25daf8c7f12ada5a6ea7178d95c6c086f9098d 100644 (file)
@@ -509,6 +509,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
                if (!dev)
                        return -ENODEV;
                cfg->fc_oif = dev->ifindex;
+               cfg->fc_table = l3mdev_fib_table(dev);
                if (colon) {
                        struct in_ifaddr *ifa;
                        struct in_device *in_dev = __in_dev_get_rtnl(dev);
@@ -1027,7 +1028,7 @@ no_promotions:
                         * First of all, we scan fib_info list searching
                         * for stray nexthop entries, then ignite fib_flush.
                         */
-                       if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
+                       if (fib_sync_down_addr(dev, ifa->ifa_local))
                                fib_flush(dev_net(dev));
                }
        }
index 539fa264e67d71148364c9fc0e694c78fd35e69b..e9f56225e53fde93a5470ee32c26b21b99ca4655 100644 (file)
@@ -1057,6 +1057,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        fi->fib_priority = cfg->fc_priority;
        fi->fib_prefsrc = cfg->fc_prefsrc;
        fi->fib_type = cfg->fc_type;
+       fi->fib_tb_id = cfg->fc_table;
 
        fi->fib_nhs = nhs;
        change_nexthops(fi) {
@@ -1337,18 +1338,21 @@ nla_put_failure:
  *   referring to it.
  * - device went down -> we must shutdown all nexthops going via it.
  */
-int fib_sync_down_addr(struct net *net, __be32 local)
+int fib_sync_down_addr(struct net_device *dev, __be32 local)
 {
        int ret = 0;
        unsigned int hash = fib_laddr_hashfn(local);
        struct hlist_head *head = &fib_info_laddrhash[hash];
+       struct net *net = dev_net(dev);
+       int tb_id = l3mdev_fib_table(dev);
        struct fib_info *fi;
 
        if (!fib_info_laddrhash || local == 0)
                return 0;
 
        hlist_for_each_entry(fi, head, fib_lhash) {
-               if (!net_eq(fi->fib_net, net))
+               if (!net_eq(fi->fib_net, net) ||
+                   fi->fib_tb_id != tb_id)
                        continue;
                if (fi->fib_prefsrc == local) {
                        fi->fib_flags |= RTNH_F_DEAD;
index febca0f1008cb898014c95e332c8398b8a6bcd5d..e2ffc2a5c7db6ff3eea20d2083c2013fdf0dcc23 100644 (file)
@@ -249,7 +249,7 @@ static inline unsigned long get_index(t_key key, struct key_vector *kv)
  * index into the parent's child array. That is, they will be used to find
  * 'n' among tp's children.
  *
- * The bits from (n->pos + n->bits) to (tn->pos - 1) - "S" - are skipped bits
+ * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
  * for the node n.
  *
  * All the bits we have seen so far are significant to the node n. The rest
@@ -258,7 +258,7 @@ static inline unsigned long get_index(t_key key, struct key_vector *kv)
  * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
  * n's child array, and will of course be different for each child.
  *
- * The rest of the bits, from 0 to (n->pos + n->bits), are completely unknown
+ * The rest of the bits, from 0 to (n->pos - 1) - "u" - are completely unknown
  * at this point.
  */
 
index 9d847c3025516f3225c60be7c492adff4d254e5b..0f227db0e9ac6637c444fc8060b2e8f067653c3d 100644 (file)
@@ -73,9 +73,11 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
        skb_dst_set(skb, &rt->dst);
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
-       if (skb_iif && proto == IPPROTO_UDP) {
-               /* Arrived from an ingress interface and got udp encapuslated.
-                * The encapsulated network segment length may exceed dst mtu.
+       if (skb_iif && !(df & htons(IP_DF))) {
+               /* Arrived from an ingress interface, got encapsulated, with
+                * fragmentation of encapsulating frames allowed.
+                * If skb is gso, the resulting encapsulated network segments
+                * may exceed dst mtu.
                 * Allow IP Fragmentation of segments.
                 */
                IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
index c24f41c816b33f22b7e31ffb0b53e963b296f8c1..2c2553b9026cc652b6917529f71eba487c1ce711 100644 (file)
@@ -46,6 +46,7 @@ static const struct nft_expr_ops nft_reject_ipv4_ops = {
        .eval           = nft_reject_ipv4_eval,
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
+       .validate       = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
index 032a96d78c99deda3b3298a305298f92776e2500..ffbb218de52057afbc55f95386200226a1c60517 100644 (file)
@@ -3193,7 +3193,6 @@ int tcp_abort(struct sock *sk, int err)
                        local_bh_enable();
                        return 0;
                }
-               sock_gen_put(sk);
                return -EOPNOTSUPP;
        }
 
@@ -3222,7 +3221,6 @@ int tcp_abort(struct sock *sk, int err)
        bh_unlock_sock(sk);
        local_bh_enable();
        release_sock(sk);
-       sock_put(sk);
        return 0;
 }
 EXPORT_SYMBOL_GPL(tcp_abort);
index 4d610934fb391c111d822a4d0544334a7b4b858f..a748c74aa8b781626d7a7805eef0f50da8e11328 100644 (file)
@@ -54,11 +54,16 @@ static int tcp_diag_destroy(struct sk_buff *in_skb,
 {
        struct net *net = sock_net(in_skb->sk);
        struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req);
+       int err;
 
        if (IS_ERR(sk))
                return PTR_ERR(sk);
 
-       return sock_diag_destroy(sk, ECONNABORTED);
+       err = sock_diag_destroy(sk, ECONNABORTED);
+
+       sock_gen_put(sk);
+
+       return err;
 }
 #endif
 
index 54d9f9b0120fe26828f6cc522711f8df8b2bcfb3..4e777a3243f94457d9928e3967bb83947da563f6 100644 (file)
@@ -150,6 +150,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
+       sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);
 
        TCP_SKB_CB(skb)->seq++;
@@ -226,6 +227,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
        tcp_fastopen_add_skb(child, skb);
 
        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
+       tp->rcv_wup = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into listener accept queue.
         */
index 32b048e524d6773538918eca175b3f422f9c2aa7..7158d4f8dae4fe2482691e071a1f7751468f6b9a 100644 (file)
@@ -814,8 +814,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
        u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
                                             tcp_sk(sk)->snd_nxt;
 
+       /* RFC 7323 2.3
+        * The window field (SEG.WND) of every outgoing segment, with the
+        * exception of <SYN> segments, MUST be right-shifted by
+        * Rcv.Wind.Shift bits:
+        */
        tcp_v4_send_ack(sock_net(sk), skb, seq,
-                       tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
+                       tcp_rsk(req)->rcv_nxt,
+                       req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp,
                        req->ts_recent,
                        0,
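A worked example with hypothetical numbers to make the RFC 7323 requirement concrete: with req->rsk_rcv_wnd = 65536 and a negotiated rcv_wscale of 7, the on-wire window field must carry 65536 >> 7 = 512; sending the unshifted value would overstate the offered window by a factor of 2^7 = 128.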
index 028eb046ea40d510908040023b46b46469477505..9c5fc973267fe542a41f42c249e564dc8f5a0624 100644 (file)
@@ -76,7 +76,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
 
        else if (!yeah->doing_reno_now) {
index e61f7cd65d08a478f4b4f4d60841688b89bb1f2d..5fdcb8d108d468ab33e8f29a21efdd643d27a49f 100644 (file)
@@ -1182,13 +1182,13 @@ out:
  *     @sk: socket
  *
  *     Drops all bad checksum frames, until a valid one is found.
- *     Returns the length of found skb, or 0 if none is found.
+ *     Returns the length of found skb, or -1 if none is found.
  */
-static unsigned int first_packet_length(struct sock *sk)
+static int first_packet_length(struct sock *sk)
 {
        struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
        struct sk_buff *skb;
-       unsigned int res;
+       int res;
 
        __skb_queue_head_init(&list_kill);
 
@@ -1203,7 +1203,7 @@ static unsigned int first_packet_length(struct sock *sk)
                __skb_unlink(skb, rcvq);
                __skb_queue_tail(&list_kill, skb);
        }
-       res = skb ? skb->len : 0;
+       res = skb ? skb->len : -1;
        spin_unlock_bh(&rcvq->lock);
 
        if (!skb_queue_empty(&list_kill)) {
@@ -1232,7 +1232,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
        case SIOCINQ:
        {
-               unsigned int amount = first_packet_length(sk);
+               int amount = max_t(int, 0, first_packet_length(sk));
 
                return put_user(amount, (int __user *)arg);
        }
@@ -2184,7 +2184,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
        /* Check for false positives due to checksum errors */
        if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
-           !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
+           !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
                mask &= ~(POLLIN | POLLRDNORM);
 
        return mask;
@@ -2216,7 +2216,6 @@ struct proto udp_prot = {
        .sysctl_wmem       = &sysctl_udp_wmem_min,
        .sysctl_rmem       = &sysctl_udp_rmem_min,
        .obj_size          = sizeof(struct udp_sock),
-       .slab_flags        = SLAB_DESTROY_BY_RCU,
        .h.udp_table       = &udp_table,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udp_setsockopt,
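On the first_packet_length() change earlier in this file: a zero-length UDP datagram is legal, so returning 0 was ambiguous between "queue empty" and "next datagram is empty". Returning -1 for emptiness removes the ambiguity; udp_poll() now tests for -1, and SIOCINQ clamps the value so user space never sees a negative count:

	int amount = max_t(int, 0, first_packet_length(sk));   /* -1 (empty) reported as 0 */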
index 3b3efbda48e13941b35388238508706844c043aa..2eea073e27efa13a86d6c6107426435ae7ddb18d 100644 (file)
@@ -55,7 +55,6 @@ struct proto  udplite_prot = {
        .unhash            = udp_lib_unhash,
        .get_port          = udp_v4_get_port,
        .obj_size          = sizeof(struct udp_sock),
-       .slab_flags        = SLAB_DESTROY_BY_RCU,
        .h.udp_table       = &udplite_table,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udp_setsockopt,
index b644a23c3db06e851b239e694f22ed7a869653a3..41f5b504a782c392a20594325706c199136ecaa5 100644 (file)
@@ -29,7 +29,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
        memset(fl4, 0, sizeof(*fl4));
        fl4->daddr = daddr->a4;
        fl4->flowi4_tos = tos;
-       fl4->flowi4_oif = oif;
+       fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
        if (saddr)
                fl4->saddr = saddr->a4;
 
index df8425fcbc2cab843c9af24272d45fc95d4040b7..2f1f5d439788820aa2b75be8a26addb757ccf50c 100644 (file)
@@ -778,7 +778,14 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
        }
 
        if (p == &net->ipv6.devconf_all->forwarding) {
+               int old_dflt = net->ipv6.devconf_dflt->forwarding;
+
                net->ipv6.devconf_dflt->forwarding = newf;
+               if ((!newf) ^ (!old_dflt))
+                       inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+                                                    NETCONFA_IFINDEX_DEFAULT,
+                                                    net->ipv6.devconf_dflt);
+
                addrconf_forward_change(net, newf);
                if ((!newf) ^ (!old))
                        inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
@@ -1872,7 +1879,6 @@ static int addrconf_dad_end(struct inet6_ifaddr *ifp)
 
 void addrconf_dad_failure(struct inet6_ifaddr *ifp)
 {
-       struct in6_addr addr;
        struct inet6_dev *idev = ifp->idev;
        struct net *net = dev_net(ifp->idev->dev);
 
@@ -1934,18 +1940,6 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
                in6_ifa_put(ifp2);
 lock_errdad:
                spin_lock_bh(&ifp->lock);
-       } else if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
-               addr.s6_addr32[0] = htonl(0xfe800000);
-               addr.s6_addr32[1] = 0;
-
-               if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
-                   ipv6_addr_equal(&ifp->addr, &addr)) {
-                       /* DAD failed for link-local based on MAC address */
-                       idev->cnf.disable_ipv6 = 1;
-
-                       pr_info("%s: IPv6 being disabled!\n",
-                               ifp->idev->dev->name);
-               }
        }
 
 errdad:
@@ -1954,6 +1948,7 @@ errdad:
        spin_unlock_bh(&ifp->lock);
 
        addrconf_mod_dad_work(ifp, 0);
+       in6_ifa_put(ifp);
 }
 
 /* Join to solicited addr multicast group.
@@ -3821,6 +3816,7 @@ static void addrconf_dad_work(struct work_struct *w)
                                                dad_work);
        struct inet6_dev *idev = ifp->idev;
        struct in6_addr mcaddr;
+       bool disable_ipv6 = false;
 
        enum {
                DAD_PROCESS,
@@ -3837,6 +3833,24 @@ static void addrconf_dad_work(struct work_struct *w)
        } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
                action = DAD_ABORT;
                ifp->state = INET6_IFADDR_STATE_POSTDAD;
+
+               if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
+                   !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
+                       struct in6_addr addr;
+
+                       addr.s6_addr32[0] = htonl(0xfe800000);
+                       addr.s6_addr32[1] = 0;
+
+                       if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
+                           ipv6_addr_equal(&ifp->addr, &addr)) {
+                               /* DAD failed for link-local based on MAC */
+                               idev->cnf.disable_ipv6 = 1;
+
+                               pr_info("%s: IPv6 being disabled!\n",
+                                       ifp->idev->dev->name);
+                               disable_ipv6 = true;
+                       }
+               }
        }
        spin_unlock_bh(&ifp->lock);
 
@@ -3844,7 +3858,10 @@ static void addrconf_dad_work(struct work_struct *w)
                addrconf_dad_begin(ifp);
                goto out;
        } else if (action == DAD_ABORT) {
+               in6_ifa_hold(ifp);
                addrconf_dad_stop(ifp, 1);
+               if (disable_ipv6)
+                       addrconf_ifdown(idev->dev, 0);
                goto out;
        }
 
@@ -6017,7 +6034,7 @@ static const struct ctl_table addrconf_sysctl[] = {
 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
                struct inet6_dev *idev, struct ipv6_devconf *p)
 {
-       int i;
+       int i, ifindex;
        struct ctl_table *table;
        char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
 
@@ -6037,6 +6054,13 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
        if (!p->sysctl_header)
                goto free;
 
+       if (!strcmp(dev_name, "all"))
+               ifindex = NETCONFA_IFINDEX_ALL;
+       else if (!strcmp(dev_name, "default"))
+               ifindex = NETCONFA_IFINDEX_DEFAULT;
+       else
+               ifindex = idev->dev->ifindex;
+       inet6_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
        return 0;
 
 free:
index 7b0481e3738f20b672cac77a88617adbe8ecfbbf..888543debe4eb5e91502bdd6b0e7315dd0340ac2 100644 (file)
@@ -1174,6 +1174,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPIP;
 
        dsfield = ipv4_get_dsfield(iph);
 
@@ -1233,6 +1234,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPV6;
 
        dsfield = ipv6_get_dsfield(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
index 533cd5719c594e7664c162b59037bb43a10c717c..92bda9908bb9a354e701fab450549b43e2f22bfa 100644 (file)
@@ -47,6 +47,7 @@ static const struct nft_expr_ops nft_reject_ipv6_ops = {
        .eval           = nft_reject_ipv6_eval,
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
+       .validate       = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
index 0900352c924c163b4e0d945a5a30699628b5abfb..0e983b694ee805dc662a49ae5f6c9438b5ed931d 100644 (file)
@@ -126,8 +126,10 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        rt = (struct rt6_info *) dst;
 
        np = inet6_sk(sk);
-       if (!np)
-               return -EBADF;
+       if (!np) {
+               err = -EBADF;
+               goto dst_err_out;
+       }
 
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
                fl6.flowi6_oif = np->mcast_oif;
@@ -163,6 +165,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        }
        release_sock(sk);
 
+dst_err_out:
+       dst_release(dst);
+
        if (err)
                return err;
 
index 33df8b8575cceb6f4bf50d9a50ab5a6afa79cd86..94f4f89d73e791ba2ae7bdc7e7ac5bd7bc66a8d4 100644 (file)
@@ -944,9 +944,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
+       /* RFC 7323 2.3
+        * The window field (SEG.WND) of every outgoing segment, with the
+        * exception of <SYN> segments, MUST be right-shifted by
+        * Rcv.Wind.Shift bits:
+        */
        tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-                       tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
+                       tcp_rsk(req)->rcv_nxt,
+                       req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
                        0, 0);
index 81e2f98b958d4d42d137a4ae2d71c4c7217c71d6..19ac3a1c308dbfacf136e262dbab27ad280cfdcd 100644 (file)
@@ -1460,7 +1460,6 @@ struct proto udpv6_prot = {
        .sysctl_wmem       = &sysctl_udp_wmem_min,
        .sysctl_rmem       = &sysctl_udp_rmem_min,
        .obj_size          = sizeof(struct udp6_sock),
-       .slab_flags        = SLAB_DESTROY_BY_RCU,
        .h.udp_table       = &udp_table,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udpv6_setsockopt,
index 9cf097e206e931c6e3c184f22b8adcabedc4c03a..fd6ef414899bea824ce26c2e738f4e96aa8af464 100644 (file)
@@ -50,7 +50,6 @@ struct proto udplitev6_prot = {
        .unhash            = udp_lib_unhash,
        .get_port          = udp_v6_get_port,
        .obj_size          = sizeof(struct udp6_sock),
-       .slab_flags        = SLAB_DESTROY_BY_RCU,
        .h.udp_table       = &udplite_table,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udpv6_setsockopt,
index 0eaab1fa6be5751734d98d900ae4d296ca12651e..00a2d40677d6b4a7951045aabe88f9405c0e8213 100644 (file)
@@ -23,6 +23,7 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
 
 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
        return xfrm_input(skb, nexthdr, spi, 0);
index 6cc97003e4a9547462840c113fcf11be4bb92ed3..70a86adad875b66d3643ecfb073ea7681a53908d 100644 (file)
@@ -36,7 +36,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
        int err;
 
        memset(&fl6, 0, sizeof(fl6));
-       fl6.flowi6_oif = oif;
+       fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
        fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
        memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
        if (saddr)
index cb39e05b166cf5eaa0729b775f3ee0b8a140398a..411693288648dc3eb27b0b2a0c542be92762ad8a 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/socket.h>
 #include <linux/uaccess.h>
 #include <linux/workqueue.h>
+#include <linux/syscalls.h>
 #include <net/kcm.h>
 #include <net/netns/generic.h>
 #include <net/sock.h>
@@ -2029,7 +2030,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        if (copy_to_user((void __user *)arg, &info,
                                         sizeof(info))) {
                                err = -EFAULT;
-                               sock_release(newsock);
+                               sys_close(info.fd);
                        }
                }
 
index 1e40dacaa137f00df3df79c0faf4eb4de65e86c2..a2ed3bda4ddcd70071bc8ac87120e5591d9dd681 100644 (file)
@@ -1855,6 +1855,9 @@ static __net_exit void l2tp_exit_net(struct net *net)
                (void)l2tp_tunnel_delete(tunnel);
        }
        rcu_read_unlock_bh();
+
+       flush_workqueue(l2tp_wq);
+       rcu_barrier();
 }
 
 static struct pernet_operations l2tp_net_ops = {
index d9560aa2dba38d87561548b5ff8fae897e034810..232cb92033e8225f76d72deb5af5ef93c4d6187f 100644 (file)
@@ -856,7 +856,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
        error = -ENOTCONN;
        if (sk == NULL)
                goto end;
-       if (sk->sk_state != PPPOX_CONNECTED)
+       if (!(sk->sk_state & PPPOX_CONNECTED))
                goto end;
 
        error = -EBADF;
index b5d28f14b9cff950e6896b7fb6df710d5a3b7c20..afca7d103684bbc8c953ae13bb0345934d8f5f35 100644 (file)
@@ -333,10 +333,11 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
        if (!uc.center_freq1)
                return;
 
-       /* proceed to downgrade the chandef until usable or the same */
+       /* proceed to downgrade the chandef until usable or the same as AP BW */
        while (uc.width > max_width ||
-              !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
-                                             sdata->wdev.iftype))
+              (uc.width > sta->tdls_chandef.width &&
+               !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
+                                              sdata->wdev.iftype)))
                ieee80211_chandef_downgrade(&uc);
 
        if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
index 958a1455ca7f2c1718e38730479876ffa6477779..9f267c3ffb39856891d72f42da31cbfa44f5048d 100644 (file)
@@ -205,6 +205,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;
+       struct net *net = seq_file_net(s);
        int ret = 0;
 
        NF_CT_ASSERT(ct);
@@ -215,6 +216,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
        if (NF_CT_DIRECTION(hash))
                goto release;
 
+       if (!net_eq(nf_ct_net(ct), net))
+               goto release;
+
        l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
        NF_CT_ASSERT(l3proto);
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
index 5eefe4a355c640cf85c8328740a877c6909880ba..75d696f11045779bd08503bb49ec954abc63eeb4 100644 (file)
@@ -30,7 +30,6 @@ nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
        if (!iph)
                return;
 
-       iph = ip_hdr(skb);
        if (iph->ihl < 5 || iph->version != 4)
                return;
 
index 1b4de4bd695865c9bb9b2ca70b8b309d902ccc10..d44d89b561275e25bb31fe2b0ed198d13995533c 100644 (file)
@@ -326,14 +326,14 @@ static int nfnl_acct_try_del(struct nf_acct *cur)
 {
        int ret = 0;
 
-       /* we want to avoid races with nfnl_acct_find_get. */
-       if (atomic_dec_and_test(&cur->refcnt)) {
+       /* We want to avoid races with nfnl_acct_put. So only when the current
+        * refcnt is 1, we decrease it to 0.
+        */
+       if (atomic_cmpxchg(&cur->refcnt, 1, 0) == 1) {
                /* We are protected by nfnl mutex. */
                list_del_rcu(&cur->head);
                kfree_rcu(cur, rcu_head);
        } else {
-               /* still in use, restore reference counter. */
-               atomic_inc(&cur->refcnt);
                ret = -EBUSY;
        }
        return ret;
@@ -343,12 +343,12 @@ static int nfnl_acct_del(struct net *net, struct sock *nfnl,
                         struct sk_buff *skb, const struct nlmsghdr *nlh,
                         const struct nlattr * const tb[])
 {
-       char *acct_name;
-       struct nf_acct *cur;
+       struct nf_acct *cur, *tmp;
        int ret = -ENOENT;
+       char *acct_name;
 
        if (!tb[NFACCT_NAME]) {
-               list_for_each_entry(cur, &net->nfnl_acct_list, head)
+               list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head)
                        nfnl_acct_try_del(cur);
 
                return 0;
@@ -443,7 +443,7 @@ void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_update);
 
-static void nfnl_overquota_report(struct nf_acct *nfacct)
+static void nfnl_overquota_report(struct net *net, struct nf_acct *nfacct)
 {
        int ret;
        struct sk_buff *skb;
@@ -458,11 +458,12 @@ static void nfnl_overquota_report(struct nf_acct *nfacct)
                kfree_skb(skb);
                return;
        }
-       netlink_broadcast(init_net.nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
+       netlink_broadcast(net->nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
                          GFP_ATOMIC);
 }
 
-int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
+int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
+                       struct nf_acct *nfacct)
 {
        u64 now;
        u64 *quota;
@@ -480,7 +481,7 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
 
        if (now >= *quota &&
            !test_and_set_bit(NFACCT_OVERQUOTA_BIT, &nfacct->flags)) {
-               nfnl_overquota_report(nfacct);
+               nfnl_overquota_report(net, nfacct);
        }
 
        return ret;
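The atomic_cmpxchg() above implements a "delete only if we hold the last reference" rule: only the 1 -> 0 transition may unlink and free, so a concurrent find_get/put pair can never resurrect or double-free the object. Stripped to its essentials (hypothetical struct and names):

	static int try_del(struct obj *o)
	{
		if (atomic_cmpxchg(&o->refcnt, 1, 0) != 1)
			return -EBUSY;          /* still referenced elsewhere */
		list_del_rcu(&o->head);
		kfree_rcu(o, rcu_head);
		return 0;
	}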
index 4cdcd969b64c8039ff07ba6d2d3e87a0e2c23564..139e0867e56e9e606942c98e75148eb17b2ec7eb 100644 (file)
@@ -98,31 +98,28 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
                break;
        }
 
-       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
-
-       /* This protocol is not supportted, skip. */
-       if (l4proto->l4proto != l4num) {
-               ret = -EOPNOTSUPP;
-               goto err_proto_put;
-       }
-
        if (matching) {
                if (nlh->nlmsg_flags & NLM_F_REPLACE) {
                        /* You cannot replace one timeout policy by another of
                         * different kind, sorry.
                         */
                        if (matching->l3num != l3num ||
-                           matching->l4proto->l4proto != l4num) {
-                               ret = -EINVAL;
-                               goto err_proto_put;
-                       }
-
-                       ret = ctnl_timeout_parse_policy(&matching->data,
-                                                       l4proto, net,
-                                                       cda[CTA_TIMEOUT_DATA]);
-                       return ret;
+                           matching->l4proto->l4proto != l4num)
+                               return -EINVAL;
+
+                       return ctnl_timeout_parse_policy(&matching->data,
+                                                        matching->l4proto, net,
+                                                        cda[CTA_TIMEOUT_DATA]);
                }
-               ret = -EBUSY;
+
+               return -EBUSY;
+       }
+
+       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+       /* This protocol is not supported, skip. */
+       if (l4proto->l4proto != l4num) {
+               ret = -EOPNOTSUPP;
                goto err_proto_put;
        }
 
@@ -305,7 +302,16 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
        const struct hlist_nulls_node *nn;
        unsigned int last_hsize;
        spinlock_t *lock;
-       int i;
+       int i, cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+               spin_lock_bh(&pcpu->lock);
+               hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
+                       untimeout(h, timeout);
+               spin_unlock_bh(&pcpu->lock);
+       }
 
        local_bh_disable();
 restart:
@@ -330,16 +336,16 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
 {
        int ret = 0;
 
-       /* we want to avoid races with nf_ct_timeout_find_get. */
-       if (atomic_dec_and_test(&timeout->refcnt)) {
+       /* We want to avoid races with ctnl_timeout_put. So only when the
+        * current refcnt is 1, we decrease it to 0.
+        */
+       if (atomic_cmpxchg(&timeout->refcnt, 1, 0) == 1) {
                /* We are protected by nfnl mutex. */
                list_del_rcu(&timeout->head);
                nf_ct_l4proto_put(timeout->l4proto);
                ctnl_untimeout(net, timeout);
                kfree_rcu(timeout, rcu_head);
        } else {
-               /* still in use, restore reference counter. */
-               atomic_inc(&timeout->refcnt);
                ret = -EBUSY;
        }
        return ret;
@@ -350,12 +356,13 @@ static int cttimeout_del_timeout(struct net *net, struct sock *ctnl,
                                 const struct nlmsghdr *nlh,
                                 const struct nlattr * const cda[])
 {
-       struct ctnl_timeout *cur;
+       struct ctnl_timeout *cur, *tmp;
        int ret = -ENOENT;
        char *name;
 
        if (!cda[CTA_TIMEOUT_NAME]) {
-               list_for_each_entry(cur, &net->nfct_timeout_list, head)
+               list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list,
+                                        head)
                        ctnl_timeout_try_del(net, cur);
 
                return 0;
@@ -543,7 +550,9 @@ err:
 
 static void ctnl_timeout_put(struct ctnl_timeout *timeout)
 {
-       atomic_dec(&timeout->refcnt);
+       if (atomic_dec_and_test(&timeout->refcnt))
+               kfree_rcu(timeout, rcu_head);
+
        module_put(THIS_MODULE);
 }
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
@@ -591,7 +600,9 @@ static void __net_exit cttimeout_net_exit(struct net *net)
        list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) {
                list_del_rcu(&cur->head);
                nf_ct_l4proto_put(cur->l4proto);
-               kfree_rcu(cur, rcu_head);
+
+               if (atomic_dec_and_test(&cur->refcnt))
+                       kfree_rcu(cur, rcu_head);
        }
 }
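
The cttimeout refcount handling above replaces a dec-then-restore dance with a single compare-and-swap: deletion only succeeds when the caller holds the last reference, and ctnl_timeout_put() frees the object when it drops the final count. Below is a minimal userspace sketch of that pattern (an assumption-laden model using C11 <stdatomic.h>; the struct and function names are invented for illustration, not the kernel's).

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct timeout_obj {
            atomic_int refcnt;              /* starts at 1: the "list" reference */
    };

    /* try_delete(): mirrors the ctnl_timeout_try_del() idea -- succeed only when refcnt is 1 */
    static bool try_delete(struct timeout_obj *t)
    {
            int expected = 1;

            if (atomic_compare_exchange_strong(&t->refcnt, &expected, 0)) {
                    free(t);                /* kfree_rcu() in the kernel version */
                    return true;
            }
            return false;                   /* still in use: -EBUSY */
    }

    /* put(): mirrors the ctnl_timeout_put() idea -- the last dropper frees */
    static void put(struct timeout_obj *t)
    {
            if (atomic_fetch_sub(&t->refcnt, 1) == 1)
                    free(t);
    }

    int main(void)
    {
            struct timeout_obj *t = malloc(sizeof(*t));

            atomic_init(&t->refcnt, 2);                 /* list ref + one user */
            printf("try_delete: %d\n", try_delete(t));  /* 0: a user still holds it */
            put(t);                                     /* drops to 1 */
            printf("try_delete: %d\n", try_delete(t));  /* 1: freed here */
            return 0;
    }
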
 
index cbcfdfb586a6150d7b5e186356c1451b769a8ab2..6577db524ef672d7ba6aed4a39a0d7dc41baa226 100644 (file)
@@ -1147,6 +1147,7 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
 MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
 MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
 MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);
+MODULE_ALIAS_NF_LOGGER(3, 1); /* NFPROTO_ARP */
 
 module_init(nfnetlink_log_init);
 module_exit(nfnetlink_log_fini);
index 2863f3493038d8b1fd92eebb9c6d846227e99bf6..8a6bc7630912dea058df1b3ed896daa535ad95f1 100644 (file)
@@ -291,10 +291,16 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
 }
 EXPORT_SYMBOL_GPL(nft_meta_get_init);
 
-static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+                         const struct nft_expr *expr,
+                         const struct nft_data **data)
 {
+       struct nft_meta *priv = nft_expr_priv(expr);
        unsigned int hooks;
 
+       if (priv->key != NFT_META_PKTTYPE)
+               return 0;
+
        switch (ctx->afi->family) {
        case NFPROTO_BRIDGE:
                hooks = 1 << NF_BR_PRE_ROUTING;
@@ -308,6 +314,7 @@ static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
 
        return nft_chain_validate_hooks(ctx->chain, hooks);
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_validate);
 
 int nft_meta_set_init(const struct nft_ctx *ctx,
                      const struct nft_expr *expr,
@@ -327,15 +334,16 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
                len = sizeof(u8);
                break;
        case NFT_META_PKTTYPE:
-               err = nft_meta_set_init_pkttype(ctx);
-               if (err)
-                       return err;
                len = sizeof(u8);
                break;
        default:
                return -EOPNOTSUPP;
        }
 
+       err = nft_meta_set_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
+
        priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
        err = nft_validate_register_load(priv->sreg, len);
        if (err < 0)
@@ -407,6 +415,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
        .init           = nft_meta_set_init,
        .destroy        = nft_meta_set_destroy,
        .dump           = nft_meta_set_dump,
+       .validate       = nft_meta_set_validate,
 };
 
 static const struct nft_expr_ops *
index 0522fc9bfb0a88db513c480f45dff35a6f233000..c64de3f7379df551fa413a4af186f3c16886f112 100644 (file)
@@ -26,11 +26,27 @@ const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_reject_policy);
 
+int nft_reject_validate(const struct nft_ctx *ctx,
+                       const struct nft_expr *expr,
+                       const struct nft_data **data)
+{
+       return nft_chain_validate_hooks(ctx->chain,
+                                       (1 << NF_INET_LOCAL_IN) |
+                                       (1 << NF_INET_FORWARD) |
+                                       (1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_reject_validate);
+
 int nft_reject_init(const struct nft_ctx *ctx,
                    const struct nft_expr *expr,
                    const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
+       int err;
+
+       err = nft_reject_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
index 759ca5248a3d22c20ce19dad1b7a7649a4ec0d6f..e79d9ca2ffee0002e734ee27880c512da3066900 100644 (file)
@@ -66,7 +66,11 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx,
                                const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
-       int icmp_code;
+       int icmp_code, err;
+
+       err = nft_reject_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
@@ -124,6 +128,7 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
        .eval           = nft_reject_inet_eval,
        .init           = nft_reject_inet_init,
        .dump           = nft_reject_inet_dump,
+       .validate       = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_inet_type __read_mostly = {
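
Both the meta and reject changes above move their hook restrictions into a .validate callback so the check runs not only at init time but whenever a chain is re-validated. Here is a minimal userspace model of the bitmask check (the hook numbering, names and -EOPNOTSUPP value are simplified stand-ins, not the netfilter definitions).

    #include <stdio.h>

    /* simplified stand-ins for the inet-family hook numbers */
    enum hook { HOOK_PRE_ROUTING, HOOK_LOCAL_IN, HOOK_FORWARD,
                HOOK_LOCAL_OUT, HOOK_POST_ROUTING };

    /* chain_validate_hooks(): succeed only if the chain's hook is in the allowed set */
    static int chain_validate_hooks(unsigned int chain_hook, unsigned int allowed)
    {
            return (allowed & (1u << chain_hook)) ? 0 : -95 /* -EOPNOTSUPP */;
    }

    /* reject_validate(): reject is only meaningful where a reply can be generated */
    static int reject_validate(unsigned int chain_hook)
    {
            return chain_validate_hooks(chain_hook,
                                        (1u << HOOK_LOCAL_IN) |
                                        (1u << HOOK_FORWARD) |
                                        (1u << HOOK_LOCAL_OUT));
    }

    int main(void)
    {
            printf("reject in input:       %d\n", reject_validate(HOOK_LOCAL_IN));     /* 0 */
            printf("reject in postrouting: %d\n", reject_validate(HOOK_POST_ROUTING)); /* -95 */
            return 0;
    }
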
index 7f4414d26a6622a46a6479049aadc557d128dbf3..663c4c3c907284254a09e692b84f2c60567545de 100644 (file)
@@ -127,6 +127,8 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
                                                    daddr, dport,
                                                    in->ifindex);
 
+                       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+                               sk = NULL;
                        /* NOTE: we return listeners even if bound to
                         * 0.0.0.0, those are filtered out in
                         * xt_socket, since xt_TPROXY needs 0 bound
@@ -195,6 +197,8 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
                                                   daddr, ntohs(dport),
                                                   in->ifindex);
 
+                       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+                               sk = NULL;
                        /* NOTE: we return listeners even if bound to
                         * 0.0.0.0, those are filtered out in
                         * xt_socket, since xt_TPROXY needs 0 bound
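
The two TPROXY hunks above guard against a socket being looked up while its last reference is being dropped: the reference is taken only if the count is still non-zero, otherwise the lookup is treated as a miss. A minimal userspace sketch of the inc-not-zero idiom follows (C11 atomics; the struct and function names are illustrative, not the kernel's).

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct sock_model {
            atomic_int refcnt;
    };

    /* inc_not_zero(): take a reference only if the object is still live */
    static bool inc_not_zero(struct sock_model *sk)
    {
            int old = atomic_load(&sk->refcnt);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(&sk->refcnt, &old, old + 1))
                            return true;    /* reference taken */
                    /* old was reloaded by the failed CAS; retry */
            }
            return false;                   /* object is going away: treat as no match */
    }

    int main(void)
    {
            struct sock_model live, dying;

            atomic_init(&live.refcnt, 1);
            atomic_init(&dying.refcnt, 0);
            printf("live:  %d\n", inc_not_zero(&live));   /* 1 */
            printf("dying: %d\n", inc_not_zero(&dying));  /* 0 */
            return 0;
    }
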
index 3048a7e3a90a5a27887b7e4ff731d00098f2c928..cf327593852a2b75cf3d5eb9e9e8d07af27b17a8 100644 (file)
@@ -26,7 +26,7 @@ static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
        nfnl_acct_update(skb, info->nfacct);
 
-       overquota = nfnl_acct_overquota(skb, info->nfacct);
+       overquota = nfnl_acct_overquota(par->net, skb, info->nfacct);
 
        return overquota == NFACCT_UNDERQUOTA ? false : true;
 }
index 141a06eeb1e502ec8273de1897302cef11c07e0c..e87cd81315e1340869d81a6535230ceb2648521a 100644 (file)
@@ -53,7 +53,7 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
        u32 *tlv = (u32 *)(skbdata);
        u16 totlen = nla_total_size(dlen);      /*alignment + hdr */
        char *dptr = (char *)tlv + NLA_HDRLEN;
-       u32 htlv = attrtype << 16 | totlen;
+       u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);
 
        *tlv = htonl(htlv);
        memset(dptr, 0, totlen - NLA_HDRLEN);
@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(ife_release_meta_gen);
 
 int ife_validate_meta_u32(void *val, int len)
 {
-       if (len == 4)
+       if (len == sizeof(u32))
                return 0;
 
        return -EINVAL;
@@ -144,8 +144,8 @@ EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
 
 int ife_validate_meta_u16(void *val, int len)
 {
-       /* length will include padding */
-       if (len == NLA_ALIGN(2))
+       /* length will not include padding */
+       if (len == sizeof(u16))
                return 0;
 
        return -EINVAL;
@@ -652,12 +652,14 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                u8 *tlvdata = (u8 *)tlv;
                u16 mtype = tlv->type;
                u16 mlen = tlv->len;
+               u16 alen;
 
                mtype = ntohs(mtype);
                mlen = ntohs(mlen);
+               alen = NLA_ALIGN(mlen);
 
-               if (find_decode_metaid(skb, ife, mtype, (mlen - 4),
-                                      (void *)(tlvdata + 4))) {
+               if (find_decode_metaid(skb, ife, mtype, (mlen - NLA_HDRLEN),
+                                      (void *)(tlvdata + NLA_HDRLEN))) {
                        /* abuse overlimits to count when we receive metadata
                         * but dont have an ops for it
                         */
@@ -666,8 +668,8 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                        ife->tcf_qstats.overlimits++;
                }
 
-               tlvdata += mlen;
-               ifehdrln -= mlen;
+               tlvdata += alen;
+               ifehdrln -= alen;
                tlv = (struct meta_tlvhdr *)tlvdata;
        }
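
The act_ife fixes above hinge on netlink TLV layout: the header carries the unpadded length (header plus data), while the next TLV starts at the 4-byte-aligned offset. A small userspace sketch of encoding and walking such TLVs follows (simplified structures, host byte order, and locally defined NLA_HDRLEN/NLA_ALIGN; the real code uses the kernel's netlink helpers and network byte order).

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NLA_HDRLEN   4
    #define NLA_ALIGN(x) (((x) + 3u) & ~3u)

    struct meta_tlv {
            uint16_t type;
            uint16_t len;       /* header + payload, NOT padded */
    };

    /* encode one TLV, return the padded size actually consumed in the buffer */
    static unsigned int tlv_encode(uint8_t *buf, uint16_t type,
                                   const void *data, uint16_t dlen)
    {
            struct meta_tlv hdr = { .type = type, .len = NLA_HDRLEN + dlen };

            memcpy(buf, &hdr, sizeof(hdr));
            memcpy(buf + NLA_HDRLEN, data, dlen);
            return NLA_ALIGN(hdr.len);      /* caller advances by the aligned size */
    }

    int main(void)
    {
            uint8_t buf[64] = { 0 };
            uint16_t v16 = 0xbeef;
            uint32_t v32 = 0x12345678;
            unsigned int off = 0;

            off += tlv_encode(buf + off, 1, &v16, sizeof(v16)); /* len 6, advances 8 */
            off += tlv_encode(buf + off, 2, &v32, sizeof(v32)); /* len 8, advances 8 */

            /* walk: read the unpadded length, step by the aligned length */
            for (unsigned int pos = 0; pos < off; ) {
                    struct meta_tlv hdr;

                    memcpy(&hdr, buf + pos, sizeof(hdr));
                    printf("type %d, payload %d bytes\n", hdr.type, hdr.len - NLA_HDRLEN);
                    pos += NLA_ALIGN(hdr.len);
            }
            return 0;
    }
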
 
index e95b67cd571844798de98dc4a97743ed89e0d37b..657c13362b19ca3726ab972a8d51b22ccd846c18 100644 (file)
@@ -643,18 +643,19 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
        struct Qdisc *sch;
 
        if (!try_module_get(ops->owner))
-               goto errout;
+               return NULL;
 
        sch = qdisc_alloc(dev_queue, ops);
-       if (IS_ERR(sch))
-               goto errout;
+       if (IS_ERR(sch)) {
+               module_put(ops->owner);
+               return NULL;
+       }
        sch->parent = parentid;
 
        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;
 
        qdisc_destroy(sch);
-errout:
        return NULL;
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
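
The qdisc fix above is a classic error-path pairing problem: try_module_get() succeeded, but the allocation-failure path returned without a matching module_put(), leaking a module reference. A minimal userspace sketch of the acquire/undo-on-failure shape (illustrative names; a plain counter stands in for the module refcount):

    #include <stdio.h>
    #include <stdlib.h>

    static int module_refcnt;       /* stands in for the qdisc ops owner's refcount */

    static int try_module_get_model(void)  { module_refcnt++; return 1; }
    static void module_put_model(void)     { module_refcnt--; }

    struct qdisc_model { int parent; };

    /* create(): every exit after a successful "get" must undo it on failure */
    static struct qdisc_model *create(int fail_alloc)
    {
            struct qdisc_model *sch;

            if (!try_module_get_model())
                    return NULL;

            sch = fail_alloc ? NULL : malloc(sizeof(*sch));
            if (!sch) {
                    module_put_model();     /* the reference the old code leaked */
                    return NULL;
            }
            return sch;
    }

    int main(void)
    {
            create(1);              /* simulated allocation failure */
            printf("refcount after failed create: %d\n", module_refcnt);  /* 0: no leak */
            return 0;
    }
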
index c182db7d691ff44a52923fb36c9170e49c141c04..69444d32ecda6cd1a4924911172feba89c5ae976 100644 (file)
@@ -119,7 +119,13 @@ int sctp_rcv(struct sk_buff *skb)
                       skb_transport_offset(skb))
                goto discard_it;
 
-       if (!pskb_may_pull(skb, sizeof(struct sctphdr)))
+       /* If the packet is fragmented and we need to do crc checking,
+        * it's better to just linearize it; otherwise crc computation
+        * takes longer.
+        */
+       if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
+            skb_linearize(skb)) ||
+           !pskb_may_pull(skb, sizeof(struct sctphdr)))
                goto discard_it;
 
        /* Pull up the IP header. */
@@ -1177,9 +1183,6 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
        if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
                return NULL;
 
-       if (skb_linearize(skb))
-               return NULL;
-
        ch = (sctp_chunkhdr_t *) skb->data;
 
        /* The code below will attempt to walk the chunk and extract
index c30ddb0f31907f57c5ce85b00dbe04260ca1cb2e..6437aa97cfd79f14c633499c2b131389204c435b 100644 (file)
@@ -170,19 +170,6 @@ next_chunk:
 
                chunk = list_entry(entry, struct sctp_chunk, list);
 
-               /* Linearize if it's not GSO */
-               if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
-                   skb_is_nonlinear(chunk->skb)) {
-                       if (skb_linearize(chunk->skb)) {
-                               __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
-                               sctp_chunk_free(chunk);
-                               goto next_chunk;
-                       }
-
-                       /* Update sctp_hdr as it probably changed */
-                       chunk->sctp_hdr = sctp_hdr(chunk->skb);
-               }
-
                if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
                        /* GSO-marked skbs but without frags, handle
                         * them normally
index 1f1682b9a6a82cc18d2571cf8287cfd194c7840d..31b7bc35895d578d37fee79a8a56a074f43932fe 100644 (file)
@@ -878,7 +878,7 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
                                        struct sctp_chunk *chunk,
                                        u16 chunk_len)
 {
-       size_t psize, pmtu;
+       size_t psize, pmtu, maxsize;
        sctp_xmit_t retval = SCTP_XMIT_OK;
 
        psize = packet->size;
@@ -906,6 +906,17 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
                        goto out;
                }
 
+               /* Similarly, if this chunk was built before a PMTU
+                * reduction, we have to fragment it at IP level now. So
+                * if the packet already contains something, we need to
+                * flush.
+                */
+               maxsize = pmtu - packet->overhead;
+               if (packet->auth)
+                       maxsize -= WORD_ROUND(packet->auth->skb->len);
+               if (chunk_len > maxsize)
+                       retval = SCTP_XMIT_PMTU_FULL;
+
                /* It is also okay to fragment if the chunk we are
                 * adding is a control chunk, but only if current packet
                 * is not a GSO one otherwise it causes fragmentation of
index bb691538adc8e89d6920f8684d8a64779ea1865a..f3508aa758154c7214a83c42f19821556cbc5fb4 100644 (file)
@@ -424,11 +424,13 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb,
                paddr.v4.sin_family = AF_INET;
        } else {
                laddr.v6.sin6_port = req->id.idiag_sport;
-               memcpy(&laddr.v6.sin6_addr, req->id.idiag_src, 64);
+               memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
+                      sizeof(laddr.v6.sin6_addr));
                laddr.v6.sin6_family = AF_INET6;
 
                paddr.v6.sin6_port = req->id.idiag_dport;
-               memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst, 64);
+               memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
+                      sizeof(paddr.v6.sin6_addr));
                paddr.v6.sin6_family = AF_INET6;
        }
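
The sctp_diag hunk above replaces a hard-coded 64-byte copy with the size of the destination field, the usual way to keep a memcpy() from overrunning a fixed-size struct member. A minimal userspace sketch (simplified address structures; the field names are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct in6_addr_model { unsigned char s6_addr[16]; };

    struct sockaddr_in6_model {
            unsigned short sin6_port;
            struct in6_addr_model sin6_addr;
    };

    int main(void)
    {
            unsigned char src[64] = { 0xaa };   /* the diag request buffer is larger than the field */
            struct sockaddr_in6_model laddr;

            /* bound the copy by the destination, not by the source buffer */
            memcpy(&laddr.sin6_addr, src, sizeof(laddr.sin6_addr));
            printf("copied %zu bytes\n", sizeof(laddr.sin6_addr));  /* 16 */
            return 0;
    }
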
 
index 1d281816f2bf14e34a71932863e5579e4a5caa35..d8582028b34600dc20e4447a99aeb5384b9ecb54 100644 (file)
@@ -569,9 +569,10 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
        struct rsc *found;
 
        memset(&rsci, 0, sizeof(rsci));
-       rsci.handle.data = handle->data;
-       rsci.handle.len = handle->len;
+       if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
+               return NULL;
        found = rsc_lookup(cd, &rsci);
+       rsc_free(&rsci);
        if (!found)
                return NULL;
        if (cache_check(cd, &found->h, NULL))
index 7f79fb7dc6a00d6cc4082815d35fab4c60be1943..66f23b376fa04a91134eddf6d8ee22a2f5de5808 100644 (file)
@@ -453,7 +453,7 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
        struct rpc_xprt_switch *xps;
 
        if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
-               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
                xps = args->bc_xprt->xpt_bc_xps;
                xprt_switch_get(xps);
        } else {
@@ -520,7 +520,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
        char servername[48];
 
        if (args->bc_xprt) {
-               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
                xprt = args->bc_xprt->xpt_bc_xprt;
                if (xprt) {
                        xprt_get(xprt);
index 536d0be3f61bdd3995f95a5ae3893e9cf9d84997..799cce6cbe45e89f4030d4cd1a17a15b56cf3cf2 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/slab.h>
 #include <linux/prefetch.h>
 #include <linux/sunrpc/addr.h>
+#include <linux/sunrpc/svc_rdma.h>
 #include <asm/bitops.h>
 #include <linux/module.h> /* try_module_get()/module_put() */
 
@@ -923,7 +924,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
        }
 
        INIT_LIST_HEAD(&buf->rb_recv_bufs);
-       for (i = 0; i < buf->rb_max_requests; i++) {
+       for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
                struct rpcrdma_rep *rep;
 
                rep = rpcrdma_create_rep(r_xprt);
@@ -1018,6 +1019,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
                rep = rpcrdma_buffer_get_rep_locked(buf);
                rpcrdma_destroy_rep(ia, rep);
        }
+       buf->rb_send_count = 0;
 
        spin_lock(&buf->rb_reqslock);
        while (!list_empty(&buf->rb_allreqs)) {
@@ -1032,6 +1034,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
                spin_lock(&buf->rb_reqslock);
        }
        spin_unlock(&buf->rb_reqslock);
+       buf->rb_recv_count = 0;
 
        rpcrdma_destroy_mrs(buf);
 }
@@ -1074,8 +1077,27 @@ rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
        spin_unlock(&buf->rb_mwlock);
 }
 
+static struct rpcrdma_rep *
+rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
+{
+       /* If an RPC previously completed without a reply (say, a
+        * credential problem or a soft timeout occurs) then hold off
+        * on supplying more Receive buffers until the number of new
+        * pending RPCs catches up to the number of posted Receives.
+        */
+       if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
+               return NULL;
+
+       if (unlikely(list_empty(&buffers->rb_recv_bufs)))
+               return NULL;
+       buffers->rb_recv_count++;
+       return rpcrdma_buffer_get_rep_locked(buffers);
+}
+
 /*
  * Get a set of request/reply buffers.
+ *
+ * Reply buffer (if available) is attached to send buffer upon return.
  */
 struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
@@ -1085,21 +1107,15 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
        spin_lock(&buffers->rb_lock);
        if (list_empty(&buffers->rb_send_bufs))
                goto out_reqbuf;
+       buffers->rb_send_count++;
        req = rpcrdma_buffer_get_req_locked(buffers);
-       if (list_empty(&buffers->rb_recv_bufs))
-               goto out_repbuf;
-       req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
+       req->rl_reply = rpcrdma_buffer_get_rep(buffers);
        spin_unlock(&buffers->rb_lock);
        return req;
 
 out_reqbuf:
        spin_unlock(&buffers->rb_lock);
-       pr_warn("rpcrdma: out of request buffers (%p)\n", buffers);
-       return NULL;
-out_repbuf:
-       list_add(&req->rl_free, &buffers->rb_send_bufs);
-       spin_unlock(&buffers->rb_lock);
-       pr_warn("rpcrdma: out of reply buffers (%p)\n", buffers);
+       pr_warn("RPC:       %s: out of request buffers\n", __func__);
        return NULL;
 }
 
@@ -1117,9 +1133,12 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
        req->rl_reply = NULL;
 
        spin_lock(&buffers->rb_lock);
+       buffers->rb_send_count--;
        list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
-       if (rep)
+       if (rep) {
+               buffers->rb_recv_count--;
                list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
+       }
        spin_unlock(&buffers->rb_lock);
 }
 
@@ -1133,8 +1152,7 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
        struct rpcrdma_buffer *buffers = req->rl_buffer;
 
        spin_lock(&buffers->rb_lock);
-       if (!list_empty(&buffers->rb_recv_bufs))
-               req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
+       req->rl_reply = rpcrdma_buffer_get_rep(buffers);
        spin_unlock(&buffers->rb_lock);
 }
 
@@ -1148,6 +1166,7 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
        struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
 
        spin_lock(&buffers->rb_lock);
+       buffers->rb_recv_count--;
        list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
        spin_unlock(&buffers->rb_lock);
 }
index 670fad57153a109b5f8d14ac9cd3733e874c81d3..a71b0f5897d8721ee8edaed291388c34f61d3cee 100644 (file)
@@ -321,6 +321,7 @@ struct rpcrdma_buffer {
        char                    *rb_pool;
 
        spinlock_t              rb_lock;        /* protect buf lists */
+       int                     rb_send_count, rb_recv_count;
        struct list_head        rb_send_bufs;
        struct list_head        rb_recv_bufs;
        u32                     rb_max_requests;
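
The xprtrdma changes above add two counters so the transport stops handing out Receive buffers once more Receives are posted than there are outstanding RPCs. A minimal userspace sketch of that throttling rule (counts only, no lists or locking; the names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct buffer_counts {
            int send_count;     /* requests currently handed out */
            int recv_count;     /* receive buffers currently handed out */
    };

    /* may_get_rep(): hold back Receives until pending RPCs catch up to posted Receives */
    static bool may_get_rep(const struct buffer_counts *b)
    {
            return b->send_count >= b->recv_count;
    }

    int main(void)
    {
            struct buffer_counts b = { .send_count = 0, .recv_count = 0 };

            /* two earlier RPCs completed without replies: their Receives stay posted */
            b.recv_count = 2;

            /* a new RPC arrives: it may send, but gets no fresh reply buffer yet */
            b.send_count++;
            printf("attach rep? %d\n", may_get_rep(&b));    /* 0: 1 < 2, hold off */

            /* once enough new RPCs are pending, Receives are handed out again */
            b.send_count++;
            printf("attach rep? %d\n", may_get_rep(&b));    /* 1: 2 >= 2 */
            return 0;
    }
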
index 8ede3bc52481b73c82834aa684111013c6d40cad..bf168838a0296e9387de33ad5afabb3bdf3b35f0 100644 (file)
@@ -1074,7 +1074,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
                skb = skb_recv_datagram(sk, 0, 1, &err);
                if (skb != NULL) {
                        xs_udp_data_read_skb(&transport->xprt, sk, skb);
-                       skb_free_datagram(sk, skb);
+                       skb_free_datagram_locked(sk, skb);
                        continue;
                }
                if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
index 6b626a64b5179e9b7d8fc909be203bdf6facfbdd..a04fe9be1c60e2a7c1cb2f90c80731e08dcc910d 100644 (file)
@@ -62,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
 
 /**
  * named_prepare_buf - allocate & initialize a publication message
+ *
+ * The buffer returned is of size INT_H_SIZE + payload size
  */
 static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
                                         u32 dest)
@@ -141,9 +143,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
        struct publication *publ;
        struct sk_buff *skb = NULL;
        struct distr_item *item = NULL;
-       uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
-                       ITEM_SIZE;
-       uint msg_rem = msg_dsz;
+       u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+                       ITEM_SIZE) * ITEM_SIZE;
+       u32 msg_rem = msg_dsz;
 
        list_for_each_entry(publ, pls, local_list) {
                /* Prepare next buffer: */
index b016c011970b7e5089b277c5bcd09a8fa31a3af0..ae7e14cae0857933c6c140a4382d9ab054ce353f 100644 (file)
@@ -396,10 +396,13 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
        tuncfg.encap_destroy = NULL;
        setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
 
-       if (enable_mcast(ub, remote))
+       err = enable_mcast(ub, remote);
+       if (err)
                goto err;
        return 0;
 err:
+       if (ub->ubsock)
+               udp_tunnel_sock_release(ub->ubsock);
        kfree(ub);
        return err;
 }
index f1dffe84f0d5e56b0c57271831a427ee137a37c4..8309687a56b04f750560245ace8ed4bfbf969a1e 100644 (file)
@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
 {
        struct unix_sock *u = unix_sk(sk);
 
-       if (mutex_lock_interruptible(&u->readlock))
+       if (mutex_lock_interruptible(&u->iolock))
                return -EINTR;
 
        sk->sk_peek_off = val;
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
 
        return 0;
 }
@@ -779,7 +779,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
        spin_lock_init(&u->lock);
        atomic_long_set(&u->inflight, 0);
        INIT_LIST_HEAD(&u->link);
-       mutex_init(&u->readlock); /* single task reading lock */
+       mutex_init(&u->iolock); /* single task reading lock */
+       mutex_init(&u->bindlock); /* single task binding lock */
        init_waitqueue_head(&u->peer_wait);
        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        unix_insert_socket(unix_sockets_unbound(sk), sk);
@@ -848,7 +849,7 @@ static int unix_autobind(struct socket *sock)
        int err;
        unsigned int retries = 0;
 
-       err = mutex_lock_interruptible(&u->readlock);
+       err = mutex_lock_interruptible(&u->bindlock);
        if (err)
                return err;
 
@@ -895,7 +896,7 @@ retry:
        spin_unlock(&unix_table_lock);
        err = 0;
 
-out:   mutex_unlock(&u->readlock);
+out:   mutex_unlock(&u->bindlock);
        return err;
 }
 
@@ -954,20 +955,32 @@ fail:
        return NULL;
 }
 
-static int unix_mknod(struct dentry *dentry, const struct path *path, umode_t mode,
-                     struct path *res)
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
 {
-       int err;
+       struct dentry *dentry;
+       struct path path;
+       int err = 0;
+       /*
+        * Get the parent directory, calculate the hash for last
+        * component.
+        */
+       dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+       err = PTR_ERR(dentry);
+       if (IS_ERR(dentry))
+               return err;
 
-       err = security_path_mknod(path, dentry, mode, 0);
+       /*
+        * All right, let's create it.
+        */
+       err = security_path_mknod(&path, dentry, mode, 0);
        if (!err) {
-               err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
+               err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
                if (!err) {
-                       res->mnt = mntget(path->mnt);
+                       res->mnt = mntget(path.mnt);
                        res->dentry = dget(dentry);
                }
        }
-
+       done_path_create(&path, dentry);
        return err;
 }
 
@@ -978,12 +991,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        struct unix_sock *u = unix_sk(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        char *sun_path = sunaddr->sun_path;
-       int err, name_err;
+       int err;
        unsigned int hash;
        struct unix_address *addr;
        struct hlist_head *list;
-       struct path path;
-       struct dentry *dentry;
 
        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
@@ -999,34 +1010,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        addr_len = err;
 
-       name_err = 0;
-       dentry = NULL;
-       if (sun_path[0]) {
-               /* Get the parent directory, calculate the hash for last
-                * component.
-                */
-               dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
-
-               if (IS_ERR(dentry)) {
-                       /* delay report until after 'already bound' check */
-                       name_err = PTR_ERR(dentry);
-                       dentry = NULL;
-               }
-       }
-
-       err = mutex_lock_interruptible(&u->readlock);
+       err = mutex_lock_interruptible(&u->bindlock);
        if (err)
-               goto out_path;
+               goto out;
 
        err = -EINVAL;
        if (u->addr)
                goto out_up;
 
-       if (name_err) {
-               err = name_err == -EEXIST ? -EADDRINUSE : name_err;
-               goto out_up;
-       }
-
        err = -ENOMEM;
        addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
        if (!addr)
@@ -1037,11 +1028,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        addr->hash = hash ^ sk->sk_type;
        atomic_set(&addr->refcnt, 1);
 
-       if (dentry) {
-               struct path u_path;
+       if (sun_path[0]) {
+               struct path path;
                umode_t mode = S_IFSOCK |
                       (SOCK_INODE(sock)->i_mode & ~current_umask());
-               err = unix_mknod(dentry, &path, mode, &u_path);
+               err = unix_mknod(sun_path, mode, &path);
                if (err) {
                        if (err == -EEXIST)
                                err = -EADDRINUSE;
@@ -1049,9 +1040,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                        goto out_up;
                }
                addr->hash = UNIX_HASH_SIZE;
-               hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+               hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
                spin_lock(&unix_table_lock);
-               u->path = u_path;
+               u->path = path;
                list = &unix_socket_table[hash];
        } else {
                spin_lock(&unix_table_lock);
@@ -1073,11 +1064,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 out_unlock:
        spin_unlock(&unix_table_lock);
 out_up:
-       mutex_unlock(&u->readlock);
-out_path:
-       if (dentry)
-               done_path_create(&path, dentry);
-
+       mutex_unlock(&u->bindlock);
 out:
        return err;
 }
@@ -1969,17 +1956,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
        if (false) {
 alloc_skb:
                unix_state_unlock(other);
-               mutex_unlock(&unix_sk(other)->readlock);
+               mutex_unlock(&unix_sk(other)->iolock);
                newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
                                              &err, 0);
                if (!newskb)
                        goto err;
        }
 
-       /* we must acquire readlock as we modify already present
+       /* we must acquire iolock as we modify already present
         * skbs in the sk_receive_queue and mess with skb->len
         */
-       err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+       err = mutex_lock_interruptible(&unix_sk(other)->iolock);
        if (err) {
                err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
                goto err;
@@ -2046,7 +2033,7 @@ alloc_skb:
        }
 
        unix_state_unlock(other);
-       mutex_unlock(&unix_sk(other)->readlock);
+       mutex_unlock(&unix_sk(other)->iolock);
 
        other->sk_data_ready(other);
        scm_destroy(&scm);
@@ -2055,7 +2042,7 @@ alloc_skb:
 err_state_unlock:
        unix_state_unlock(other);
 err_unlock:
-       mutex_unlock(&unix_sk(other)->readlock);
+       mutex_unlock(&unix_sk(other)->iolock);
 err:
        kfree_skb(newskb);
        if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@@ -2123,7 +2110,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
        do {
-               mutex_lock(&u->readlock);
+               mutex_lock(&u->iolock);
 
                skip = sk_peek_offset(sk, flags);
                skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
@@ -2131,14 +2118,14 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
                if (skb)
                        break;
 
-               mutex_unlock(&u->readlock);
+               mutex_unlock(&u->iolock);
 
                if (err != -EAGAIN)
                        break;
        } while (timeo &&
                 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
 
-       if (!skb) { /* implies readlock unlocked */
+       if (!skb) { /* implies iolock unlocked */
                unix_state_lock(sk);
                /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
                if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
@@ -2203,7 +2190,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 
 out_free:
        skb_free_datagram(sk, skb);
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
 out:
        return err;
 }
@@ -2298,7 +2285,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
        /* Lock the socket to prevent queue disordering
         * while sleeps in memcpy_tomsg
         */
-       mutex_lock(&u->readlock);
+       mutex_lock(&u->iolock);
 
        if (flags & MSG_PEEK)
                skip = sk_peek_offset(sk, flags);
@@ -2340,7 +2327,7 @@ again:
                                break;
                        }
 
-                       mutex_unlock(&u->readlock);
+                       mutex_unlock(&u->iolock);
 
                        timeo = unix_stream_data_wait(sk, timeo, last,
                                                      last_len);
@@ -2351,7 +2338,7 @@ again:
                                goto out;
                        }
 
-                       mutex_lock(&u->readlock);
+                       mutex_lock(&u->iolock);
                        goto redo;
 unlock:
                        unix_state_unlock(sk);
@@ -2454,7 +2441,7 @@ unlock:
                }
        } while (size);
 
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
        if (state->msg)
                scm_recv(sock, state->msg, &scm, flags);
        else
@@ -2495,9 +2482,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
        int ret;
        struct unix_sock *u = unix_sk(sk);
 
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
        ret = splice_to_pipe(pipe, spd);
-       mutex_lock(&u->readlock);
+       mutex_lock(&u->iolock);
 
        return ret;
 }
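
The af_unix rework above splits the old readlock into two mutexes with distinct jobs: bindlock serializes bind/autobind (so the filesystem work in unix_mknod() no longer has to happen with a path lock held across the socket lock), while iolock serializes readers and sendpage against the receive queue. A minimal userspace sketch of the split (pthreads; the structure is an illustrative model, not the kernel's unix_sock):

    #include <pthread.h>
    #include <stdio.h>

    struct unix_sock_model {
            pthread_mutex_t iolock;    /* protects receive-queue readers/writers */
            pthread_mutex_t bindlock;  /* protects the bound address */
            const char *bound_path;
    };

    static int sock_bind(struct unix_sock_model *u, const char *path)
    {
            int err = 0;

            pthread_mutex_lock(&u->bindlock);   /* readers are NOT blocked by this */
            if (u->bound_path)
                    err = -22;                  /* -EINVAL: already bound */
            else
                    u->bound_path = path;       /* filesystem node creation would go here */
            pthread_mutex_unlock(&u->bindlock);
            return err;
    }

    static void sock_recv(struct unix_sock_model *u)
    {
            pthread_mutex_lock(&u->iolock);     /* only other I/O contends here */
            /* ... dequeue from the receive queue ... */
            pthread_mutex_unlock(&u->iolock);
    }

    int main(void)
    {
            struct unix_sock_model u = {
                    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL,
            };

            printf("bind:   %d\n", sock_bind(&u, "/tmp/model.sock"));  /* 0 */
            printf("rebind: %d\n", sock_bind(&u, "/tmp/other.sock"));  /* -22 */
            sock_recv(&u);
            return 0;
    }
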
index dbb2738e356ad83785caa748a0b84352a189bb8b..6250b1cfcde58758bb480758d1c61217d37a7cd1 100644 (file)
@@ -958,29 +958,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
                        return private(dev, iwr, cmd, info, handler);
        }
        /* Old driver API : call driver ioctl handler */
-       if (dev->netdev_ops->ndo_do_ioctl) {
-#ifdef CONFIG_COMPAT
-               if (info->flags & IW_REQUEST_FLAG_COMPAT) {
-                       int ret = 0;
-                       struct iwreq iwr_lcl;
-                       struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
-
-                       memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
-                       iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
-                       iwr_lcl.u.data.length = iwp_compat->length;
-                       iwr_lcl.u.data.flags = iwp_compat->flags;
-
-                       ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
-
-                       iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
-                       iwp_compat->length = iwr_lcl.u.data.length;
-                       iwp_compat->flags = iwr_lcl.u.data.flags;
-
-                       return ret;
-               } else
-#endif
-                       return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
-       }
+       if (dev->netdev_ops->ndo_do_ioctl)
+               return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
        return -EOPNOTSUPP;
 }
 
index 1c4ad477ce935eb9f217fe6aebbd61551b0c1eea..6e3f0254d8a11bcc5075915abf953fcc21008aec 100644 (file)
@@ -207,15 +207,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        family = XFRM_SPI_SKB_CB(skb)->family;
 
        /* if tunnel is present override skb->mark value with tunnel i_key */
-       if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
-               switch (family) {
-               case AF_INET:
+       switch (family) {
+       case AF_INET:
+               if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
-                       break;
-               case AF_INET6:
+               break;
+       case AF_INET6:
+               if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
-                       break;
-               }
+               break;
        }
 
        /* Allocate new secpath or COW existing one. */
index b5e665b3cfb05f4d08970387802fee8aec8243dd..45f9cf97ea25d98bea83a7dd48365357fb8f4bee 100644 (file)
@@ -626,6 +626,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
        /* re-insert all policies by order of creation */
        list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+               if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+                       /* skip socket policies */
+                       continue;
+               }
                newpos = NULL;
                chain = policy_hash_bysel(net, &policy->selector,
                                          policy->family,
index d516845e16e30b69d2d47160ef9f45e6a4587a6b..cb65d916a345d1e45547cd81c0d86b2068c58bce 100644 (file)
@@ -896,7 +896,8 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
        struct sock *sk = cb->skb->sk;
        struct net *net = sock_net(sk);
 
-       xfrm_state_walk_done(walk, net);
+       if (cb->args[0])
+               xfrm_state_walk_done(walk, net);
        return 0;
 }
 
@@ -921,8 +922,6 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
                u8 proto = 0;
                int err;
 
-               cb->args[0] = 1;
-
                err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
                                  xfrma_policy);
                if (err < 0)
@@ -939,6 +938,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
                        proto = nla_get_u8(attrs[XFRMA_PROTO]);
 
                xfrm_state_walk_init(walk, proto, filter);
+               cb->args[0] = 1;
        }
 
        (void) xfrm_state_walk(net, walk, dump_one_state, &info);
@@ -2051,9 +2051,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (up->hard) {
                xfrm_policy_delete(xp, p->dir);
                xfrm_audit_policy_delete(xp, 1, true);
-       } else {
-               // reset the timers here?
-               WARN(1, "Don't know what to do with soft policy expire\n");
        }
        km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
@@ -2117,7 +2114,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = verify_newpolicy_info(&ua->policy);
        if (err)
-               goto bad_policy;
+               goto free_state;
 
        /*   build an XP */
        xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
@@ -2149,8 +2146,6 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        return 0;
 
-bad_policy:
-       WARN(1, "BAD policy passed\n");
 free_state:
        kfree(x);
 nomem:
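
The xfrm_dump_sa() change above makes the netlink dump teardown conditional on the walk actually having been initialized: cb->args[0] is set only after xfrm_state_walk_init(), and the done callback checks it before calling xfrm_state_walk_done(). A minimal userspace sketch of that start/done handshake (the dump callback machinery is modeled with plain function calls; the names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct callback_model {
            long args[6];           /* mirrors netlink_callback::args */
            bool walk_initialized;  /* state owned by the walk itself */
    };

    static void walk_init(struct callback_model *cb)
    {
            cb->walk_initialized = true;
            cb->args[0] = 1;        /* only now is there anything to tear down */
    }

    static void walk_done(struct callback_model *cb)
    {
            if (!cb->args[0])       /* dump aborted before init: nothing to release */
                    return;
            cb->walk_initialized = false;
    }

    /* dump(): a parse failure before walk_init() leaves args[0] at 0 */
    static int dump(struct callback_model *cb, bool parse_fails)
    {
            if (!cb->args[0]) {
                    if (parse_fails)
                            return -22;     /* -EINVAL before the walk exists */
                    walk_init(cb);
            }
            /* ... emit states ... */
            return 0;
    }

    int main(void)
    {
            struct callback_model cb;

            memset(&cb, 0, sizeof(cb));
            dump(&cb, true);        /* fails before init */
            walk_done(&cb);         /* must not touch an uninitialized walk */
            printf("no teardown of an uninitialized walk\n");
            return 0;
    }
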
index 4de3cc42fc50dca7a3d620ccca99cb4a55b685f5..206a6b346a8dbc3d38b1771093671fa37f1daf27 100755 (executable)
@@ -3570,15 +3570,6 @@ sub process {
                        }
                }
 
-# check for uses of DEFINE_PCI_DEVICE_TABLE
-               if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
-                       if (WARN("DEFINE_PCI_DEVICE_TABLE",
-                                "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" . $herecurr) &&
-                           $fix) {
-                               $fixed[$fixlinenr] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /;
-                       }
-               }
-
 # check for new typedefs, only function parameters and sparse annotations
 # make sense.
                if ($line =~ /\btypedef\s/ &&
diff --git a/scripts/faddr2line b/scripts/faddr2line
new file mode 100755 (executable)
index 0000000..4fbfe83
--- /dev/null
@@ -0,0 +1,177 @@
+#!/bin/bash
+#
+# Translate stack dump function offsets.
+#
+# addr2line doesn't work with KASLR addresses.  This works similarly to
+# addr2line, but instead takes the 'func+0x123' format as input:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux meminfo_proc_show+0x5/0x568
+#   meminfo_proc_show+0x5/0x568:
+#   meminfo_proc_show at fs/proc/meminfo.c:27
+#
+# If the address is part of an inlined function, the full inline call chain is
+# printed:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux native_write_msr+0x6/0x27
+#   native_write_msr+0x6/0x27:
+#   arch_static_branch at arch/x86/include/asm/msr.h:121
+#    (inlined by) static_key_false at include/linux/jump_label.h:125
+#    (inlined by) native_write_msr at arch/x86/include/asm/msr.h:125
+#
+# The function size after the '/' in the input is optional, but recommended.
+# It's used to help disambiguate any duplicate symbol names, which can occur
+# rarely.  If the size is omitted for a duplicate symbol then it's possible for
+# multiple code sites to be printed:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux raw_ioctl+0x5
+#   raw_ioctl+0x5/0x20:
+#   raw_ioctl at drivers/char/raw.c:122
+#
+#   raw_ioctl+0x5/0xb1:
+#   raw_ioctl at net/ipv4/raw.c:876
+#
+# Multiple addresses can be specified on a single command line:
+#
+#   $ ./scripts/faddr2line ~/k/vmlinux type_show+0x10/45 free_reserved_area+0x90
+#   type_show+0x10/0x2d:
+#   type_show at drivers/video/backlight/backlight.c:213
+#
+#   free_reserved_area+0x90/0x123:
+#   free_reserved_area at mm/page_alloc.c:6429 (discriminator 2)
+
+
+set -o errexit
+set -o nounset
+
+command -v awk >/dev/null 2>&1 || die "awk isn't installed"
+command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
+command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+
+usage() {
+       echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
+       exit 1
+}
+
+warn() {
+       echo "$1" >&2
+}
+
+die() {
+       echo "ERROR: $1" >&2
+       exit 1
+}
+
+# Try to figure out the source directory prefix so we can remove it from the
+# addr2line output.  HACK ALERT: This assumes that start_kernel() is in
+# kernel/init.c!  This only works for vmlinux.  Otherwise it falls back to
+# printing the absolute path.
+find_dir_prefix() {
+       local objfile=$1
+
+       local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+       [[ -z $start_kernel_addr ]] && return
+
+       local file_line=$(addr2line -e $objfile $start_kernel_addr)
+       [[ -z $file_line ]] && return
+
+       local prefix=${file_line%init/main.c:*}
+       if [[ -z $prefix ]] || [[ $prefix = $file_line ]]; then
+               return
+       fi
+
+       DIR_PREFIX=$prefix
+       return 0
+}
+
+__faddr2line() {
+       local objfile=$1
+       local func_addr=$2
+       local dir_prefix=$3
+       local print_warnings=$4
+
+       local func=${func_addr%+*}
+       local offset=${func_addr#*+}
+       offset=${offset%/*}
+       local size=
+       [[ $func_addr =~ "/" ]] && size=${func_addr#*/}
+
+       if [[ -z $func ]] || [[ -z $offset ]] || [[ $func = $func_addr ]]; then
+               warn "bad func+offset $func_addr"
+               DONE=1
+               return
+       fi
+
+       # Go through each of the object's symbols which match the func name.
+       # In rare cases there might be duplicates.
+       while read symbol; do
+               local fields=($symbol)
+               local sym_base=0x${fields[1]}
+               local sym_size=${fields[2]}
+               local sym_type=${fields[3]}
+
+               # calculate the address
+               local addr=$(($sym_base + $offset))
+               if [[ -z $addr ]] || [[ $addr = 0 ]]; then
+                       warn "bad address: $sym_base + $offset"
+                       DONE=1
+                       return
+               fi
+               local hexaddr=0x$(printf %x $addr)
+
+               # weed out non-function symbols
+               if [[ $sym_type != "FUNC" ]]; then
+                       [[ $print_warnings = 1 ]] &&
+                               echo "skipping $func address at $hexaddr due to non-function symbol"
+                       continue
+               fi
+
+               # if the user provided a size, make sure it matches the symbol's size
+               if [[ -n $size ]] && [[ $size -ne $sym_size ]]; then
+                       [[ $print_warnings = 1 ]] &&
+                               echo "skipping $func address at $hexaddr due to size mismatch ($size != $sym_size)"
+                       continue;
+               fi
+
+               # make sure the provided offset is within the symbol's range
+               if [[ $offset -gt $sym_size ]]; then
+                       [[ $print_warnings = 1 ]] &&
+                               echo "skipping $func address at $hexaddr due to size mismatch ($offset > $sym_size)"
+                       continue
+               fi
+
+               # separate multiple entries with a blank line
+               [[ $FIRST = 0 ]] && echo
+               FIRST=0
+
+               local hexsize=0x$(printf %x $sym_size)
+               echo "$func+$offset/$hexsize:"
+               addr2line -fpie $objfile $hexaddr | sed "s;$dir_prefix;;"
+               DONE=1
+
+       done < <(readelf -sW $objfile | awk -v f=$func '$8 == f {print}')
+}
+
+[[ $# -lt 2 ]] && usage
+
+objfile=$1
+[[ ! -f $objfile ]] && die "can't find objfile $objfile"
+shift
+
+DIR_PREFIX=supercalifragilisticexpialidocious
+find_dir_prefix $objfile
+
+FIRST=1
+while [[ $# -gt 0 ]]; do
+       func_addr=$1
+       shift
+
+       # print any matches found
+       DONE=0
+       __faddr2line $objfile $func_addr $DIR_PREFIX 0
+
+       # if no match was found, print warnings
+       if [[ $DONE = 0 ]]; then
+               __faddr2line $objfile $func_addr $DIR_PREFIX 1
+               warn "no match for $func_addr"
+       fi
+done
index e1c09e2f9be7ebc88406676b3b1e46ecdd729ac0..8ea9fd2b65736c42b055791ee88b9151a573a02d 100755 (executable)
@@ -332,7 +332,9 @@ if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
        (cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles"
 fi
 (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
-(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+if grep -q '^CONFIG_GCC_PLUGINS=y' $KCONFIG_CONFIG ; then
+       (cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+fi
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
index ed7eef24ef89946b9fc4ffb2098ff769d0531f93..b3775a9604eac9ff7d7e4b667003969408d3d491 100755 (executable)
@@ -206,7 +206,6 @@ regex_c=(
        '/\<DEFINE_PER_CPU_SHARED_ALIGNED([^,]*, *\([[:alnum:]_]*\)/\1/v/'
        '/\<DECLARE_WAIT_QUEUE_HEAD(\([[:alnum:]_]*\)/\1/v/'
        '/\<DECLARE_\(TASKLET\|WORK\|DELAYED_WORK\)(\([[:alnum:]_]*\)/\2/v/'
-       '/\<DEFINE_PCI_DEVICE_TABLE(\([[:alnum:]_]*\)/\1/v/'
        '/\(^\s\)OFFSET(\([[:alnum:]_]*\)/\2/v/'
        '/\(^\s\)DEFINE(\([[:alnum:]_]*\)/\2/v/'
        '/\<DEFINE_HASHTABLE(\([[:alnum:]_]*\)/\1/v/'
index da10d9b573a4a809f6159d82660a717f224903ec..118f4549404ef2ed0241e86faceb03f2d3646d79 100644 (file)
@@ -147,6 +147,17 @@ config HARDENED_USERCOPY
          or are part of the kernel text. This kills entire classes
          of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+       bool "Refuse to copy allocations that span multiple pages"
+       depends on HARDENED_USERCOPY
+       depends on EXPERT
+       help
+         When a multi-page allocation is done without __GFP_COMP,
+         hardened usercopy will reject attempts to copy it. There are,
+         however, several cases of this in the kernel that have not all
+         been removed. This config is intended to be used only while
+         trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
index 795437b1008200cd534f9a46ad0272f3dbc20ca4..b450a27588c8e679cca28fe4384850b031c93168 100644 (file)
@@ -1633,11 +1633,13 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
                return -EBUSY;
        }
        list_add_tail(&rmidi->list, &snd_rawmidi_devices);
+       mutex_unlock(&register_mutex);
        err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
                                  rmidi->card, rmidi->device,
                                  &snd_rawmidi_f_ops, rmidi, &rmidi->dev);
        if (err < 0) {
                rmidi_err(rmidi, "unable to register\n");
+               mutex_lock(&register_mutex);
                list_del(&rmidi->list);
                mutex_unlock(&register_mutex);
                return err;
@@ -1645,6 +1647,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
        if (rmidi->ops && rmidi->ops->dev_register &&
            (err = rmidi->ops->dev_register(rmidi)) < 0) {
                snd_unregister_device(&rmidi->dev);
+               mutex_lock(&register_mutex);
                list_del(&rmidi->list);
                mutex_unlock(&register_mutex);
                return err;
@@ -1677,7 +1680,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
                }
        }
 #endif /* CONFIG_SND_OSSEMUL */
-       mutex_unlock(&register_mutex);
        sprintf(name, "midi%d", rmidi->device);
        entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
        if (entry) {
index 9a6157ea6881703310586bea76f5e1c61fcfb2a5..fc144f43faa67c177dba17a40c246a2af3edef7c 100644 (file)
@@ -35,6 +35,9 @@
 #include <sound/initval.h>
 #include <linux/kmod.h>
 
+/* internal flags */
+#define SNDRV_TIMER_IFLG_PAUSED                0x00010000
+
 #if IS_ENABLED(CONFIG_SND_HRTIMER)
 #define DEFAULT_TIMER_LIMIT 4
 #else
@@ -294,8 +297,21 @@ int snd_timer_open(struct snd_timer_instance **ti,
                get_device(&timer->card->card_dev);
        timeri->slave_class = tid->dev_sclass;
        timeri->slave_id = slave_id;
-       if (list_empty(&timer->open_list_head) && timer->hw.open)
-               timer->hw.open(timer);
+
+       if (list_empty(&timer->open_list_head) && timer->hw.open) {
+               int err = timer->hw.open(timer);
+               if (err) {
+                       kfree(timeri->owner);
+                       kfree(timeri);
+
+                       if (timer->card)
+                               put_device(&timer->card->card_dev);
+                       module_put(timer->module);
+                       mutex_unlock(&register_mutex);
+                       return err;
+               }
+       }
+
        list_add_tail(&timeri->open_list, &timer->open_list_head);
        snd_timer_check_master(timeri);
        mutex_unlock(&register_mutex);
@@ -526,6 +542,10 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
                }
        }
        timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+       if (stop)
+               timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
+       else
+               timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
        snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
                          SNDRV_TIMER_EVENT_CONTINUE);
  unlock:
@@ -587,6 +607,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
  */
 int snd_timer_continue(struct snd_timer_instance *timeri)
 {
+       /* timer can continue only after pause */
+       if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+               return -EINVAL;
+
        if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
                return snd_timer_start_slave(timeri, false);
        else
@@ -813,6 +837,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
        timer->tmr_subdevice = tid->subdevice;
        if (id)
                strlcpy(timer->id, id, sizeof(timer->id));
+       timer->sticks = 1;
        INIT_LIST_HEAD(&timer->device_list);
        INIT_LIST_HEAD(&timer->open_list_head);
        INIT_LIST_HEAD(&timer->active_list_head);
@@ -1817,6 +1842,9 @@ static int snd_timer_user_continue(struct file *file)
        tu = file->private_data;
        if (!tu->timeri)
                return -EBADFD;
+       /* start the timer instead of continuing if it has not been paused before */
+       if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+               return snd_timer_user_start(file);
        tu->timeri->lost = 0;
        return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
 }
@@ -1958,6 +1986,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                tu->qused--;
                spin_unlock_irq(&tu->qlock);
 
+               mutex_lock(&tu->ioctl_lock);
                if (tu->tread) {
                        if (copy_to_user(buffer, &tu->tqueue[qhead],
                                         sizeof(struct snd_timer_tread)))
@@ -1967,6 +1996,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                                         sizeof(struct snd_timer_read)))
                                err = -EFAULT;
                }
+               mutex_unlock(&tu->ioctl_lock);
 
                spin_lock_irq(&tu->qlock);
                if (err < 0)
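
The timer change above introduces an internal PAUSED flag so that a continue request only resumes a timer that was actually paused; a continue on a never-started timer falls back to a start at the ioctl layer. A minimal userspace sketch of that state rule (simplified flag handling; the names are illustrative):

    #include <stdio.h>

    #define FLG_RUNNING 0x0001
    #define FLG_PAUSED  0x0002      /* internal: set by stop(pause), cleared by stop(stop) */

    struct timer_model { unsigned int flags; };

    static int timer_start(struct timer_model *t)
    {
            t->flags |= FLG_RUNNING;
            t->flags &= ~FLG_PAUSED;
            return 0;
    }

    static void timer_stop(struct timer_model *t, int pause)
    {
            t->flags &= ~FLG_RUNNING;
            if (pause)
                    t->flags |= FLG_PAUSED;
            else
                    t->flags &= ~FLG_PAUSED;
    }

    static int timer_continue(struct timer_model *t)
    {
            if (!(t->flags & FLG_PAUSED))
                    return -22;             /* -EINVAL: can only continue after a pause */
            return timer_start(t);
    }

    /* user_continue(): the ioctl layer turns an invalid continue into a start */
    static int user_continue(struct timer_model *t)
    {
            if (!(t->flags & FLG_PAUSED))
                    return timer_start(t);
            return timer_continue(t);
    }

    int main(void)
    {
            struct timer_model t = { 0 };

            printf("continue before start: %d\n", timer_continue(&t));  /* -22 */
            printf("user continue:         %d\n", user_continue(&t));   /* 0: started */
            timer_stop(&t, 1);                                           /* pause */
            printf("continue after pause:  %d\n", timer_continue(&t));  /* 0 */
            return 0;
    }
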
index 03ed35237e2bb89af027b18fddbc334f5e4620b7..d73c12b8753da276da380672c3c4771c87c33086 100644 (file)
@@ -108,7 +108,6 @@ struct snd_efw {
        u8 *resp_buf;
        u8 *pull_ptr;
        u8 *push_ptr;
-       unsigned int resp_queues;
 };
 
 int snd_efw_transaction_cmd(struct fw_unit *unit,
index 33df8655fe81f28872e01bbecd817ba7c37f8ede..2e1d9a23920c0c3ebf4b6edda5019b672dab4e82 100644 (file)
@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 {
        unsigned int length, till_end, type;
        struct snd_efw_transaction *t;
+       u8 *pull_ptr;
        long count = 0;
 
        if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
        buf += sizeof(type);
 
        /* write into buffer as many responses as possible */
-       while (efw->resp_queues > 0) {
-               t = (struct snd_efw_transaction *)(efw->pull_ptr);
+       spin_lock_irq(&efw->lock);
+
+       /*
+        * If another task reaches here while this task is accessing user
+        * space, it picks up the current position in the buffer and can read
+        * the same series of responses.
+        */
+       pull_ptr = efw->pull_ptr;
+
+       while (efw->push_ptr != pull_ptr) {
+               t = (struct snd_efw_transaction *)(pull_ptr);
                length = be32_to_cpu(t->length) * sizeof(__be32);
 
                /* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
                /* copy from ring buffer to user buffer */
                while (length > 0) {
                        till_end = snd_efw_resp_buf_size -
-                               (unsigned int)(efw->pull_ptr - efw->resp_buf);
+                               (unsigned int)(pull_ptr - efw->resp_buf);
                        till_end = min_t(unsigned int, length, till_end);
 
-                       if (copy_to_user(buf, efw->pull_ptr, till_end))
+                       spin_unlock_irq(&efw->lock);
+
+                       if (copy_to_user(buf, pull_ptr, till_end))
                                return -EFAULT;
 
-                       efw->pull_ptr += till_end;
-                       if (efw->pull_ptr >= efw->resp_buf +
-                                            snd_efw_resp_buf_size)
-                               efw->pull_ptr -= snd_efw_resp_buf_size;
+                       spin_lock_irq(&efw->lock);
+
+                       pull_ptr += till_end;
+                       if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
+                               pull_ptr -= snd_efw_resp_buf_size;
 
                        length -= till_end;
                        buf += till_end;
                        count += till_end;
                        remained -= till_end;
                }
-
-               efw->resp_queues--;
        }
 
+       /*
+        * All tasks can read from the buffer nearly simultaneously, but the
+        * final position differs per task, depending on the length of the
+        * buffer each one supplied. Here, for simplicity, the buffer position
+        * is set by the latest task. It's better for a listening application
+        * to allow only one thread to read from the buffer; otherwise, each
+        * task can read a different sequence of responses depending on its
+        * buffer length.
+        */
+       efw->pull_ptr = pull_ptr;
+
+       spin_unlock_irq(&efw->lock);
+
        return count;
 }
 
@@ -76,14 +99,17 @@ static long
 hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
                  loff_t *offset)
 {
-       union snd_firewire_event event;
+       union snd_firewire_event event = {
+               .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+       };
 
-       memset(&event, 0, sizeof(event));
+       spin_lock_irq(&efw->lock);
 
-       event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
        event.lock_status.status = (efw->dev_lock_count > 0);
        efw->dev_lock_changed = false;
 
+       spin_unlock_irq(&efw->lock);
+
        count = min_t(long, count, sizeof(event.lock_status));
 
        if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 {
        struct snd_efw *efw = hwdep->private_data;
        DEFINE_WAIT(wait);
+       bool dev_lock_changed;
+       bool queued;
 
        spin_lock_irq(&efw->lock);
 
-       while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
+       dev_lock_changed = efw->dev_lock_changed;
+       queued = efw->push_ptr != efw->pull_ptr;
+
+       while (!dev_lock_changed && !queued) {
                prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
                spin_unlock_irq(&efw->lock);
                schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
                if (signal_pending(current))
                        return -ERESTARTSYS;
                spin_lock_irq(&efw->lock);
+               dev_lock_changed = efw->dev_lock_changed;
+               queued = efw->push_ptr != efw->pull_ptr;
        }
 
-       if (efw->dev_lock_changed)
+       spin_unlock_irq(&efw->lock);
+
+       if (dev_lock_changed)
                count = hwdep_read_locked(efw, buf, count, offset);
-       else if (efw->resp_queues > 0)
+       else if (queued)
                count = hwdep_read_resp_buf(efw, buf, count, offset);
 
-       spin_unlock_irq(&efw->lock);
-
        return count;
 }
 
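
[Editor's note] The reworked hwdep_read() above snapshots its two wake-up conditions (dev_lock_changed and "responses queued") into local booleans while holding the lock, then releases the lock before dispatching to the helpers, which take the lock themselves. The userspace sketch below mirrors that shape with a pthread condition variable; struct dev and dev_read() are made-up names, not part of the patch.

    /*
     * Sketch only: wait for either of two events, snapshot both flags under
     * the lock, then act on the local copies with the lock released.
     */
    #include <pthread.h>
    #include <stdbool.h>

    struct dev {
            pthread_mutex_t lock;
            pthread_cond_t wait;
            bool lock_changed;
            bool queued;            /* push_ptr != pull_ptr in the driver */
    };

    static int dev_read(struct dev *d)
    {
            bool lock_changed, queued;

            pthread_mutex_lock(&d->lock);
            while (!d->lock_changed && !d->queued)
                    pthread_cond_wait(&d->wait, &d->lock);

            /* Snapshot, so the flags can be acted on without the lock. */
            lock_changed = d->lock_changed;
            queued = d->queued;
            pthread_mutex_unlock(&d->lock);

            if (lock_changed)
                    return 1;       /* report the lock-status event */
            else if (queued)
                    return 2;       /* drain the response buffer */
            return 0;
    }
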
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
        poll_wait(file, &efw->hwdep_wait, wait);
 
        spin_lock_irq(&efw->lock);
-       if (efw->dev_lock_changed || (efw->resp_queues > 0))
+       if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
                events = POLLIN | POLLRDNORM;
        else
                events = 0;
index 0639dcb13f7df76ab2199259716bd22ba2e308de..beb0a0ffee57c4cfbb72845dd16a6f27b1c165ba 100644 (file)
@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
        else
                consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
 
-       snd_iprintf(buffer, "%d %d/%d\n",
-                   efw->resp_queues, consumed, snd_efw_resp_buf_size);
+       snd_iprintf(buffer, "%d/%d\n",
+                   consumed, snd_efw_resp_buf_size);
 }
 
 static void
index f550808d178416cfd8c67cb9b78722687b2001e5..36a08ba51ec793ddd520ef5be9259b8fffaa0ed2 100644 (file)
@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
        size_t capacity, till_end;
        struct snd_efw_transaction *t;
 
-       spin_lock_irq(&efw->lock);
-
        t = (struct snd_efw_transaction *)data;
        length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
 
+       spin_lock_irq(&efw->lock);
+
        if (efw->push_ptr < efw->pull_ptr)
                capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
        else
@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
        }
 
        /* for hwdep */
-       efw->resp_queues++;
        wake_up(&efw->hwdep_wait);
 
        *rcode = RCODE_COMPLETE;
index 131267c3a04254fac3bd00767810abbb5fa9c7a3..106406cbfaa3926f294ad0831180472a1e50523d 100644 (file)
 
 #include "tascam.h"
 
-static long hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
-                             long count)
-{
-       union snd_firewire_event event;
-
-       memset(&event, 0, sizeof(event));
-
-       event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
-       event.lock_status.status = (tscm->dev_lock_count > 0);
-       tscm->dev_lock_changed = false;
-
-       count = min_t(long, count, sizeof(event.lock_status));
-
-       if (copy_to_user(buf, &event, count))
-               return -EFAULT;
-
-       return count;
-}
-
 static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
                       loff_t *offset)
 {
        struct snd_tscm *tscm = hwdep->private_data;
        DEFINE_WAIT(wait);
-       union snd_firewire_event event;
+       union snd_firewire_event event = {
+               .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+       };
 
        spin_lock_irq(&tscm->lock);
 
@@ -54,10 +37,16 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
                spin_lock_irq(&tscm->lock);
        }
 
-       memset(&event, 0, sizeof(event));
-       count = hwdep_read_locked(tscm, buf, count);
+       event.lock_status.status = (tscm->dev_lock_count > 0);
+       tscm->dev_lock_changed = false;
+
        spin_unlock_irq(&tscm->lock);
 
+       count = min_t(long, count, sizeof(event.lock_status));
+
+       if (copy_to_user(buf, &event, count))
+               return -EFAULT;
+
        return count;
 }
 
index 7100f05e651a0da2027a5ebb92b558af11662925..575cefd8cc4a6c59f1a0dc3b198fedcae84afed2 100644 (file)
@@ -4855,6 +4855,7 @@ enum {
        ALC221_FIXUP_HP_FRONT_MIC,
        ALC292_FIXUP_TPT460,
        ALC298_FIXUP_SPK_VOLUME,
+       ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5516,6 +5517,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
        },
+       [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x90170151 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5560,6 +5570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -5895,6 +5906,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60170},
                {0x14, 0x90170120},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60180},
+               {0x14, 0x90170120},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
index 6cf1f35974558053101e00351482574c2be6a988..152292e5ee2b2246ab649a65a920ce4b1f7e1669 100644 (file)
@@ -1141,6 +1141,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
        case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+       case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
        case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
        case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
        case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
index 0e8a1f7a292d13edba366a9ad60b91dd2b47747c..f39c0e9c0d5c92205384279fbc2996aaa2ba8619 100644 (file)
@@ -348,7 +348,7 @@ int main(int argc, char **argv)
        int notrigger = 0;
        char *dummy;
 
-       struct iio_channel_info *channels;
+       struct iio_channel_info *channels = NULL;
 
        register_cleanup();
 
@@ -456,7 +456,7 @@ int main(int argc, char **argv)
 
        if (notrigger) {
                printf("trigger-less mode selected\n");
-       } if (trig_num >= 0) {
+       } else if (trig_num >= 0) {
                char *trig_dev_name;
                ret = asprintf(&trig_dev_name, "%strigger%d", iio_dir, trig_num);
                if (ret < 0) {
index d9836c5eb694c48c30a6d07827ee36ed086b4309..11c8d9bc762ef0c4bde99dec4292e84ff00d3477 100644 (file)
@@ -3266,6 +3266,9 @@ int main(int argc, char *argv[])
                }
        }
 
+       /* If we exit via err(), this kills all the threads, restores tty. */
+       atexit(cleanup_devices);
+
        /* We always have a console device, and it's always device 1. */
        setup_console();
 
@@ -3369,9 +3372,6 @@ int main(int argc, char *argv[])
        /* Ensure that we terminate if a device-servicing child dies. */
        signal(SIGCHLD, kill_launcher);
 
-       /* If we exit via err(), this kills all the threads, restores tty. */
-       atexit(cleanup_devices);
-
        /* If requested, chroot to a directory */
        if (chroot_path) {
                if (chroot(chroot_path) != 0)